aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorJerome Glisse <jglisse@redhat.com>2009-09-07 20:10:24 -0400
committerDave Airlie <airlied@redhat.com>2009-09-07 21:15:52 -0400
commit3ce0a23d2d253185df24e22e3d5f89800bb3dd1c (patch)
tree4b4defdbe33aec7317101cce0f89c33083f8d17b /drivers/gpu
parent4ce001abafafe77e5dd943d1480fc9f87894e96f (diff)
drm/radeon/kms: add r600 KMS support
This adds the r600 KMS + CS support to the Linux kernel. The r600 TTM support is quite basic and still needs more work esp around using interrupts, but the polled fencing should work okay for now. Also currently TTM is using memcpy to do VRAM moves, the code is here to use a 3D blit to do this, but isn't fully debugged yet. Authors: Alex Deucher <alexdeucher@gmail.com> Dave Airlie <airlied@redhat.com> Jerome Glisse <jglisse@redhat.com> Signed-off-by: Jerome Glisse <jglisse@redhat.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/avivod.h (renamed from drivers/gpu/drm/radeon/r300.h)42
-rw-r--r--drivers/gpu/drm/radeon/r100.c135
-rw-r--r--drivers/gpu/drm/radeon/r100d.h76
-rw-r--r--drivers/gpu/drm/radeon/r300.c3
-rw-r--r--drivers/gpu/drm/radeon/r300d.h76
-rw-r--r--drivers/gpu/drm/radeon/r600.c1714
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c855
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c777
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c1072
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h14
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c252
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c658
-rw-r--r--drivers/gpu/drm/radeon/r600d.h661
-rw-r--r--drivers/gpu/drm/radeon/radeon.h120
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h156
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c340
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h141
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c119
-rw-r--r--drivers/gpu/drm/radeon/radeon_share.h77
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c7
-rw-r--r--drivers/gpu/drm/radeon/rs400.c2
-rw-r--r--drivers/gpu/drm/radeon/rs780.c102
-rw-r--r--drivers/gpu/drm/radeon/rv515.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h (renamed from drivers/gpu/drm/radeon/rv515r.h)56
-rw-r--r--drivers/gpu/drm/radeon/rv770.c987
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h341
33 files changed, 8289 insertions, 606 deletions
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c5db0c4fe788..14c3fe692723 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -46,8 +46,9 @@ radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
46 radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \ 46 radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
47 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ 47 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
48 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 48 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
49 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o \ 49 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
50 radeon_test.o r200.o radeon_legacy_tv.o 50 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
51 r600_blit_kms.o
51 52
52radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 53radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
53 54
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 8e31e992ec53..a7edd0f2ac37 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -389,6 +389,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
389 pll_flags |= RADEON_PLL_USE_REF_DIV; 389 pll_flags |= RADEON_PLL_USE_REF_DIV;
390 } 390 }
391 radeon_encoder = to_radeon_encoder(encoder); 391 radeon_encoder = to_radeon_encoder(encoder);
392 break;
392 } 393 }
393 } 394 }
394 395
diff --git a/drivers/gpu/drm/radeon/r300.h b/drivers/gpu/drm/radeon/avivod.h
index 8486b4da9d69..d4e6e6e4a938 100644
--- a/drivers/gpu/drm/radeon/r300.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -1,7 +1,6 @@
1/* 1/*
2 * Copyright 2008 Advanced Micro Devices, Inc. 2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc. 3 * Copyright 2009 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 * 4 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 5 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"), 6 * copy of this software and associated documentation files (the "Software"),
@@ -25,12 +24,37 @@
25 * Alex Deucher 24 * Alex Deucher
26 * Jerome Glisse 25 * Jerome Glisse
27 */ 26 */
28#ifndef R300_H 27#ifndef AVIVOD_H
29#define R300_H 28#define AVIVOD_H
30 29
31struct r300_asic { 30
32 const unsigned *reg_safe_bm; 31#define D1CRTC_CONTROL 0x6080
33 unsigned reg_safe_bm_size; 32#define CRTC_EN (1 << 0)
34}; 33#define D1CRTC_UPDATE_LOCK 0x60E8
34#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
35#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
36
37#define D2CRTC_CONTROL 0x6880
38#define D2CRTC_UPDATE_LOCK 0x68E8
39#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
40#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
41
42#define D1VGA_CONTROL 0x0330
43#define DVGA_CONTROL_MODE_ENABLE (1 << 0)
44#define DVGA_CONTROL_TIMING_SELECT (1 << 8)
45#define DVGA_CONTROL_SYNC_POLARITY_SELECT (1 << 9)
46#define DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1 << 10)
47#define DVGA_CONTROL_OVERSCAN_COLOR_EN (1 << 16)
48#define DVGA_CONTROL_ROTATE (1 << 24)
49#define D2VGA_CONTROL 0x0338
50
51#define VGA_HDP_CONTROL 0x328
52#define VGA_MEM_PAGE_SELECT_EN (1 << 0)
53#define VGA_MEMORY_DISABLE (1 << 4)
54#define VGA_RBBM_LOCK_DISABLE (1 << 8)
55#define VGA_SOFT_RESET (1 << 16)
56#define VGA_MEMORY_BASE_ADDRESS 0x0310
57#define VGA_RENDER_CONTROL 0x0300
58#define VGA_VSTATUS_CNTL_MASK 0x00030000
35 59
36#endif 60#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ee3ab62417e2..5708c07ce733 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -31,6 +31,8 @@
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32#include "radeon_reg.h" 32#include "radeon_reg.h"
33#include "radeon.h" 33#include "radeon.h"
34#include "r100d.h"
35
34#include <linux/firmware.h> 36#include <linux/firmware.h>
35#include <linux/platform_device.h> 37#include <linux/platform_device.h>
36 38
@@ -391,9 +393,9 @@ int r100_wb_init(struct radeon_device *rdev)
391 return r; 393 return r;
392 } 394 }
393 } 395 }
394 WREG32(0x774, rdev->wb.gpu_addr); 396 WREG32(RADEON_SCRATCH_ADDR, rdev->wb.gpu_addr);
395 WREG32(0x70C, rdev->wb.gpu_addr + 1024); 397 WREG32(RADEON_CP_RB_RPTR_ADDR, rdev->wb.gpu_addr + 1024);
396 WREG32(0x770, 0xff); 398 WREG32(RADEON_SCRATCH_UMSK, 0xff);
397 return 0; 399 return 0;
398} 400}
399 401
@@ -559,18 +561,18 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
559 fw_name = FIRMWARE_R520; 561 fw_name = FIRMWARE_R520;
560 } 562 }
561 563
562 err = request_firmware(&rdev->fw, fw_name, &pdev->dev); 564 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
563 platform_device_unregister(pdev); 565 platform_device_unregister(pdev);
564 if (err) { 566 if (err) {
565 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", 567 printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
566 fw_name); 568 fw_name);
567 } else if (rdev->fw->size % 8) { 569 } else if (rdev->me_fw->size % 8) {
568 printk(KERN_ERR 570 printk(KERN_ERR
569 "radeon_cp: Bogus length %zu in firmware \"%s\"\n", 571 "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
570 rdev->fw->size, fw_name); 572 rdev->me_fw->size, fw_name);
571 err = -EINVAL; 573 err = -EINVAL;
572 release_firmware(rdev->fw); 574 release_firmware(rdev->me_fw);
573 rdev->fw = NULL; 575 rdev->me_fw = NULL;
574 } 576 }
575 return err; 577 return err;
576} 578}
@@ -584,9 +586,9 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
584 "programming pipes. Bad things might happen.\n"); 586 "programming pipes. Bad things might happen.\n");
585 } 587 }
586 588
587 if (rdev->fw) { 589 if (rdev->me_fw) {
588 size = rdev->fw->size / 4; 590 size = rdev->me_fw->size / 4;
589 fw_data = (const __be32 *)&rdev->fw->data[0]; 591 fw_data = (const __be32 *)&rdev->me_fw->data[0];
590 WREG32(RADEON_CP_ME_RAM_ADDR, 0); 592 WREG32(RADEON_CP_ME_RAM_ADDR, 0);
591 for (i = 0; i < size; i += 2) { 593 for (i = 0; i < size; i += 2) {
592 WREG32(RADEON_CP_ME_RAM_DATAH, 594 WREG32(RADEON_CP_ME_RAM_DATAH,
@@ -632,7 +634,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
632 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); 634 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
633 } 635 }
634 636
635 if (!rdev->fw) { 637 if (!rdev->me_fw) {
636 r = r100_cp_init_microcode(rdev); 638 r = r100_cp_init_microcode(rdev);
637 if (r) { 639 if (r) {
638 DRM_ERROR("Failed to load firmware!\n"); 640 DRM_ERROR("Failed to load firmware!\n");
@@ -765,6 +767,12 @@ int r100_cp_reset(struct radeon_device *rdev)
765 return -1; 767 return -1;
766} 768}
767 769
770void r100_cp_commit(struct radeon_device *rdev)
771{
772 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
773 (void)RREG32(RADEON_CP_RB_WPTR);
774}
775
768 776
769/* 777/*
770 * CS functions 778 * CS functions
@@ -2954,3 +2962,106 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
2954 } 2962 }
2955 } 2963 }
2956} 2964}
2965
2966int r100_ring_test(struct radeon_device *rdev)
2967{
2968 uint32_t scratch;
2969 uint32_t tmp = 0;
2970 unsigned i;
2971 int r;
2972
2973 r = radeon_scratch_get(rdev, &scratch);
2974 if (r) {
2975 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2976 return r;
2977 }
2978 WREG32(scratch, 0xCAFEDEAD);
2979 r = radeon_ring_lock(rdev, 2);
2980 if (r) {
2981 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2982 radeon_scratch_free(rdev, scratch);
2983 return r;
2984 }
2985 radeon_ring_write(rdev, PACKET0(scratch, 0));
2986 radeon_ring_write(rdev, 0xDEADBEEF);
2987 radeon_ring_unlock_commit(rdev);
2988 for (i = 0; i < rdev->usec_timeout; i++) {
2989 tmp = RREG32(scratch);
2990 if (tmp == 0xDEADBEEF) {
2991 break;
2992 }
2993 DRM_UDELAY(1);
2994 }
2995 if (i < rdev->usec_timeout) {
2996 DRM_INFO("ring test succeeded in %d usecs\n", i);
2997 } else {
2998 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2999 scratch, tmp);
3000 r = -EINVAL;
3001 }
3002 radeon_scratch_free(rdev, scratch);
3003 return r;
3004}
3005
3006void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3007{
3008 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
3009 radeon_ring_write(rdev, ib->gpu_addr);
3010 radeon_ring_write(rdev, ib->length_dw);
3011}
3012
3013int r100_ib_test(struct radeon_device *rdev)
3014{
3015 struct radeon_ib *ib;
3016 uint32_t scratch;
3017 uint32_t tmp = 0;
3018 unsigned i;
3019 int r;
3020
3021 r = radeon_scratch_get(rdev, &scratch);
3022 if (r) {
3023 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3024 return r;
3025 }
3026 WREG32(scratch, 0xCAFEDEAD);
3027 r = radeon_ib_get(rdev, &ib);
3028 if (r) {
3029 return r;
3030 }
3031 ib->ptr[0] = PACKET0(scratch, 0);
3032 ib->ptr[1] = 0xDEADBEEF;
3033 ib->ptr[2] = PACKET2(0);
3034 ib->ptr[3] = PACKET2(0);
3035 ib->ptr[4] = PACKET2(0);
3036 ib->ptr[5] = PACKET2(0);
3037 ib->ptr[6] = PACKET2(0);
3038 ib->ptr[7] = PACKET2(0);
3039 ib->length_dw = 8;
3040 r = radeon_ib_schedule(rdev, ib);
3041 if (r) {
3042 radeon_scratch_free(rdev, scratch);
3043 radeon_ib_free(rdev, &ib);
3044 return r;
3045 }
3046 r = radeon_fence_wait(ib->fence, false);
3047 if (r) {
3048 return r;
3049 }
3050 for (i = 0; i < rdev->usec_timeout; i++) {
3051 tmp = RREG32(scratch);
3052 if (tmp == 0xDEADBEEF) {
3053 break;
3054 }
3055 DRM_UDELAY(1);
3056 }
3057 if (i < rdev->usec_timeout) {
3058 DRM_INFO("ib test succeeded in %u usecs\n", i);
3059 } else {
3060 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3061 scratch, tmp);
3062 r = -EINVAL;
3063 }
3064 radeon_scratch_free(rdev, scratch);
3065 radeon_ib_free(rdev, &ib);
3066 return r;
3067}
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
new file mode 100644
index 000000000000..6da7d92c321c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R100D_H__
29#define __R100D_H__
30
31#define CP_PACKET0 0x00000000
32#define PACKET0_BASE_INDEX_SHIFT 0
33#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
34#define PACKET0_COUNT_SHIFT 16
35#define PACKET0_COUNT_MASK (0x3fff << 16)
36#define CP_PACKET1 0x40000000
37#define CP_PACKET2 0x80000000
38#define PACKET2_PAD_SHIFT 0
39#define PACKET2_PAD_MASK (0x3fffffff << 0)
40#define CP_PACKET3 0xC0000000
41#define PACKET3_IT_OPCODE_SHIFT 8
42#define PACKET3_IT_OPCODE_MASK (0xff << 8)
43#define PACKET3_COUNT_SHIFT 16
44#define PACKET3_COUNT_MASK (0x3fff << 16)
45/* PACKET3 op code */
46#define PACKET3_NOP 0x10
47#define PACKET3_3D_DRAW_VBUF 0x28
48#define PACKET3_3D_DRAW_IMMD 0x29
49#define PACKET3_3D_DRAW_INDX 0x2A
50#define PACKET3_3D_LOAD_VBPNTR 0x2F
51#define PACKET3_INDX_BUFFER 0x33
52#define PACKET3_3D_DRAW_VBUF_2 0x34
53#define PACKET3_3D_DRAW_IMMD_2 0x35
54#define PACKET3_3D_DRAW_INDX_2 0x36
55#define PACKET3_BITBLT_MULTI 0x9B
56
57#define PACKET0(reg, n) (CP_PACKET0 | \
58 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
59 REG_SET(PACKET0_COUNT, (n)))
60#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
61#define PACKET3(op, n) (CP_PACKET3 | \
62 REG_SET(PACKET3_IT_OPCODE, (op)) | \
63 REG_SET(PACKET3_COUNT, (n)))
64
65#define PACKET_TYPE0 0
66#define PACKET_TYPE1 1
67#define PACKET_TYPE2 2
68#define PACKET_TYPE3 3
69
70#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
71#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
72#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
73#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
74#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
75
76#endif
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 33a2c557eac4..a5f82f7beed6 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -33,6 +33,7 @@
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "radeon_share.h" 34#include "radeon_share.h"
35#include "r100_track.h" 35#include "r100_track.h"
36#include "r300d.h"
36 37
37#include "r300_reg_safe.h" 38#include "r300_reg_safe.h"
38 39
@@ -127,7 +128,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
127 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 128 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
128 rv370_pcie_gart_tlb_flush(rdev); 129 rv370_pcie_gart_tlb_flush(rdev);
129 DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n", 130 DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
130 rdev->mc.gtt_size >> 20, table_addr); 131 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
131 rdev->gart.ready = true; 132 rdev->gart.ready = true;
132 return 0; 133 return 0;
133} 134}
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
new file mode 100644
index 000000000000..63ec076f2cd4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R300D_H__
29#define __R300D_H__
30
31#define CP_PACKET0 0x00000000
32#define PACKET0_BASE_INDEX_SHIFT 0
33#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
34#define PACKET0_COUNT_SHIFT 16
35#define PACKET0_COUNT_MASK (0x3fff << 16)
36#define CP_PACKET1 0x40000000
37#define CP_PACKET2 0x80000000
38#define PACKET2_PAD_SHIFT 0
39#define PACKET2_PAD_MASK (0x3fffffff << 0)
40#define CP_PACKET3 0xC0000000
41#define PACKET3_IT_OPCODE_SHIFT 8
42#define PACKET3_IT_OPCODE_MASK (0xff << 8)
43#define PACKET3_COUNT_SHIFT 16
44#define PACKET3_COUNT_MASK (0x3fff << 16)
45/* PACKET3 op code */
46#define PACKET3_NOP 0x10
47#define PACKET3_3D_DRAW_VBUF 0x28
48#define PACKET3_3D_DRAW_IMMD 0x29
49#define PACKET3_3D_DRAW_INDX 0x2A
50#define PACKET3_3D_LOAD_VBPNTR 0x2F
51#define PACKET3_INDX_BUFFER 0x33
52#define PACKET3_3D_DRAW_VBUF_2 0x34
53#define PACKET3_3D_DRAW_IMMD_2 0x35
54#define PACKET3_3D_DRAW_INDX_2 0x36
55#define PACKET3_BITBLT_MULTI 0x9B
56
57#define PACKET0(reg, n) (CP_PACKET0 | \
58 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
59 REG_SET(PACKET0_COUNT, (n)))
60#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
61#define PACKET3(op, n) (CP_PACKET3 | \
62 REG_SET(PACKET3_IT_OPCODE, (op)) | \
63 REG_SET(PACKET3_COUNT, (n)))
64
65#define PACKET_TYPE0 0
66#define PACKET_TYPE1 1
67#define PACKET_TYPE2 2
68#define PACKET_TYPE3 3
69
70#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
71#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
72#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
73#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
74#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
75
76#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 538cd907df69..d8fcef44a69f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -25,12 +25,46 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/seq_file.h>
29#include <linux/firmware.h>
30#include <linux/platform_device.h>
28#include "drmP.h" 31#include "drmP.h"
29#include "radeon_reg.h" 32#include "radeon_drm.h"
30#include "radeon.h" 33#include "radeon.h"
34#include "radeon_mode.h"
35#include "radeon_share.h"
36#include "r600d.h"
37#include "avivod.h"
38#include "atom.h"
31 39
32/* r600,rv610,rv630,rv620,rv635,rv670 depends on : */ 40#define PFP_UCODE_SIZE 576
33void rs600_mc_disable_clients(struct radeon_device *rdev); 41#define PM4_UCODE_SIZE 1792
42#define R700_PFP_UCODE_SIZE 848
43#define R700_PM4_UCODE_SIZE 1360
44
45/* Firmware Names */
46MODULE_FIRMWARE("radeon/R600_pfp.bin");
47MODULE_FIRMWARE("radeon/R600_me.bin");
48MODULE_FIRMWARE("radeon/RV610_pfp.bin");
49MODULE_FIRMWARE("radeon/RV610_me.bin");
50MODULE_FIRMWARE("radeon/RV630_pfp.bin");
51MODULE_FIRMWARE("radeon/RV630_me.bin");
52MODULE_FIRMWARE("radeon/RV620_pfp.bin");
53MODULE_FIRMWARE("radeon/RV620_me.bin");
54MODULE_FIRMWARE("radeon/RV635_pfp.bin");
55MODULE_FIRMWARE("radeon/RV635_me.bin");
56MODULE_FIRMWARE("radeon/RV670_pfp.bin");
57MODULE_FIRMWARE("radeon/RV670_me.bin");
58MODULE_FIRMWARE("radeon/RS780_pfp.bin");
59MODULE_FIRMWARE("radeon/RS780_me.bin");
60MODULE_FIRMWARE("radeon/RV770_pfp.bin");
61MODULE_FIRMWARE("radeon/RV770_me.bin");
62MODULE_FIRMWARE("radeon/RV730_pfp.bin");
63MODULE_FIRMWARE("radeon/RV730_me.bin");
64MODULE_FIRMWARE("radeon/RV710_pfp.bin");
65MODULE_FIRMWARE("radeon/RV710_me.bin");
66
67int r600_debugfs_mc_info_init(struct radeon_device *rdev);
34 68
35/* This file gathers functions specific to: 69/* This file gathers functions specific to:
36 * r600,rv610,rv630,rv620,rv635,rv670 70 * r600,rv610,rv630,rv620,rv635,rv670
@@ -39,87 +73,270 @@ void rs600_mc_disable_clients(struct radeon_device *rdev);
39 */ 73 */
40int r600_mc_wait_for_idle(struct radeon_device *rdev); 74int r600_mc_wait_for_idle(struct radeon_device *rdev);
41void r600_gpu_init(struct radeon_device *rdev); 75void r600_gpu_init(struct radeon_device *rdev);
76void r600_fini(struct radeon_device *rdev);
42 77
43 78
44/* 79/*
45 * MC 80 * R600 PCIE GART
46 */ 81 */
47int r600_mc_init(struct radeon_device *rdev) 82int r600_gart_clear_page(struct radeon_device *rdev, int i)
48{ 83{
49 uint32_t tmp; 84 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
85 u64 pte;
50 86
51 r600_gpu_init(rdev); 87 if (i < 0 || i > rdev->gart.num_gpu_pages)
88 return -EINVAL;
89 pte = 0;
90 writeq(pte, ((void __iomem *)ptr) + (i * 8));
91 return 0;
92}
52 93
53 /* setup the gart before changing location so we can ask to 94void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
54 * discard unmapped mc request 95{
55 */ 96 unsigned i;
56 /* FIXME: disable out of gart access */ 97 u32 tmp;
57 tmp = rdev->mc.gtt_location / 4096; 98
58 tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp); 99 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
59 WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp); 100 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
60 tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096; 101 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
61 tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp); 102 for (i = 0; i < rdev->usec_timeout; i++) {
62 WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp); 103 /* read MC_STATUS */
63 104 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
64 rs600_mc_disable_clients(rdev); 105 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
65 if (r600_mc_wait_for_idle(rdev)) { 106 if (tmp == 2) {
66 printk(KERN_WARNING "Failed to wait MC idle while " 107 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
67 "programming pipes. Bad things might happen.\n"); 108 return;
109 }
110 if (tmp) {
111 return;
112 }
113 udelay(1);
114 }
115}
116
117int r600_pcie_gart_enable(struct radeon_device *rdev)
118{
119 u32 tmp;
120 int r, i;
121
122 /* Initialize common gart structure */
123 r = radeon_gart_init(rdev);
124 if (r) {
125 return r;
126 }
127 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
128 r = radeon_gart_table_vram_alloc(rdev);
129 if (r) {
130 return r;
68 } 131 }
132 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
133 r600_gart_clear_page(rdev, i);
134 /* Setup L2 cache */
135 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
136 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
137 EFFECTIVE_L2_QUEUE_SIZE(7));
138 WREG32(VM_L2_CNTL2, 0);
139 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
140 /* Setup TLB control */
141 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
142 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
143 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
144 ENABLE_WAIT_L2_QUERY;
145 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
146 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
147 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
148 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
149 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
150 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
151 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
152 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
153 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
154 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
155 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
156 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
157 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
158 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
159 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
160 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
161 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
162 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
163 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
164 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
165 (u32)(rdev->dummy_page.addr >> 12));
166 for (i = 1; i < 7; i++)
167 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
69 168
70 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; 169 r600_pcie_gart_tlb_flush(rdev);
71 tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); 170 rdev->gart.ready = true;
72 tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R600_MC_VM_FB_LOCATION, tmp);
74 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
75 tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
76 WREG32(R600_MC_VM_AGP_TOP, tmp);
77 tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
78 WREG32(R600_MC_VM_AGP_BOT, tmp);
79 return 0; 171 return 0;
80} 172}
81 173
82void r600_mc_fini(struct radeon_device *rdev) 174void r600_pcie_gart_disable(struct radeon_device *rdev)
83{ 175{
84 /* FIXME: implement */ 176 u32 tmp;
85} 177 int i;
86 178
179 /* Clear ptes*/
180 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
181 r600_gart_clear_page(rdev, i);
182 r600_pcie_gart_tlb_flush(rdev);
183 /* Disable all tables */
184 for (i = 0; i < 7; i++)
185 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
87 186
88/* 187 /* Disable L2 cache */
89 * Global GPU functions 188 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
90 */ 189 EFFECTIVE_L2_QUEUE_SIZE(7));
91void r600_errata(struct radeon_device *rdev) 190 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
92{ 191 /* Setup L1 TLB control */
93 rdev->pll_errata = 0; 192 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
193 ENABLE_WAIT_L2_QUERY;
194 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
195 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
196 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
197 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
198 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
199 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
200 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
201 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
202 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
203 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
204 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
205 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
206 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
207 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
94} 208}
95 209
96int r600_mc_wait_for_idle(struct radeon_device *rdev) 210int r600_mc_wait_for_idle(struct radeon_device *rdev)
97{ 211{
98 /* FIXME: implement */ 212 unsigned i;
99 return 0; 213 u32 tmp;
214
215 for (i = 0; i < rdev->usec_timeout; i++) {
216 /* read MC_STATUS */
217 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
218 if (!tmp)
219 return 0;
220 udelay(1);
221 }
222 return -1;
100} 223}
101 224
102void r600_gpu_init(struct radeon_device *rdev) 225static void r600_mc_resume(struct radeon_device *rdev)
103{ 226{
104 /* FIXME: implement */ 227 u32 d1vga_control, d2vga_control;
105} 228 u32 vga_render_control, vga_hdp_control;
229 u32 d1crtc_control, d2crtc_control;
230 u32 new_d1grph_primary, new_d1grph_secondary;
231 u32 new_d2grph_primary, new_d2grph_secondary;
232 u64 old_vram_start;
233 u32 tmp;
234 int i, j;
106 235
236 /* Initialize HDP */
237 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
238 WREG32((0x2c14 + j), 0x00000000);
239 WREG32((0x2c18 + j), 0x00000000);
240 WREG32((0x2c1c + j), 0x00000000);
241 WREG32((0x2c20 + j), 0x00000000);
242 WREG32((0x2c24 + j), 0x00000000);
243 }
244 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
107 245
108/* 246 d1vga_control = RREG32(D1VGA_CONTROL);
109 * VRAM info 247 d2vga_control = RREG32(D2VGA_CONTROL);
110 */ 248 vga_render_control = RREG32(VGA_RENDER_CONTROL);
111void r600_vram_get_type(struct radeon_device *rdev) 249 vga_hdp_control = RREG32(VGA_HDP_CONTROL);
250 d1crtc_control = RREG32(D1CRTC_CONTROL);
251 d2crtc_control = RREG32(D2CRTC_CONTROL);
252 old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
253 new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
254 new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
255 new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
256 new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
257 new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
258 new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
259 new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
260 new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
261
262 /* Stop all video */
263 WREG32(D1VGA_CONTROL, 0);
264 WREG32(D2VGA_CONTROL, 0);
265 WREG32(VGA_RENDER_CONTROL, 0);
266 WREG32(D1CRTC_UPDATE_LOCK, 1);
267 WREG32(D2CRTC_UPDATE_LOCK, 1);
268 WREG32(D1CRTC_CONTROL, 0);
269 WREG32(D2CRTC_CONTROL, 0);
270 WREG32(D1CRTC_UPDATE_LOCK, 0);
271 WREG32(D2CRTC_UPDATE_LOCK, 0);
272
273 mdelay(1);
274 if (r600_mc_wait_for_idle(rdev)) {
275 printk(KERN_WARNING "[drm] MC not idle !\n");
276 }
277
278 /* Lockout access through VGA aperture*/
279 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
280
281 /* Update configuration */
282 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
283 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
284 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
285 tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
286 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
287 WREG32(MC_VM_FB_LOCATION, tmp);
288 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
289 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
290 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
291 if (rdev->flags & RADEON_IS_AGP) {
292 WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
293 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
294 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
295 } else {
296 WREG32(MC_VM_AGP_BASE, 0);
297 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
298 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
299 }
300 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
301 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
302 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
303 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
304 WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
305
306 /* Unlock host access */
307 WREG32(VGA_HDP_CONTROL, vga_hdp_control);
308
309 mdelay(1);
310 if (r600_mc_wait_for_idle(rdev)) {
311 printk(KERN_WARNING "[drm] MC not idle !\n");
312 }
313
314 /* Restore video state */
315 WREG32(D1CRTC_UPDATE_LOCK, 1);
316 WREG32(D2CRTC_UPDATE_LOCK, 1);
317 WREG32(D1CRTC_CONTROL, d1crtc_control);
318 WREG32(D2CRTC_CONTROL, d2crtc_control);
319 WREG32(D1CRTC_UPDATE_LOCK, 0);
320 WREG32(D2CRTC_UPDATE_LOCK, 0);
321 WREG32(D1VGA_CONTROL, d1vga_control);
322 WREG32(D2VGA_CONTROL, d2vga_control);
323 WREG32(VGA_RENDER_CONTROL, vga_render_control);
324}
325
326int r600_mc_init(struct radeon_device *rdev)
112{ 327{
113 uint32_t tmp; 328 fixed20_12 a;
329 u32 tmp;
114 int chansize; 330 int chansize;
331 int r;
115 332
333 /* Get VRAM informations */
116 rdev->mc.vram_width = 128; 334 rdev->mc.vram_width = 128;
117 rdev->mc.vram_is_ddr = true; 335 rdev->mc.vram_is_ddr = true;
118 336 tmp = RREG32(RAMCFG);
119 tmp = RREG32(R600_RAMCFG); 337 if (tmp & CHANSIZE_OVERRIDE) {
120 if (tmp & R600_CHANSIZE_OVERRIDE) {
121 chansize = 16; 338 chansize = 16;
122 } else if (tmp & R600_CHANSIZE) { 339 } else if (tmp & CHANSIZE_MASK) {
123 chansize = 64; 340 chansize = 64;
124 } else { 341 } else {
125 chansize = 32; 342 chansize = 32;
@@ -135,36 +352,1391 @@ void r600_vram_get_type(struct radeon_device *rdev)
135 (rdev->family == CHIP_RV635)) { 352 (rdev->family == CHIP_RV635)) {
136 rdev->mc.vram_width = 2 * chansize; 353 rdev->mc.vram_width = 2 * chansize;
137 } 354 }
355 /* Could aper size report 0 ? */
356 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
357 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
358 /* Setup GPU memory space */
359 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
360 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
361 if (rdev->flags & RADEON_IS_AGP) {
362 r = radeon_agp_init(rdev);
363 if (r)
364 return r;
365 /* gtt_size is setup by radeon_agp_init */
366 rdev->mc.gtt_location = rdev->mc.agp_base;
367 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
368 /* Try to put vram before or after AGP because we
369 * we want SYSTEM_APERTURE to cover both VRAM and
370 * AGP so that GPU can catch out of VRAM/AGP access
371 */
372 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
373 /* Enought place before */
374 rdev->mc.vram_location = rdev->mc.gtt_location -
375 rdev->mc.mc_vram_size;
376 } else if (tmp > rdev->mc.mc_vram_size) {
377 /* Enought place after */
378 rdev->mc.vram_location = rdev->mc.gtt_location +
379 rdev->mc.gtt_size;
380 } else {
381 /* Try to setup VRAM then AGP might not
382 * not work on some card
383 */
384 rdev->mc.vram_location = 0x00000000UL;
385 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
386 }
387 } else {
388 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
389 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
390 0xFFFF) << 24;
391 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
392 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
393 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
394 /* Enough place after vram */
395 rdev->mc.gtt_location = tmp;
396 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
397 /* Enough place before vram */
398 rdev->mc.gtt_location = 0;
399 } else {
400 /* Not enough place after or before shrink
401 * gart size
402 */
403 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
404 rdev->mc.gtt_location = 0;
405 rdev->mc.gtt_size = rdev->mc.vram_location;
406 } else {
407 rdev->mc.gtt_location = tmp;
408 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
409 }
410 }
411 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
412 } else {
413 rdev->mc.vram_location = 0x00000000UL;
414 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
415 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
416 }
417 }
418 rdev->mc.vram_start = rdev->mc.vram_location;
419 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
420 rdev->mc.gtt_start = rdev->mc.gtt_location;
421 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
422 /* FIXME: we should enforce default clock in case GPU is not in
423 * default setup
424 */
425 a.full = rfixed_const(100);
426 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
427 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
428 return 0;
138} 429}
139 430
/* Unconditionally soft-reset the GPU.  We do not check whether the GPU
 * really needs a reset; it is up to the caller to determine that.  A
 * helper to check might be added later.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	/* Busy bits of every rendering block in GRBM_STATUS (0x8010) */
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
		S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
		S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
		S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
		S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
		S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
		S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
		S_008010_GUI_ACTIVE(1);
	/* Per-instance busy bits in GRBM_STATUS2 (0x8014) */
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
		S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
		S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
		S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
		S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
		S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
		S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
		S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;

	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1));
		/* read back to post the write before delaying */
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1));
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary; SRBM_STATUS is re-read per
	 * check so each test sees current state */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	/* NOTE(review): RLC_BUSY is checked a second time here (also checked
	 * first above) — looks redundant; confirm intent */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	return 0;
}
516
/* GPU reset entry point for the r6xx ASIC vtable — currently a plain
 * soft reset; always returns 0. */
int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
521
522static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
523 u32 num_backends,
524 u32 backend_disable_mask)
525{
526 u32 backend_map = 0;
527 u32 enabled_backends_mask;
528 u32 enabled_backends_count;
529 u32 cur_pipe;
530 u32 swizzle_pipe[R6XX_MAX_PIPES];
531 u32 cur_backend;
532 u32 i;
533
534 if (num_tile_pipes > R6XX_MAX_PIPES)
535 num_tile_pipes = R6XX_MAX_PIPES;
536 if (num_tile_pipes < 1)
537 num_tile_pipes = 1;
538 if (num_backends > R6XX_MAX_BACKENDS)
539 num_backends = R6XX_MAX_BACKENDS;
540 if (num_backends < 1)
541 num_backends = 1;
542
543 enabled_backends_mask = 0;
544 enabled_backends_count = 0;
545 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
546 if (((backend_disable_mask >> i) & 1) == 0) {
547 enabled_backends_mask |= (1 << i);
548 ++enabled_backends_count;
549 }
550 if (enabled_backends_count == num_backends)
551 break;
552 }
553
554 if (enabled_backends_count == 0) {
555 enabled_backends_mask = 1;
556 enabled_backends_count = 1;
557 }
558
559 if (enabled_backends_count != num_backends)
560 num_backends = enabled_backends_count;
561
562 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
563 switch (num_tile_pipes) {
564 case 1:
565 swizzle_pipe[0] = 0;
566 break;
567 case 2:
568 swizzle_pipe[0] = 0;
569 swizzle_pipe[1] = 1;
570 break;
571 case 3:
572 swizzle_pipe[0] = 0;
573 swizzle_pipe[1] = 1;
574 swizzle_pipe[2] = 2;
575 break;
576 case 4:
577 swizzle_pipe[0] = 0;
578 swizzle_pipe[1] = 1;
579 swizzle_pipe[2] = 2;
580 swizzle_pipe[3] = 3;
581 break;
582 case 5:
583 swizzle_pipe[0] = 0;
584 swizzle_pipe[1] = 1;
585 swizzle_pipe[2] = 2;
586 swizzle_pipe[3] = 3;
587 swizzle_pipe[4] = 4;
588 break;
589 case 6:
590 swizzle_pipe[0] = 0;
591 swizzle_pipe[1] = 2;
592 swizzle_pipe[2] = 4;
593 swizzle_pipe[3] = 5;
594 swizzle_pipe[4] = 1;
595 swizzle_pipe[5] = 3;
596 break;
597 case 7:
598 swizzle_pipe[0] = 0;
599 swizzle_pipe[1] = 2;
600 swizzle_pipe[2] = 4;
601 swizzle_pipe[3] = 6;
602 swizzle_pipe[4] = 1;
603 swizzle_pipe[5] = 3;
604 swizzle_pipe[6] = 5;
605 break;
606 case 8:
607 swizzle_pipe[0] = 0;
608 swizzle_pipe[1] = 2;
609 swizzle_pipe[2] = 4;
610 swizzle_pipe[3] = 6;
611 swizzle_pipe[4] = 1;
612 swizzle_pipe[5] = 3;
613 swizzle_pipe[6] = 5;
614 swizzle_pipe[7] = 7;
615 break;
616 }
617
618 cur_backend = 0;
619 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
620 while (((1 << cur_backend) & enabled_backends_mask) == 0)
621 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
622
623 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
624
625 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
626 }
627
628 return backend_map;
629}
630
/* Population count: number of set bits in a 32-bit pipe/SIMD mask. */
int r600_count_pipe_bits(uint32_t val)
{
	int nbits = 0;

	while (val != 0) {
		nbits += (int)(val & 1u);
		val >>= 1;
	}
	return nbits;
}
150 641
/* Program the static 3D-engine state for r6xx ASICs: per-family limits,
 * tiling configuration, pipe/backend enables, and default SQ/SPI/CB/DB
 * register values.  The 2D/3D drivers are expected to adjust most of
 * these defaults at runtime.
 */
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	/* Per-family hardware limits used below for tiling and SQ setup */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP registers (32 entries, 0x18 bytes apart) */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	/* Row tiling / sample split track the RAM row count, capped at 3 */
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	/* Map tile pipes onto the enabled render backends */
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Disable the backends beyond max_backends */
	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes: mark pipes/SIMDs beyond the family limits inactive */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	/* Size VGT dealloc/reuse from the number of active pipes */
	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	/* SQ FIFO sizing; small parts get fixed values, R600/RV630 only
	 * lower the DONE FIFO high-water mark */
	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	/* Parts without a vertex cache invalidate the texture cache only */
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	/* GS-per-ES budget scales with pipe count plus a per-family bump */
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RV620:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	/* NOTE(review): CB_COLOR7_FRAG was already cleared above — this
	 * second write looks redundant; confirm intent */
	WREG32(CB_COLOR7_FRAG, 0);

	/* Texture cache L2 sizing per family */
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RS780:
	case CHIP_RV620:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	/* NOTE(review): read-modify-write with no modification — write-back
	 * may be intentional to post the register; confirm */
	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	/* NOTE(review): PA_SC_MULTI_CHIP_CNTL already zeroed above */
	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
1021
1022
151/* 1023/*
152 * Indirect registers accessor 1024 * Indirect registers accessor
153 */ 1025 */
/* Read a PCIE-port indirect register: program the index register, read
 * it back to post the write, then read the data register. */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}
1035
/* Write a PCIE-port indirect register; each write is read back to post
 * it before the next access. */
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
1043
1044
1045/*
1046 * CP & Ring
1047 */
/* Halt the CP micro engine (stops command-stream processing). */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
1052
/* Fetch the CP PFP and ME microcode images from userspace via the
 * firmware loader and validate their sizes.  On any failure both
 * firmware pointers are released and NULLed.  Returns 0 on success or a
 * negative error code. */
int r600_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	/* request_firmware needs a device to hang the request on */
	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600: chip_name = "R600"; break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default: BUG();
	}

	/* Expected image sizes in bytes.  r6xx ME entries are 3 dwords
	 * (12 bytes) each — matches the *3 dword loop in
	 * r600_cp_load_microcode. */
	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	/* Dummy device no longer needed once the blobs are cached */
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		/* release_firmware(NULL) is a no-op, safe on partial load */
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}
1133
/* Upload the previously-fetched PFP and ME microcode into the CP's
 * internal RAM.  Requires r600_cp_init_microcode to have succeeded.
 * The firmware blobs are stored big-endian, hence be32_to_cpup. */
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	/* ME ucode: 3 dwords per PM4 entry on r6xx */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	/* Reset the auto-incremented addresses back to the start */
	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
1171
/* Send the ME_INITIALIZE packet on the ring and un-halt the CP micro
 * engine.  Returns 0 on success or the ring-lock error. */
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	/* r6xx and r7xx use different init flags and context counts */
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	/* Clear all halt bits to start the micro engine */
	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
1200
/* Reset the CP, program the ring buffer (size, pointers, base), start
 * the micro engine and verify it with a ring test.  Returns 0 on
 * success or a negative error code. */
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size (log2 of the size in dwords) */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
#ifdef __BIG_ENDIAN
	WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
	       (drm_order(4096/8) << 8) | rb_bufsz);
#else
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
#endif
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers; WR_ENA must
	 * be set temporarily to allow forcing RPTR to 0 */
	tmp = RREG32(CP_RB_CNTL);
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	/* NOTE(review): RPTR writeback address is pointed at the ring's own
	 * gpu_addr here; r600_wb_init later redirects it into the WB buffer
	 * — confirm this interim value is intended */
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
1251
/* Publish the software write pointer to the hardware; the read-back
 * posts the write so the CP sees it immediately. */
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
1257
/* Record the ring size (rounded up to a power of two) and alignment
 * mask in the device state; no hardware access. */
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size to a power of two */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
1268
1269
1270/*
1271 * GPU scratch registers helpers function.
1272 */
1273void r600_scratch_init(struct radeon_device *rdev)
1274{
1275 int i;
1276
1277 rdev->scratch.num_reg = 7;
1278 for (i = 0; i < rdev->scratch.num_reg; i++) {
1279 rdev->scratch.free[i] = true;
1280 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
1281 }
1282}
1283
/* Smoke-test the CP ring: write a magic value to a scratch register via
 * a SET_CONFIG_REG packet and poll (up to usec_timeout) until the value
 * lands.  Returns 0 on success, -EINVAL on timeout, or the underlying
 * error from scratch/ring acquisition. */
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	/* Seed with a sentinel so a stale 0xDEADBEEF can't pass the test */
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
1323
1324/*
1325 * Writeback
1326 */
1327int r600_wb_init(struct radeon_device *rdev)
1328{
1329 int r;
1330
1331 if (rdev->wb.wb_obj == NULL) {
1332 r = radeon_object_create(rdev, NULL, 4096,
1333 true,
1334 RADEON_GEM_DOMAIN_GTT,
1335 false, &rdev->wb.wb_obj);
1336 if (r) {
1337 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
1338 return r;
1339 }
1340 r = radeon_object_pin(rdev->wb.wb_obj,
1341 RADEON_GEM_DOMAIN_GTT,
1342 &rdev->wb.gpu_addr);
1343 if (r) {
1344 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
1345 return r;
1346 }
1347 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1348 if (r) {
1349 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
1350 return r;
1351 }
1352 }
1353 WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
1354 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
1355 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
1356 WREG32(SCRATCH_UMSK, 0xff);
1357 return 0;
1358}
1359
/* Tear down the writeback buffer: unmap, unpin, and drop the reference.
 * Safe to call when no buffer was ever created. */
void r600_wb_fini(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
1370
1371
1372/*
1373 * CS
1374 */
/* Emit a fence into the ring: the CP writes fence->seq into the fence
 * driver's scratch register once all preceding ring work has executed.
 * NOTE(review): no interrupt packet is emitted here — r600 IRQs are
 * still FIXME stubs in this file, so fences are polled. */
1375void r600_fence_ring_emit(struct radeon_device *rdev,
1376			  struct radeon_fence *fence)
1377{
1378	/* Emit fence sequence (IRQ emission not implemented yet) */
1379	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1380	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1381	radeon_ring_write(rdev, fence->seq);
1382}
1383
/* DMA-engine copy — not implemented yet; currently a no-op that
 * reports success.  Use r600_copy_blit() for real copies. */
1384int r600_copy_dma(struct radeon_device *rdev,
1385		  uint64_t src_offset,
1386		  uint64_t dst_offset,
1387		  unsigned num_pages,
1388		  struct radeon_fence *fence)
1389{
1390	/* FIXME: implement */
1391	return 0;
1392}
1393
/* Copy num_pages pages (4096 bytes each) from src_offset to dst_offset
 * using the 3D blitter, signalling fence when done.
 * NOTE(review): the return values of the blit helpers are not checked
 * — confirm they cannot fail, or propagate errors here. */
1394int r600_copy_blit(struct radeon_device *rdev,
1395		   uint64_t src_offset, uint64_t dst_offset,
1396		   unsigned num_pages, struct radeon_fence *fence)
1397{
1398	r600_blit_prepare_copy(rdev, num_pages * 4096);
1399	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
1400	r600_blit_done_copy(rdev, fence);
1401	return 0;
1402}
1403
/* Interrupt processing — not implemented yet; returns success. */
1404int r600_irq_process(struct radeon_device *rdev)
1405{
1406	/* FIXME: implement */
1407	return 0;
1408}
1409
/* Interrupt enable/mask programming — not implemented yet; returns
 * success so callers treat IRQs as unavailable rather than failing. */
1410int r600_irq_set(struct radeon_device *rdev)
1411{
1412	/* FIXME: implement */
1413	return 0;
1414}
1415
/* Surface (tiling) register setup — not implemented yet; no-op. */
1416int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1417			 uint32_t tiling_flags, uint32_t pitch,
1418			 uint32_t offset, uint32_t obj_size)
1419{
1420	/* FIXME: implement */
1421	return 0;
1422}
1423
/* Surface (tiling) register clear — not implemented yet; no-op. */
1424void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
1425{
1426	/* FIXME: implement */
1427}
1428
1429
1430bool r600_card_posted(struct radeon_device *rdev)
1431{
1432 uint32_t reg;
1433
1434 /* first check CRTCs */
1435 reg = RREG32(D1CRTC_CONTROL) |
1436 RREG32(D2CRTC_CONTROL);
1437 if (reg & CRTC_EN)
1438 return true;
1439
1440 /* then check MEM_SIZE, in case the crtcs are off */
1441 if (RREG32(CONFIG_MEMSIZE))
1442 return true;
1443
1444 return false;
1445}
1446
/* Bring the GPU back up (also used as the tail of r600_init).
 * The sequence is strictly ordered: reset engines, restore memory
 * controller, enable the PCIE GART, program core 3D state, allocate/
 * map the ring, load CP microcode, start the CP, then enable
 * writeback.  Returns 0 on success or the first failing step's error;
 * no rollback is done here — caller tears down via r600_fini(). */
1447int r600_resume(struct radeon_device *rdev)
1448{
1449	int r;
1450
1451	r600_gpu_reset(rdev);
1452	r600_mc_resume(rdev);
1453	r = r600_pcie_gart_enable(rdev);
1454	if (r)
1455		return r;
1456	r600_gpu_init(rdev);
1457	r = radeon_ring_init(rdev, rdev->cp.ring_size);
1458	if (r)
1459		return r;
1460	r = r600_cp_load_microcode(rdev);
1461	if (r)
1462		return r;
1463	r = r600_cp_resume(rdev);
1464	if (r)
1465		return r;
1466	r = r600_wb_init(rdev);
1467	if (r)
1468		return r;
1469	return 0;
1470}
1471
/* Quiesce the GPU for suspend.  Currently only stops the CP; the
 * FIXME below notes the ring should be drained first. */
1472int r600_suspend(struct radeon_device *rdev)
1473{
1474	/* FIXME: we should wait for ring to be empty */
1475	r600_cp_stop(rdev);
1476	return 0;
1477}
1478
1479/* Plan is to move initialization into this function and use
1480 * helper functions so that radeon_device_init does pretty much
1481 * nothing more than call asic-specific functions. This
1482 * should also allow us to remove a bunch of callback functions
1483 * like vram_info.
1484 */
1485int r600_init(struct radeon_device *rdev)
155{ 1486{
156 uint32_t r; 1487 int r;
157 1488
158 WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff)); 1489 rdev->new_init_path = true;
159 (void)RREG32(R600_PCIE_PORT_INDEX); 1490 r = radeon_dummy_page_init(rdev);
160 r = RREG32(R600_PCIE_PORT_DATA); 1491 if (r)
1492 return r;
1493 if (r600_debugfs_mc_info_init(rdev)) {
1494 DRM_ERROR("Failed to register debugfs file for mc !\n");
1495 }
1496 /* This don't do much */
1497 r = radeon_gem_init(rdev);
1498 if (r)
1499 return r;
1500 /* Read BIOS */
1501 if (!radeon_get_bios(rdev)) {
1502 if (ASIC_IS_AVIVO(rdev))
1503 return -EINVAL;
1504 }
1505 /* Must be an ATOMBIOS */
1506 if (!rdev->is_atom_bios)
1507 return -EINVAL;
1508 r = radeon_atombios_init(rdev);
1509 if (r)
1510 return r;
1511 /* Post card if necessary */
1512 if (!r600_card_posted(rdev) && rdev->bios) {
1513 DRM_INFO("GPU not posted. posting now...\n");
1514 atom_asic_init(rdev->mode_info.atom_context);
1515 }
1516 /* Initialize scratch registers */
1517 r600_scratch_init(rdev);
1518 /* Initialize surface registers */
1519 radeon_surface_init(rdev);
1520 r = radeon_clocks_init(rdev);
1521 if (r)
1522 return r;
1523 /* Fence driver */
1524 r = radeon_fence_driver_init(rdev);
1525 if (r)
1526 return r;
1527 r = r600_mc_init(rdev);
1528 if (r) {
1529 if (rdev->flags & RADEON_IS_AGP) {
1530 /* Retry with disabling AGP */
1531 r600_fini(rdev);
1532 rdev->flags &= ~RADEON_IS_AGP;
1533 return r600_init(rdev);
1534 }
1535 return r;
1536 }
1537 /* Memory manager */
1538 r = radeon_object_init(rdev);
1539 if (r)
1540 return r;
1541 rdev->cp.ring_obj = NULL;
1542 r600_ring_init(rdev, 1024 * 1024);
1543
1544 if (!rdev->me_fw || !rdev->pfp_fw) {
1545 r = r600_cp_init_microcode(rdev);
1546 if (r) {
1547 DRM_ERROR("Failed to load firmware!\n");
1548 return r;
1549 }
1550 }
1551
1552 r = r600_resume(rdev);
1553 if (r) {
1554 if (rdev->flags & RADEON_IS_AGP) {
1555 /* Retry with disabling AGP */
1556 r600_fini(rdev);
1557 rdev->flags &= ~RADEON_IS_AGP;
1558 return r600_init(rdev);
1559 }
1560 return r;
1561 }
1562 r = radeon_ib_pool_init(rdev);
1563 if (r) {
1564 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
1565 return r;
1566 }
1567 r = r600_blit_init(rdev);
1568 if (r) {
1569 DRM_ERROR("radeon: failled blitter (%d).\n", r);
1570 return r;
1571 }
1572 r = radeon_ib_test(rdev);
1573 if (r) {
1574 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1575 return r;
1576 }
1577 return 0;
1578}
1579
/* Tear down everything r600_init() set up, in reverse dependency
 * order: stop the hardware first, then free GPU objects, GART, GEM,
 * fences, clocks, AGP, the memory manager, BIOS data structures and
 * finally the dummy page.  Also used by r600_init() itself before an
 * AGP-disabled retry. */
1580void r600_fini(struct radeon_device *rdev)
1581{
1582	/* Suspend operations */
1583	r600_suspend(rdev);
1584
1585	r600_blit_fini(rdev);
1586	radeon_ring_fini(rdev);
1587	r600_pcie_gart_disable(rdev);
1588	radeon_gart_table_vram_free(rdev);
1589	radeon_gart_fini(rdev);
1590	radeon_gem_fini(rdev);
1591	radeon_fence_driver_fini(rdev);
1592	radeon_clocks_fini(rdev);
1593#if __OS_HAS_AGP
1594	if (rdev->flags & RADEON_IS_AGP)
1595		radeon_agp_fini(rdev);
1596#endif
1597	radeon_object_fini(rdev);
1598	if (rdev->is_atom_bios)
1599		radeon_atombios_fini(rdev);
1600	else
1601		radeon_combios_fini(rdev);
1602	kfree(rdev->bios);
1603	rdev->bios = NULL;
1604	radeon_dummy_page_fini(rdev);
1605}
1606
1607
1608/*
1609 * CS stuff
1610 */
/* Schedule an indirect buffer from the ring: emits an INDIRECT_BUFFER
 * packet carrying the IB's 64-bit GPU address (low dword 4-byte
 * aligned, high byte separate) and its length in dwords. */
1611void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1612{
1613	/* FIXME: implement */
1614	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1615	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
1616	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1617	radeon_ring_write(rdev, ib->length_dw);
1618}
1619
1620int r600_ib_test(struct radeon_device *rdev)
1621{
1622 struct radeon_ib *ib;
1623 uint32_t scratch;
1624 uint32_t tmp = 0;
1625 unsigned i;
1626 int r;
1627
1628 r = radeon_scratch_get(rdev, &scratch);
1629 if (r) {
1630 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
1631 return r;
1632 }
1633 WREG32(scratch, 0xCAFEDEAD);
1634 r = radeon_ib_get(rdev, &ib);
1635 if (r) {
1636 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
1637 return r;
1638 }
1639 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
1640 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
1641 ib->ptr[2] = 0xDEADBEEF;
1642 ib->ptr[3] = PACKET2(0);
1643 ib->ptr[4] = PACKET2(0);
1644 ib->ptr[5] = PACKET2(0);
1645 ib->ptr[6] = PACKET2(0);
1646 ib->ptr[7] = PACKET2(0);
1647 ib->ptr[8] = PACKET2(0);
1648 ib->ptr[9] = PACKET2(0);
1649 ib->ptr[10] = PACKET2(0);
1650 ib->ptr[11] = PACKET2(0);
1651 ib->ptr[12] = PACKET2(0);
1652 ib->ptr[13] = PACKET2(0);
1653 ib->ptr[14] = PACKET2(0);
1654 ib->ptr[15] = PACKET2(0);
1655 ib->length_dw = 16;
1656 r = radeon_ib_schedule(rdev, ib);
1657 if (r) {
1658 radeon_scratch_free(rdev, scratch);
1659 radeon_ib_free(rdev, &ib);
1660 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
1661 return r;
1662 }
1663 r = radeon_fence_wait(ib->fence, false);
1664 if (r) {
1665 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
1666 return r;
1667 }
1668 for (i = 0; i < rdev->usec_timeout; i++) {
1669 tmp = RREG32(scratch);
1670 if (tmp == 0xDEADBEEF)
1671 break;
1672 DRM_UDELAY(1);
1673 }
1674 if (i < rdev->usec_timeout) {
1675 DRM_INFO("ib test succeeded in %u usecs\n", i);
1676 } else {
1677 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
1678 scratch, tmp);
1679 r = -EINVAL;
1680 }
1681 radeon_scratch_free(rdev, scratch);
1682 radeon_ib_free(rdev, &ib);
161 return r; 1683 return r;
162} 1684}
163 1685
164void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 1686
1687
1688
1689/*
1690 * Debugfs info
1691 */
1692#if defined(CONFIG_DEBUG_FS)
1693
1694static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
165{ 1695{
166 WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff)); 1696 struct drm_info_node *node = (struct drm_info_node *) m->private;
167 (void)RREG32(R600_PCIE_PORT_INDEX); 1697 struct drm_device *dev = node->minor->dev;
168 WREG32(R600_PCIE_PORT_DATA, (v)); 1698 struct radeon_device *rdev = dev->dev_private;
169 (void)RREG32(R600_PCIE_PORT_DATA); 1699 uint32_t rdp, wdp;
1700 unsigned count, i, j;
1701
1702 radeon_ring_free_size(rdev);
1703 rdp = RREG32(CP_RB_RPTR);
1704 wdp = RREG32(CP_RB_WPTR);
1705 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1706 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1707 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
1708 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
1709 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1710 seq_printf(m, "%u dwords in ring\n", count);
1711 for (j = 0; j <= count; j++) {
1712 i = (rdp + j) & rdev->cp.ptr_mask;
1713 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
1714 }
1715 return 0;
1716}
1717
/* debugfs: dump the SRBM and VM L2 status registers for MC debugging. */
1718static int r600_debugfs_mc_info(struct seq_file *m, void *data)
1719{
1720	struct drm_info_node *node = (struct drm_info_node *) m->private;
1721	struct drm_device *dev = node->minor->dev;
1722	struct radeon_device *rdev = dev->dev_private;
1723
1724	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
1725	DREG32_SYS(m, rdev, VM_L2_STATUS);
1726	return 0;
1727}
1728
/* debugfs entries exposed for r600: MC status and CP ring dump. */
1729static struct drm_info_list r600_mc_info_list[] = {
1730	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
1731	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
1732};
1733#endif

/* Register the r600 debugfs files; no-op when CONFIG_DEBUG_FS is off. */
1735int r600_debugfs_mc_info_init(struct radeon_device *rdev)
1736{
1737#if defined(CONFIG_DEBUG_FS)
1738	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
1739#else
1740	return 0;
1741#endif
1742}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
new file mode 100644
index 000000000000..c51402e92493
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -0,0 +1,855 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
26#include "drmP.h"
27#include "drm.h"
28#include "radeon_drm.h"
29#include "radeon_drv.h"
30
31#include "r600_blit_shaders.h"
32
33#define DI_PT_RECTLIST 0x11
34#define DI_INDEX_SIZE_16_BIT 0x0
35#define DI_SRC_SEL_AUTO_INDEX 0x2
36
37#define FMT_8 0x1
38#define FMT_5_6_5 0x8
39#define FMT_8_8_8_8 0x1a
40#define COLOR_8 0x1
41#define COLOR_5_6_5 0x8
42#define COLOR_8_8_8_8 0x1a
43
/* Program color buffer 0 as the blit's render target: base address,
 * size (pitch/slice), view, info (format + one flag bit), tile, frag
 * and mask registers.  Height is rounded up to a multiple of 8 with a
 * minimum of 8.  Packet order matters — do not reorder. */
44static inline void
45set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
46{
47	u32 cb_color_info;
48	int pitch, slice;
49	RING_LOCALS;
50	DRM_DEBUG("\n");
51
52	h = (h + 7) & ~7;
53	if (h < 8)
54		h = 8;
55
56	cb_color_info = ((format << 2) | (1 << 27));
	/* pitch in units of 8 pixels, slice in units of 64 pixels (minus 1) */
57	pitch = (w / 8) - 1;
58	slice = ((w * h) / 64) - 1;
59
	/* RV6xx parts (between R600 and RV770) additionally need a
	 * SURFACE_BASE_UPDATE after changing the CB base. */
60	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
61	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
62		BEGIN_RING(21 + 2);
63		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
64		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
65		OUT_RING(gpu_addr >> 8);
66		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
67		OUT_RING(2 << 0);
68	} else {
69		BEGIN_RING(21);
70		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
71		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
72		OUT_RING(gpu_addr >> 8);
73	}
74
75	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
76	OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
77	OUT_RING((pitch << 0) | (slice << 10));
78
79	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
80	OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
81	OUT_RING(0);
82
83	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
84	OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
85	OUT_RING(cb_color_info);
86
87	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
88	OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
89	OUT_RING(0);
90
91	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
92	OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
93	OUT_RING(0);
94
95	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
96	OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
97	OUT_RING(0);
98
99	ADVANCE_RING();
100}
101
102static inline void
103cp_set_surface_sync(drm_radeon_private_t *dev_priv,
104 u32 sync_type, u32 size, u64 mc_addr)
105{
106 u32 cp_coher_size;
107 RING_LOCALS;
108 DRM_DEBUG("\n");
109
110 if (size == 0xffffffff)
111 cp_coher_size = 0xffffffff;
112 else
113 cp_coher_size = ((size + 255) >> 8);
114
115 BEGIN_RING(5);
116 OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
117 OUT_RING(sync_type);
118 OUT_RING(cp_coher_size);
119 OUT_RING((mc_addr >> 8));
120 OUT_RING(10); /* poll interval */
121 ADVANCE_RING();
122}
123
/* Copy the blit vertex and pixel shader programs into the current
 * vertex buffer (VS at offset 0, PS at offset 256), then program the
 * SQ shader start/resource registers to point at them and flush the
 * shader cache over the 512 bytes written. */
124static inline void
125set_shaders(struct drm_device *dev)
126{
127	drm_radeon_private_t *dev_priv = dev->dev_private;
128	u64 gpu_addr;
129	int shader_size, i;
130	u32 *vs, *ps;
131	uint32_t sq_pgm_resources;
132	RING_LOCALS;
133	DRM_DEBUG("\n");
134
135	/* load shaders */
136	vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
137	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
138
139	shader_size = r6xx_vs_size;
140	for (i = 0; i < shader_size; i++)
141		vs[i] = r6xx_vs[i];
142	shader_size = r6xx_ps_size;
143	for (i = 0; i < shader_size; i++)
144		ps[i] = r6xx_ps[i];
145
146	dev_priv->blit_vb->used = 512;
147
148	gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;
149
	/* setup shader regs: one GPR each */
151	sq_pgm_resources = (1 << 0);
152
153	BEGIN_RING(9 + 12);
154	/* VS */
155	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
156	OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
157	OUT_RING(gpu_addr >> 8);
158
159	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
160	OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
161	OUT_RING(sq_pgm_resources);
162
163	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
164	OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
165	OUT_RING(0);
166
167	/* PS */
168	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
169	OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
170	OUT_RING((gpu_addr + 256) >> 8);
171
172	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
173	OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
174	OUT_RING(sq_pgm_resources | (1 << 28));
175
176	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
177	OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
178	OUT_RING(2);
179
180	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
181	OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
182	OUT_RING(0);
183	ADVANCE_RING();
184
185	cp_set_surface_sync(dev_priv,
186			    R600_SH_ACTION_ENA, 512, gpu_addr);
187}
188
/* Bind a 48-byte vertex buffer at gpu_addr as SQ vertex resource
 * 0x460 (stride 16 bytes), then flush the appropriate vertex fetch
 * cache — RV6xx IGP/low-end parts fetch vertices through the TC. */
189static inline void
190set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
191{
192	uint32_t sq_vtx_constant_word2;
193	RING_LOCALS;
194	DRM_DEBUG("\n");
195
	/* word2: address bits 39:32 plus a 16-byte stride */
196	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
197
198	BEGIN_RING(9);
199	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
200	OUT_RING(0x460);
201	OUT_RING(gpu_addr & 0xffffffff);
202	OUT_RING(48 - 1);
203	OUT_RING(sq_vtx_constant_word2);
204	OUT_RING(1 << 0);
205	OUT_RING(0);
206	OUT_RING(0);
207	OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
208	ADVANCE_RING();
209
210	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
211	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
212	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
213	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
214	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
215		cp_set_surface_sync(dev_priv,
216				    R600_TC_ACTION_ENA, 48, gpu_addr);
217	else
218		cp_set_surface_sync(dev_priv,
219				    R600_VC_ACTION_ENA, 48, gpu_addr);
220}
221
/* Bind a 2D texture (the blit source) as SQ texture resource 0:
 * format/size in words 0-1, base address in words 2-3 (256-byte
 * aligned), point-sampled swizzle setup in word 4. */
222static inline void
223set_tex_resource(drm_radeon_private_t *dev_priv,
224		 int format, int w, int h, int pitch, u64 gpu_addr)
225{
226	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
227	RING_LOCALS;
228	DRM_DEBUG("\n");
229
230	if (h < 1)
231		h = 1;
232
	/* word0: 2D dim; pitch in 8-texel units, width minus one */
233	sq_tex_resource_word0 = (1 << 0);
234	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
235				  ((w - 1) << 19));
236
237	sq_tex_resource_word1 = (format << 26);
238	sq_tex_resource_word1 |= ((h - 1) << 0);
239
	/* word4: channel select X/Y/Z/W straight through */
240	sq_tex_resource_word4 = ((1 << 14) |
241				 (0 << 16) |
242				 (1 << 19) |
243				 (2 << 22) |
244				 (3 << 25));
245
246	BEGIN_RING(9);
247	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
248	OUT_RING(0);
249	OUT_RING(sq_tex_resource_word0);
250	OUT_RING(sq_tex_resource_word1);
251	OUT_RING(gpu_addr >> 8);
252	OUT_RING(gpu_addr >> 8);
253	OUT_RING(sq_tex_resource_word4);
254	OUT_RING(0);
255	OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
256	ADVANCE_RING();
257
258}
259
/* Program the screen, generic and window scissor rectangles to the
 * given (x1,y1)-(x2,y2) box.  TL values for the generic/window
 * scissors set bit 31 (window offset disable). */
260static inline void
261set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
262{
263	RING_LOCALS;
264	DRM_DEBUG("\n");
265
266	BEGIN_RING(12);
267	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
268	OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
269	OUT_RING((x1 << 0) | (y1 << 16));
270	OUT_RING((x2 << 0) | (y2 << 16));
271
272	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
273	OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
274	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
275	OUT_RING((x2 << 0) | (y2 << 16));
276
277	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
278	OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
279	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
280	OUT_RING((x2 << 0) | (y2 << 16));
281	ADVANCE_RING();
282}
283
/* Kick the draw: rectlist primitive, 16-bit auto-generated indices,
 * one instance, three vertices.  COMMIT_RING() makes the whole blit
 * batch visible to the CP. */
284static inline void
285draw_auto(drm_radeon_private_t *dev_priv)
286{
287	RING_LOCALS;
288	DRM_DEBUG("\n");
289
290	BEGIN_RING(10);
291	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
292	OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
293	OUT_RING(DI_PT_RECTLIST);
294
295	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
296	OUT_RING(DI_INDEX_SIZE_16_BIT);
297
298	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
299	OUT_RING(1);
300
301	OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
302	OUT_RING(3);
303	OUT_RING(DI_SRC_SEL_AUTO_INDEX);
304
305	ADVANCE_RING();
306	COMMIT_RING();
307}
308
/* Emit the baseline 3D state blob for blits plus the SQ (shader queue)
 * configuration.  The per-ASIC GPR/thread/stack budgets below mirror
 * each chip's shader core sizing; r7xx parts get a different canned
 * state blob than r6xx.
 * NOTE(review): default_state_dw multiplies *_default_size by 4 —
 * presumably those sizes are in 4-dword units; confirm against
 * r600_blit_shaders.c. */
309static inline void
310set_default_state(drm_radeon_private_t *dev_priv)
311{
312	int default_state_dw, i;
313	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
314	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
315	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
316	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
317	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
318	RING_LOCALS;
319
	/* Per-ASIC shader resource budgets. */
320	switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
321	case CHIP_R600:
322		num_ps_gprs = 192;
323		num_vs_gprs = 56;
324		num_temp_gprs = 4;
325		num_gs_gprs = 0;
326		num_es_gprs = 0;
327		num_ps_threads = 136;
328		num_vs_threads = 48;
329		num_gs_threads = 4;
330		num_es_threads = 4;
331		num_ps_stack_entries = 128;
332		num_vs_stack_entries = 128;
333		num_gs_stack_entries = 0;
334		num_es_stack_entries = 0;
335		break;
336	case CHIP_RV630:
337	case CHIP_RV635:
338		num_ps_gprs = 84;
339		num_vs_gprs = 36;
340		num_temp_gprs = 4;
341		num_gs_gprs = 0;
342		num_es_gprs = 0;
343		num_ps_threads = 144;
344		num_vs_threads = 40;
345		num_gs_threads = 4;
346		num_es_threads = 4;
347		num_ps_stack_entries = 40;
348		num_vs_stack_entries = 40;
349		num_gs_stack_entries = 32;
350		num_es_stack_entries = 16;
351		break;
352	case CHIP_RV610:
353	case CHIP_RV620:
354	case CHIP_RS780:
355	case CHIP_RS880:
356	default:
357		num_ps_gprs = 84;
358		num_vs_gprs = 36;
359		num_temp_gprs = 4;
360		num_gs_gprs = 0;
361		num_es_gprs = 0;
362		num_ps_threads = 136;
363		num_vs_threads = 48;
364		num_gs_threads = 4;
365		num_es_threads = 4;
366		num_ps_stack_entries = 40;
367		num_vs_stack_entries = 40;
368		num_gs_stack_entries = 32;
369		num_es_stack_entries = 16;
370		break;
371	case CHIP_RV670:
372		num_ps_gprs = 144;
373		num_vs_gprs = 40;
374		num_temp_gprs = 4;
375		num_gs_gprs = 0;
376		num_es_gprs = 0;
377		num_ps_threads = 136;
378		num_vs_threads = 48;
379		num_gs_threads = 4;
380		num_es_threads = 4;
381		num_ps_stack_entries = 40;
382		num_vs_stack_entries = 40;
383		num_gs_stack_entries = 32;
384		num_es_stack_entries = 16;
385		break;
386	case CHIP_RV770:
387		num_ps_gprs = 192;
388		num_vs_gprs = 56;
389		num_temp_gprs = 4;
390		num_gs_gprs = 0;
391		num_es_gprs = 0;
392		num_ps_threads = 188;
393		num_vs_threads = 60;
394		num_gs_threads = 0;
395		num_es_threads = 0;
396		num_ps_stack_entries = 256;
397		num_vs_stack_entries = 256;
398		num_gs_stack_entries = 0;
399		num_es_stack_entries = 0;
400		break;
401	case CHIP_RV730:
402	case CHIP_RV740:
403		num_ps_gprs = 84;
404		num_vs_gprs = 36;
405		num_temp_gprs = 4;
406		num_gs_gprs = 0;
407		num_es_gprs = 0;
408		num_ps_threads = 188;
409		num_vs_threads = 60;
410		num_gs_threads = 0;
411		num_es_threads = 0;
412		num_ps_stack_entries = 128;
413		num_vs_stack_entries = 128;
414		num_gs_stack_entries = 0;
415		num_es_stack_entries = 0;
416		break;
417	case CHIP_RV710:
418		num_ps_gprs = 192;
419		num_vs_gprs = 56;
420		num_temp_gprs = 4;
421		num_gs_gprs = 0;
422		num_es_gprs = 0;
423		num_ps_threads = 144;
424		num_vs_threads = 48;
425		num_gs_threads = 0;
426		num_es_threads = 0;
427		num_ps_stack_entries = 128;
428		num_vs_stack_entries = 128;
429		num_gs_stack_entries = 0;
430		num_es_stack_entries = 0;
431		break;
432	}
433
	/* Low-end RV6xx/RV710 parts have no vertex cache. */
434	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
435	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
436	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
437	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
438	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
439		sq_config = 0;
440	else
441		sq_config = R600_VC_ENABLE;
442
443	sq_config |= (R600_DX9_CONSTS |
444		      R600_ALU_INST_PREFER_VECTOR |
445		      R600_PS_PRIO(0) |
446		      R600_VS_PRIO(1) |
447		      R600_GS_PRIO(2) |
448		      R600_ES_PRIO(3));
449
450	sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
451				  R600_NUM_VS_GPRS(num_vs_gprs) |
452				  R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
453	sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
454				  R600_NUM_ES_GPRS(num_es_gprs));
455	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
456				   R600_NUM_VS_THREADS(num_vs_threads) |
457				   R600_NUM_GS_THREADS(num_gs_threads) |
458				   R600_NUM_ES_THREADS(num_es_threads));
459	sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
460				    R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
461	sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
462				    R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));
463
	/* Emit the canned default state blob for this generation. */
464	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
465		default_state_dw = r7xx_default_size * 4;
466		BEGIN_RING(default_state_dw + 10);
467		for (i = 0; i < default_state_dw; i++)
468			OUT_RING(r7xx_default_state[i]);
469	} else {
470		default_state_dw = r6xx_default_size * 4;
471		BEGIN_RING(default_state_dw + 10);
472		for (i = 0; i < default_state_dw; i++)
473			OUT_RING(r6xx_default_state[i]);
474	}
475	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
476	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
477	/* SQ config */
478	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
479	OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
480	OUT_RING(sq_config);
481	OUT_RING(sq_gpr_resource_mgmt_1);
482	OUT_RING(sq_gpr_resource_mgmt_2);
483	OUT_RING(sq_thread_resource_mgmt);
484	OUT_RING(sq_stack_resource_mgmt_1);
485	OUT_RING(sq_stack_resource_mgmt_2);
486	ADVANCE_RING();
487}
488
/*
 * Convert an unsigned integer to its IEEE-754 single-precision bit
 * pattern (the 3D engine consumes blit vertex coordinates as floats).
 *
 * Fix: the previous version masked the input with 0x3fff, so any value
 * >= 2^14 converted wrongly (multiples of 2^14 even became 0.0).  This
 * version handles the full 32-bit range; values needing more than 24
 * bits of precision are truncated toward zero, which is exact for all
 * coordinates the blitter generates.
 */
static inline uint32_t i2f(uint32_t input)
{
	uint32_t msb, exponent, fraction;

	/* 0 has the all-zero encoding */
	if (input == 0)
		return 0;

	/* locate the most significant set bit */
	msb = 31;
	while (!(input & (1u << msb)))
		msb--;

	exponent = 127 + msb;	/* IEEE-754 bias is 127 */
	if (msb > 23)
		/* more significant bits than the mantissa holds: drop low bits */
		fraction = (input >> (msb - 23)) & 0x7fffff;
	else
		fraction = (input << (23 - msb)) & 0x7fffff;

	/* implicit leading 1 is masked off; sign bit is always 0 */
	return (exponent << 23) | fraction;
}
513
514
/* Grab a free DMA buffer from the freelist to use as the blit vertex
 * buffer.  Returns 0 on success, -EAGAIN when none is available
 * (dev_priv->blit_vb is then NULL — callers must check). */
515int r600_nomm_get_vb(struct drm_device *dev)
516{
517	drm_radeon_private_t *dev_priv = dev->dev_private;
518	dev_priv->blit_vb = radeon_freelist_get(dev);
519	if (!dev_priv->blit_vb) {
520		DRM_ERROR("Unable to allocate vertex buffer for blit\n");
521		return -EAGAIN;
522	}
523	return 0;
524}
525
/* Return the current blit vertex buffer to its owner: reset its used
 * count and discard it back through the CP. */
526void r600_nomm_put_vb(struct drm_device *dev)
527{
528	drm_radeon_private_t *dev_priv = dev->dev_private;
529
530	dev_priv->blit_vb->used = 0;
531	radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
532}
533
/* CPU pointer to the first unused byte of the blit vertex buffer
 * (AGP mapping base + buffer offset + bytes already consumed). */
534void *r600_nomm_get_vb_ptr(struct drm_device *dev)
535{
536	drm_radeon_private_t *dev_priv = dev->dev_private;
537	return (((char *)dev->agp_buffer_map->handle +
538		 dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
539}
540
541int
542r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
543{
544 drm_radeon_private_t *dev_priv = dev->dev_private;
545 DRM_DEBUG("\n");
546
547 r600_nomm_get_vb(dev);
548
549 dev_priv->blit_vb->file_priv = file_priv;
550
551 set_default_state(dev_priv);
552 set_shaders(dev);
553
554 return 0;
555}
556
557
/* Finish a blit run: flush/invalidate caches, wait for the 3D engine
 * to go idle and clean, commit the ring, and release the vertex
 * buffer acquired by r600_prepare_blit_copy(). */
558void
559r600_done_blit_copy(struct drm_device *dev)
560{
561	drm_radeon_private_t *dev_priv = dev->dev_private;
562	RING_LOCALS;
563	DRM_DEBUG("\n");
564
565	BEGIN_RING(5);
566	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
567	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
568	/* wait for 3D idle clean */
569	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
570	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
571	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
572
573	ADVANCE_RING();
574	COMMIT_RING();
575
576	r600_nomm_put_vb(dev);
577}
578
579void
580r600_blit_copy(struct drm_device *dev,
581 uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
582 int size_bytes)
583{
584 drm_radeon_private_t *dev_priv = dev->dev_private;
585 int max_bytes;
586 u64 vb_addr;
587 u32 *vb;
588
589 vb = r600_nomm_get_vb_ptr(dev);
590
591 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
592 max_bytes = 8192;
593
594 while (size_bytes) {
595 int cur_size = size_bytes;
596 int src_x = src_gpu_addr & 255;
597 int dst_x = dst_gpu_addr & 255;
598 int h = 1;
599 src_gpu_addr = src_gpu_addr & ~255;
600 dst_gpu_addr = dst_gpu_addr & ~255;
601
602 if (!src_x && !dst_x) {
603 h = (cur_size / max_bytes);
604 if (h > 8192)
605 h = 8192;
606 if (h == 0)
607 h = 1;
608 else
609 cur_size = max_bytes;
610 } else {
611 if (cur_size > max_bytes)
612 cur_size = max_bytes;
613 if (cur_size > (max_bytes - dst_x))
614 cur_size = (max_bytes - dst_x);
615 if (cur_size > (max_bytes - src_x))
616 cur_size = (max_bytes - src_x);
617 }
618
619 if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
620
621 r600_nomm_put_vb(dev);
622 r600_nomm_get_vb(dev);
623 if (!dev_priv->blit_vb)
624 return;
625 set_shaders(dev);
626 vb = r600_nomm_get_vb_ptr(dev);
627 }
628
629 vb[0] = i2f(dst_x);
630 vb[1] = 0;
631 vb[2] = i2f(src_x);
632 vb[3] = 0;
633
634 vb[4] = i2f(dst_x);
635 vb[5] = i2f(h);
636 vb[6] = i2f(src_x);
637 vb[7] = i2f(h);
638
639 vb[8] = i2f(dst_x + cur_size);
640 vb[9] = i2f(h);
641 vb[10] = i2f(src_x + cur_size);
642 vb[11] = i2f(h);
643
644 /* src */
645 set_tex_resource(dev_priv, FMT_8,
646 src_x + cur_size, h, src_x + cur_size,
647 src_gpu_addr);
648
649 cp_set_surface_sync(dev_priv,
650 R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
651
652 /* dst */
653 set_render_target(dev_priv, COLOR_8,
654 dst_x + cur_size, h,
655 dst_gpu_addr);
656
657 /* scissors */
658 set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);
659
660 /* Vertex buffer setup */
661 vb_addr = dev_priv->gart_buffers_offset +
662 dev_priv->blit_vb->offset +
663 dev_priv->blit_vb->used;
664 set_vtx_resource(dev_priv, vb_addr);
665
666 /* draw */
667 draw_auto(dev_priv);
668
669 cp_set_surface_sync(dev_priv,
670 R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
671 cur_size * h, dst_gpu_addr);
672
673 vb += 12;
674 dev_priv->blit_vb->used += 12 * 4;
675
676 src_gpu_addr += cur_size * h;
677 dst_gpu_addr += cur_size * h;
678 size_bytes -= cur_size * h;
679 }
680 } else {
681 max_bytes = 8192 * 4;
682
683 while (size_bytes) {
684 int cur_size = size_bytes;
685 int src_x = (src_gpu_addr & 255);
686 int dst_x = (dst_gpu_addr & 255);
687 int h = 1;
688 src_gpu_addr = src_gpu_addr & ~255;
689 dst_gpu_addr = dst_gpu_addr & ~255;
690
691 if (!src_x && !dst_x) {
692 h = (cur_size / max_bytes);
693 if (h > 8192)
694 h = 8192;
695 if (h == 0)
696 h = 1;
697 else
698 cur_size = max_bytes;
699 } else {
700 if (cur_size > max_bytes)
701 cur_size = max_bytes;
702 if (cur_size > (max_bytes - dst_x))
703 cur_size = (max_bytes - dst_x);
704 if (cur_size > (max_bytes - src_x))
705 cur_size = (max_bytes - src_x);
706 }
707
708 if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
709 r600_nomm_put_vb(dev);
710 r600_nomm_get_vb(dev);
711 if (!dev_priv->blit_vb)
712 return;
713
714 set_shaders(dev);
715 vb = r600_nomm_get_vb_ptr(dev);
716 }
717
718 vb[0] = i2f(dst_x / 4);
719 vb[1] = 0;
720 vb[2] = i2f(src_x / 4);
721 vb[3] = 0;
722
723 vb[4] = i2f(dst_x / 4);
724 vb[5] = i2f(h);
725 vb[6] = i2f(src_x / 4);
726 vb[7] = i2f(h);
727
728 vb[8] = i2f((dst_x + cur_size) / 4);
729 vb[9] = i2f(h);
730 vb[10] = i2f((src_x + cur_size) / 4);
731 vb[11] = i2f(h);
732
733 /* src */
734 set_tex_resource(dev_priv, FMT_8_8_8_8,
735 (src_x + cur_size) / 4,
736 h, (src_x + cur_size) / 4,
737 src_gpu_addr);
738
739 cp_set_surface_sync(dev_priv,
740 R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
741
742 /* dst */
743 set_render_target(dev_priv, COLOR_8_8_8_8,
744 dst_x + cur_size, h,
745 dst_gpu_addr);
746
747 /* scissors */
748 set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
749
750 /* Vertex buffer setup */
751 vb_addr = dev_priv->gart_buffers_offset +
752 dev_priv->blit_vb->offset +
753 dev_priv->blit_vb->used;
754 set_vtx_resource(dev_priv, vb_addr);
755
756 /* draw */
757 draw_auto(dev_priv);
758
759 cp_set_surface_sync(dev_priv,
760 R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
761 cur_size * h, dst_gpu_addr);
762
763 vb += 12;
764 dev_priv->blit_vb->used += 12 * 4;
765
766 src_gpu_addr += cur_size * h;
767 dst_gpu_addr += cur_size * h;
768 size_bytes -= cur_size * h;
769 }
770 }
771}
772
/* Blit a w x h rectangle from (sx,sy) in the source surface to (dx,dy)
 * in the destination, choosing texture/render-target formats from the
 * bytes-per-pixel (4 -> 8888, 2 -> 565, else 8).  Used for buffer
 * swaps.  Caller must bracket with r600_prepare_blit_copy()/
 * r600_done_blit_copy(). */
773void
774r600_blit_swap(struct drm_device *dev,
775	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
776	       int sx, int sy, int dx, int dy,
777	       int w, int h, int src_pitch, int dst_pitch, int cpp)
778{
779	drm_radeon_private_t *dev_priv = dev->dev_private;
780	int cb_format, tex_format;
781	u64 vb_addr;
782	u32 *vb;
783
784	vb = (u32 *) ((char *)dev->agp_buffer_map->handle +
785		      dev_priv->blit_vb->offset + dev_priv->blit_vb->used);
786
	/* 12 dwords (48 bytes) of vertex data needed; swap in a fresh
	 * vertex buffer if the current one is full */
787	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
788
789		r600_nomm_put_vb(dev);
790		r600_nomm_get_vb(dev);
791		if (!dev_priv->blit_vb)
792			return;
793
794		set_shaders(dev);
795		vb = r600_nomm_get_vb_ptr(dev);
796	}
797
798	if (cpp == 4) {
799		cb_format = COLOR_8_8_8_8;
800		tex_format = FMT_8_8_8_8;
801	} else if (cpp == 2) {
802		cb_format = COLOR_5_6_5;
803		tex_format = FMT_5_6_5;
804	} else {
805		cb_format = COLOR_8;
806		tex_format = FMT_8;
807	}
808
	/* three corners of the rect: (dst_x, dst_y, src_s, src_t) */
809	vb[0] = i2f(dx);
810	vb[1] = i2f(dy);
811	vb[2] = i2f(sx);
812	vb[3] = i2f(sy);
813
814	vb[4] = i2f(dx);
815	vb[5] = i2f(dy + h);
816	vb[6] = i2f(sx);
817	vb[7] = i2f(sy + h);
818
819	vb[8] = i2f(dx + w);
820	vb[9] = i2f(dy + h);
821	vb[10] = i2f(sx + w);
822	vb[11] = i2f(sy + h);
823
824	/* src */
825	set_tex_resource(dev_priv, tex_format,
826			 src_pitch / cpp,
827			 sy + h, src_pitch / cpp,
828			 src_gpu_addr);
829
830	cp_set_surface_sync(dev_priv,
831			    R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr);
832
833	/* dst */
834	set_render_target(dev_priv, cb_format,
835			  dst_pitch / cpp, dy + h,
836			  dst_gpu_addr);
837
838	/* scissors */
839	set_scissors(dev_priv, dx, dy, dx + w, dy + h);
840
841	/* Vertex buffer setup */
842	vb_addr = dev_priv->gart_buffers_offset +
843		dev_priv->blit_vb->offset +
844		dev_priv->blit_vb->used;
845	set_vtx_resource(dev_priv, vb_addr);
846
847	/* draw */
848	draw_auto(dev_priv);
849
850	cp_set_surface_sync(dev_priv,
851			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
852			    dst_pitch * (dy + h), dst_gpu_addr);
853
854	dev_priv->blit_vb->used += 12 * 4;
855}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
new file mode 100644
index 000000000000..5755647e688a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -0,0 +1,777 @@
1#include "drmP.h"
2#include "drm.h"
3#include "radeon_drm.h"
4#include "radeon.h"
5
6#include "r600d.h"
7#include "r600_blit_shaders.h"
8
#define DI_PT_RECTLIST        0x11	/* VGT primitive type: rectangle list */
#define DI_INDEX_SIZE_16_BIT  0x0	/* draw uses 16-bit indices */
#define DI_SRC_SEL_AUTO_INDEX 0x2	/* indices auto-generated by the VGT */

/* SQ texture formats and CB color formats for the three copy widths
 * (1, 2 and 4 bytes per element) used by the blit paths below */
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
19
/* emits 21 on rv770+, 23 on r600 */
/*
 * Point color buffer 0 at the blit destination and program its geometry:
 * CB_COLOR0_{BASE,SIZE,VIEW,INFO,TILE,FRAG,MASK}.  RV6xx parts need an
 * extra SURFACE_BASE_UPDATE after moving the base (hence the two dword
 * counts above).
 */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;

	/* round height up to a multiple of 8 lines, minimum 8 */
	h = (h + 7) & ~7;
	if (h < 8)
		h = 8;

	/* format field plus bit 27 (NOTE(review): presumably the linear
	 * array mode — confirm against r600d.h) */
	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;		/* hw pitch in units of 8 px, minus 1 */
	slice = ((w * h) / 64) - 1;	/* hw slice in units of 64 px, minus 1 */

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);	/* base is 256-byte aligned */

	/* chips strictly between R600 and RV770 need a SURFACE_BASE_UPDATE
	 * after the CB base changes */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
		radeon_ring_write(rdev, 2 << 0);
	}

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, cb_color_info);

	/* tile/frag/mask buffers are unused by the blit: clear them */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);
}
69
70/* emits 5dw */
71static void
72cp_set_surface_sync(struct radeon_device *rdev,
73 u32 sync_type, u32 size,
74 u64 mc_addr)
75{
76 u32 cp_coher_size;
77
78 if (size == 0xffffffff)
79 cp_coher_size = 0xffffffff;
80 else
81 cp_coher_size = ((size + 255) >> 8);
82
83 radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
84 radeon_ring_write(rdev, sync_type);
85 radeon_ring_write(rdev, cp_coher_size);
86 radeon_ring_write(rdev, mc_addr >> 8);
87 radeon_ring_write(rdev, 10); /* poll interval */
88}
89
/* emits 21dw + 1 surface sync = 26dw */
/*
 * Program the blit vertex and pixel shaders: start addresses inside the
 * pinned shader BO, resource (GPR) settings and CF offsets, followed by
 * a shader-cache surface sync so the SQ fetches the fresh code.
 */
static void
set_shaders(struct radeon_device *rdev)
{
	u64 gpu_addr;
	u32 sq_pgm_resources;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);	/* 256-byte aligned start */

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	/* NOTE(review): bit 28 is an extra PS-only flag — confirm its
	 * meaning against the SQ_PGM_RESOURCES_PS definition in r600d.h */
	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 2);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	/* make sure the shader cache sees the newly uploaded code
	 * (gpu_addr still points at the PS program here) */
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}
134
135/* emits 9 + 1 sync (5) = 14*/
136static void
137set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
138{
139 u32 sq_vtx_constant_word2;
140
141 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
142
143 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
144 radeon_ring_write(rdev, 0x460);
145 radeon_ring_write(rdev, gpu_addr & 0xffffffff);
146 radeon_ring_write(rdev, 48 - 1);
147 radeon_ring_write(rdev, sq_vtx_constant_word2);
148 radeon_ring_write(rdev, 1 << 0);
149 radeon_ring_write(rdev, 0);
150 radeon_ring_write(rdev, 0);
151 radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
152
153 if ((rdev->family == CHIP_RV610) ||
154 (rdev->family == CHIP_RV620) ||
155 (rdev->family == CHIP_RS780) ||
156 (rdev->family == CHIP_RS880) ||
157 (rdev->family == CHIP_RV710))
158 cp_set_surface_sync(rdev,
159 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
160 else
161 cp_set_surface_sync(rdev,
162 PACKET3_VC_ACTION_ENA, 48, gpu_addr);
163}
164
/* emits 9 */
/*
 * Describe the blit source as texture resource slot 0: a linear 2D
 * texture of the given format, @w x @h texels with @pitch texels per
 * row, at @gpu_addr.
 */
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

	/* hardware encodes height - 1; clamp so it never underflows */
	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	/* pitch is stored in units of 8 texels minus 1; width minus 1 */
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

	/* NOTE(review): these look like the component select/number
	 * fields (x,y,z,w pass-through) — confirm against r600d.h */
	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0);		/* resource slot 0 */
	radeon_ring_write(rdev, sq_tex_resource_word0);
	radeon_ring_write(rdev, sq_tex_resource_word1);
	radeon_ring_write(rdev, gpu_addr >> 8);	/* base, 256-byte aligned */
	radeon_ring_write(rdev, gpu_addr >> 8);	/* mip base: same single level */
	radeon_ring_write(rdev, sq_tex_resource_word4);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
199
/* emits 12 */
/*
 * Program the screen, generic and window scissor rectangles to the same
 * box with top-left (x1,y1) and bottom-right (x2,y2).
 * NOTE(review): bit 31 on the generic/window TL dwords presumably
 * disables the window offset — confirm against r600d.h.
 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
220
/* emits 10 */
/*
 * Kick the actual copy: draw a single rectlist primitive with three
 * auto-generated indices; the vertex data comes from the buffer bound
 * by set_vtx_resource().
 */
static void
draw_auto(struct radeon_device *rdev)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, DI_PT_RECTLIST);

	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(rdev, 1);

	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(rdev, 3);	/* one rect = 3 vertices */
	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);

}
240
241/* emits 14 */
242static void
243set_default_state(struct radeon_device *rdev)
244{
245 u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
246 u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
247 int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
248 int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
249 int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
250 u64 gpu_addr;
251
252 switch (rdev->family) {
253 case CHIP_R600:
254 num_ps_gprs = 192;
255 num_vs_gprs = 56;
256 num_temp_gprs = 4;
257 num_gs_gprs = 0;
258 num_es_gprs = 0;
259 num_ps_threads = 136;
260 num_vs_threads = 48;
261 num_gs_threads = 4;
262 num_es_threads = 4;
263 num_ps_stack_entries = 128;
264 num_vs_stack_entries = 128;
265 num_gs_stack_entries = 0;
266 num_es_stack_entries = 0;
267 break;
268 case CHIP_RV630:
269 case CHIP_RV635:
270 num_ps_gprs = 84;
271 num_vs_gprs = 36;
272 num_temp_gprs = 4;
273 num_gs_gprs = 0;
274 num_es_gprs = 0;
275 num_ps_threads = 144;
276 num_vs_threads = 40;
277 num_gs_threads = 4;
278 num_es_threads = 4;
279 num_ps_stack_entries = 40;
280 num_vs_stack_entries = 40;
281 num_gs_stack_entries = 32;
282 num_es_stack_entries = 16;
283 break;
284 case CHIP_RV610:
285 case CHIP_RV620:
286 case CHIP_RS780:
287 case CHIP_RS880:
288 default:
289 num_ps_gprs = 84;
290 num_vs_gprs = 36;
291 num_temp_gprs = 4;
292 num_gs_gprs = 0;
293 num_es_gprs = 0;
294 num_ps_threads = 136;
295 num_vs_threads = 48;
296 num_gs_threads = 4;
297 num_es_threads = 4;
298 num_ps_stack_entries = 40;
299 num_vs_stack_entries = 40;
300 num_gs_stack_entries = 32;
301 num_es_stack_entries = 16;
302 break;
303 case CHIP_RV670:
304 num_ps_gprs = 144;
305 num_vs_gprs = 40;
306 num_temp_gprs = 4;
307 num_gs_gprs = 0;
308 num_es_gprs = 0;
309 num_ps_threads = 136;
310 num_vs_threads = 48;
311 num_gs_threads = 4;
312 num_es_threads = 4;
313 num_ps_stack_entries = 40;
314 num_vs_stack_entries = 40;
315 num_gs_stack_entries = 32;
316 num_es_stack_entries = 16;
317 break;
318 case CHIP_RV770:
319 num_ps_gprs = 192;
320 num_vs_gprs = 56;
321 num_temp_gprs = 4;
322 num_gs_gprs = 0;
323 num_es_gprs = 0;
324 num_ps_threads = 188;
325 num_vs_threads = 60;
326 num_gs_threads = 0;
327 num_es_threads = 0;
328 num_ps_stack_entries = 256;
329 num_vs_stack_entries = 256;
330 num_gs_stack_entries = 0;
331 num_es_stack_entries = 0;
332 break;
333 case CHIP_RV730:
334 case CHIP_RV740:
335 num_ps_gprs = 84;
336 num_vs_gprs = 36;
337 num_temp_gprs = 4;
338 num_gs_gprs = 0;
339 num_es_gprs = 0;
340 num_ps_threads = 188;
341 num_vs_threads = 60;
342 num_gs_threads = 0;
343 num_es_threads = 0;
344 num_ps_stack_entries = 128;
345 num_vs_stack_entries = 128;
346 num_gs_stack_entries = 0;
347 num_es_stack_entries = 0;
348 break;
349 case CHIP_RV710:
350 num_ps_gprs = 192;
351 num_vs_gprs = 56;
352 num_temp_gprs = 4;
353 num_gs_gprs = 0;
354 num_es_gprs = 0;
355 num_ps_threads = 144;
356 num_vs_threads = 48;
357 num_gs_threads = 0;
358 num_es_threads = 0;
359 num_ps_stack_entries = 128;
360 num_vs_stack_entries = 128;
361 num_gs_stack_entries = 0;
362 num_es_stack_entries = 0;
363 break;
364 }
365
366 if ((rdev->family == CHIP_RV610) ||
367 (rdev->family == CHIP_RV620) ||
368 (rdev->family == CHIP_RS780) ||
369 (rdev->family == CHIP_RS780) ||
370 (rdev->family == CHIP_RV710))
371 sq_config = 0;
372 else
373 sq_config = VC_ENABLE;
374
375 sq_config |= (DX9_CONSTS |
376 ALU_INST_PREFER_VECTOR |
377 PS_PRIO(0) |
378 VS_PRIO(1) |
379 GS_PRIO(2) |
380 ES_PRIO(3));
381
382 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
383 NUM_VS_GPRS(num_vs_gprs) |
384 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
385 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
386 NUM_ES_GPRS(num_es_gprs));
387 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
388 NUM_VS_THREADS(num_vs_threads) |
389 NUM_GS_THREADS(num_gs_threads) |
390 NUM_ES_THREADS(num_es_threads));
391 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
392 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
393 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
394 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
395
396 /* emit an IB pointing at default state */
397 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
398 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
399 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
400 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
401 radeon_ring_write(rdev, (rdev->r600_blit.state_len / 4));
402
403 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
404 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
405 /* SQ config */
406 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
407 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
408 radeon_ring_write(rdev, sq_config);
409 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
410 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
411 radeon_ring_write(rdev, sq_thread_resource_mgmt);
412 radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
413 radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
414}
415
/*
 * Convert an unsigned integer to its IEEE-754 single precision bit
 * pattern (truncating toward zero when the value needs more than
 * 24 significant bits).
 *
 * Bug fix: the previous implementation masked the input with 0x3fff and
 * so silently produced wrong results (or 0) for any value >= 2^14.
 * This version handles the full 32-bit range; for inputs below 2^14 the
 * returned bit patterns are identical to the old code's.
 */
static inline uint32_t i2f(uint32_t input)
{
	uint32_t msb, exponent, fraction;

	if (input == 0)
		return 0;	/* +0.0f is the all-zero encoding */

	/* locate the most significant set bit */
	msb = 31;
	while (!(input & (1u << msb)))
		msb--;

	/* biased exponent: 127 + msb */
	exponent = (127 + msb) << 23;

	/* align the remaining bits into the 23-bit fraction field; the
	 * implicit leading 1 is masked off, extra low bits are dropped */
	if (msb > 23)
		fraction = (input >> (msb - 23)) & 0x7fffff;
	else
		fraction = (input << (23 - msb)) & 0x7fffff;

	return exponent | fraction;
}
440
441int r600_blit_init(struct radeon_device *rdev)
442{
443 u32 obj_size;
444 int r;
445 void *ptr;
446
447 rdev->r600_blit.state_offset = 0;
448
449 if (rdev->family >= CHIP_RV770)
450 rdev->r600_blit.state_len = r7xx_default_size * 4;
451 else
452 rdev->r600_blit.state_len = r6xx_default_size * 4;
453
454 obj_size = rdev->r600_blit.state_len;
455 obj_size = ALIGN(obj_size, 256);
456
457 rdev->r600_blit.vs_offset = obj_size;
458 obj_size += r6xx_vs_size * 4;
459 obj_size = ALIGN(obj_size, 256);
460
461 rdev->r600_blit.ps_offset = obj_size;
462 obj_size += r6xx_ps_size * 4;
463 obj_size = ALIGN(obj_size, 256);
464
465 r = radeon_object_create(rdev, NULL, obj_size,
466 true, RADEON_GEM_DOMAIN_VRAM,
467 false, &rdev->r600_blit.shader_obj);
468 if (r) {
469 DRM_ERROR("r600 failed to allocate shader\n");
470 return r;
471 }
472
473 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
474 &rdev->r600_blit.shader_gpu_addr);
475 if (r) {
476 DRM_ERROR("failed to pin blit object %d\n", r);
477 return r;
478 }
479
480 DRM_DEBUG("r6xx blit allocated bo @ 0x%16llx %08x vs %08x ps %08x\n",
481 rdev->r600_blit.shader_gpu_addr, obj_size,
482 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
483
484 r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
485 if (r) {
486 DRM_ERROR("failed to map blit object %d\n", r);
487 return r;
488 }
489
490 if (rdev->family >= CHIP_RV770)
491 memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len);
492 else
493 memcpy_toio(ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len);
494
495 memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
496 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
497
498 radeon_object_kunmap(rdev->r600_blit.shader_obj);
499 return 0;
500}
501
/*
 * Tear down blit state: unpin and drop the shader BO created by
 * r600_blit_init().
 */
void r600_blit_fini(struct radeon_device *rdev)
{
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	radeon_object_unref(&rdev->r600_blit.shader_obj);
}
507
/*
 * Grab an indirect buffer to hold blit vertex data and reset the
 * bookkeeping: 64 KiB total, nothing used yet.
 * Returns 0 on success or the radeon_ib_get() error code.
 */
int r600_vb_ib_get(struct radeon_device *rdev)
{
	int r;
	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}

	rdev->r600_blit.vb_total = 64*1024;
	rdev->r600_blit.vb_used = 0;
	return 0;
}
521
/*
 * Hand the vertex-buffer IB back: fence it and queue it on the
 * scheduled list (both under the IB pool mutex), then release our
 * reference.
 */
void r600_vb_ib_put(struct radeon_device *rdev)
{
	mutex_lock(&rdev->ib_pool.mutex);
	/* NOTE(review): radeon_fence_emit() result is ignored here */
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
530
531int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
532{
533 int r;
534 int ring_size;
535 const int max_size = 8192*8192;
536
537 r = r600_vb_ib_get(rdev);
538 WARN_ON(r);
539
540 /* loops of emits 64 + fence emit possible */
541 ring_size = ((size_bytes + max_size) / max_size) * 78;
542 /* set default + shaders */
543 ring_size += 40; /* shaders + def state */
544 ring_size += 3; /* fence emit for VB IB */
545 ring_size += 5; /* done copy */
546 ring_size += 3; /* fence emit for done copy */
547 r = radeon_ring_lock(rdev, ring_size);
548 WARN_ON(r);
549
550 set_default_state(rdev); /* 14 */
551 set_shaders(rdev); /* 26 */
552 return 0;
553}
554
/*
 * Finish a blit started by r600_blit_prepare_copy(): flush/invalidate
 * caches, wait for the 3D engine to go idle, return the vertex-buffer
 * IB, optionally emit the caller's fence, and commit the ring.
 */
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		/* NOTE(review): r is set but never checked */
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev);
}
574
575void r600_kms_blit_copy(struct radeon_device *rdev,
576 u64 src_gpu_addr, u64 dst_gpu_addr,
577 int size_bytes)
578{
579 int max_bytes;
580 u64 vb_gpu_addr;
581 u32 *vb;
582
583 DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
584 size_bytes, rdev->r600_blit.vb_used);
585 vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
586 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
587 max_bytes = 8192;
588
589 while (size_bytes) {
590 int cur_size = size_bytes;
591 int src_x = src_gpu_addr & 255;
592 int dst_x = dst_gpu_addr & 255;
593 int h = 1;
594 src_gpu_addr = src_gpu_addr & ~255;
595 dst_gpu_addr = dst_gpu_addr & ~255;
596
597 if (!src_x && !dst_x) {
598 h = (cur_size / max_bytes);
599 if (h > 8192)
600 h = 8192;
601 if (h == 0)
602 h = 1;
603 else
604 cur_size = max_bytes;
605 } else {
606 if (cur_size > max_bytes)
607 cur_size = max_bytes;
608 if (cur_size > (max_bytes - dst_x))
609 cur_size = (max_bytes - dst_x);
610 if (cur_size > (max_bytes - src_x))
611 cur_size = (max_bytes - src_x);
612 }
613
614 if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
615 WARN_ON(1);
616
617#if 0
618 r600_vb_ib_put(rdev);
619
620 r600_nomm_put_vb(dev);
621 r600_nomm_get_vb(dev);
622 if (!dev_priv->blit_vb)
623 return;
624 set_shaders(dev);
625 vb = r600_nomm_get_vb_ptr(dev);
626#endif
627 }
628
629 vb[0] = i2f(dst_x);
630 vb[1] = 0;
631 vb[2] = i2f(src_x);
632 vb[3] = 0;
633
634 vb[4] = i2f(dst_x);
635 vb[5] = i2f(h);
636 vb[6] = i2f(src_x);
637 vb[7] = i2f(h);
638
639 vb[8] = i2f(dst_x + cur_size);
640 vb[9] = i2f(h);
641 vb[10] = i2f(src_x + cur_size);
642 vb[11] = i2f(h);
643
644 /* src 9 */
645 set_tex_resource(rdev, FMT_8,
646 src_x + cur_size, h, src_x + cur_size,
647 src_gpu_addr);
648
649 /* 5 */
650 cp_set_surface_sync(rdev,
651 PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
652
653 /* dst 23 */
654 set_render_target(rdev, COLOR_8,
655 dst_x + cur_size, h,
656 dst_gpu_addr);
657
658 /* scissors 12 */
659 set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
660
661 /* 14 */
662 vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
663 set_vtx_resource(rdev, vb_gpu_addr);
664
665 /* draw 10 */
666 draw_auto(rdev);
667
668 /* 5 */
669 cp_set_surface_sync(rdev,
670 PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
671 cur_size * h, dst_gpu_addr);
672
673 vb += 12;
674 rdev->r600_blit.vb_used += 12 * 4;
675
676 src_gpu_addr += cur_size * h;
677 dst_gpu_addr += cur_size * h;
678 size_bytes -= cur_size * h;
679 }
680 } else {
681 max_bytes = 8192 * 4;
682
683 while (size_bytes) {
684 int cur_size = size_bytes;
685 int src_x = (src_gpu_addr & 255);
686 int dst_x = (dst_gpu_addr & 255);
687 int h = 1;
688 src_gpu_addr = src_gpu_addr & ~255;
689 dst_gpu_addr = dst_gpu_addr & ~255;
690
691 if (!src_x && !dst_x) {
692 h = (cur_size / max_bytes);
693 if (h > 8192)
694 h = 8192;
695 if (h == 0)
696 h = 1;
697 else
698 cur_size = max_bytes;
699 } else {
700 if (cur_size > max_bytes)
701 cur_size = max_bytes;
702 if (cur_size > (max_bytes - dst_x))
703 cur_size = (max_bytes - dst_x);
704 if (cur_size > (max_bytes - src_x))
705 cur_size = (max_bytes - src_x);
706 }
707
708 if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
709 WARN_ON(1);
710 }
711#if 0
712 if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
713 r600_nomm_put_vb(dev);
714 r600_nomm_get_vb(dev);
715 if (!rdev->blit_vb)
716 return;
717
718 set_shaders(dev);
719 vb = r600_nomm_get_vb_ptr(dev);
720 }
721#endif
722
723 vb[0] = i2f(dst_x / 4);
724 vb[1] = 0;
725 vb[2] = i2f(src_x / 4);
726 vb[3] = 0;
727
728 vb[4] = i2f(dst_x / 4);
729 vb[5] = i2f(h);
730 vb[6] = i2f(src_x / 4);
731 vb[7] = i2f(h);
732
733 vb[8] = i2f((dst_x + cur_size) / 4);
734 vb[9] = i2f(h);
735 vb[10] = i2f((src_x + cur_size) / 4);
736 vb[11] = i2f(h);
737
738 /* src 9 */
739 set_tex_resource(rdev, FMT_8_8_8_8,
740 (src_x + cur_size) / 4,
741 h, (src_x + cur_size) / 4,
742 src_gpu_addr);
743 /* 5 */
744 cp_set_surface_sync(rdev,
745 PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
746
747 /* dst 23 */
748 set_render_target(rdev, COLOR_8_8_8_8,
749 dst_x + cur_size, h,
750 dst_gpu_addr);
751
752 /* scissors 12 */
753 set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
754
755 /* Vertex buffer setup 14 */
756 vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
757 set_vtx_resource(rdev, vb_gpu_addr);
758
759 /* draw 10 */
760 draw_auto(rdev);
761
762 /* 5 */
763 cp_set_surface_sync(rdev,
764 PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
765 cur_size * h, dst_gpu_addr);
766
767 /* 78 ring dwords per loop */
768 vb += 12;
769 rdev->r600_blit.vb_used += 12 * 4;
770
771 src_gpu_addr += cur_size * h;
772 dst_gpu_addr += cur_size * h;
773 size_bytes -= cur_size * h;
774 }
775 }
776}
777
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
new file mode 100644
index 000000000000..d745e815c2e8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -0,0 +1,1072 @@
1
2#include <linux/types.h>
3#include <linux/kernel.h>
4
/*
 * Default PM4 state stream for r6xx blits.  r600_blit_init() copies this
 * table into the pinned shader BO and set_default_state() replays it as
 * an indirect buffer before each blit.  The dwords are raw PM4 packets
 * (header, register offset, value...); do not edit by hand without the
 * r600 PM4 packet format documentation.
 */
const u32 r6xx_default_state[] =
{
	0xc0002400,
	0x00000000,
	0xc0012800,
	0x80000000,
	0x80000000,
	0xc0004600,
	0x00000016,
	0xc0016800,
	0x00000010,
	0x00028000,
	0xc0016800,
	0x00000010,
	0x00008000,
	0xc0016800,
	0x00000542,
	0x07000003,
	0xc0016800,
	0x000005c5,
	0x00000000,
	0xc0016800,
	0x00000363,
	0x00000000,
	0xc0016800,
	0x0000060c,
	0x82000000,
	0xc0016800,
	0x0000060e,
	0x01020204,
	0xc0016f00,
	0x00000000,
	0x00000000,
	0xc0016f00,
	0x00000001,
	0x00000000,
	0xc0096900,
	0x0000022a,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0xc0016900,
	0x00000004,
	0x00000000,
	0xc0016900,
	0x0000000a,
	0x00000000,
	0xc0016900,
	0x0000000b,
	0x00000000,
	0xc0016900,
	0x0000010c,
	0x00000000,
	0xc0016900,
	0x0000010d,
	0x00000000,
	0xc0016900,
	0x00000200,
	0x00000000,
	0xc0016900,
	0x00000343,
	0x00000060,
	0xc0016900,
	0x00000344,
	0x00000040,
	0xc0016900,
	0x00000351,
	0x0000aa00,
	0xc0016900,
	0x00000104,
	0x00000000,
	0xc0016900,
	0x0000010e,
	0x00000000,
	0xc0046900,
	0x00000105,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0xc0036900,
	0x00000109,
	0x00000000,
	0x00000000,
	0x00000000,
	0xc0046900,
	0x0000030c,
	0x01000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0xc0046900,
	0x00000048,
	0x3f800000,
	0x00000000,
	0x3f800000,
	0x3f800000,
	0xc0016900,
	0x0000008e,
	0x0000000f,
	0xc0016900,
	0x00000080,
	0x00000000,
	0xc0016900,
	0x00000083,
	0x0000ffff,
	0xc0016900,
	0x00000084,
	0x00000000,
	0xc0016900,
	0x00000085,
	0x20002000,
	0xc0016900,
	0x00000086,
	0x00000000,
	0xc0016900,
	0x00000087,
	0x20002000,
	0xc0016900,
	0x00000088,
	0x00000000,
	0xc0016900,
	0x00000089,
	0x20002000,
	0xc0016900,
	0x0000008a,
	0x00000000,
	0xc0016900,
	0x0000008b,
	0x20002000,
	0xc0016900,
	0x0000008c,
	0x00000000,
	0xc0016900,
	0x00000094,
	0x80000000,
	0xc0016900,
	0x00000095,
	0x20002000,
	0xc0026900,
	0x000000b4,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x00000096,
	0x80000000,
	0xc0016900,
	0x00000097,
	0x20002000,
	0xc0026900,
	0x000000b6,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x00000098,
	0x80000000,
	0xc0016900,
	0x00000099,
	0x20002000,
	0xc0026900,
	0x000000b8,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x0000009a,
	0x80000000,
	0xc0016900,
	0x0000009b,
	0x20002000,
	0xc0026900,
	0x000000ba,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x0000009c,
	0x80000000,
	0xc0016900,
	0x0000009d,
	0x20002000,
	0xc0026900,
	0x000000bc,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x0000009e,
	0x80000000,
	0xc0016900,
	0x0000009f,
	0x20002000,
	0xc0026900,
	0x000000be,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000a0,
	0x80000000,
	0xc0016900,
	0x000000a1,
	0x20002000,
	0xc0026900,
	0x000000c0,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000a2,
	0x80000000,
	0xc0016900,
	0x000000a3,
	0x20002000,
	0xc0026900,
	0x000000c2,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000a4,
	0x80000000,
	0xc0016900,
	0x000000a5,
	0x20002000,
	0xc0026900,
	0x000000c4,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000a6,
	0x80000000,
	0xc0016900,
	0x000000a7,
	0x20002000,
	0xc0026900,
	0x000000c6,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000a8,
	0x80000000,
	0xc0016900,
	0x000000a9,
	0x20002000,
	0xc0026900,
	0x000000c8,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000aa,
	0x80000000,
	0xc0016900,
	0x000000ab,
	0x20002000,
	0xc0026900,
	0x000000ca,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000ac,
	0x80000000,
	0xc0016900,
	0x000000ad,
	0x20002000,
	0xc0026900,
	0x000000cc,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000ae,
	0x80000000,
	0xc0016900,
	0x000000af,
	0x20002000,
	0xc0026900,
	0x000000ce,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000b0,
	0x80000000,
	0xc0016900,
	0x000000b1,
	0x20002000,
	0xc0026900,
	0x000000d0,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x000000b2,
	0x80000000,
	0xc0016900,
	0x000000b3,
	0x20002000,
	0xc0026900,
	0x000000d2,
	0x00000000,
	0x3f800000,
	0xc0016900,
	0x00000293,
	0x00004010,
	0xc0016900,
	0x00000300,
	0x00000000,
	0xc0016900,
	0x00000301,
	0x00000000,
	0xc0016900,
	0x00000312,
	0xffffffff,
	0xc0016900,
	0x00000307,
	0x00000000,
	0xc0016900,
	0x00000308,
	0x00000000,
	0xc0016900,
	0x00000283,
	0x00000000,
	0xc0016900,
	0x00000292,
	0x00000000,
	0xc0066900,
	0x0000010f,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0xc0016900,
	0x00000206,
	0x00000000,
	0xc0016900,
	0x00000207,
	0x00000000,
	0xc0016900,
	0x00000208,
	0x00000000,
	0xc0046900,
	0x00000303,
	0x3f800000,
	0x3f800000,
	0x3f800000,
	0x3f800000,
	0xc0016900,
	0x00000205,
	0x00000004,
	0xc0016900,
	0x00000280,
	0x00000000,
	0xc0016900,
	0x00000281,
	0x00000000,
	0xc0016900,
	0x0000037e,
	0x00000000,
	0xc0016900,
	0x00000382,
	0x00000000,
	0xc0016900,
	0x00000380,
	0x00000000,
	0xc0016900,
	0x00000383,
	0x00000000,
	0xc0016900,
	0x00000381,
	0x00000000,
	0xc0016900,
	0x00000282,
	0x00000008,
	0xc0016900,
	0x00000302,
	0x0000002d,
	0xc0016900,
	0x0000037f,
	0x00000000,
	0xc0016900,
	0x000001b2,
	0x00000000,
	0xc0016900,
	0x000001b6,
	0x00000000,
	0xc0016900,
	0x000001b7,
	0x00000000,
	0xc0016900,
	0x000001b8,
	0x00000000,
	0xc0016900,
	0x000001b9,
	0x00000000,
	0xc0016900,
	0x00000225,
	0x00000000,
	0xc0016900,
	0x00000229,
	0x00000000,
	0xc0016900,
	0x00000237,
	0x00000000,
	0xc0016900,
	0x00000100,
	0x00000800,
	0xc0016900,
	0x00000101,
	0x00000000,
	0xc0016900,
	0x00000102,
	0x00000000,
	0xc0016900,
	0x000002a8,
	0x00000000,
	0xc0016900,
	0x000002a9,
	0x00000000,
	0xc0016900,
	0x00000103,
	0x00000000,
	0xc0016900,
	0x00000284,
	0x00000000,
	0xc0016900,
	0x00000290,
	0x00000000,
	0xc0016900,
	0x00000285,
	0x00000000,
	0xc0016900,
	0x00000286,
	0x00000000,
	0xc0016900,
	0x00000287,
	0x00000000,
	0xc0016900,
	0x00000288,
	0x00000000,
	0xc0016900,
	0x00000289,
	0x00000000,
	0xc0016900,
	0x0000028a,
	0x00000000,
	0xc0016900,
	0x0000028b,
	0x00000000,
	0xc0016900,
	0x0000028c,
	0x00000000,
	0xc0016900,
	0x0000028d,
	0x00000000,
	0xc0016900,
	0x0000028e,
	0x00000000,
	0xc0016900,
	0x0000028f,
	0x00000000,
	0xc0016900,
	0x000002a1,
	0x00000000,
	0xc0016900,
	0x000002a5,
	0x00000000,
	0xc0016900,
	0x000002ac,
	0x00000000,
	0xc0016900,
	0x000002ad,
	0x00000000,
	0xc0016900,
	0x000002ae,
	0x00000000,
	0xc0016900,
	0x000002c8,
	0x00000000,
	0xc0016900,
	0x00000206,
	0x00000100,
	0xc0016900,
	0x00000204,
	0x00010000,
	0xc0036e00,
	0x00000000,
	0x00000012,
	0x00000000,
	0x00000000,
	0xc0016900,
	0x0000008f,
	0x0000000f,
	0xc0016900,
	0x000001e8,
	0x00000001,
	0xc0016900,
	0x00000202,
	0x00cc0000,
	0xc0016900,
	0x00000205,
	0x00000244,
	0xc0016900,
	0x00000203,
	0x00000210,
	0xc0016900,
	0x000001b1,
	0x00000000,
	0xc0016900,
	0x00000185,
	0x00000000,
	0xc0016900,
	0x000001b3,
	0x00000001,
	0xc0016900,
	0x000001b4,
	0x00000000,
	0xc0016900,
	0x00000191,
	0x00000b00,
	0xc0016900,
	0x000001b5,
	0x00000000,
};
528
529const u32 r7xx_default_state[] =
530{
531 0xc0012800,
532 0x80000000,
533 0x80000000,
534 0xc0004600,
535 0x00000016,
536 0xc0016800,
537 0x00000010,
538 0x00028000,
539 0xc0016800,
540 0x00000010,
541 0x00008000,
542 0xc0016800,
543 0x00000542,
544 0x07000002,
545 0xc0016800,
546 0x000005c5,
547 0x00000000,
548 0xc0016800,
549 0x00000363,
550 0x00004000,
551 0xc0016800,
552 0x0000060c,
553 0x00000000,
554 0xc0016800,
555 0x0000060e,
556 0x00420204,
557 0xc0016f00,
558 0x00000000,
559 0x00000000,
560 0xc0016f00,
561 0x00000001,
562 0x00000000,
563 0xc0096900,
564 0x0000022a,
565 0x00000000,
566 0x00000000,
567 0x00000000,
568 0x00000000,
569 0x00000000,
570 0x00000000,
571 0x00000000,
572 0x00000000,
573 0x00000000,
574 0xc0016900,
575 0x00000004,
576 0x00000000,
577 0xc0016900,
578 0x0000000a,
579 0x00000000,
580 0xc0016900,
581 0x0000000b,
582 0x00000000,
583 0xc0016900,
584 0x0000010c,
585 0x00000000,
586 0xc0016900,
587 0x0000010d,
588 0x00000000,
589 0xc0016900,
590 0x00000200,
591 0x00000000,
592 0xc0016900,
593 0x00000343,
594 0x00000060,
595 0xc0016900,
596 0x00000344,
597 0x00000000,
598 0xc0016900,
599 0x00000351,
600 0x0000aa00,
601 0xc0016900,
602 0x00000104,
603 0x00000000,
604 0xc0016900,
605 0x0000010e,
606 0x00000000,
607 0xc0046900,
608 0x00000105,
609 0x00000000,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0xc0046900,
614 0x0000030c,
615 0x01000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0xc0016900,
620 0x0000008e,
621 0x0000000f,
622 0xc0016900,
623 0x00000080,
624 0x00000000,
625 0xc0016900,
626 0x00000083,
627 0x0000ffff,
628 0xc0016900,
629 0x00000084,
630 0x00000000,
631 0xc0016900,
632 0x00000085,
633 0x20002000,
634 0xc0016900,
635 0x00000086,
636 0x00000000,
637 0xc0016900,
638 0x00000087,
639 0x20002000,
640 0xc0016900,
641 0x00000088,
642 0x00000000,
643 0xc0016900,
644 0x00000089,
645 0x20002000,
646 0xc0016900,
647 0x0000008a,
648 0x00000000,
649 0xc0016900,
650 0x0000008b,
651 0x20002000,
652 0xc0016900,
653 0x0000008c,
654 0xaaaaaaaa,
655 0xc0016900,
656 0x00000094,
657 0x80000000,
658 0xc0016900,
659 0x00000095,
660 0x20002000,
661 0xc0026900,
662 0x000000b4,
663 0x00000000,
664 0x3f800000,
665 0xc0016900,
666 0x00000096,
667 0x80000000,
668 0xc0016900,
669 0x00000097,
670 0x20002000,
671 0xc0026900,
672 0x000000b6,
673 0x00000000,
674 0x3f800000,
675 0xc0016900,
676 0x00000098,
677 0x80000000,
678 0xc0016900,
679 0x00000099,
680 0x20002000,
681 0xc0026900,
682 0x000000b8,
683 0x00000000,
684 0x3f800000,
685 0xc0016900,
686 0x0000009a,
687 0x80000000,
688 0xc0016900,
689 0x0000009b,
690 0x20002000,
691 0xc0026900,
692 0x000000ba,
693 0x00000000,
694 0x3f800000,
695 0xc0016900,
696 0x0000009c,
697 0x80000000,
698 0xc0016900,
699 0x0000009d,
700 0x20002000,
701 0xc0026900,
702 0x000000bc,
703 0x00000000,
704 0x3f800000,
705 0xc0016900,
706 0x0000009e,
707 0x80000000,
708 0xc0016900,
709 0x0000009f,
710 0x20002000,
711 0xc0026900,
712 0x000000be,
713 0x00000000,
714 0x3f800000,
715 0xc0016900,
716 0x000000a0,
717 0x80000000,
718 0xc0016900,
719 0x000000a1,
720 0x20002000,
721 0xc0026900,
722 0x000000c0,
723 0x00000000,
724 0x3f800000,
725 0xc0016900,
726 0x000000a2,
727 0x80000000,
728 0xc0016900,
729 0x000000a3,
730 0x20002000,
731 0xc0026900,
732 0x000000c2,
733 0x00000000,
734 0x3f800000,
735 0xc0016900,
736 0x000000a4,
737 0x80000000,
738 0xc0016900,
739 0x000000a5,
740 0x20002000,
741 0xc0026900,
742 0x000000c4,
743 0x00000000,
744 0x3f800000,
745 0xc0016900,
746 0x000000a6,
747 0x80000000,
748 0xc0016900,
749 0x000000a7,
750 0x20002000,
751 0xc0026900,
752 0x000000c6,
753 0x00000000,
754 0x3f800000,
755 0xc0016900,
756 0x000000a8,
757 0x80000000,
758 0xc0016900,
759 0x000000a9,
760 0x20002000,
761 0xc0026900,
762 0x000000c8,
763 0x00000000,
764 0x3f800000,
765 0xc0016900,
766 0x000000aa,
767 0x80000000,
768 0xc0016900,
769 0x000000ab,
770 0x20002000,
771 0xc0026900,
772 0x000000ca,
773 0x00000000,
774 0x3f800000,
775 0xc0016900,
776 0x000000ac,
777 0x80000000,
778 0xc0016900,
779 0x000000ad,
780 0x20002000,
781 0xc0026900,
782 0x000000cc,
783 0x00000000,
784 0x3f800000,
785 0xc0016900,
786 0x000000ae,
787 0x80000000,
788 0xc0016900,
789 0x000000af,
790 0x20002000,
791 0xc0026900,
792 0x000000ce,
793 0x00000000,
794 0x3f800000,
795 0xc0016900,
796 0x000000b0,
797 0x80000000,
798 0xc0016900,
799 0x000000b1,
800 0x20002000,
801 0xc0026900,
802 0x000000d0,
803 0x00000000,
804 0x3f800000,
805 0xc0016900,
806 0x000000b2,
807 0x80000000,
808 0xc0016900,
809 0x000000b3,
810 0x20002000,
811 0xc0026900,
812 0x000000d2,
813 0x00000000,
814 0x3f800000,
815 0xc0016900,
816 0x00000293,
817 0x00514000,
818 0xc0016900,
819 0x00000300,
820 0x00000000,
821 0xc0016900,
822 0x00000301,
823 0x00000000,
824 0xc0016900,
825 0x00000312,
826 0xffffffff,
827 0xc0016900,
828 0x00000307,
829 0x00000000,
830 0xc0016900,
831 0x00000308,
832 0x00000000,
833 0xc0016900,
834 0x00000283,
835 0x00000000,
836 0xc0016900,
837 0x00000292,
838 0x00000000,
839 0xc0066900,
840 0x0000010f,
841 0x00000000,
842 0x00000000,
843 0x00000000,
844 0x00000000,
845 0x00000000,
846 0x00000000,
847 0xc0016900,
848 0x00000206,
849 0x00000000,
850 0xc0016900,
851 0x00000207,
852 0x00000000,
853 0xc0016900,
854 0x00000208,
855 0x00000000,
856 0xc0046900,
857 0x00000303,
858 0x3f800000,
859 0x3f800000,
860 0x3f800000,
861 0x3f800000,
862 0xc0016900,
863 0x00000205,
864 0x00000004,
865 0xc0016900,
866 0x00000280,
867 0x00000000,
868 0xc0016900,
869 0x00000281,
870 0x00000000,
871 0xc0016900,
872 0x0000037e,
873 0x00000000,
874 0xc0016900,
875 0x00000382,
876 0x00000000,
877 0xc0016900,
878 0x00000380,
879 0x00000000,
880 0xc0016900,
881 0x00000383,
882 0x00000000,
883 0xc0016900,
884 0x00000381,
885 0x00000000,
886 0xc0016900,
887 0x00000282,
888 0x00000008,
889 0xc0016900,
890 0x00000302,
891 0x0000002d,
892 0xc0016900,
893 0x0000037f,
894 0x00000000,
895 0xc0016900,
896 0x000001b2,
897 0x00000001,
898 0xc0016900,
899 0x000001b6,
900 0x00000000,
901 0xc0016900,
902 0x000001b7,
903 0x00000000,
904 0xc0016900,
905 0x000001b8,
906 0x00000000,
907 0xc0016900,
908 0x000001b9,
909 0x00000000,
910 0xc0016900,
911 0x00000225,
912 0x00000000,
913 0xc0016900,
914 0x00000229,
915 0x00000000,
916 0xc0016900,
917 0x00000237,
918 0x00000000,
919 0xc0016900,
920 0x00000100,
921 0x00000800,
922 0xc0016900,
923 0x00000101,
924 0x00000000,
925 0xc0016900,
926 0x00000102,
927 0x00000000,
928 0xc0016900,
929 0x000002a8,
930 0x00000000,
931 0xc0016900,
932 0x000002a9,
933 0x00000000,
934 0xc0016900,
935 0x00000103,
936 0x00000000,
937 0xc0016900,
938 0x00000284,
939 0x00000000,
940 0xc0016900,
941 0x00000290,
942 0x00000000,
943 0xc0016900,
944 0x00000285,
945 0x00000000,
946 0xc0016900,
947 0x00000286,
948 0x00000000,
949 0xc0016900,
950 0x00000287,
951 0x00000000,
952 0xc0016900,
953 0x00000288,
954 0x00000000,
955 0xc0016900,
956 0x00000289,
957 0x00000000,
958 0xc0016900,
959 0x0000028a,
960 0x00000000,
961 0xc0016900,
962 0x0000028b,
963 0x00000000,
964 0xc0016900,
965 0x0000028c,
966 0x00000000,
967 0xc0016900,
968 0x0000028d,
969 0x00000000,
970 0xc0016900,
971 0x0000028e,
972 0x00000000,
973 0xc0016900,
974 0x0000028f,
975 0x00000000,
976 0xc0016900,
977 0x000002a1,
978 0x00000000,
979 0xc0016900,
980 0x000002a5,
981 0x00000000,
982 0xc0016900,
983 0x000002ac,
984 0x00000000,
985 0xc0016900,
986 0x000002ad,
987 0x00000000,
988 0xc0016900,
989 0x000002ae,
990 0x00000000,
991 0xc0016900,
992 0x000002c8,
993 0x00000000,
994 0xc0016900,
995 0x00000206,
996 0x00000100,
997 0xc0016900,
998 0x00000204,
999 0x00010000,
1000 0xc0036e00,
1001 0x00000000,
1002 0x00000012,
1003 0x00000000,
1004 0x00000000,
1005 0xc0016900,
1006 0x0000008f,
1007 0x0000000f,
1008 0xc0016900,
1009 0x000001e8,
1010 0x00000001,
1011 0xc0016900,
1012 0x00000202,
1013 0x00cc0000,
1014 0xc0016900,
1015 0x00000205,
1016 0x00000244,
1017 0xc0016900,
1018 0x00000203,
1019 0x00000210,
1020 0xc0016900,
1021 0x000001b1,
1022 0x00000000,
1023 0xc0016900,
1024 0x00000185,
1025 0x00000000,
1026 0xc0016900,
1027 0x000001b3,
1028 0x00000001,
1029 0xc0016900,
1030 0x000001b4,
1031 0x00000000,
1032 0xc0016900,
1033 0x00000191,
1034 0x00000b00,
1035 0xc0016900,
1036 0x000001b5,
1037 0x00000000,
1038};
1039
1040/* same for r6xx/r7xx */
1041const u32 r6xx_vs[] =
1042{
1043 0x00000004,
1044 0x81000000,
1045 0x0000203c,
1046 0x94000b08,
1047 0x00004000,
1048 0x14200b1a,
1049 0x00000000,
1050 0x00000000,
1051 0x3c000000,
1052 0x68cd1000,
1053 0x00080000,
1054 0x00000000,
1055};
1056
1057const u32 r6xx_ps[] =
1058{
1059 0x00000002,
1060 0x80800000,
1061 0x00000000,
1062 0x94200688,
1063 0x00000010,
1064 0x000d1000,
1065 0xb0800000,
1066 0x00000000,
1067};
1068
1069const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
1070const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
1071const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
1072const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
new file mode 100644
index 000000000000..fdc3b378cbb0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -0,0 +1,14 @@
1
2#ifndef R600_BLIT_SHADERS_H
3#define R600_BLIT_SHADERS_H
4
5extern const u32 r6xx_ps[];
6extern const u32 r6xx_vs[];
7extern const u32 r7xx_default_state[];
8extern const u32 r6xx_default_state[];
9
10
11extern const u32 r6xx_ps_size, r6xx_vs_size;
12extern const u32 r6xx_default_size, r7xx_default_size;
13
14#endif
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 8327912de964..6d5a711c2e91 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -58,6 +58,12 @@ MODULE_FIRMWARE("radeon/RV730_me.bin");
58MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 58MODULE_FIRMWARE("radeon/RV710_pfp.bin");
59MODULE_FIRMWARE("radeon/RV710_me.bin"); 59MODULE_FIRMWARE("radeon/RV710_me.bin");
60 60
61
62int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
63 unsigned family, u32 *ib, int *l);
64void r600_cs_legacy_init(void);
65
66
61# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ 67# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
62# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1)) 68# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
63 69
@@ -1857,6 +1863,8 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1857 1863
1858 DRM_DEBUG("\n"); 1864 DRM_DEBUG("\n");
1859 1865
1866 mutex_init(&dev_priv->cs_mutex);
1867 r600_cs_legacy_init();
1860 /* if we require new memory map but we don't have it fail */ 1868 /* if we require new memory map but we don't have it fail */
1861 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { 1869 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
1862 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); 1870 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
@@ -1888,7 +1896,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1888 /* Enable vblank on CRTC1 for older X servers 1896 /* Enable vblank on CRTC1 for older X servers
1889 */ 1897 */
1890 dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; 1898 dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
1891 1899 dev_priv->do_boxes = 0;
1892 dev_priv->cp_mode = init->cp_mode; 1900 dev_priv->cp_mode = init->cp_mode;
1893 1901
1894 /* We don't support anything other than bus-mastering ring mode, 1902 /* We don't support anything other than bus-mastering ring mode,
@@ -1974,11 +1982,11 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
1974 } else 1982 } else
1975#endif 1983#endif
1976 { 1984 {
1977 dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset; 1985 dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset;
1978 dev_priv->ring_rptr->handle = 1986 dev_priv->ring_rptr->handle =
1979 (void *)dev_priv->ring_rptr->offset; 1987 (void *)(unsigned long)dev_priv->ring_rptr->offset;
1980 dev->agp_buffer_map->handle = 1988 dev->agp_buffer_map->handle =
1981 (void *)dev->agp_buffer_map->offset; 1989 (void *)(unsigned long)dev->agp_buffer_map->offset;
1982 1990
1983 DRM_DEBUG("dev_priv->cp_ring->handle %p\n", 1991 DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
1984 dev_priv->cp_ring->handle); 1992 dev_priv->cp_ring->handle);
@@ -2282,3 +2290,239 @@ int r600_cp_dispatch_indirect(struct drm_device *dev,
2282 2290
2283 return 0; 2291 return 0;
2284} 2292}
2293
2294void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
2295{
2296 drm_radeon_private_t *dev_priv = dev->dev_private;
2297 struct drm_master *master = file_priv->master;
2298 struct drm_radeon_master_private *master_priv = master->driver_priv;
2299 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
2300 int nbox = sarea_priv->nbox;
2301 struct drm_clip_rect *pbox = sarea_priv->boxes;
2302 int i, cpp, src_pitch, dst_pitch;
2303 uint64_t src, dst;
2304 RING_LOCALS;
2305 DRM_DEBUG("\n");
2306
2307 if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888)
2308 cpp = 4;
2309 else
2310 cpp = 2;
2311
2312 if (sarea_priv->pfCurrentPage == 0) {
2313 src_pitch = dev_priv->back_pitch;
2314 dst_pitch = dev_priv->front_pitch;
2315 src = dev_priv->back_offset + dev_priv->fb_location;
2316 dst = dev_priv->front_offset + dev_priv->fb_location;
2317 } else {
2318 src_pitch = dev_priv->front_pitch;
2319 dst_pitch = dev_priv->back_pitch;
2320 src = dev_priv->front_offset + dev_priv->fb_location;
2321 dst = dev_priv->back_offset + dev_priv->fb_location;
2322 }
2323
2324 if (r600_prepare_blit_copy(dev, file_priv)) {
2325 DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
2326 return;
2327 }
2328 for (i = 0; i < nbox; i++) {
2329 int x = pbox[i].x1;
2330 int y = pbox[i].y1;
2331 int w = pbox[i].x2 - x;
2332 int h = pbox[i].y2 - y;
2333
2334 DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
2335
2336 r600_blit_swap(dev,
2337 src, dst,
2338 x, y, x, y, w, h,
2339 src_pitch, dst_pitch, cpp);
2340 }
2341 r600_done_blit_copy(dev);
2342
2343 /* Increment the frame counter. The client-side 3D driver must
2344 * throttle the framerate by waiting for this value before
2345 * performing the swapbuffer ioctl.
2346 */
2347 sarea_priv->last_frame++;
2348
2349 BEGIN_RING(3);
2350 R600_FRAME_AGE(sarea_priv->last_frame);
2351 ADVANCE_RING();
2352}
2353
2354int r600_cp_dispatch_texture(struct drm_device *dev,
2355 struct drm_file *file_priv,
2356 drm_radeon_texture_t *tex,
2357 drm_radeon_tex_image_t *image)
2358{
2359 drm_radeon_private_t *dev_priv = dev->dev_private;
2360 struct drm_buf *buf;
2361 u32 *buffer;
2362 const u8 __user *data;
2363 int size, pass_size;
2364 u64 src_offset, dst_offset;
2365
2366 if (!radeon_check_offset(dev_priv, tex->offset)) {
2367 DRM_ERROR("Invalid destination offset\n");
2368 return -EINVAL;
2369 }
2370
2371 /* this might fail for zero-sized uploads - are those illegal? */
2372 if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) {
2373 DRM_ERROR("Invalid final destination offset\n");
2374 return -EINVAL;
2375 }
2376
2377 size = tex->height * tex->pitch;
2378
2379 if (size == 0)
2380 return 0;
2381
2382 dst_offset = tex->offset;
2383
2384 if (r600_prepare_blit_copy(dev, file_priv)) {
2385 DRM_ERROR("unable to allocate vertex buffer for swap buffer\n");
2386 return -EAGAIN;
2387 }
2388 do {
2389 data = (const u8 __user *)image->data;
2390 pass_size = size;
2391
2392 buf = radeon_freelist_get(dev);
2393 if (!buf) {
2394 DRM_DEBUG("EAGAIN\n");
2395 if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
2396 return -EFAULT;
2397 return -EAGAIN;
2398 }
2399
2400 if (pass_size > buf->total)
2401 pass_size = buf->total;
2402
2403 /* Dispatch the indirect buffer.
2404 */
2405 buffer =
2406 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
2407
2408 if (DRM_COPY_FROM_USER(buffer, data, pass_size)) {
2409 DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size);
2410 return -EFAULT;
2411 }
2412
2413 buf->file_priv = file_priv;
2414 buf->used = pass_size;
2415 src_offset = dev_priv->gart_buffers_offset + buf->offset;
2416
2417 r600_blit_copy(dev, src_offset, dst_offset, pass_size);
2418
2419 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2420
2421 /* Update the input parameters for next time */
2422 image->data = (const u8 __user *)image->data + pass_size;
2423 dst_offset += pass_size;
2424 size -= pass_size;
2425 } while (size > 0);
2426 r600_done_blit_copy(dev);
2427
2428 return 0;
2429}
2430
2431/*
2432 * Legacy cs ioctl
2433 */
2434static u32 radeon_cs_id_get(struct drm_radeon_private *radeon)
2435{
2436 /* FIXME: check if wrap affect last reported wrap & sequence */
2437 radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF;
2438 if (!radeon->cs_id_scnt) {
2439 /* increment wrap counter */
2440 radeon->cs_id_wcnt += 0x01000000;
2441 /* valid sequence counter start at 1 */
2442 radeon->cs_id_scnt = 1;
2443 }
2444 return (radeon->cs_id_scnt | radeon->cs_id_wcnt);
2445}
2446
2447static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id)
2448{
2449 RING_LOCALS;
2450
2451 *id = radeon_cs_id_get(dev_priv);
2452
2453 /* SCRATCH 2 */
2454 BEGIN_RING(3);
2455 R600_CLEAR_AGE(*id);
2456 ADVANCE_RING();
2457 COMMIT_RING();
2458}
2459
2460static int r600_ib_get(struct drm_device *dev,
2461 struct drm_file *fpriv,
2462 struct drm_buf **buffer)
2463{
2464 struct drm_buf *buf;
2465
2466 *buffer = NULL;
2467 buf = radeon_freelist_get(dev);
2468 if (!buf) {
2469 return -EBUSY;
2470 }
2471 buf->file_priv = fpriv;
2472 *buffer = buf;
2473 return 0;
2474}
2475
2476static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
2477 struct drm_file *fpriv, int l, int r)
2478{
2479 drm_radeon_private_t *dev_priv = dev->dev_private;
2480
2481 if (buf) {
2482 if (!r)
2483 r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
2484 radeon_cp_discard_buffer(dev, fpriv->master, buf);
2485 COMMIT_RING();
2486 }
2487}
2488
2489int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv)
2490{
2491 struct drm_radeon_private *dev_priv = dev->dev_private;
2492 struct drm_radeon_cs *cs = data;
2493 struct drm_buf *buf;
2494 unsigned family;
2495 int l, r = 0;
2496 u32 *ib, cs_id = 0;
2497
2498 if (dev_priv == NULL) {
2499 DRM_ERROR("called with no initialization\n");
2500 return -EINVAL;
2501 }
2502 family = dev_priv->flags & RADEON_FAMILY_MASK;
2503 if (family < CHIP_R600) {
2504 DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n");
2505 return -EINVAL;
2506 }
2507 mutex_lock(&dev_priv->cs_mutex);
2508 /* get ib */
2509 r = r600_ib_get(dev, fpriv, &buf);
2510 if (r) {
2511 DRM_ERROR("ib_get failed\n");
2512 goto out;
2513 }
2514 ib = dev->agp_buffer_map->handle + buf->offset;
2515 /* now parse command stream */
2516 r = r600_cs_legacy(dev, data, fpriv, family, ib, &l);
2517 if (r) {
2518 goto out;
2519 }
2520
2521out:
2522 r600_ib_free(dev, buf, fpriv, l, r);
2523 /* emit cs id sequence */
2524 r600_cs_id_emit(dev_priv, &cs_id);
2525 cs->cs_id = cs_id;
2526 mutex_unlock(&dev_priv->cs_mutex);
2527 return r;
2528}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
new file mode 100644
index 000000000000..39bf6349351b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -0,0 +1,658 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon.h"
30#include "radeon_share.h"
31#include "r600d.h"
32#include "avivod.h"
33
34static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
35 struct radeon_cs_reloc **cs_reloc);
36static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
37 struct radeon_cs_reloc **cs_reloc);
38typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
39static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
40
41/**
42 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
43 * @parser: parser structure holding parsing context.
44 * @pkt: where to store packet informations
45 *
46 * Assume that chunk_ib_index is properly set. Will return -EINVAL
47 * if packet is bigger than remaining ib size. or if packets is unknown.
48 **/
49int r600_cs_packet_parse(struct radeon_cs_parser *p,
50 struct radeon_cs_packet *pkt,
51 unsigned idx)
52{
53 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
54 uint32_t header;
55
56 if (idx >= ib_chunk->length_dw) {
57 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
58 idx, ib_chunk->length_dw);
59 return -EINVAL;
60 }
61 header = ib_chunk->kdata[idx];
62 pkt->idx = idx;
63 pkt->type = CP_PACKET_GET_TYPE(header);
64 pkt->count = CP_PACKET_GET_COUNT(header);
65 pkt->one_reg_wr = 0;
66 switch (pkt->type) {
67 case PACKET_TYPE0:
68 pkt->reg = CP_PACKET0_GET_REG(header);
69 break;
70 case PACKET_TYPE3:
71 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
72 break;
73 case PACKET_TYPE2:
74 pkt->count = -1;
75 break;
76 default:
77 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
78 return -EINVAL;
79 }
80 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
81 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
82 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
83 return -EINVAL;
84 }
85 return 0;
86}
87
88/**
89 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
90 * @parser: parser structure holding parsing context.
91 * @data: pointer to relocation data
92 * @offset_start: starting offset
93 * @offset_mask: offset mask (to align start offset on)
94 * @reloc: reloc informations
95 *
96 * Check next packet is relocation packet3, do bo validation and compute
97 * GPU offset using the provided start.
98 **/
99static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
100 struct radeon_cs_reloc **cs_reloc)
101{
102 struct radeon_cs_chunk *ib_chunk;
103 struct radeon_cs_chunk *relocs_chunk;
104 struct radeon_cs_packet p3reloc;
105 unsigned idx;
106 int r;
107
108 if (p->chunk_relocs_idx == -1) {
109 DRM_ERROR("No relocation chunk !\n");
110 return -EINVAL;
111 }
112 *cs_reloc = NULL;
113 ib_chunk = &p->chunks[p->chunk_ib_idx];
114 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
115 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
116 if (r) {
117 return r;
118 }
119 p->idx += p3reloc.count + 2;
120 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
121 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
122 p3reloc.idx);
123 return -EINVAL;
124 }
125 idx = ib_chunk->kdata[p3reloc.idx + 1];
126 if (idx >= relocs_chunk->length_dw) {
127 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
128 idx, relocs_chunk->length_dw);
129 return -EINVAL;
130 }
131 /* FIXME: we assume reloc size is 4 dwords */
132 *cs_reloc = p->relocs_ptr[(idx / 4)];
133 return 0;
134}
135
136/**
137 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
138 * @parser: parser structure holding parsing context.
139 * @data: pointer to relocation data
140 * @offset_start: starting offset
141 * @offset_mask: offset mask (to align start offset on)
142 * @reloc: reloc informations
143 *
144 * Check next packet is relocation packet3, do bo validation and compute
145 * GPU offset using the provided start.
146 **/
147static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
148 struct radeon_cs_reloc **cs_reloc)
149{
150 struct radeon_cs_chunk *ib_chunk;
151 struct radeon_cs_chunk *relocs_chunk;
152 struct radeon_cs_packet p3reloc;
153 unsigned idx;
154 int r;
155
156 if (p->chunk_relocs_idx == -1) {
157 DRM_ERROR("No relocation chunk !\n");
158 return -EINVAL;
159 }
160 *cs_reloc = NULL;
161 ib_chunk = &p->chunks[p->chunk_ib_idx];
162 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
163 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
164 if (r) {
165 return r;
166 }
167 p->idx += p3reloc.count + 2;
168 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
169 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
170 p3reloc.idx);
171 return -EINVAL;
172 }
173 idx = ib_chunk->kdata[p3reloc.idx + 1];
174 if (idx >= relocs_chunk->length_dw) {
175 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
176 idx, relocs_chunk->length_dw);
177 return -EINVAL;
178 }
179 *cs_reloc = &p->relocs[0];
180 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
181 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
182 return 0;
183}
184
185static int r600_packet0_check(struct radeon_cs_parser *p,
186 struct radeon_cs_packet *pkt,
187 unsigned idx, unsigned reg)
188{
189 switch (reg) {
190 case AVIVO_D1MODE_VLINE_START_END:
191 case AVIVO_D2MODE_VLINE_START_END:
192 break;
193 default:
194 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
195 reg, idx);
196 return -EINVAL;
197 }
198 return 0;
199}
200
201static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
202 struct radeon_cs_packet *pkt)
203{
204 unsigned reg, i;
205 unsigned idx;
206 int r;
207
208 idx = pkt->idx + 1;
209 reg = pkt->reg;
210 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
211 r = r600_packet0_check(p, pkt, idx, reg);
212 if (r) {
213 return r;
214 }
215 }
216 return 0;
217}
218
219static int r600_packet3_check(struct radeon_cs_parser *p,
220 struct radeon_cs_packet *pkt)
221{
222 struct radeon_cs_chunk *ib_chunk;
223 struct radeon_cs_reloc *reloc;
224 volatile u32 *ib;
225 unsigned idx;
226 unsigned i;
227 unsigned start_reg, end_reg, reg;
228 int r;
229
230 ib = p->ib->ptr;
231 ib_chunk = &p->chunks[p->chunk_ib_idx];
232 idx = pkt->idx + 1;
233 switch (pkt->opcode) {
234 case PACKET3_START_3D_CMDBUF:
235 if (p->family >= CHIP_RV770 || pkt->count) {
236 DRM_ERROR("bad START_3D\n");
237 return -EINVAL;
238 }
239 break;
240 case PACKET3_CONTEXT_CONTROL:
241 if (pkt->count != 1) {
242 DRM_ERROR("bad CONTEXT_CONTROL\n");
243 return -EINVAL;
244 }
245 break;
246 case PACKET3_INDEX_TYPE:
247 case PACKET3_NUM_INSTANCES:
248 if (pkt->count) {
249 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
250 return -EINVAL;
251 }
252 break;
253 case PACKET3_DRAW_INDEX:
254 if (pkt->count != 3) {
255 DRM_ERROR("bad DRAW_INDEX\n");
256 return -EINVAL;
257 }
258 r = r600_cs_packet_next_reloc(p, &reloc);
259 if (r) {
260 DRM_ERROR("bad DRAW_INDEX\n");
261 return -EINVAL;
262 }
263 ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
264 ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
265 break;
266 case PACKET3_DRAW_INDEX_AUTO:
267 if (pkt->count != 1) {
268 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
269 return -EINVAL;
270 }
271 break;
272 case PACKET3_DRAW_INDEX_IMMD_BE:
273 case PACKET3_DRAW_INDEX_IMMD:
274 if (pkt->count < 2) {
275 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
276 return -EINVAL;
277 }
278 break;
279 case PACKET3_WAIT_REG_MEM:
280 if (pkt->count != 5) {
281 DRM_ERROR("bad WAIT_REG_MEM\n");
282 return -EINVAL;
283 }
284 /* bit 4 is reg (0) or mem (1) */
285 if (ib_chunk->kdata[idx+0] & 0x10) {
286 r = r600_cs_packet_next_reloc(p, &reloc);
287 if (r) {
288 DRM_ERROR("bad WAIT_REG_MEM\n");
289 return -EINVAL;
290 }
291 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
292 ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
293 }
294 break;
295 case PACKET3_SURFACE_SYNC:
296 if (pkt->count != 3) {
297 DRM_ERROR("bad SURFACE_SYNC\n");
298 return -EINVAL;
299 }
300 /* 0xffffffff/0x0 is flush all cache flag */
301 if (ib_chunk->kdata[idx+1] != 0xffffffff ||
302 ib_chunk->kdata[idx+2] != 0) {
303 r = r600_cs_packet_next_reloc(p, &reloc);
304 if (r) {
305 DRM_ERROR("bad SURFACE_SYNC\n");
306 return -EINVAL;
307 }
308 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
309 }
310 break;
311 case PACKET3_EVENT_WRITE:
312 if (pkt->count != 2 && pkt->count != 0) {
313 DRM_ERROR("bad EVENT_WRITE\n");
314 return -EINVAL;
315 }
316 if (pkt->count) {
317 r = r600_cs_packet_next_reloc(p, &reloc);
318 if (r) {
319 DRM_ERROR("bad EVENT_WRITE\n");
320 return -EINVAL;
321 }
322 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
323 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
324 }
325 break;
326 case PACKET3_EVENT_WRITE_EOP:
327 if (pkt->count != 4) {
328 DRM_ERROR("bad EVENT_WRITE_EOP\n");
329 return -EINVAL;
330 }
331 r = r600_cs_packet_next_reloc(p, &reloc);
332 if (r) {
333 DRM_ERROR("bad EVENT_WRITE\n");
334 return -EINVAL;
335 }
336 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
337 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
338 break;
339 case PACKET3_SET_CONFIG_REG:
340 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
341 end_reg = 4 * pkt->count + start_reg - 4;
342 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
343 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
344 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
345 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
346 return -EINVAL;
347 }
348 for (i = 0; i < pkt->count; i++) {
349 reg = start_reg + (4 * i);
350 switch (reg) {
351 case CP_COHER_BASE:
352 /* use PACKET3_SURFACE_SYNC */
353 return -EINVAL;
354 default:
355 break;
356 }
357 }
358 break;
359 case PACKET3_SET_CONTEXT_REG:
360 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
361 end_reg = 4 * pkt->count + start_reg - 4;
362 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
363 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
364 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
365 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
366 return -EINVAL;
367 }
368 for (i = 0; i < pkt->count; i++) {
369 reg = start_reg + (4 * i);
370 switch (reg) {
371 case DB_DEPTH_BASE:
372 case CB_COLOR0_BASE:
373 case CB_COLOR1_BASE:
374 case CB_COLOR2_BASE:
375 case CB_COLOR3_BASE:
376 case CB_COLOR4_BASE:
377 case CB_COLOR5_BASE:
378 case CB_COLOR6_BASE:
379 case CB_COLOR7_BASE:
380 case SQ_PGM_START_FS:
381 case SQ_PGM_START_ES:
382 case SQ_PGM_START_VS:
383 case SQ_PGM_START_GS:
384 case SQ_PGM_START_PS:
385 r = r600_cs_packet_next_reloc(p, &reloc);
386 if (r) {
387 DRM_ERROR("bad SET_CONTEXT_REG "
388 "0x%04X\n", reg);
389 return -EINVAL;
390 }
391 ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
392 break;
393 case VGT_DMA_BASE:
394 case VGT_DMA_BASE_HI:
395 /* These should be handled by DRAW_INDEX packet 3 */
396 case VGT_STRMOUT_BASE_OFFSET_0:
397 case VGT_STRMOUT_BASE_OFFSET_1:
398 case VGT_STRMOUT_BASE_OFFSET_2:
399 case VGT_STRMOUT_BASE_OFFSET_3:
400 case VGT_STRMOUT_BASE_OFFSET_HI_0:
401 case VGT_STRMOUT_BASE_OFFSET_HI_1:
402 case VGT_STRMOUT_BASE_OFFSET_HI_2:
403 case VGT_STRMOUT_BASE_OFFSET_HI_3:
404 case VGT_STRMOUT_BUFFER_BASE_0:
405 case VGT_STRMOUT_BUFFER_BASE_1:
406 case VGT_STRMOUT_BUFFER_BASE_2:
407 case VGT_STRMOUT_BUFFER_BASE_3:
408 case VGT_STRMOUT_BUFFER_OFFSET_0:
409 case VGT_STRMOUT_BUFFER_OFFSET_1:
410 case VGT_STRMOUT_BUFFER_OFFSET_2:
411 case VGT_STRMOUT_BUFFER_OFFSET_3:
412 /* These should be handled by STRMOUT_BUFFER packet 3 */
413 DRM_ERROR("bad context reg: 0x%08x\n", reg);
414 return -EINVAL;
415 default:
416 break;
417 }
418 }
419 break;
420 case PACKET3_SET_RESOURCE:
421 if (pkt->count % 7) {
422 DRM_ERROR("bad SET_RESOURCE\n");
423 return -EINVAL;
424 }
425 start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET;
426 end_reg = 4 * pkt->count + start_reg - 4;
427 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
428 (start_reg >= PACKET3_SET_RESOURCE_END) ||
429 (end_reg >= PACKET3_SET_RESOURCE_END)) {
430 DRM_ERROR("bad SET_RESOURCE\n");
431 return -EINVAL;
432 }
433 for (i = 0; i < (pkt->count / 7); i++) {
434 switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) {
435 case SQ_TEX_VTX_VALID_TEXTURE:
436 /* tex base */
437 r = r600_cs_packet_next_reloc(p, &reloc);
438 if (r) {
439 DRM_ERROR("bad SET_RESOURCE\n");
440 return -EINVAL;
441 }
442 ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
443 /* tex mip base */
444 r = r600_cs_packet_next_reloc(p, &reloc);
445 if (r) {
446 DRM_ERROR("bad SET_RESOURCE\n");
447 return -EINVAL;
448 }
449 ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
450 break;
451 case SQ_TEX_VTX_VALID_BUFFER:
452 /* vtx base */
453 r = r600_cs_packet_next_reloc(p, &reloc);
454 if (r) {
455 DRM_ERROR("bad SET_RESOURCE\n");
456 return -EINVAL;
457 }
458 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
459 ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
460 break;
461 case SQ_TEX_VTX_INVALID_TEXTURE:
462 case SQ_TEX_VTX_INVALID_BUFFER:
463 default:
464 DRM_ERROR("bad SET_RESOURCE\n");
465 return -EINVAL;
466 }
467 }
468 break;
469 case PACKET3_SET_ALU_CONST:
470 start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET;
471 end_reg = 4 * pkt->count + start_reg - 4;
472 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
473 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
474 (end_reg >= PACKET3_SET_ALU_CONST_END)) {
475 DRM_ERROR("bad SET_ALU_CONST\n");
476 return -EINVAL;
477 }
478 break;
479 case PACKET3_SET_BOOL_CONST:
480 start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
481 end_reg = 4 * pkt->count + start_reg - 4;
482 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
483 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
484 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
485 DRM_ERROR("bad SET_BOOL_CONST\n");
486 return -EINVAL;
487 }
488 break;
489 case PACKET3_SET_LOOP_CONST:
490 start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
491 end_reg = 4 * pkt->count + start_reg - 4;
492 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
493 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
494 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
495 DRM_ERROR("bad SET_LOOP_CONST\n");
496 return -EINVAL;
497 }
498 break;
499 case PACKET3_SET_CTL_CONST:
500 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET;
501 end_reg = 4 * pkt->count + start_reg - 4;
502 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
503 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
504 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
505 DRM_ERROR("bad SET_CTL_CONST\n");
506 return -EINVAL;
507 }
508 break;
509 case PACKET3_SET_SAMPLER:
510 if (pkt->count % 3) {
511 DRM_ERROR("bad SET_SAMPLER\n");
512 return -EINVAL;
513 }
514 start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET;
515 end_reg = 4 * pkt->count + start_reg - 4;
516 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
517 (start_reg >= PACKET3_SET_SAMPLER_END) ||
518 (end_reg >= PACKET3_SET_SAMPLER_END)) {
519 DRM_ERROR("bad SET_SAMPLER\n");
520 return -EINVAL;
521 }
522 break;
523 case PACKET3_SURFACE_BASE_UPDATE:
524 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
525 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
526 return -EINVAL;
527 }
528 if (pkt->count) {
529 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
530 return -EINVAL;
531 }
532 break;
533 case PACKET3_NOP:
534 break;
535 default:
536 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
537 return -EINVAL;
538 }
539 return 0;
540}
541
542int r600_cs_parse(struct radeon_cs_parser *p)
543{
544 struct radeon_cs_packet pkt;
545 int r;
546
547 do {
548 r = r600_cs_packet_parse(p, &pkt, p->idx);
549 if (r) {
550 return r;
551 }
552 p->idx += pkt.count + 2;
553 switch (pkt.type) {
554 case PACKET_TYPE0:
555 r = r600_cs_parse_packet0(p, &pkt);
556 break;
557 case PACKET_TYPE2:
558 break;
559 case PACKET_TYPE3:
560 r = r600_packet3_check(p, &pkt);
561 break;
562 default:
563 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
564 return -EINVAL;
565 }
566 if (r) {
567 return r;
568 }
569 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
570#if 0
571 for (r = 0; r < p->ib->length_dw; r++) {
572 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
573 mdelay(1);
574 }
575#endif
576 return 0;
577}
578
579static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
580{
581 if (p->chunk_relocs_idx == -1) {
582 return 0;
583 }
584 p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
585 if (p->relocs == NULL) {
586 return -ENOMEM;
587 }
588 return 0;
589}
590
591/**
592 * cs_parser_fini() - clean parser states
593 * @parser: parser structure holding parsing context.
594 * @error: error number
595 *
596 * If error is set than unvalidate buffer, otherwise just free memory
597 * used by parsing context.
598 **/
599static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
600{
601 unsigned i;
602
603 kfree(parser->relocs);
604 for (i = 0; i < parser->nchunks; i++) {
605 kfree(parser->chunks[i].kdata);
606 }
607 kfree(parser->chunks);
608 kfree(parser->chunks_array);
609}
610
611int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
612 unsigned family, u32 *ib, int *l)
613{
614 struct radeon_cs_parser parser;
615 struct radeon_cs_chunk *ib_chunk;
616 struct radeon_ib fake_ib;
617 int r;
618
619 /* initialize parser */
620 memset(&parser, 0, sizeof(struct radeon_cs_parser));
621 parser.filp = filp;
622 parser.rdev = NULL;
623 parser.family = family;
624 parser.ib = &fake_ib;
625 fake_ib.ptr = ib;
626 r = radeon_cs_parser_init(&parser, data);
627 if (r) {
628 DRM_ERROR("Failed to initialize parser !\n");
629 r600_cs_parser_fini(&parser, r);
630 return r;
631 }
632 r = r600_cs_parser_relocs_legacy(&parser);
633 if (r) {
634 DRM_ERROR("Failed to parse relocation !\n");
635 r600_cs_parser_fini(&parser, r);
636 return r;
637 }
638 /* Copy the packet into the IB, the parser will read from the
639 * input memory (cached) and write to the IB (which can be
640 * uncached). */
641 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
642 parser.ib->length_dw = ib_chunk->length_dw;
643 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
644 *l = parser.ib->length_dw;
645 r = r600_cs_parse(&parser);
646 if (r) {
647 DRM_ERROR("Invalid command stream !\n");
648 r600_cs_parser_fini(&parser, r);
649 return r;
650 }
651 r600_cs_parser_fini(&parser, r);
652 return r;
653}
654
655void r600_cs_legacy_init(void)
656{
657 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
658}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
new file mode 100644
index 000000000000..723295f59281
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -0,0 +1,661 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 * Jerome Glisse
26 */
27#ifndef R600D_H
28#define R600D_H
29
30#define CP_PACKET2 0x80000000
31#define PACKET2_PAD_SHIFT 0
32#define PACKET2_PAD_MASK (0x3fffffff << 0)
33
34#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
35
36#define R6XX_MAX_SH_GPRS 256
37#define R6XX_MAX_TEMP_GPRS 16
38#define R6XX_MAX_SH_THREADS 256
39#define R6XX_MAX_SH_STACK_ENTRIES 4096
40#define R6XX_MAX_BACKENDS 8
41#define R6XX_MAX_BACKENDS_MASK 0xff
42#define R6XX_MAX_SIMDS 8
43#define R6XX_MAX_SIMDS_MASK 0xff
44#define R6XX_MAX_PIPES 8
45#define R6XX_MAX_PIPES_MASK 0xff
46
47/* PTE flags */
48#define PTE_VALID (1 << 0)
49#define PTE_SYSTEM (1 << 1)
50#define PTE_SNOOPED (1 << 2)
51#define PTE_READABLE (1 << 5)
52#define PTE_WRITEABLE (1 << 6)
53
54/* Registers */
55#define ARB_POP 0x2418
56#define ENABLE_TC128 (1 << 30)
57#define ARB_GDEC_RD_CNTL 0x246C
58
59#define CC_GC_SHADER_PIPE_CONFIG 0x8950
60#define CC_RB_BACKEND_DISABLE 0x98F4
61#define BACKEND_DISABLE(x) ((x) << 16)
62
63#define CB_COLOR0_BASE 0x28040
64#define CB_COLOR1_BASE 0x28044
65#define CB_COLOR2_BASE 0x28048
66#define CB_COLOR3_BASE 0x2804C
67#define CB_COLOR4_BASE 0x28050
68#define CB_COLOR5_BASE 0x28054
69#define CB_COLOR6_BASE 0x28058
70#define CB_COLOR7_BASE 0x2805C
71#define CB_COLOR7_FRAG 0x280FC
72
73#define CB_COLOR0_SIZE 0x28060
74#define CB_COLOR0_VIEW 0x28080
75#define CB_COLOR0_INFO 0x280a0
76#define CB_COLOR0_TILE 0x280c0
77#define CB_COLOR0_FRAG 0x280e0
78#define CB_COLOR0_MASK 0x28100
79
80#define CONFIG_MEMSIZE 0x5428
81#define CP_STAT 0x8680
82#define CP_COHER_BASE 0x85F8
83#define CP_DEBUG 0xC1FC
84#define R_0086D8_CP_ME_CNTL 0x86D8
85#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
86#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
87#define CP_ME_RAM_DATA 0xC160
88#define CP_ME_RAM_RADDR 0xC158
89#define CP_ME_RAM_WADDR 0xC15C
90#define CP_MEQ_THRESHOLDS 0x8764
91#define MEQ_END(x) ((x) << 16)
92#define ROQ_END(x) ((x) << 24)
93#define CP_PERFMON_CNTL 0x87FC
94#define CP_PFP_UCODE_ADDR 0xC150
95#define CP_PFP_UCODE_DATA 0xC154
96#define CP_QUEUE_THRESHOLDS 0x8760
97#define ROQ_IB1_START(x) ((x) << 0)
98#define ROQ_IB2_START(x) ((x) << 8)
99#define CP_RB_BASE 0xC100
100#define CP_RB_CNTL 0xC104
101#define RB_BUFSZ(x) ((x)<<0)
102#define RB_BLKSZ(x) ((x)<<8)
103#define RB_NO_UPDATE (1<<27)
104#define RB_RPTR_WR_ENA (1<<31)
105#define BUF_SWAP_32BIT (2 << 16)
106#define CP_RB_RPTR 0x8700
107#define CP_RB_RPTR_ADDR 0xC10C
108#define CP_RB_RPTR_ADDR_HI 0xC110
109#define CP_RB_RPTR_WR 0xC108
110#define CP_RB_WPTR 0xC114
111#define CP_RB_WPTR_ADDR 0xC118
112#define CP_RB_WPTR_ADDR_HI 0xC11C
113#define CP_RB_WPTR_DELAY 0x8704
114#define CP_ROQ_IB1_STAT 0x8784
115#define CP_ROQ_IB2_STAT 0x8788
116#define CP_SEM_WAIT_TIMER 0x85BC
117
118#define DB_DEBUG 0x9830
119#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
120#define DB_DEPTH_BASE 0x2800C
121#define DB_WATERMARKS 0x9838
122#define DEPTH_FREE(x) ((x) << 0)
123#define DEPTH_FLUSH(x) ((x) << 5)
124#define DEPTH_PENDING_FREE(x) ((x) << 15)
125#define DEPTH_CACHELINE_FREE(x) ((x) << 20)
126
127#define DCP_TILING_CONFIG 0x6CA0
128#define PIPE_TILING(x) ((x) << 1)
129#define BANK_TILING(x) ((x) << 4)
130#define GROUP_SIZE(x) ((x) << 6)
131#define ROW_TILING(x) ((x) << 8)
132#define BANK_SWAPS(x) ((x) << 11)
133#define SAMPLE_SPLIT(x) ((x) << 14)
134#define BACKEND_MAP(x) ((x) << 16)
135
136#define GB_TILING_CONFIG 0x98F0
137
138#define GC_USER_SHADER_PIPE_CONFIG 0x8954
139#define INACTIVE_QD_PIPES(x) ((x) << 8)
140#define INACTIVE_QD_PIPES_MASK 0x0000FF00
141#define INACTIVE_SIMDS(x) ((x) << 16)
142#define INACTIVE_SIMDS_MASK 0x00FF0000
143
144#define SQ_CONFIG 0x8c00
145# define VC_ENABLE (1 << 0)
146# define EXPORT_SRC_C (1 << 1)
147# define DX9_CONSTS (1 << 2)
148# define ALU_INST_PREFER_VECTOR (1 << 3)
149# define DX10_CLAMP (1 << 4)
150# define CLAUSE_SEQ_PRIO(x) ((x) << 8)
151# define PS_PRIO(x) ((x) << 24)
152# define VS_PRIO(x) ((x) << 26)
153# define GS_PRIO(x) ((x) << 28)
154# define ES_PRIO(x) ((x) << 30)
155#define SQ_GPR_RESOURCE_MGMT_1 0x8c04
156# define NUM_PS_GPRS(x) ((x) << 0)
157# define NUM_VS_GPRS(x) ((x) << 16)
158# define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
159#define SQ_GPR_RESOURCE_MGMT_2 0x8c08
160# define NUM_GS_GPRS(x) ((x) << 0)
161# define NUM_ES_GPRS(x) ((x) << 16)
162#define SQ_THREAD_RESOURCE_MGMT 0x8c0c
163# define NUM_PS_THREADS(x) ((x) << 0)
164# define NUM_VS_THREADS(x) ((x) << 8)
165# define NUM_GS_THREADS(x) ((x) << 16)
166# define NUM_ES_THREADS(x) ((x) << 24)
167#define SQ_STACK_RESOURCE_MGMT_1 0x8c10
168# define NUM_PS_STACK_ENTRIES(x) ((x) << 0)
169# define NUM_VS_STACK_ENTRIES(x) ((x) << 16)
170#define SQ_STACK_RESOURCE_MGMT_2 0x8c14
171# define NUM_GS_STACK_ENTRIES(x) ((x) << 0)
172# define NUM_ES_STACK_ENTRIES(x) ((x) << 16)
173
174#define GRBM_CNTL 0x8000
175# define GRBM_READ_TIMEOUT(x) ((x) << 0)
176#define GRBM_STATUS 0x8010
177#define CMDFIFO_AVAIL_MASK 0x0000001F
178#define GUI_ACTIVE (1<<31)
179#define GRBM_STATUS2 0x8014
180#define GRBM_SOFT_RESET 0x8020
181#define SOFT_RESET_CP (1<<0)
182
183#define HDP_HOST_PATH_CNTL 0x2C00
184#define HDP_NONSURFACE_BASE 0x2C04
185#define HDP_NONSURFACE_INFO 0x2C08
186#define HDP_NONSURFACE_SIZE 0x2C0C
187#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
188#define HDP_TILING_CONFIG 0x2F3C
189
190#define MC_VM_AGP_TOP 0x2184
191#define MC_VM_AGP_BOT 0x2188
192#define MC_VM_AGP_BASE 0x218C
193#define MC_VM_FB_LOCATION 0x2180
194#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
195#define ENABLE_L1_TLB (1 << 0)
196#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
197#define ENABLE_L1_STRICT_ORDERING (1 << 2)
198#define SYSTEM_ACCESS_MODE_MASK 0x000000C0
199#define SYSTEM_ACCESS_MODE_SHIFT 6
200#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 6)
201#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 6)
202#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 6)
203#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 6)
204#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 8)
205#define SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 8)
206#define ENABLE_SEMAPHORE_MODE (1 << 10)
207#define ENABLE_WAIT_L2_QUERY (1 << 11)
208#define EFFECTIVE_L1_TLB_SIZE(x) (((x) & 7) << 12)
209#define EFFECTIVE_L1_TLB_SIZE_MASK 0x00007000
210#define EFFECTIVE_L1_TLB_SIZE_SHIFT 12
211#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
212#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
213#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
214#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
215#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
216#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
217#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
218#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
219#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
220#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
221#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
222#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
223#define MC_VM_L1_TLB_MCB_WR_HDP_CNTL 0x2218
224#define MC_VM_L1_TLB_MCB_WR_PDMA_CNTL 0x221C
225#define MC_VM_L1_TLB_MCB_WR_SEM_CNTL 0x2220
226#define MC_VM_L1_TLB_MCB_WR_SYS_CNTL 0x2214
227#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
228#define LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
229#define LOGICAL_PAGE_NUMBER_SHIFT 0
230#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
231#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
232
233#define PA_CL_ENHANCE 0x8A14
234#define CLIP_VTX_REORDER_ENA (1 << 0)
235#define NUM_CLIP_SEQ(x) ((x) << 1)
236#define PA_SC_AA_CONFIG 0x28C04
237#define PA_SC_AA_SAMPLE_LOCS_2S 0x8B40
238#define PA_SC_AA_SAMPLE_LOCS_4S 0x8B44
239#define PA_SC_AA_SAMPLE_LOCS_8S_WD0 0x8B48
240#define PA_SC_AA_SAMPLE_LOCS_8S_WD1 0x8B4C
241#define S0_X(x) ((x) << 0)
242#define S0_Y(x) ((x) << 4)
243#define S1_X(x) ((x) << 8)
244#define S1_Y(x) ((x) << 12)
245#define S2_X(x) ((x) << 16)
246#define S2_Y(x) ((x) << 20)
247#define S3_X(x) ((x) << 24)
248#define S3_Y(x) ((x) << 28)
249#define S4_X(x) ((x) << 0)
250#define S4_Y(x) ((x) << 4)
251#define S5_X(x) ((x) << 8)
252#define S5_Y(x) ((x) << 12)
253#define S6_X(x) ((x) << 16)
254#define S6_Y(x) ((x) << 20)
255#define S7_X(x) ((x) << 24)
256#define S7_Y(x) ((x) << 28)
257#define PA_SC_CLIPRECT_RULE 0x2820c
258#define PA_SC_ENHANCE 0x8BF0
259#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
260#define FORCE_EOV_MAX_TILE_CNT(x) ((x) << 12)
261#define PA_SC_LINE_STIPPLE 0x28A0C
262#define PA_SC_LINE_STIPPLE_STATE 0x8B10
263#define PA_SC_MODE_CNTL 0x28A4C
264#define PA_SC_MULTI_CHIP_CNTL 0x8B20
265
266#define PA_SC_SCREEN_SCISSOR_TL 0x28030
267#define PA_SC_GENERIC_SCISSOR_TL 0x28240
268#define PA_SC_WINDOW_SCISSOR_TL 0x28204
269
270#define PCIE_PORT_INDEX 0x0038
271#define PCIE_PORT_DATA 0x003C
272
273#define RAMCFG 0x2408
274#define NOOFBANK_SHIFT 0
275#define NOOFBANK_MASK 0x00000001
276#define NOOFRANK_SHIFT 1
277#define NOOFRANK_MASK 0x00000002
278#define NOOFROWS_SHIFT 2
279#define NOOFROWS_MASK 0x0000001C
280#define NOOFCOLS_SHIFT 5
281#define NOOFCOLS_MASK 0x00000060
282#define CHANSIZE_SHIFT 7
283#define CHANSIZE_MASK 0x00000080
284#define BURSTLENGTH_SHIFT 8
285#define BURSTLENGTH_MASK 0x00000100
286#define CHANSIZE_OVERRIDE (1 << 10)
287
288#define SCRATCH_REG0 0x8500
289#define SCRATCH_REG1 0x8504
290#define SCRATCH_REG2 0x8508
291#define SCRATCH_REG3 0x850C
292#define SCRATCH_REG4 0x8510
293#define SCRATCH_REG5 0x8514
294#define SCRATCH_REG6 0x8518
295#define SCRATCH_REG7 0x851C
296#define SCRATCH_UMSK 0x8540
297#define SCRATCH_ADDR 0x8544
298
299#define SPI_CONFIG_CNTL 0x9100
300#define GPR_WRITE_PRIORITY(x) ((x) << 0)
301#define DISABLE_INTERP_1 (1 << 5)
302#define SPI_CONFIG_CNTL_1 0x913C
303#define VTX_DONE_DELAY(x) ((x) << 0)
304#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
305#define SPI_INPUT_Z 0x286D8
306#define SPI_PS_IN_CONTROL_0 0x286CC
307#define NUM_INTERP(x) ((x)<<0)
308#define POSITION_ENA (1<<8)
309#define POSITION_CENTROID (1<<9)
310#define POSITION_ADDR(x) ((x)<<10)
311#define PARAM_GEN(x) ((x)<<15)
312#define PARAM_GEN_ADDR(x) ((x)<<19)
313#define BARYC_SAMPLE_CNTL(x) ((x)<<26)
314#define PERSP_GRADIENT_ENA (1<<28)
315#define LINEAR_GRADIENT_ENA (1<<29)
316#define POSITION_SAMPLE (1<<30)
317#define BARYC_AT_SAMPLE_ENA (1<<31)
318#define SPI_PS_IN_CONTROL_1 0x286D0
319#define GEN_INDEX_PIX (1<<0)
320#define GEN_INDEX_PIX_ADDR(x) ((x)<<1)
321#define FRONT_FACE_ENA (1<<8)
322#define FRONT_FACE_CHAN(x) ((x)<<9)
323#define FRONT_FACE_ALL_BITS (1<<11)
324#define FRONT_FACE_ADDR(x) ((x)<<12)
325#define FOG_ADDR(x) ((x)<<17)
326#define FIXED_PT_POSITION_ENA (1<<24)
327#define FIXED_PT_POSITION_ADDR(x) ((x)<<25)
328
329#define SQ_MS_FIFO_SIZES 0x8CF0
330#define CACHE_FIFO_SIZE(x) ((x) << 0)
331#define FETCH_FIFO_HIWATER(x) ((x) << 8)
332#define DONE_FIFO_HIWATER(x) ((x) << 16)
333#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
334#define SQ_PGM_START_ES 0x28880
335#define SQ_PGM_START_FS 0x28894
336#define SQ_PGM_START_GS 0x2886C
337#define SQ_PGM_START_PS 0x28840
338#define SQ_PGM_RESOURCES_PS 0x28850
339#define SQ_PGM_EXPORTS_PS 0x28854
340#define SQ_PGM_CF_OFFSET_PS 0x288cc
341#define SQ_PGM_START_VS 0x28858
342#define SQ_PGM_RESOURCES_VS 0x28868
343#define SQ_PGM_CF_OFFSET_VS 0x288d0
344#define SQ_VTX_CONSTANT_WORD6_0 0x38018
345#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
346#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
347#define SQ_TEX_VTX_INVALID_TEXTURE 0x0
348#define SQ_TEX_VTX_INVALID_BUFFER 0x1
349#define SQ_TEX_VTX_VALID_TEXTURE 0x2
350#define SQ_TEX_VTX_VALID_BUFFER 0x3
351
352
353#define SX_MISC 0x28350
354#define SX_DEBUG_1 0x9054
355#define SMX_EVENT_RELEASE (1 << 0)
356#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
357
358#define TA_CNTL_AUX 0x9508
359#define DISABLE_CUBE_WRAP (1 << 0)
360#define DISABLE_CUBE_ANISO (1 << 1)
361#define SYNC_GRADIENT (1 << 24)
362#define SYNC_WALKER (1 << 25)
363#define SYNC_ALIGNER (1 << 26)
364#define BILINEAR_PRECISION_6_BIT (0 << 31)
365#define BILINEAR_PRECISION_8_BIT (1 << 31)
366
367#define TC_CNTL 0x9608
368#define TC_L2_SIZE(x) ((x)<<5)
369#define L2_DISABLE_LATE_HIT (1<<9)
370
371
372#define VGT_CACHE_INVALIDATION 0x88C4
373#define CACHE_INVALIDATION(x) ((x)<<0)
374#define VC_ONLY 0
375#define TC_ONLY 1
376#define VC_AND_TC 2
377#define VGT_DMA_BASE 0x287E8
378#define VGT_DMA_BASE_HI 0x287E4
379#define VGT_ES_PER_GS 0x88CC
380#define VGT_GS_PER_ES 0x88C8
381#define VGT_GS_PER_VS 0x88E8
382#define VGT_GS_VERTEX_REUSE 0x88D4
383#define VGT_PRIMITIVE_TYPE 0x8958
384#define VGT_NUM_INSTANCES 0x8974
385#define VGT_OUT_DEALLOC_CNTL 0x28C5C
386#define DEALLOC_DIST_MASK 0x0000007F
387#define VGT_STRMOUT_BASE_OFFSET_0 0x28B10
388#define VGT_STRMOUT_BASE_OFFSET_1 0x28B14
389#define VGT_STRMOUT_BASE_OFFSET_2 0x28B18
390#define VGT_STRMOUT_BASE_OFFSET_3 0x28B1c
391#define VGT_STRMOUT_BASE_OFFSET_HI_0 0x28B44
392#define VGT_STRMOUT_BASE_OFFSET_HI_1 0x28B48
393#define VGT_STRMOUT_BASE_OFFSET_HI_2 0x28B4c
394#define VGT_STRMOUT_BASE_OFFSET_HI_3 0x28B50
395#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
396#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
397#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
398#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
399#define VGT_STRMOUT_BUFFER_OFFSET_0 0x28ADC
400#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
401#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
402#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
403#define VGT_STRMOUT_EN 0x28AB0
404#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
405#define VTX_REUSE_DEPTH_MASK 0x000000FF
406#define VGT_EVENT_INITIATOR 0x28a90
407# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
408
409#define VM_CONTEXT0_CNTL 0x1410
410#define ENABLE_CONTEXT (1 << 0)
411#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
412#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
413#define VM_CONTEXT0_INVALIDATION_LOW_ADDR 0x1490
414#define VM_CONTEXT0_INVALIDATION_HIGH_ADDR 0x14B0
415#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x1574
416#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x1594
417#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x15B4
418#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1554
419#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
420#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
421#define RESPONSE_TYPE_MASK 0x000000F0
422#define RESPONSE_TYPE_SHIFT 4
423#define VM_L2_CNTL 0x1400
424#define ENABLE_L2_CACHE (1 << 0)
425#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
426#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
427#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 13)
428#define VM_L2_CNTL2 0x1404
429#define INVALIDATE_ALL_L1_TLBS (1 << 0)
430#define INVALIDATE_L2_CACHE (1 << 1)
431#define VM_L2_CNTL3 0x1408
432#define BANK_SELECT_0(x) (((x) & 0x1f) << 0)
433#define BANK_SELECT_1(x) (((x) & 0x1f) << 5)
434#define L2_CACHE_UPDATE_MODE(x) (((x) & 3) << 10)
435#define VM_L2_STATUS 0x140C
436#define L2_BUSY (1 << 0)
437
438#define WAIT_UNTIL 0x8040
439#define WAIT_2D_IDLE_bit (1 << 14)
440#define WAIT_3D_IDLE_bit (1 << 15)
441#define WAIT_2D_IDLECLEAN_bit (1 << 16)
442#define WAIT_3D_IDLECLEAN_bit (1 << 17)
443
444
445
446/*
447 * PM4
448 */
449#define PACKET_TYPE0 0
450#define PACKET_TYPE1 1
451#define PACKET_TYPE2 2
452#define PACKET_TYPE3 3
453
454#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
455#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
456#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
457#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
458#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
459 (((reg) >> 2) & 0xFFFF) | \
460 ((n) & 0x3FFF) << 16)
461#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
462 (((op) & 0xFF) << 8) | \
463 ((n) & 0x3FFF) << 16)
464
465/* Packet 3 types */
466#define PACKET3_NOP 0x10
467#define PACKET3_INDIRECT_BUFFER_END 0x17
468#define PACKET3_SET_PREDICATION 0x20
469#define PACKET3_REG_RMW 0x21
470#define PACKET3_COND_EXEC 0x22
471#define PACKET3_PRED_EXEC 0x23
472#define PACKET3_START_3D_CMDBUF 0x24
473#define PACKET3_DRAW_INDEX_2 0x27
474#define PACKET3_CONTEXT_CONTROL 0x28
475#define PACKET3_DRAW_INDEX_IMMD_BE 0x29
476#define PACKET3_INDEX_TYPE 0x2A
477#define PACKET3_DRAW_INDEX 0x2B
478#define PACKET3_DRAW_INDEX_AUTO 0x2D
479#define PACKET3_DRAW_INDEX_IMMD 0x2E
480#define PACKET3_NUM_INSTANCES 0x2F
481#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
482#define PACKET3_INDIRECT_BUFFER_MP 0x38
483#define PACKET3_MEM_SEMAPHORE 0x39
484#define PACKET3_MPEG_INDEX 0x3A
485#define PACKET3_WAIT_REG_MEM 0x3C
486#define PACKET3_MEM_WRITE 0x3D
487#define PACKET3_INDIRECT_BUFFER 0x32
488#define PACKET3_CP_INTERRUPT 0x40
489#define PACKET3_SURFACE_SYNC 0x43
490# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
491# define PACKET3_TC_ACTION_ENA (1 << 23)
492# define PACKET3_VC_ACTION_ENA (1 << 24)
493# define PACKET3_CB_ACTION_ENA (1 << 25)
494# define PACKET3_DB_ACTION_ENA (1 << 26)
495# define PACKET3_SH_ACTION_ENA (1 << 27)
496# define PACKET3_SMX_ACTION_ENA (1 << 28)
497#define PACKET3_ME_INITIALIZE 0x44
498#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
499#define PACKET3_COND_WRITE 0x45
500#define PACKET3_EVENT_WRITE 0x46
501#define PACKET3_EVENT_WRITE_EOP 0x47
502#define PACKET3_ONE_REG_WRITE 0x57
503#define PACKET3_SET_CONFIG_REG 0x68
504#define PACKET3_SET_CONFIG_REG_OFFSET 0x00008000
505#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
506#define PACKET3_SET_CONTEXT_REG 0x69
507#define PACKET3_SET_CONTEXT_REG_OFFSET 0x00028000
508#define PACKET3_SET_CONTEXT_REG_END 0x00029000
509#define PACKET3_SET_ALU_CONST 0x6A
510#define PACKET3_SET_ALU_CONST_OFFSET 0x00030000
511#define PACKET3_SET_ALU_CONST_END 0x00032000
512#define PACKET3_SET_BOOL_CONST 0x6B
513#define PACKET3_SET_BOOL_CONST_OFFSET 0x0003e380
514#define PACKET3_SET_BOOL_CONST_END 0x00040000
515#define PACKET3_SET_LOOP_CONST 0x6C
516#define PACKET3_SET_LOOP_CONST_OFFSET 0x0003e200
517#define PACKET3_SET_LOOP_CONST_END 0x0003e380
518#define PACKET3_SET_RESOURCE 0x6D
519#define PACKET3_SET_RESOURCE_OFFSET 0x00038000
520#define PACKET3_SET_RESOURCE_END 0x0003c000
521#define PACKET3_SET_SAMPLER 0x6E
522#define PACKET3_SET_SAMPLER_OFFSET 0x0003c000
523#define PACKET3_SET_SAMPLER_END 0x0003cff0
524#define PACKET3_SET_CTL_CONST 0x6F
525#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
526#define PACKET3_SET_CTL_CONST_END 0x0003e200
527#define PACKET3_SURFACE_BASE_UPDATE 0x73
528
529
530#define R_008020_GRBM_SOFT_RESET 0x8020
531#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
532#define S_008020_SOFT_RESET_CB(x) (((x) & 1) << 1)
533#define S_008020_SOFT_RESET_CR(x) (((x) & 1) << 2)
534#define S_008020_SOFT_RESET_DB(x) (((x) & 1) << 3)
535#define S_008020_SOFT_RESET_PA(x) (((x) & 1) << 5)
536#define S_008020_SOFT_RESET_SC(x) (((x) & 1) << 6)
537#define S_008020_SOFT_RESET_SMX(x) (((x) & 1) << 7)
538#define S_008020_SOFT_RESET_SPI(x) (((x) & 1) << 8)
539#define S_008020_SOFT_RESET_SH(x) (((x) & 1) << 9)
540#define S_008020_SOFT_RESET_SX(x) (((x) & 1) << 10)
541#define S_008020_SOFT_RESET_TC(x) (((x) & 1) << 11)
542#define S_008020_SOFT_RESET_TA(x) (((x) & 1) << 12)
543#define S_008020_SOFT_RESET_VC(x) (((x) & 1) << 13)
544#define S_008020_SOFT_RESET_VGT(x) (((x) & 1) << 14)
545#define R_008010_GRBM_STATUS 0x8010
546#define S_008010_CMDFIFO_AVAIL(x) (((x) & 0x1F) << 0)
547#define S_008010_CP_RQ_PENDING(x) (((x) & 1) << 6)
548#define S_008010_CF_RQ_PENDING(x) (((x) & 1) << 7)
549#define S_008010_PF_RQ_PENDING(x) (((x) & 1) << 8)
550#define S_008010_GRBM_EE_BUSY(x) (((x) & 1) << 10)
551#define S_008010_VC_BUSY(x) (((x) & 1) << 11)
552#define S_008010_DB03_CLEAN(x) (((x) & 1) << 12)
553#define S_008010_CB03_CLEAN(x) (((x) & 1) << 13)
554#define S_008010_VGT_BUSY_NO_DMA(x) (((x) & 1) << 16)
555#define S_008010_VGT_BUSY(x) (((x) & 1) << 17)
556#define S_008010_TA03_BUSY(x) (((x) & 1) << 18)
557#define S_008010_TC_BUSY(x) (((x) & 1) << 19)
558#define S_008010_SX_BUSY(x) (((x) & 1) << 20)
559#define S_008010_SH_BUSY(x) (((x) & 1) << 21)
560#define S_008010_SPI03_BUSY(x) (((x) & 1) << 22)
561#define S_008010_SMX_BUSY(x) (((x) & 1) << 23)
562#define S_008010_SC_BUSY(x) (((x) & 1) << 24)
563#define S_008010_PA_BUSY(x) (((x) & 1) << 25)
564#define S_008010_DB03_BUSY(x) (((x) & 1) << 26)
565#define S_008010_CR_BUSY(x) (((x) & 1) << 27)
566#define S_008010_CP_COHERENCY_BUSY(x) (((x) & 1) << 28)
567#define S_008010_CP_BUSY(x) (((x) & 1) << 29)
568#define S_008010_CB03_BUSY(x) (((x) & 1) << 30)
569#define S_008010_GUI_ACTIVE(x) (((x) & 1) << 31)
570#define G_008010_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x1F)
571#define G_008010_CP_RQ_PENDING(x) (((x) >> 6) & 1)
572#define G_008010_CF_RQ_PENDING(x) (((x) >> 7) & 1)
573#define G_008010_PF_RQ_PENDING(x) (((x) >> 8) & 1)
574#define G_008010_GRBM_EE_BUSY(x) (((x) >> 10) & 1)
575#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
576#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
577#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
578#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
579#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
580#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
581#define G_008010_TC_BUSY(x) (((x) >> 19) & 1)
582#define G_008010_SX_BUSY(x) (((x) >> 20) & 1)
583#define G_008010_SH_BUSY(x) (((x) >> 21) & 1)
584#define G_008010_SPI03_BUSY(x) (((x) >> 22) & 1)
585#define G_008010_SMX_BUSY(x) (((x) >> 23) & 1)
586#define G_008010_SC_BUSY(x) (((x) >> 24) & 1)
587#define G_008010_PA_BUSY(x) (((x) >> 25) & 1)
588#define G_008010_DB03_BUSY(x) (((x) >> 26) & 1)
589#define G_008010_CR_BUSY(x) (((x) >> 27) & 1)
590#define G_008010_CP_COHERENCY_BUSY(x) (((x) >> 28) & 1)
591#define G_008010_CP_BUSY(x) (((x) >> 29) & 1)
592#define G_008010_CB03_BUSY(x) (((x) >> 30) & 1)
593#define G_008010_GUI_ACTIVE(x) (((x) >> 31) & 1)
594#define R_008014_GRBM_STATUS2 0x8014
595#define S_008014_CR_CLEAN(x) (((x) & 1) << 0)
596#define S_008014_SMX_CLEAN(x) (((x) & 1) << 1)
597#define S_008014_SPI0_BUSY(x) (((x) & 1) << 8)
598#define S_008014_SPI1_BUSY(x) (((x) & 1) << 9)
599#define S_008014_SPI2_BUSY(x) (((x) & 1) << 10)
600#define S_008014_SPI3_BUSY(x) (((x) & 1) << 11)
601#define S_008014_TA0_BUSY(x) (((x) & 1) << 12)
602#define S_008014_TA1_BUSY(x) (((x) & 1) << 13)
603#define S_008014_TA2_BUSY(x) (((x) & 1) << 14)
604#define S_008014_TA3_BUSY(x) (((x) & 1) << 15)
605#define S_008014_DB0_BUSY(x) (((x) & 1) << 16)
606#define S_008014_DB1_BUSY(x) (((x) & 1) << 17)
607#define S_008014_DB2_BUSY(x) (((x) & 1) << 18)
608#define S_008014_DB3_BUSY(x) (((x) & 1) << 19)
609#define S_008014_CB0_BUSY(x) (((x) & 1) << 20)
610#define S_008014_CB1_BUSY(x) (((x) & 1) << 21)
611#define S_008014_CB2_BUSY(x) (((x) & 1) << 22)
612#define S_008014_CB3_BUSY(x) (((x) & 1) << 23)
613#define G_008014_CR_CLEAN(x) (((x) >> 0) & 1)
614#define G_008014_SMX_CLEAN(x) (((x) >> 1) & 1)
615#define G_008014_SPI0_BUSY(x) (((x) >> 8) & 1)
616#define G_008014_SPI1_BUSY(x) (((x) >> 9) & 1)
617#define G_008014_SPI2_BUSY(x) (((x) >> 10) & 1)
618#define G_008014_SPI3_BUSY(x) (((x) >> 11) & 1)
619#define G_008014_TA0_BUSY(x) (((x) >> 12) & 1)
620#define G_008014_TA1_BUSY(x) (((x) >> 13) & 1)
621#define G_008014_TA2_BUSY(x) (((x) >> 14) & 1)
622#define G_008014_TA3_BUSY(x) (((x) >> 15) & 1)
623#define G_008014_DB0_BUSY(x) (((x) >> 16) & 1)
624#define G_008014_DB1_BUSY(x) (((x) >> 17) & 1)
625#define G_008014_DB2_BUSY(x) (((x) >> 18) & 1)
626#define G_008014_DB3_BUSY(x) (((x) >> 19) & 1)
627#define G_008014_CB0_BUSY(x) (((x) >> 20) & 1)
628#define G_008014_CB1_BUSY(x) (((x) >> 21) & 1)
629#define G_008014_CB2_BUSY(x) (((x) >> 22) & 1)
630#define G_008014_CB3_BUSY(x) (((x) >> 23) & 1)
631#define R_000E50_SRBM_STATUS 0x0E50
632#define G_000E50_RLC_RQ_PENDING(x) (((x) >> 3) & 1)
633#define G_000E50_RCU_RQ_PENDING(x) (((x) >> 4) & 1)
634#define G_000E50_GRBM_RQ_PENDING(x) (((x) >> 5) & 1)
635#define G_000E50_HI_RQ_PENDING(x) (((x) >> 6) & 1)
636#define G_000E50_IO_EXTERN_SIGNAL(x) (((x) >> 7) & 1)
637#define G_000E50_VMC_BUSY(x) (((x) >> 8) & 1)
638#define G_000E50_MCB_BUSY(x) (((x) >> 9) & 1)
639#define G_000E50_MCDZ_BUSY(x) (((x) >> 10) & 1)
640#define G_000E50_MCDY_BUSY(x) (((x) >> 11) & 1)
641#define G_000E50_MCDX_BUSY(x) (((x) >> 12) & 1)
642#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
643#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
644#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
645#define R_000E60_SRBM_SOFT_RESET 0x0E60
646#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
647#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2)
648#define S_000E60_SOFT_RESET_CMC(x) (((x) & 1) << 3)
649#define S_000E60_SOFT_RESET_CSC(x) (((x) & 1) << 4)
650#define S_000E60_SOFT_RESET_DC(x) (((x) & 1) << 5)
651#define S_000E60_SOFT_RESET_GRBM(x) (((x) & 1) << 8)
652#define S_000E60_SOFT_RESET_HDP(x) (((x) & 1) << 9)
653#define S_000E60_SOFT_RESET_IH(x) (((x) & 1) << 10)
654#define S_000E60_SOFT_RESET_MC(x) (((x) & 1) << 11)
655#define S_000E60_SOFT_RESET_RLC(x) (((x) & 1) << 13)
656#define S_000E60_SOFT_RESET_ROM(x) (((x) & 1) << 14)
657#define S_000E60_SOFT_RESET_SEM(x) (((x) & 1) << 15)
658#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
659#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
660
661#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e47f2fc294ce..3299733ac300 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -50,8 +50,8 @@
50#include <linux/kref.h> 50#include <linux/kref.h>
51 51
52#include "radeon_mode.h" 52#include "radeon_mode.h"
53#include "radeon_share.h"
53#include "radeon_reg.h" 54#include "radeon_reg.h"
54#include "r300.h"
55 55
56/* 56/*
57 * Modules parameters. 57 * Modules parameters.
@@ -112,10 +112,11 @@ enum radeon_family {
112 CHIP_RV635, 112 CHIP_RV635,
113 CHIP_RV670, 113 CHIP_RV670,
114 CHIP_RS780, 114 CHIP_RS780,
115 CHIP_RS880,
115 CHIP_RV770, 116 CHIP_RV770,
116 CHIP_RV730, 117 CHIP_RV730,
117 CHIP_RV710, 118 CHIP_RV710,
118 CHIP_RS880, 119 CHIP_RV740,
119 CHIP_LAST, 120 CHIP_LAST,
120}; 121};
121 122
@@ -152,10 +153,21 @@ struct radeon_device;
152 */ 153 */
153bool radeon_get_bios(struct radeon_device *rdev); 154bool radeon_get_bios(struct radeon_device *rdev);
154 155
156
155/* 157/*
156 * Clocks 158 * Dummy page
157 */ 159 */
160struct radeon_dummy_page {
161 struct page *page;
162 dma_addr_t addr;
163};
164int radeon_dummy_page_init(struct radeon_device *rdev);
165void radeon_dummy_page_fini(struct radeon_device *rdev);
166
158 167
168/*
169 * Clocks
170 */
159struct radeon_clock { 171struct radeon_clock {
160 struct radeon_pll p1pll; 172 struct radeon_pll p1pll;
161 struct radeon_pll p2pll; 173 struct radeon_pll p2pll;
@@ -166,6 +178,7 @@ struct radeon_clock {
166 uint32_t default_sclk; 178 uint32_t default_sclk;
167}; 179};
168 180
181
169/* 182/*
170 * Fences. 183 * Fences.
171 */ 184 */
@@ -332,14 +345,18 @@ struct radeon_mc {
332 resource_size_t aper_size; 345 resource_size_t aper_size;
333 resource_size_t aper_base; 346 resource_size_t aper_base;
334 resource_size_t agp_base; 347 resource_size_t agp_base;
335 unsigned gtt_location;
336 unsigned gtt_size;
337 unsigned vram_location;
338 /* for some chips with <= 32MB we need to lie 348 /* for some chips with <= 32MB we need to lie
339 * about vram size near mc fb location */ 349 * about vram size near mc fb location */
340 unsigned mc_vram_size; 350 u64 mc_vram_size;
351 u64 gtt_location;
352 u64 gtt_size;
353 u64 gtt_start;
354 u64 gtt_end;
355 u64 vram_location;
356 u64 vram_start;
357 u64 vram_end;
341 unsigned vram_width; 358 unsigned vram_width;
342 unsigned real_vram_size; 359 u64 real_vram_size;
343 int vram_mtrr; 360 int vram_mtrr;
344 bool vram_is_ddr; 361 bool vram_is_ddr;
345}; 362};
@@ -411,6 +428,16 @@ struct radeon_cp {
411 bool ready; 428 bool ready;
412}; 429};
413 430
431struct r600_blit {
432 struct radeon_object *shader_obj;
433 u64 shader_gpu_addr;
434 u32 vs_offset, ps_offset;
435 u32 state_offset;
436 u32 state_len;
437 u32 vb_used, vb_total;
438 struct radeon_ib *vb_ib;
439};
440
414int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); 441int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
415void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); 442void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
416int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); 443int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -463,6 +490,7 @@ struct radeon_cs_parser {
463 int chunk_relocs_idx; 490 int chunk_relocs_idx;
464 struct radeon_ib *ib; 491 struct radeon_ib *ib;
465 void *track; 492 void *track;
493 unsigned family;
466}; 494};
467 495
468struct radeon_cs_packet { 496struct radeon_cs_packet {
@@ -559,6 +587,9 @@ int r100_debugfs_cp_init(struct radeon_device *rdev);
559 */ 587 */
560struct radeon_asic { 588struct radeon_asic {
561 int (*init)(struct radeon_device *rdev); 589 int (*init)(struct radeon_device *rdev);
590 void (*fini)(struct radeon_device *rdev);
591 int (*resume)(struct radeon_device *rdev);
592 int (*suspend)(struct radeon_device *rdev);
562 void (*errata)(struct radeon_device *rdev); 593 void (*errata)(struct radeon_device *rdev);
563 void (*vram_info)(struct radeon_device *rdev); 594 void (*vram_info)(struct radeon_device *rdev);
564 int (*gpu_reset)(struct radeon_device *rdev); 595 int (*gpu_reset)(struct radeon_device *rdev);
@@ -573,7 +604,11 @@ struct radeon_asic {
573 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 604 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
574 void (*cp_fini)(struct radeon_device *rdev); 605 void (*cp_fini)(struct radeon_device *rdev);
575 void (*cp_disable)(struct radeon_device *rdev); 606 void (*cp_disable)(struct radeon_device *rdev);
607 void (*cp_commit)(struct radeon_device *rdev);
576 void (*ring_start)(struct radeon_device *rdev); 608 void (*ring_start)(struct radeon_device *rdev);
609 int (*ring_test)(struct radeon_device *rdev);
610 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
611 int (*ib_test)(struct radeon_device *rdev);
577 int (*irq_set)(struct radeon_device *rdev); 612 int (*irq_set)(struct radeon_device *rdev);
578 int (*irq_process)(struct radeon_device *rdev); 613 int (*irq_process)(struct radeon_device *rdev);
579 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 614 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
@@ -613,6 +648,8 @@ struct r100_asic {
613union radeon_asic_config { 648union radeon_asic_config {
614 struct r300_asic r300; 649 struct r300_asic r300;
615 struct r100_asic r100; 650 struct r100_asic r100;
651 struct r600_asic r600;
652 struct rv770_asic rv770;
616}; 653};
617 654
618 655
@@ -698,12 +735,16 @@ struct radeon_device {
698 struct radeon_pm pm; 735 struct radeon_pm pm;
699 struct mutex cs_mutex; 736 struct mutex cs_mutex;
700 struct radeon_wb wb; 737 struct radeon_wb wb;
738 struct radeon_dummy_page dummy_page;
701 bool gpu_lockup; 739 bool gpu_lockup;
702 bool shutdown; 740 bool shutdown;
703 bool suspend; 741 bool suspend;
704 bool need_dma32; 742 bool need_dma32;
743 bool new_init_path;
705 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 744 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
706 const struct firmware *fw; /* firmware */ 745 const struct firmware *me_fw; /* all family ME firmware */
746 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
747 struct r600_blit r600_blit;
707}; 748};
708 749
709int radeon_device_init(struct radeon_device *rdev, 750int radeon_device_init(struct radeon_device *rdev,
@@ -713,6 +754,13 @@ int radeon_device_init(struct radeon_device *rdev,
713void radeon_device_fini(struct radeon_device *rdev); 754void radeon_device_fini(struct radeon_device *rdev);
714int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 755int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
715 756
757/* r600 blit */
758int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
759void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
760void r600_kms_blit_copy(struct radeon_device *rdev,
761 u64 src_gpu_addr, u64 dst_gpu_addr,
762 int size_bytes);
763
716static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 764static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
717{ 765{
718 if (reg < 0x10000) 766 if (reg < 0x10000)
@@ -740,6 +788,7 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
740#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) 788#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
741#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) 789#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
742#define RREG32(reg) r100_mm_rreg(rdev, (reg)) 790#define RREG32(reg) r100_mm_rreg(rdev, (reg))
791#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
743#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) 792#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
744#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 793#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
745#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 794#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
@@ -763,6 +812,7 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
763 tmp_ |= ((val) & ~(mask)); \ 812 tmp_ |= ((val) & ~(mask)); \
764 WREG32_PLL(reg, tmp_); \ 813 WREG32_PLL(reg, tmp_); \
765 } while (0) 814 } while (0)
815#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
766 816
767/* 817/*
768 * Indirect registers accessor 818 * Indirect registers accessor
@@ -827,51 +877,6 @@ void radeon_atombios_fini(struct radeon_device *rdev);
827/* 877/*
828 * RING helpers. 878 * RING helpers.
829 */ 879 */
830#define CP_PACKET0 0x00000000
831#define PACKET0_BASE_INDEX_SHIFT 0
832#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
833#define PACKET0_COUNT_SHIFT 16
834#define PACKET0_COUNT_MASK (0x3fff << 16)
835#define CP_PACKET1 0x40000000
836#define CP_PACKET2 0x80000000
837#define PACKET2_PAD_SHIFT 0
838#define PACKET2_PAD_MASK (0x3fffffff << 0)
839#define CP_PACKET3 0xC0000000
840#define PACKET3_IT_OPCODE_SHIFT 8
841#define PACKET3_IT_OPCODE_MASK (0xff << 8)
842#define PACKET3_COUNT_SHIFT 16
843#define PACKET3_COUNT_MASK (0x3fff << 16)
844/* PACKET3 op code */
845#define PACKET3_NOP 0x10
846#define PACKET3_3D_DRAW_VBUF 0x28
847#define PACKET3_3D_DRAW_IMMD 0x29
848#define PACKET3_3D_DRAW_INDX 0x2A
849#define PACKET3_3D_LOAD_VBPNTR 0x2F
850#define PACKET3_INDX_BUFFER 0x33
851#define PACKET3_3D_DRAW_VBUF_2 0x34
852#define PACKET3_3D_DRAW_IMMD_2 0x35
853#define PACKET3_3D_DRAW_INDX_2 0x36
854#define PACKET3_BITBLT_MULTI 0x9B
855
856#define PACKET0(reg, n) (CP_PACKET0 | \
857 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
858 REG_SET(PACKET0_COUNT, (n)))
859#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
860#define PACKET3(op, n) (CP_PACKET3 | \
861 REG_SET(PACKET3_IT_OPCODE, (op)) | \
862 REG_SET(PACKET3_COUNT, (n)))
863
864#define PACKET_TYPE0 0
865#define PACKET_TYPE1 1
866#define PACKET_TYPE2 2
867#define PACKET_TYPE3 3
868
869#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
870#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
871#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
872#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
873#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
874
875static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) 880static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
876{ 881{
877#if DRM_DEBUG_CODE 882#if DRM_DEBUG_CODE
@@ -890,6 +895,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
890 * ASICs macro. 895 * ASICs macro.
891 */ 896 */
892#define radeon_init(rdev) (rdev)->asic->init((rdev)) 897#define radeon_init(rdev) (rdev)->asic->init((rdev))
898#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
899#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
900#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
893#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 901#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
894#define radeon_errata(rdev) (rdev)->asic->errata((rdev)) 902#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
895#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) 903#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
@@ -905,7 +913,11 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
905#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) 913#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
906#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) 914#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
907#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) 915#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
916#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
908#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) 917#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
918#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
919#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
920#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev))
909#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) 921#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
910#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) 922#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
911#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) 923#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c9cbd8ae1f95..e87bb915a6de 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -60,6 +60,7 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
60int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); 60int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
61void r100_cp_fini(struct radeon_device *rdev); 61void r100_cp_fini(struct radeon_device *rdev);
62void r100_cp_disable(struct radeon_device *rdev); 62void r100_cp_disable(struct radeon_device *rdev);
63void r100_cp_commit(struct radeon_device *rdev);
63void r100_ring_start(struct radeon_device *rdev); 64void r100_ring_start(struct radeon_device *rdev);
64int r100_irq_set(struct radeon_device *rdev); 65int r100_irq_set(struct radeon_device *rdev);
65int r100_irq_process(struct radeon_device *rdev); 66int r100_irq_process(struct radeon_device *rdev);
@@ -78,6 +79,9 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
78 uint32_t offset, uint32_t obj_size); 79 uint32_t offset, uint32_t obj_size);
79int r100_clear_surface_reg(struct radeon_device *rdev, int reg); 80int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
80void r100_bandwidth_update(struct radeon_device *rdev); 81void r100_bandwidth_update(struct radeon_device *rdev);
82void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
83int r100_ib_test(struct radeon_device *rdev);
84int r100_ring_test(struct radeon_device *rdev);
81 85
82static struct radeon_asic r100_asic = { 86static struct radeon_asic r100_asic = {
83 .init = &r100_init, 87 .init = &r100_init,
@@ -95,7 +99,11 @@ static struct radeon_asic r100_asic = {
95 .cp_init = &r100_cp_init, 99 .cp_init = &r100_cp_init,
96 .cp_fini = &r100_cp_fini, 100 .cp_fini = &r100_cp_fini,
97 .cp_disable = &r100_cp_disable, 101 .cp_disable = &r100_cp_disable,
102 .cp_commit = &r100_cp_commit,
98 .ring_start = &r100_ring_start, 103 .ring_start = &r100_ring_start,
104 .ring_test = &r100_ring_test,
105 .ring_ib_execute = &r100_ring_ib_execute,
106 .ib_test = &r100_ib_test,
99 .irq_set = &r100_irq_set, 107 .irq_set = &r100_irq_set,
100 .irq_process = &r100_irq_process, 108 .irq_process = &r100_irq_process,
101 .get_vblank_counter = &r100_get_vblank_counter, 109 .get_vblank_counter = &r100_get_vblank_counter,
@@ -156,7 +164,11 @@ static struct radeon_asic r300_asic = {
156 .cp_init = &r100_cp_init, 164 .cp_init = &r100_cp_init,
157 .cp_fini = &r100_cp_fini, 165 .cp_fini = &r100_cp_fini,
158 .cp_disable = &r100_cp_disable, 166 .cp_disable = &r100_cp_disable,
167 .cp_commit = &r100_cp_commit,
159 .ring_start = &r300_ring_start, 168 .ring_start = &r300_ring_start,
169 .ring_test = &r100_ring_test,
170 .ring_ib_execute = &r100_ring_ib_execute,
171 .ib_test = &r100_ib_test,
160 .irq_set = &r100_irq_set, 172 .irq_set = &r100_irq_set,
161 .irq_process = &r100_irq_process, 173 .irq_process = &r100_irq_process,
162 .get_vblank_counter = &r100_get_vblank_counter, 174 .get_vblank_counter = &r100_get_vblank_counter,
@@ -197,7 +209,11 @@ static struct radeon_asic r420_asic = {
197 .cp_init = &r100_cp_init, 209 .cp_init = &r100_cp_init,
198 .cp_fini = &r100_cp_fini, 210 .cp_fini = &r100_cp_fini,
199 .cp_disable = &r100_cp_disable, 211 .cp_disable = &r100_cp_disable,
212 .cp_commit = &r100_cp_commit,
200 .ring_start = &r300_ring_start, 213 .ring_start = &r300_ring_start,
214 .ring_test = &r100_ring_test,
215 .ring_ib_execute = &r100_ring_ib_execute,
216 .ib_test = &r100_ib_test,
201 .irq_set = &r100_irq_set, 217 .irq_set = &r100_irq_set,
202 .irq_process = &r100_irq_process, 218 .irq_process = &r100_irq_process,
203 .get_vblank_counter = &r100_get_vblank_counter, 219 .get_vblank_counter = &r100_get_vblank_counter,
@@ -245,7 +261,11 @@ static struct radeon_asic rs400_asic = {
245 .cp_init = &r100_cp_init, 261 .cp_init = &r100_cp_init,
246 .cp_fini = &r100_cp_fini, 262 .cp_fini = &r100_cp_fini,
247 .cp_disable = &r100_cp_disable, 263 .cp_disable = &r100_cp_disable,
264 .cp_commit = &r100_cp_commit,
248 .ring_start = &r300_ring_start, 265 .ring_start = &r300_ring_start,
266 .ring_test = &r100_ring_test,
267 .ring_ib_execute = &r100_ring_ib_execute,
268 .ib_test = &r100_ib_test,
249 .irq_set = &r100_irq_set, 269 .irq_set = &r100_irq_set,
250 .irq_process = &r100_irq_process, 270 .irq_process = &r100_irq_process,
251 .get_vblank_counter = &r100_get_vblank_counter, 271 .get_vblank_counter = &r100_get_vblank_counter,
@@ -298,7 +318,11 @@ static struct radeon_asic rs600_asic = {
298 .cp_init = &r100_cp_init, 318 .cp_init = &r100_cp_init,
299 .cp_fini = &r100_cp_fini, 319 .cp_fini = &r100_cp_fini,
300 .cp_disable = &r100_cp_disable, 320 .cp_disable = &r100_cp_disable,
321 .cp_commit = &r100_cp_commit,
301 .ring_start = &r300_ring_start, 322 .ring_start = &r300_ring_start,
323 .ring_test = &r100_ring_test,
324 .ring_ib_execute = &r100_ring_ib_execute,
325 .ib_test = &r100_ib_test,
302 .irq_set = &rs600_irq_set, 326 .irq_set = &rs600_irq_set,
303 .irq_process = &rs600_irq_process, 327 .irq_process = &rs600_irq_process,
304 .get_vblank_counter = &rs600_get_vblank_counter, 328 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -341,7 +365,11 @@ static struct radeon_asic rs690_asic = {
341 .cp_init = &r100_cp_init, 365 .cp_init = &r100_cp_init,
342 .cp_fini = &r100_cp_fini, 366 .cp_fini = &r100_cp_fini,
343 .cp_disable = &r100_cp_disable, 367 .cp_disable = &r100_cp_disable,
368 .cp_commit = &r100_cp_commit,
344 .ring_start = &r300_ring_start, 369 .ring_start = &r300_ring_start,
370 .ring_test = &r100_ring_test,
371 .ring_ib_execute = &r100_ring_ib_execute,
372 .ib_test = &r100_ib_test,
345 .irq_set = &rs600_irq_set, 373 .irq_set = &rs600_irq_set,
346 .irq_process = &rs600_irq_process, 374 .irq_process = &rs600_irq_process,
347 .get_vblank_counter = &rs600_get_vblank_counter, 375 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -391,7 +419,11 @@ static struct radeon_asic rv515_asic = {
391 .cp_init = &r100_cp_init, 419 .cp_init = &r100_cp_init,
392 .cp_fini = &r100_cp_fini, 420 .cp_fini = &r100_cp_fini,
393 .cp_disable = &r100_cp_disable, 421 .cp_disable = &r100_cp_disable,
422 .cp_commit = &r100_cp_commit,
394 .ring_start = &rv515_ring_start, 423 .ring_start = &rv515_ring_start,
424 .ring_test = &r100_ring_test,
425 .ring_ib_execute = &r100_ring_ib_execute,
426 .ib_test = &r100_ib_test,
395 .irq_set = &rs600_irq_set, 427 .irq_set = &rs600_irq_set,
396 .irq_process = &rs600_irq_process, 428 .irq_process = &rs600_irq_process,
397 .get_vblank_counter = &rs600_get_vblank_counter, 429 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -434,7 +466,11 @@ static struct radeon_asic r520_asic = {
434 .cp_init = &r100_cp_init, 466 .cp_init = &r100_cp_init,
435 .cp_fini = &r100_cp_fini, 467 .cp_fini = &r100_cp_fini,
436 .cp_disable = &r100_cp_disable, 468 .cp_disable = &r100_cp_disable,
469 .cp_commit = &r100_cp_commit,
437 .ring_start = &rv515_ring_start, 470 .ring_start = &rv515_ring_start,
471 .ring_test = &r100_ring_test,
472 .ring_ib_execute = &r100_ring_ib_execute,
473 .ib_test = &r100_ib_test,
438 .irq_set = &rs600_irq_set, 474 .irq_set = &rs600_irq_set,
439 .irq_process = &rs600_irq_process, 475 .irq_process = &rs600_irq_process,
440 .get_vblank_counter = &rs600_get_vblank_counter, 476 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -453,9 +489,127 @@ static struct radeon_asic r520_asic = {
453}; 489};
454 490
455/* 491/*
456 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710 492 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
457 */ 493 */
494int r600_init(struct radeon_device *rdev);
495void r600_fini(struct radeon_device *rdev);
496int r600_suspend(struct radeon_device *rdev);
497int r600_resume(struct radeon_device *rdev);
498int r600_wb_init(struct radeon_device *rdev);
499void r600_wb_fini(struct radeon_device *rdev);
500void r600_cp_commit(struct radeon_device *rdev);
501void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
458uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg); 502uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
459void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 503void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
504int r600_cs_parse(struct radeon_cs_parser *p);
505void r600_fence_ring_emit(struct radeon_device *rdev,
506 struct radeon_fence *fence);
507int r600_copy_dma(struct radeon_device *rdev,
508 uint64_t src_offset,
509 uint64_t dst_offset,
510 unsigned num_pages,
511 struct radeon_fence *fence);
512int r600_irq_process(struct radeon_device *rdev);
513int r600_irq_set(struct radeon_device *rdev);
514int r600_gpu_reset(struct radeon_device *rdev);
515int r600_set_surface_reg(struct radeon_device *rdev, int reg,
516 uint32_t tiling_flags, uint32_t pitch,
517 uint32_t offset, uint32_t obj_size);
518int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
519void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
520int r600_ib_test(struct radeon_device *rdev);
521int r600_ring_test(struct radeon_device *rdev);
522int r600_copy_blit(struct radeon_device *rdev,
523 uint64_t src_offset, uint64_t dst_offset,
524 unsigned num_pages, struct radeon_fence *fence);
525
526static struct radeon_asic r600_asic = {
527 .errata = NULL,
528 .init = &r600_init,
529 .fini = &r600_fini,
530 .suspend = &r600_suspend,
531 .resume = &r600_resume,
532 .cp_commit = &r600_cp_commit,
533 .vram_info = NULL,
534 .gpu_reset = &r600_gpu_reset,
535 .mc_init = NULL,
536 .mc_fini = NULL,
537 .wb_init = &r600_wb_init,
538 .wb_fini = &r600_wb_fini,
539 .gart_enable = NULL,
540 .gart_disable = NULL,
541 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
542 .gart_set_page = &rs600_gart_set_page,
543 .cp_init = NULL,
544 .cp_fini = NULL,
545 .cp_disable = NULL,
546 .ring_start = NULL,
547 .ring_test = &r600_ring_test,
548 .ring_ib_execute = &r600_ring_ib_execute,
549 .ib_test = &r600_ib_test,
550 .irq_set = &r600_irq_set,
551 .irq_process = &r600_irq_process,
552 .fence_ring_emit = &r600_fence_ring_emit,
553 .cs_parse = &r600_cs_parse,
554 .copy_blit = &r600_copy_blit,
555 .copy_dma = &r600_copy_blit,
556 .copy = NULL,
557 .set_engine_clock = &radeon_atom_set_engine_clock,
558 .set_memory_clock = &radeon_atom_set_memory_clock,
559 .set_pcie_lanes = NULL,
560 .set_clock_gating = &radeon_atom_set_clock_gating,
561 .set_surface_reg = r600_set_surface_reg,
562 .clear_surface_reg = r600_clear_surface_reg,
563 .bandwidth_update = &r520_bandwidth_update,
564};
565
566/*
567 * rv770,rv730,rv710,rv740
568 */
569int rv770_init(struct radeon_device *rdev);
570void rv770_fini(struct radeon_device *rdev);
571int rv770_suspend(struct radeon_device *rdev);
572int rv770_resume(struct radeon_device *rdev);
573int rv770_gpu_reset(struct radeon_device *rdev);
574
575static struct radeon_asic rv770_asic = {
576 .errata = NULL,
577 .init = &rv770_init,
578 .fini = &rv770_fini,
579 .suspend = &rv770_suspend,
580 .resume = &rv770_resume,
581 .cp_commit = &r600_cp_commit,
582 .vram_info = NULL,
583 .gpu_reset = &rv770_gpu_reset,
584 .mc_init = NULL,
585 .mc_fini = NULL,
586 .wb_init = &r600_wb_init,
587 .wb_fini = &r600_wb_fini,
588 .gart_enable = NULL,
589 .gart_disable = NULL,
590 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
591 .gart_set_page = &rs600_gart_set_page,
592 .cp_init = NULL,
593 .cp_fini = NULL,
594 .cp_disable = NULL,
595 .ring_start = NULL,
596 .ring_test = &r600_ring_test,
597 .ring_ib_execute = &r600_ring_ib_execute,
598 .ib_test = &r600_ib_test,
599 .irq_set = &r600_irq_set,
600 .irq_process = &r600_irq_process,
601 .fence_ring_emit = &r600_fence_ring_emit,
602 .cs_parse = &r600_cs_parse,
603 .copy_blit = &r600_copy_blit,
604 .copy_dma = &r600_copy_blit,
605 .copy = NULL,
606 .set_engine_clock = &radeon_atom_set_engine_clock,
607 .set_memory_clock = &radeon_atom_set_memory_clock,
608 .set_pcie_lanes = NULL,
609 .set_clock_gating = &radeon_atom_set_clock_gating,
610 .set_surface_reg = r600_set_surface_reg,
611 .clear_surface_reg = r600_clear_surface_reg,
612 .bandwidth_update = &r520_bandwidth_update,
613};
460 614
461#endif 615#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bba9b4bd8f5c..a8fb392c9cd6 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -370,10 +370,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
370 && record-> 370 && record->
371 ucRecordType <= 371 ucRecordType <=
372 ATOM_MAX_OBJECT_RECORD_NUMBER) { 372 ATOM_MAX_OBJECT_RECORD_NUMBER) {
373 DRM_ERROR
374 ("record type %d\n",
375 record->
376 ucRecordType);
377 switch (record-> 373 switch (record->
378 ucRecordType) { 374 ucRecordType) {
379 case ATOM_I2C_RECORD_TYPE: 375 case ATOM_I2C_RECORD_TYPE:
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a37cbce53181..152eef13197a 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -102,10 +102,12 @@ void radeon_get_clock_info(struct drm_device *dev)
102 p1pll->reference_div = 12; 102 p1pll->reference_div = 12;
103 if (p2pll->reference_div < 2) 103 if (p2pll->reference_div < 2)
104 p2pll->reference_div = 12; 104 p2pll->reference_div = 12;
105 if (spll->reference_div < 2) 105 if (rdev->family < CHIP_RS600) {
106 spll->reference_div = 106 if (spll->reference_div < 2)
107 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & 107 spll->reference_div =
108 RADEON_M_SPLL_REF_DIV_MASK; 108 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
109 RADEON_M_SPLL_REF_DIV_MASK;
110 }
109 if (mpll->reference_div < 2) 111 if (mpll->reference_div < 2)
110 mpll->reference_div = spll->reference_div; 112 mpll->reference_div = spll->reference_div;
111 } else { 113 } else {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7693f7c67bd3..f2469c511789 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -37,7 +37,7 @@
37/* 37/*
38 * Clear GPU surface registers. 38 * Clear GPU surface registers.
39 */ 39 */
40static void radeon_surface_init(struct radeon_device *rdev) 40void radeon_surface_init(struct radeon_device *rdev)
41{ 41{
42 /* FIXME: check this out */ 42 /* FIXME: check this out */
43 if (rdev->family < CHIP_R600) { 43 if (rdev->family < CHIP_R600) {
@@ -56,7 +56,7 @@ static void radeon_surface_init(struct radeon_device *rdev)
56/* 56/*
57 * GPU scratch registers helpers function. 57 * GPU scratch registers helpers function.
58 */ 58 */
59static void radeon_scratch_init(struct radeon_device *rdev) 59void radeon_scratch_init(struct radeon_device *rdev)
60{ 60{
61 int i; 61 int i;
62 62
@@ -156,16 +156,14 @@ int radeon_mc_setup(struct radeon_device *rdev)
156 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); 156 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
157 rdev->mc.gtt_location = tmp; 157 rdev->mc.gtt_location = tmp;
158 } 158 }
159 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20); 159 DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
160 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", 160 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
161 rdev->mc.vram_location, 161 (unsigned)rdev->mc.vram_location,
162 rdev->mc.vram_location + rdev->mc.mc_vram_size - 1); 162 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
163 if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size) 163 DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
164 DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
165 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
166 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", 164 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
167 rdev->mc.gtt_location, 165 (unsigned)rdev->mc.gtt_location,
168 rdev->mc.gtt_location + rdev->mc.gtt_size - 1); 166 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
169 return 0; 167 return 0;
170} 168}
171 169
@@ -205,6 +203,31 @@ static bool radeon_card_posted(struct radeon_device *rdev)
205 203
206} 204}
207 205
206int radeon_dummy_page_init(struct radeon_device *rdev)
207{
208 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
209 if (rdev->dummy_page.page == NULL)
210 return -ENOMEM;
211 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
212 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
213 if (!rdev->dummy_page.addr) {
214 __free_page(rdev->dummy_page.page);
215 rdev->dummy_page.page = NULL;
216 return -ENOMEM;
217 }
218 return 0;
219}
220
221void radeon_dummy_page_fini(struct radeon_device *rdev)
222{
223 if (rdev->dummy_page.page == NULL)
224 return;
225 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
226 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
227 __free_page(rdev->dummy_page.page);
228 rdev->dummy_page.page = NULL;
229}
230
208 231
209/* 232/*
210 * Registers accessors functions. 233 * Registers accessors functions.
@@ -323,9 +346,15 @@ int radeon_asic_init(struct radeon_device *rdev)
323 case CHIP_RV635: 346 case CHIP_RV635:
324 case CHIP_RV670: 347 case CHIP_RV670:
325 case CHIP_RS780: 348 case CHIP_RS780:
349 case CHIP_RS880:
350 rdev->asic = &r600_asic;
351 break;
326 case CHIP_RV770: 352 case CHIP_RV770:
327 case CHIP_RV730: 353 case CHIP_RV730:
328 case CHIP_RV710: 354 case CHIP_RV710:
355 case CHIP_RV740:
356 rdev->asic = &rv770_asic;
357 break;
329 default: 358 default:
330 /* FIXME: not supported yet */ 359 /* FIXME: not supported yet */
331 return -EINVAL; 360 return -EINVAL;
@@ -448,7 +477,7 @@ int radeon_device_init(struct radeon_device *rdev,
448 struct pci_dev *pdev, 477 struct pci_dev *pdev,
449 uint32_t flags) 478 uint32_t flags)
450{ 479{
451 int r, ret; 480 int r, ret = 0;
452 int dma_bits; 481 int dma_bits;
453 482
454 DRM_INFO("radeon: Initializing kernel modesetting.\n"); 483 DRM_INFO("radeon: Initializing kernel modesetting.\n");
@@ -487,10 +516,6 @@ int radeon_device_init(struct radeon_device *rdev,
487 if (r) { 516 if (r) {
488 return r; 517 return r;
489 } 518 }
490 r = radeon_init(rdev);
491 if (r) {
492 return r;
493 }
494 519
495 /* set DMA mask + need_dma32 flags. 520 /* set DMA mask + need_dma32 flags.
496 * PCIE - can handle 40-bits. 521 * PCIE - can handle 40-bits.
@@ -521,111 +546,118 @@ int radeon_device_init(struct radeon_device *rdev,
521 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 546 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
522 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 547 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
523 548
524 /* Setup errata flags */ 549 rdev->new_init_path = false;
525 radeon_errata(rdev); 550 r = radeon_init(rdev);
526 /* Initialize scratch registers */ 551 if (r) {
527 radeon_scratch_init(rdev); 552 return r;
528 /* Initialize surface registers */ 553 }
529 radeon_surface_init(rdev); 554 if (!rdev->new_init_path) {
530 555 /* Setup errata flags */
531 /* TODO: disable VGA need to use VGA request */ 556 radeon_errata(rdev);
532 /* BIOS*/ 557 /* Initialize scratch registers */
533 if (!radeon_get_bios(rdev)) { 558 radeon_scratch_init(rdev);
534 if (ASIC_IS_AVIVO(rdev)) 559 /* Initialize surface registers */
535 return -EINVAL; 560 radeon_surface_init(rdev);
536 } 561
537 if (rdev->is_atom_bios) { 562 /* TODO: disable VGA need to use VGA request */
538 r = radeon_atombios_init(rdev); 563 /* BIOS*/
564 if (!radeon_get_bios(rdev)) {
565 if (ASIC_IS_AVIVO(rdev))
566 return -EINVAL;
567 }
568 if (rdev->is_atom_bios) {
569 r = radeon_atombios_init(rdev);
570 if (r) {
571 return r;
572 }
573 } else {
574 r = radeon_combios_init(rdev);
575 if (r) {
576 return r;
577 }
578 }
579 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
580 if (radeon_gpu_reset(rdev)) {
581 /* FIXME: what do we want to do here ? */
582 }
583 /* check if cards are posted or not */
584 if (!radeon_card_posted(rdev) && rdev->bios) {
585 DRM_INFO("GPU not posted. posting now...\n");
586 if (rdev->is_atom_bios) {
587 atom_asic_init(rdev->mode_info.atom_context);
588 } else {
589 radeon_combios_asic_init(rdev->ddev);
590 }
591 }
592 /* Initialize clocks */
593 r = radeon_clocks_init(rdev);
539 if (r) { 594 if (r) {
540 return r; 595 return r;
541 } 596 }
542 } else { 597 /* Get vram informations */
543 r = radeon_combios_init(rdev); 598 radeon_vram_info(rdev);
599
600 /* Add an MTRR for the VRAM */
601 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
602 MTRR_TYPE_WRCOMB, 1);
603 DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
604 (unsigned)(rdev->mc.mc_vram_size >> 20),
605 (unsigned)(rdev->mc.aper_size >> 20));
606 DRM_INFO("RAM width %dbits %cDR\n",
607 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
608 /* Initialize memory controller (also test AGP) */
609 r = radeon_mc_init(rdev);
544 if (r) { 610 if (r) {
545 return r; 611 return r;
546 } 612 }
547 } 613 /* Fence driver */
548 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 614 r = radeon_fence_driver_init(rdev);
549 if (radeon_gpu_reset(rdev)) {
550 /* FIXME: what do we want to do here ? */
551 }
552 /* check if cards are posted or not */
553 if (!radeon_card_posted(rdev) && rdev->bios) {
554 DRM_INFO("GPU not posted. posting now...\n");
555 if (rdev->is_atom_bios) {
556 atom_asic_init(rdev->mode_info.atom_context);
557 } else {
558 radeon_combios_asic_init(rdev->ddev);
559 }
560 }
561 /* Initialize clocks */
562 r = radeon_clocks_init(rdev);
563 if (r) {
564 return r;
565 }
566 /* Get vram informations */
567 radeon_vram_info(rdev);
568
569 /* Add an MTRR for the VRAM */
570 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
571 MTRR_TYPE_WRCOMB, 1);
572 DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
573 rdev->mc.real_vram_size >> 20,
574 (unsigned)rdev->mc.aper_size >> 20);
575 DRM_INFO("RAM width %dbits %cDR\n",
576 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
577 /* Initialize memory controller (also test AGP) */
578 r = radeon_mc_init(rdev);
579 if (r) {
580 return r;
581 }
582 /* Fence driver */
583 r = radeon_fence_driver_init(rdev);
584 if (r) {
585 return r;
586 }
587 r = radeon_irq_kms_init(rdev);
588 if (r) {
589 return r;
590 }
591 /* Memory manager */
592 r = radeon_object_init(rdev);
593 if (r) {
594 return r;
595 }
596 /* Initialize GART (initialize after TTM so we can allocate
597 * memory through TTM but finalize after TTM) */
598 r = radeon_gart_enable(rdev);
599 if (!r) {
600 r = radeon_gem_init(rdev);
601 }
602
603 /* 1M ring buffer */
604 if (!r) {
605 r = radeon_cp_init(rdev, 1024 * 1024);
606 }
607 if (!r) {
608 r = radeon_wb_init(rdev);
609 if (r) { 615 if (r) {
610 DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
611 return r; 616 return r;
612 } 617 }
613 } 618 r = radeon_irq_kms_init(rdev);
614 if (!r) {
615 r = radeon_ib_pool_init(rdev);
616 if (r) { 619 if (r) {
617 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
618 return r; 620 return r;
619 } 621 }
620 } 622 /* Memory manager */
621 if (!r) { 623 r = radeon_object_init(rdev);
622 r = radeon_ib_test(rdev);
623 if (r) { 624 if (r) {
624 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
625 return r; 625 return r;
626 } 626 }
627 /* Initialize GART (initialize after TTM so we can allocate
628 * memory through TTM but finalize after TTM) */
629 r = radeon_gart_enable(rdev);
630 if (!r) {
631 r = radeon_gem_init(rdev);
632 }
633
634 /* 1M ring buffer */
635 if (!r) {
636 r = radeon_cp_init(rdev, 1024 * 1024);
637 }
638 if (!r) {
639 r = radeon_wb_init(rdev);
640 if (r) {
641 DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
642 return r;
643 }
644 }
645 if (!r) {
646 r = radeon_ib_pool_init(rdev);
647 if (r) {
648 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
649 return r;
650 }
651 }
652 if (!r) {
653 r = radeon_ib_test(rdev);
654 if (r) {
655 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
656 return r;
657 }
658 }
659 ret = r;
627 } 660 }
628 ret = r;
629 r = radeon_modeset_init(rdev); 661 r = radeon_modeset_init(rdev);
630 if (r) { 662 if (r) {
631 return r; 663 return r;
@@ -651,26 +683,29 @@ void radeon_device_fini(struct radeon_device *rdev)
651 rdev->shutdown = true; 683 rdev->shutdown = true;
652 /* Order matter so becarefull if you rearrange anythings */ 684 /* Order matter so becarefull if you rearrange anythings */
653 radeon_modeset_fini(rdev); 685 radeon_modeset_fini(rdev);
654 radeon_ib_pool_fini(rdev); 686 if (!rdev->new_init_path) {
655 radeon_cp_fini(rdev); 687 radeon_ib_pool_fini(rdev);
656 radeon_wb_fini(rdev); 688 radeon_cp_fini(rdev);
657 radeon_gem_fini(rdev); 689 radeon_wb_fini(rdev);
658 radeon_object_fini(rdev); 690 radeon_gem_fini(rdev);
659 /* mc_fini must be after object_fini */ 691 radeon_mc_fini(rdev);
660 radeon_mc_fini(rdev);
661#if __OS_HAS_AGP 692#if __OS_HAS_AGP
662 radeon_agp_fini(rdev); 693 radeon_agp_fini(rdev);
663#endif 694#endif
664 radeon_irq_kms_fini(rdev); 695 radeon_irq_kms_fini(rdev);
665 radeon_fence_driver_fini(rdev); 696 radeon_fence_driver_fini(rdev);
666 radeon_clocks_fini(rdev); 697 radeon_clocks_fini(rdev);
667 if (rdev->is_atom_bios) { 698 radeon_object_fini(rdev);
668 radeon_atombios_fini(rdev); 699 if (rdev->is_atom_bios) {
700 radeon_atombios_fini(rdev);
701 } else {
702 radeon_combios_fini(rdev);
703 }
704 kfree(rdev->bios);
705 rdev->bios = NULL;
669 } else { 706 } else {
670 radeon_combios_fini(rdev); 707 radeon_fini(rdev);
671 } 708 }
672 kfree(rdev->bios);
673 rdev->bios = NULL;
674 iounmap(rdev->rmmio); 709 iounmap(rdev->rmmio);
675 rdev->rmmio = NULL; 710 rdev->rmmio = NULL;
676} 711}
@@ -708,9 +743,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
708 /* wait for gpu to finish processing current batch */ 743 /* wait for gpu to finish processing current batch */
709 radeon_fence_wait_last(rdev); 744 radeon_fence_wait_last(rdev);
710 745
711 radeon_cp_disable(rdev); 746 if (!rdev->new_init_path) {
712 radeon_gart_disable(rdev); 747 radeon_cp_disable(rdev);
713 748 radeon_gart_disable(rdev);
749 } else {
750 radeon_suspend(rdev);
751 }
714 /* evict remaining vram memory */ 752 /* evict remaining vram memory */
715 radeon_object_evict_vram(rdev); 753 radeon_object_evict_vram(rdev);
716 754
@@ -746,33 +784,37 @@ int radeon_resume_kms(struct drm_device *dev)
746 if (radeon_gpu_reset(rdev)) { 784 if (radeon_gpu_reset(rdev)) {
747 /* FIXME: what do we want to do here ? */ 785 /* FIXME: what do we want to do here ? */
748 } 786 }
749 /* post card */ 787 if (!rdev->new_init_path) {
750 if (rdev->is_atom_bios) { 788 /* post card */
751 atom_asic_init(rdev->mode_info.atom_context); 789 if (rdev->is_atom_bios) {
790 atom_asic_init(rdev->mode_info.atom_context);
791 } else {
792 radeon_combios_asic_init(rdev->ddev);
793 }
794 /* Initialize clocks */
795 r = radeon_clocks_init(rdev);
796 if (r) {
797 release_console_sem();
798 return r;
799 }
800 /* Enable IRQ */
801 rdev->irq.sw_int = true;
802 radeon_irq_set(rdev);
803 /* Initialize GPU Memory Controller */
804 r = radeon_mc_init(rdev);
805 if (r) {
806 goto out;
807 }
808 r = radeon_gart_enable(rdev);
809 if (r) {
810 goto out;
811 }
812 r = radeon_cp_init(rdev, rdev->cp.ring_size);
813 if (r) {
814 goto out;
815 }
752 } else { 816 } else {
753 radeon_combios_asic_init(rdev->ddev); 817 radeon_resume(rdev);
754 }
755 /* Initialize clocks */
756 r = radeon_clocks_init(rdev);
757 if (r) {
758 release_console_sem();
759 return r;
760 }
761 /* Enable IRQ */
762 rdev->irq.sw_int = true;
763 radeon_irq_set(rdev);
764 /* Initialize GPU Memory Controller */
765 r = radeon_mc_init(rdev);
766 if (r) {
767 goto out;
768 }
769 r = radeon_gart_enable(rdev);
770 if (r) {
771 goto out;
772 }
773 r = radeon_cp_init(rdev, rdev->cp.ring_size);
774 if (r) {
775 goto out;
776 } 818 }
777out: 819out:
778 fb_set_suspend(rdev->fbdev_info, 0); 820 fb_set_suspend(rdev->fbdev_info, 0);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 40294a07976f..c7b185924f6c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -356,6 +356,12 @@ typedef struct drm_radeon_private {
356 int r700_sc_hiz_tile_fifo_size; 356 int r700_sc_hiz_tile_fifo_size;
357 int r700_sc_earlyz_tile_fifo_fize; 357 int r700_sc_earlyz_tile_fifo_fize;
358 358
359 struct mutex cs_mutex;
360 u32 cs_id_scnt;
361 u32 cs_id_wcnt;
362 /* r6xx/r7xx drm blit vertex buffer */
363 struct drm_buf *blit_vb;
364
359 /* firmware */ 365 /* firmware */
360 const struct firmware *me_fw, *pfp_fw; 366 const struct firmware *me_fw, *pfp_fw;
361} drm_radeon_private_t; 367} drm_radeon_private_t;
@@ -396,6 +402,9 @@ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
396 (off >= gart_start && off <= gart_end)); 402 (off >= gart_start && off <= gart_end));
397} 403}
398 404
405/* radeon_state.c */
406extern void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf);
407
399 /* radeon_cp.c */ 408 /* radeon_cp.c */
400extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv); 409extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
401extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv); 410extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
@@ -487,6 +496,22 @@ extern int r600_cp_dispatch_indirect(struct drm_device *dev,
487 struct drm_buf *buf, int start, int end); 496 struct drm_buf *buf, int start, int end);
488extern int r600_page_table_init(struct drm_device *dev); 497extern int r600_page_table_init(struct drm_device *dev);
489extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); 498extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
499extern int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv);
500extern void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv);
501extern int r600_cp_dispatch_texture(struct drm_device *dev,
502 struct drm_file *file_priv,
503 drm_radeon_texture_t *tex,
504 drm_radeon_tex_image_t *image);
505/* r600_blit.c */
506extern int r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv);
507extern void r600_done_blit_copy(struct drm_device *dev);
508extern void r600_blit_copy(struct drm_device *dev,
509 uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
510 int size_bytes);
511extern void r600_blit_swap(struct drm_device *dev,
512 uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
513 int sx, int sy, int dx, int dy,
514 int w, int h, int src_pitch, int dst_pitch, int cpp);
490 515
491/* Flags for stats.boxes 516/* Flags for stats.boxes
492 */ 517 */
@@ -1114,13 +1139,71 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
1114# define RADEON_CNTL_BITBLT_MULTI 0x00009B00 1139# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
1115# define RADEON_CNTL_SET_SCISSORS 0xC0001E00 1140# define RADEON_CNTL_SET_SCISSORS 0xC0001E00
1116 1141
1117# define R600_IT_INDIRECT_BUFFER 0x00003200 1142# define R600_IT_INDIRECT_BUFFER_END 0x00001700
1118# define R600_IT_ME_INITIALIZE 0x00004400 1143# define R600_IT_SET_PREDICATION 0x00002000
1144# define R600_IT_REG_RMW 0x00002100
1145# define R600_IT_COND_EXEC 0x00002200
1146# define R600_IT_PRED_EXEC 0x00002300
1147# define R600_IT_START_3D_CMDBUF 0x00002400
1148# define R600_IT_DRAW_INDEX_2 0x00002700
1149# define R600_IT_CONTEXT_CONTROL 0x00002800
1150# define R600_IT_DRAW_INDEX_IMMD_BE 0x00002900
1151# define R600_IT_INDEX_TYPE 0x00002A00
1152# define R600_IT_DRAW_INDEX 0x00002B00
1153# define R600_IT_DRAW_INDEX_AUTO 0x00002D00
1154# define R600_IT_DRAW_INDEX_IMMD 0x00002E00
1155# define R600_IT_NUM_INSTANCES 0x00002F00
1156# define R600_IT_STRMOUT_BUFFER_UPDATE 0x00003400
1157# define R600_IT_INDIRECT_BUFFER_MP 0x00003800
1158# define R600_IT_MEM_SEMAPHORE 0x00003900
1159# define R600_IT_MPEG_INDEX 0x00003A00
1160# define R600_IT_WAIT_REG_MEM 0x00003C00
1161# define R600_IT_MEM_WRITE 0x00003D00
1162# define R600_IT_INDIRECT_BUFFER 0x00003200
1163# define R600_IT_CP_INTERRUPT 0x00004000
1164# define R600_IT_SURFACE_SYNC 0x00004300
1165# define R600_CB0_DEST_BASE_ENA (1 << 6)
1166# define R600_TC_ACTION_ENA (1 << 23)
1167# define R600_VC_ACTION_ENA (1 << 24)
1168# define R600_CB_ACTION_ENA (1 << 25)
1169# define R600_DB_ACTION_ENA (1 << 26)
1170# define R600_SH_ACTION_ENA (1 << 27)
1171# define R600_SMX_ACTION_ENA (1 << 28)
1172# define R600_IT_ME_INITIALIZE 0x00004400
1119# define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) 1173# define R600_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
1120# define R600_IT_EVENT_WRITE 0x00004600 1174# define R600_IT_COND_WRITE 0x00004500
1121# define R600_IT_SET_CONFIG_REG 0x00006800 1175# define R600_IT_EVENT_WRITE 0x00004600
1122# define R600_SET_CONFIG_REG_OFFSET 0x00008000 1176# define R600_IT_EVENT_WRITE_EOP 0x00004700
1123# define R600_SET_CONFIG_REG_END 0x0000ac00 1177# define R600_IT_ONE_REG_WRITE 0x00005700
1178# define R600_IT_SET_CONFIG_REG 0x00006800
1179# define R600_SET_CONFIG_REG_OFFSET 0x00008000
1180# define R600_SET_CONFIG_REG_END 0x0000ac00
1181# define R600_IT_SET_CONTEXT_REG 0x00006900
1182# define R600_SET_CONTEXT_REG_OFFSET 0x00028000
1183# define R600_SET_CONTEXT_REG_END 0x00029000
1184# define R600_IT_SET_ALU_CONST 0x00006A00
1185# define R600_SET_ALU_CONST_OFFSET 0x00030000
1186# define R600_SET_ALU_CONST_END 0x00032000
1187# define R600_IT_SET_BOOL_CONST 0x00006B00
1188# define R600_SET_BOOL_CONST_OFFSET 0x0003e380
1189# define R600_SET_BOOL_CONST_END 0x00040000
1190# define R600_IT_SET_LOOP_CONST 0x00006C00
1191# define R600_SET_LOOP_CONST_OFFSET 0x0003e200
1192# define R600_SET_LOOP_CONST_END 0x0003e380
1193# define R600_IT_SET_RESOURCE 0x00006D00
1194# define R600_SET_RESOURCE_OFFSET 0x00038000
1195# define R600_SET_RESOURCE_END 0x0003c000
1196# define R600_SQ_TEX_VTX_INVALID_TEXTURE 0x0
1197# define R600_SQ_TEX_VTX_INVALID_BUFFER 0x1
1198# define R600_SQ_TEX_VTX_VALID_TEXTURE 0x2
1199# define R600_SQ_TEX_VTX_VALID_BUFFER 0x3
1200# define R600_IT_SET_SAMPLER 0x00006E00
1201# define R600_SET_SAMPLER_OFFSET 0x0003c000
1202# define R600_SET_SAMPLER_END 0x0003cff0
1203# define R600_IT_SET_CTL_CONST 0x00006F00
1204# define R600_SET_CTL_CONST_OFFSET 0x0003cff0
1205# define R600_SET_CTL_CONST_END 0x0003e200
1206# define R600_IT_SURFACE_BASE_UPDATE 0x00007300
1124 1207
1125#define RADEON_CP_PACKET_MASK 0xC0000000 1208#define RADEON_CP_PACKET_MASK 0xC0000000
1126#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000 1209#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
@@ -1598,6 +1681,52 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
1598#define R600_CB_COLOR7_BASE 0x2805c 1681#define R600_CB_COLOR7_BASE 0x2805c
1599#define R600_CB_COLOR7_FRAG 0x280fc 1682#define R600_CB_COLOR7_FRAG 0x280fc
1600 1683
1684#define R600_CB_COLOR0_SIZE 0x28060
1685#define R600_CB_COLOR0_VIEW 0x28080
1686#define R600_CB_COLOR0_INFO 0x280a0
1687#define R600_CB_COLOR0_TILE 0x280c0
1688#define R600_CB_COLOR0_FRAG 0x280e0
1689#define R600_CB_COLOR0_MASK 0x28100
1690
1691#define AVIVO_D1MODE_VLINE_START_END 0x6538
1692#define AVIVO_D2MODE_VLINE_START_END 0x6d38
1693#define R600_CP_COHER_BASE 0x85f8
1694#define R600_DB_DEPTH_BASE 0x2800c
1695#define R600_SQ_PGM_START_FS 0x28894
1696#define R600_SQ_PGM_START_ES 0x28880
1697#define R600_SQ_PGM_START_VS 0x28858
1698#define R600_SQ_PGM_RESOURCES_VS 0x28868
1699#define R600_SQ_PGM_CF_OFFSET_VS 0x288d0
1700#define R600_SQ_PGM_START_GS 0x2886c
1701#define R600_SQ_PGM_START_PS 0x28840
1702#define R600_SQ_PGM_RESOURCES_PS 0x28850
1703#define R600_SQ_PGM_EXPORTS_PS 0x28854
1704#define R600_SQ_PGM_CF_OFFSET_PS 0x288cc
1705#define R600_VGT_DMA_BASE 0x287e8
1706#define R600_VGT_DMA_BASE_HI 0x287e4
1707#define R600_VGT_STRMOUT_BASE_OFFSET_0 0x28b10
1708#define R600_VGT_STRMOUT_BASE_OFFSET_1 0x28b14
1709#define R600_VGT_STRMOUT_BASE_OFFSET_2 0x28b18
1710#define R600_VGT_STRMOUT_BASE_OFFSET_3 0x28b1c
1711#define R600_VGT_STRMOUT_BASE_OFFSET_HI_0 0x28b44
1712#define R600_VGT_STRMOUT_BASE_OFFSET_HI_1 0x28b48
1713#define R600_VGT_STRMOUT_BASE_OFFSET_HI_2 0x28b4c
1714#define R600_VGT_STRMOUT_BASE_OFFSET_HI_3 0x28b50
1715#define R600_VGT_STRMOUT_BUFFER_BASE_0 0x28ad8
1716#define R600_VGT_STRMOUT_BUFFER_BASE_1 0x28ae8
1717#define R600_VGT_STRMOUT_BUFFER_BASE_2 0x28af8
1718#define R600_VGT_STRMOUT_BUFFER_BASE_3 0x28b08
1719#define R600_VGT_STRMOUT_BUFFER_OFFSET_0 0x28adc
1720#define R600_VGT_STRMOUT_BUFFER_OFFSET_1 0x28aec
1721#define R600_VGT_STRMOUT_BUFFER_OFFSET_2 0x28afc
1722#define R600_VGT_STRMOUT_BUFFER_OFFSET_3 0x28b0c
1723
1724#define R600_VGT_PRIMITIVE_TYPE 0x8958
1725
1726#define R600_PA_SC_SCREEN_SCISSOR_TL 0x28030
1727#define R600_PA_SC_GENERIC_SCISSOR_TL 0x28240
1728#define R600_PA_SC_WINDOW_SCISSOR_TL 0x28204
1729
1601#define R600_TC_CNTL 0x9608 1730#define R600_TC_CNTL 0x9608
1602# define R600_TC_L2_SIZE(x) ((x) << 5) 1731# define R600_TC_L2_SIZE(x) ((x) << 5)
1603# define R600_L2_DISABLE_LATE_HIT (1 << 9) 1732# define R600_L2_DISABLE_LATE_HIT (1 << 9)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index b4e48dd2e859..506dd4dd3a24 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -53,9 +53,9 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
53 * away 53 * away
54 */ 54 */
55 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 55 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
56 } else { 56 } else
57 radeon_fence_ring_emit(rdev, fence); 57 radeon_fence_ring_emit(rdev, fence);
58 } 58
59 fence->emited = true; 59 fence->emited = true;
60 fence->timeout = jiffies + ((2000 * HZ) / 1000); 60 fence->timeout = jiffies + ((2000 * HZ) / 1000);
61 list_del(&fence->list); 61 list_del(&fence->list);
@@ -168,7 +168,47 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
168 return signaled; 168 return signaled;
169} 169}
170 170
171int radeon_fence_wait(struct radeon_fence *fence, bool interruptible) 171int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
172{
173 struct radeon_device *rdev;
174 unsigned long cur_jiffies;
175 unsigned long timeout;
176 int ret = 0;
177
178 cur_jiffies = jiffies;
179 timeout = HZ / 100;
180
181 if (time_after(fence->timeout, cur_jiffies)) {
182 timeout = fence->timeout - cur_jiffies;
183 }
184
185 rdev = fence->rdev;
186
187 __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
188
189 while (1) {
190 if (radeon_fence_signaled(fence))
191 break;
192
193 if (time_after_eq(jiffies, timeout)) {
194 ret = -EBUSY;
195 break;
196 }
197
198 if (lazy)
199 schedule_timeout(1);
200
201 if (intr && signal_pending(current)) {
202 ret = -ERESTART;
203 break;
204 }
205 }
206 __set_current_state(TASK_RUNNING);
207 return ret;
208}
209
210
211int radeon_fence_wait(struct radeon_fence *fence, bool intr)
172{ 212{
173 struct radeon_device *rdev; 213 struct radeon_device *rdev;
174 unsigned long cur_jiffies; 214 unsigned long cur_jiffies;
@@ -176,7 +216,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
176 bool expired = false; 216 bool expired = false;
177 int r; 217 int r;
178 218
179
180 if (fence == NULL) { 219 if (fence == NULL) {
181 WARN(1, "Querying an invalid fence : %p !\n", fence); 220 WARN(1, "Querying an invalid fence : %p !\n", fence);
182 return 0; 221 return 0;
@@ -185,13 +224,18 @@ int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
185 if (radeon_fence_signaled(fence)) { 224 if (radeon_fence_signaled(fence)) {
186 return 0; 225 return 0;
187 } 226 }
227
228 if (rdev->family >= CHIP_R600)
229 return r600_fence_wait(fence, intr, 0);
230
188retry: 231retry:
189 cur_jiffies = jiffies; 232 cur_jiffies = jiffies;
190 timeout = HZ / 100; 233 timeout = HZ / 100;
191 if (time_after(fence->timeout, cur_jiffies)) { 234 if (time_after(fence->timeout, cur_jiffies)) {
192 timeout = fence->timeout - cur_jiffies; 235 timeout = fence->timeout - cur_jiffies;
193 } 236 }
194 if (interruptible) { 237
238 if (intr) {
195 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 239 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
196 radeon_fence_signaled(fence), timeout); 240 radeon_fence_signaled(fence), timeout);
197 if (unlikely(r == -ERESTARTSYS)) { 241 if (unlikely(r == -ERESTARTSYS)) {
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 28be2f1165cb..21da871a793c 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3255,6 +3255,24 @@
3255#define RADEON_CP_RB_WPTR 0x0714 3255#define RADEON_CP_RB_WPTR 0x0714
3256#define RADEON_CP_RB_RPTR_WR 0x071c 3256#define RADEON_CP_RB_RPTR_WR 0x071c
3257 3257
3258#define RADEON_SCRATCH_UMSK 0x0770
3259#define RADEON_SCRATCH_ADDR 0x0774
3260
3261#define R600_CP_RB_BASE 0xc100
3262#define R600_CP_RB_CNTL 0xc104
3263# define R600_RB_BUFSZ(x) ((x) << 0)
3264# define R600_RB_BLKSZ(x) ((x) << 8)
3265# define R600_RB_NO_UPDATE (1 << 27)
3266# define R600_RB_RPTR_WR_ENA (1 << 31)
3267#define R600_CP_RB_RPTR_WR 0xc108
3268#define R600_CP_RB_RPTR_ADDR 0xc10c
3269#define R600_CP_RB_RPTR_ADDR_HI 0xc110
3270#define R600_CP_RB_WPTR 0xc114
3271#define R600_CP_RB_WPTR_ADDR 0xc118
3272#define R600_CP_RB_WPTR_ADDR_HI 0xc11c
3273#define R600_CP_RB_RPTR 0x8700
3274#define R600_CP_RB_WPTR_DELAY 0x8704
3275
3258#define RADEON_CP_IB_BASE 0x0738 3276#define RADEON_CP_IB_BASE 0x0738
3259#define RADEON_CP_IB_BUFSZ 0x073c 3277#define RADEON_CP_IB_BUFSZ 0x073c
3260 3278
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 60d159308b88..aa9837a6aa75 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -110,7 +110,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
110 return; 110 return;
111 } 111 }
112 list_del(&tmp->list); 112 list_del(&tmp->list);
113 INIT_LIST_HEAD(&tmp->list);
114 if (tmp->fence) { 113 if (tmp->fence) {
115 radeon_fence_unref(&tmp->fence); 114 radeon_fence_unref(&tmp->fence);
116 } 115 }
@@ -119,19 +118,11 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
119 mutex_unlock(&rdev->ib_pool.mutex); 118 mutex_unlock(&rdev->ib_pool.mutex);
120} 119}
121 120
122static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
123{
124 while ((ib->length_dw & rdev->cp.align_mask)) {
125 ib->ptr[ib->length_dw++] = PACKET2(0);
126 }
127}
128
129int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) 121int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
130{ 122{
131 int r = 0; 123 int r = 0;
132 124
133 mutex_lock(&rdev->ib_pool.mutex); 125 mutex_lock(&rdev->ib_pool.mutex);
134 radeon_ib_align(rdev, ib);
135 if (!ib->length_dw || !rdev->cp.ready) { 126 if (!ib->length_dw || !rdev->cp.ready) {
136 /* TODO: Nothings in the ib we should report. */ 127 /* TODO: Nothings in the ib we should report. */
137 mutex_unlock(&rdev->ib_pool.mutex); 128 mutex_unlock(&rdev->ib_pool.mutex);
@@ -145,9 +136,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
145 mutex_unlock(&rdev->ib_pool.mutex); 136 mutex_unlock(&rdev->ib_pool.mutex);
146 return r; 137 return r;
147 } 138 }
148 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1)); 139 radeon_ring_ib_execute(rdev, ib);
149 radeon_ring_write(rdev, ib->gpu_addr);
150 radeon_ring_write(rdev, ib->length_dw);
151 radeon_fence_emit(rdev, ib->fence); 140 radeon_fence_emit(rdev, ib->fence);
152 radeon_ring_unlock_commit(rdev); 141 radeon_ring_unlock_commit(rdev);
153 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); 142 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
@@ -215,69 +204,16 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
215 mutex_unlock(&rdev->ib_pool.mutex); 204 mutex_unlock(&rdev->ib_pool.mutex);
216} 205}
217 206
218int radeon_ib_test(struct radeon_device *rdev)
219{
220 struct radeon_ib *ib;
221 uint32_t scratch;
222 uint32_t tmp = 0;
223 unsigned i;
224 int r;
225
226 r = radeon_scratch_get(rdev, &scratch);
227 if (r) {
228 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
229 return r;
230 }
231 WREG32(scratch, 0xCAFEDEAD);
232 r = radeon_ib_get(rdev, &ib);
233 if (r) {
234 return r;
235 }
236 ib->ptr[0] = PACKET0(scratch, 0);
237 ib->ptr[1] = 0xDEADBEEF;
238 ib->ptr[2] = PACKET2(0);
239 ib->ptr[3] = PACKET2(0);
240 ib->ptr[4] = PACKET2(0);
241 ib->ptr[5] = PACKET2(0);
242 ib->ptr[6] = PACKET2(0);
243 ib->ptr[7] = PACKET2(0);
244 ib->length_dw = 8;
245 r = radeon_ib_schedule(rdev, ib);
246 if (r) {
247 radeon_scratch_free(rdev, scratch);
248 radeon_ib_free(rdev, &ib);
249 return r;
250 }
251 r = radeon_fence_wait(ib->fence, false);
252 if (r) {
253 return r;
254 }
255 for (i = 0; i < rdev->usec_timeout; i++) {
256 tmp = RREG32(scratch);
257 if (tmp == 0xDEADBEEF) {
258 break;
259 }
260 DRM_UDELAY(1);
261 }
262 if (i < rdev->usec_timeout) {
263 DRM_INFO("ib test succeeded in %u usecs\n", i);
264 } else {
265 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
266 scratch, tmp);
267 r = -EINVAL;
268 }
269 radeon_scratch_free(rdev, scratch);
270 radeon_ib_free(rdev, &ib);
271 return r;
272}
273
274 207
275/* 208/*
276 * Ring. 209 * Ring.
277 */ 210 */
278void radeon_ring_free_size(struct radeon_device *rdev) 211void radeon_ring_free_size(struct radeon_device *rdev)
279{ 212{
280 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 213 if (rdev->family >= CHIP_R600)
214 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
215 else
216 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
281 /* This works because ring_size is a power of 2 */ 217 /* This works because ring_size is a power of 2 */
282 rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); 218 rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
283 rdev->cp.ring_free_dw -= rdev->cp.wptr; 219 rdev->cp.ring_free_dw -= rdev->cp.wptr;
@@ -320,11 +256,10 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
320 count_dw_pad = (rdev->cp.align_mask + 1) - 256 count_dw_pad = (rdev->cp.align_mask + 1) -
321 (rdev->cp.wptr & rdev->cp.align_mask); 257 (rdev->cp.wptr & rdev->cp.align_mask);
322 for (i = 0; i < count_dw_pad; i++) { 258 for (i = 0; i < count_dw_pad; i++) {
323 radeon_ring_write(rdev, PACKET2(0)); 259 radeon_ring_write(rdev, 2 << 30);
324 } 260 }
325 DRM_MEMORYBARRIER(); 261 DRM_MEMORYBARRIER();
326 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 262 radeon_cp_commit(rdev);
327 (void)RREG32(RADEON_CP_RB_WPTR);
328 mutex_unlock(&rdev->cp.mutex); 263 mutex_unlock(&rdev->cp.mutex);
329} 264}
330 265
@@ -334,46 +269,6 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev)
334 mutex_unlock(&rdev->cp.mutex); 269 mutex_unlock(&rdev->cp.mutex);
335} 270}
336 271
337int radeon_ring_test(struct radeon_device *rdev)
338{
339 uint32_t scratch;
340 uint32_t tmp = 0;
341 unsigned i;
342 int r;
343
344 r = radeon_scratch_get(rdev, &scratch);
345 if (r) {
346 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
347 return r;
348 }
349 WREG32(scratch, 0xCAFEDEAD);
350 r = radeon_ring_lock(rdev, 2);
351 if (r) {
352 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
353 radeon_scratch_free(rdev, scratch);
354 return r;
355 }
356 radeon_ring_write(rdev, PACKET0(scratch, 0));
357 radeon_ring_write(rdev, 0xDEADBEEF);
358 radeon_ring_unlock_commit(rdev);
359 for (i = 0; i < rdev->usec_timeout; i++) {
360 tmp = RREG32(scratch);
361 if (tmp == 0xDEADBEEF) {
362 break;
363 }
364 DRM_UDELAY(1);
365 }
366 if (i < rdev->usec_timeout) {
367 DRM_INFO("ring test succeeded in %d usecs\n", i);
368 } else {
369 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
370 scratch, tmp);
371 r = -EINVAL;
372 }
373 radeon_scratch_free(rdev, scratch);
374 return r;
375}
376
377int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) 272int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
378{ 273{
379 int r; 274 int r;
diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h
index 63a773578f17..5f9e358ab506 100644
--- a/drivers/gpu/drm/radeon/radeon_share.h
+++ b/drivers/gpu/drm/radeon/radeon_share.h
@@ -28,12 +28,89 @@
28#ifndef __RADEON_SHARE_H__ 28#ifndef __RADEON_SHARE_H__
29#define __RADEON_SHARE_H__ 29#define __RADEON_SHARE_H__
30 30
31/* Common */
32struct radeon_device;
33struct radeon_cs_parser;
34int radeon_clocks_init(struct radeon_device *rdev);
35void radeon_clocks_fini(struct radeon_device *rdev);
36void radeon_scratch_init(struct radeon_device *rdev);
37void radeon_surface_init(struct radeon_device *rdev);
38int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
39
40
41/* R100, RV100, RS100, RV200, RS200, R200, RV250, RS300, RV280 */
31void r100_vram_init_sizes(struct radeon_device *rdev); 42void r100_vram_init_sizes(struct radeon_device *rdev);
32 43
44
45/* R300, R350, RV350, RV380 */
46struct r300_asic {
47 const unsigned *reg_safe_bm;
48 unsigned reg_safe_bm_size;
49};
50
51
52/* RS690, RS740 */
33void rs690_line_buffer_adjust(struct radeon_device *rdev, 53void rs690_line_buffer_adjust(struct radeon_device *rdev,
34 struct drm_display_mode *mode1, 54 struct drm_display_mode *mode1,
35 struct drm_display_mode *mode2); 55 struct drm_display_mode *mode2);
36 56
57
58/* RV515 */
37void rv515_bandwidth_avivo_update(struct radeon_device *rdev); 59void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
38 60
61
62/* R600, RV610, RV630, RV620, RV635, RV670, RS780, RS880 */
63bool r600_card_posted(struct radeon_device *rdev);
64void r600_cp_stop(struct radeon_device *rdev);
65void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
66int r600_cp_resume(struct radeon_device *rdev);
67int r600_count_pipe_bits(uint32_t val);
68int r600_gart_clear_page(struct radeon_device *rdev, int i);
69int r600_mc_wait_for_idle(struct radeon_device *rdev);
70void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
71int r600_ib_test(struct radeon_device *rdev);
72int r600_ring_test(struct radeon_device *rdev);
73int r600_wb_init(struct radeon_device *rdev);
74void r600_wb_fini(struct radeon_device *rdev);
75void r600_scratch_init(struct radeon_device *rdev);
76int r600_blit_init(struct radeon_device *rdev);
77void r600_blit_fini(struct radeon_device *rdev);
78int r600_cp_init_microcode(struct radeon_device *rdev);
79struct r600_asic {
80 unsigned max_pipes;
81 unsigned max_tile_pipes;
82 unsigned max_simds;
83 unsigned max_backends;
84 unsigned max_gprs;
85 unsigned max_threads;
86 unsigned max_stack_entries;
87 unsigned max_hw_contexts;
88 unsigned max_gs_threads;
89 unsigned sx_max_export_size;
90 unsigned sx_max_export_pos_size;
91 unsigned sx_max_export_smx_size;
92 unsigned sq_num_cf_insts;
93};
94
95/* RV770, RV7300, RV710 */
96struct rv770_asic {
97 unsigned max_pipes;
98 unsigned max_tile_pipes;
99 unsigned max_simds;
100 unsigned max_backends;
101 unsigned max_gprs;
102 unsigned max_threads;
103 unsigned max_stack_entries;
104 unsigned max_hw_contexts;
105 unsigned max_gs_threads;
106 unsigned sx_max_export_size;
107 unsigned sx_max_export_pos_size;
108 unsigned sx_max_export_smx_size;
109 unsigned sq_num_cf_insts;
110 unsigned sx_num_of_sets;
111 unsigned sc_prim_fifo_size;
112 unsigned sc_hiz_tile_fifo_size;
113 unsigned sc_earlyz_tile_fifo_fize;
114};
115
39#endif 116#endif
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 2882f40d5ec5..aad0c6fafcf4 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1546,7 +1546,7 @@ static void radeon_cp_dispatch_vertex(struct drm_device * dev,
1546 } while (i < nbox); 1546 } while (i < nbox);
1547} 1547}
1548 1548
1549static void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf) 1549void radeon_cp_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
1550{ 1550{
1551 drm_radeon_private_t *dev_priv = dev->dev_private; 1551 drm_radeon_private_t *dev_priv = dev->dev_private;
1552 struct drm_radeon_master_private *master_priv = master->driver_priv; 1552 struct drm_radeon_master_private *master_priv = master->driver_priv;
@@ -2213,7 +2213,10 @@ static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *f
2213 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) 2213 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
2214 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; 2214 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
2215 2215
2216 radeon_cp_dispatch_swap(dev, file_priv->master); 2216 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
2217 r600_cp_dispatch_swap(dev, file_priv);
2218 else
2219 radeon_cp_dispatch_swap(dev, file_priv->master);
2217 sarea_priv->ctx_owner = 0; 2220 sarea_priv->ctx_owner = 0;
2218 2221
2219 COMMIT_RING(); 2222 COMMIT_RING();
@@ -2412,7 +2415,10 @@ static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file
2412 RING_SPACE_TEST_WITH_RETURN(dev_priv); 2415 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2413 VB_AGE_TEST_WITH_RETURN(dev_priv); 2416 VB_AGE_TEST_WITH_RETURN(dev_priv);
2414 2417
2415 ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image); 2418 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
2419 ret = r600_cp_dispatch_texture(dev, file_priv, tex, &image);
2420 else
2421 ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
2416 2422
2417 return ret; 2423 return ret;
2418} 2424}
@@ -2495,8 +2501,9 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
2495 radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end); 2501 radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
2496 } 2502 }
2497 2503
2498 if (indirect->discard) 2504 if (indirect->discard) {
2499 radeon_cp_discard_buffer(dev, file_priv->master, buf); 2505 radeon_cp_discard_buffer(dev, file_priv->master, buf);
2506 }
2500 2507
2501 COMMIT_RING(); 2508 COMMIT_RING();
2502 return 0; 2509 return 0;
@@ -3227,7 +3234,8 @@ struct drm_ioctl_desc radeon_ioctls[] = {
3227 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), 3234 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
3228 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), 3235 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
3229 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), 3236 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
3230 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH) 3237 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
3238 DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
3231}; 3239};
3232 3240
3233int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3241int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index dc7a44274ea8..acd889c94549 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -376,9 +376,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
376 radeon_move_null(bo, new_mem); 376 radeon_move_null(bo, new_mem);
377 return 0; 377 return 0;
378 } 378 }
379 if (!rdev->cp.ready) { 379 if (!rdev->cp.ready || rdev->asic->copy == NULL) {
380 /* use memcpy */ 380 /* use memcpy */
381 DRM_ERROR("CP is not ready use memcpy.\n");
382 goto memcpy; 381 goto memcpy;
383 } 382 }
384 383
@@ -495,7 +494,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
495 return r; 494 return r;
496 } 495 }
497 DRM_INFO("radeon: %uM of VRAM memory ready\n", 496 DRM_INFO("radeon: %uM of VRAM memory ready\n",
498 rdev->mc.real_vram_size / (1024 * 1024)); 497 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
499 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, 498 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
500 ((rdev->mc.gtt_size) >> PAGE_SHIFT)); 499 ((rdev->mc.gtt_size) >> PAGE_SHIFT));
501 if (r) { 500 if (r) {
@@ -503,7 +502,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
503 return r; 502 return r;
504 } 503 }
505 DRM_INFO("radeon: %uM of GTT memory ready.\n", 504 DRM_INFO("radeon: %uM of GTT memory ready.\n",
506 rdev->mc.gtt_size / (1024 * 1024)); 505 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
507 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { 506 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
508 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; 507 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
509 } 508 }
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b29affd9c5d8..8c3ea7e36060 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -63,7 +63,7 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
63 break; 63 break;
64 default: 64 default:
65 DRM_ERROR("Unable to use IGP GART size %uM\n", 65 DRM_ERROR("Unable to use IGP GART size %uM\n",
66 rdev->mc.gtt_size >> 20); 66 (unsigned)(rdev->mc.gtt_size >> 20));
67 DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n"); 67 DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
68 DRM_ERROR("Forcing to 32M GART size\n"); 68 DRM_ERROR("Forcing to 32M GART size\n");
69 rdev->mc.gtt_size = 32 * 1024 * 1024; 69 rdev->mc.gtt_size = 32 * 1024 * 1024;
diff --git a/drivers/gpu/drm/radeon/rs780.c b/drivers/gpu/drm/radeon/rs780.c
deleted file mode 100644
index 0affcff81825..000000000000
--- a/drivers/gpu/drm/radeon/rs780.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs780 depends on : */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This files gather functions specifics to:
36 * rs780
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int rs780_mc_wait_for_idle(struct radeon_device *rdev);
41void rs780_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
47int rs780_mc_init(struct radeon_device *rdev)
48{
49 rs780_gpu_init(rdev);
50 /* FIXME: implement */
51
52 rs600_mc_disable_clients(rdev);
53 if (rs780_mc_wait_for_idle(rdev)) {
54 printk(KERN_WARNING "Failed to wait MC idle while "
55 "programming pipes. Bad things might happen.\n");
56 }
57 return 0;
58}
59
60void rs780_mc_fini(struct radeon_device *rdev)
61{
62 /* FIXME: implement */
63}
64
65
66/*
67 * Global GPU functions
68 */
69void rs780_errata(struct radeon_device *rdev)
70{
71 rdev->pll_errata = 0;
72}
73
74int rs780_mc_wait_for_idle(struct radeon_device *rdev)
75{
76 /* FIXME: implement */
77 return 0;
78}
79
80void rs780_gpu_init(struct radeon_device *rdev)
81{
82 /* FIXME: implement */
83}
84
85
86/*
87 * VRAM info
88 */
89void rs780_vram_get_type(struct radeon_device *rdev)
90{
91 /* FIXME: implement */
92}
93
94void rs780_vram_info(struct radeon_device *rdev)
95{
96 rs780_vram_get_type(rdev);
97
98 /* FIXME: implement */
99 /* Could aper size report 0 ? */
100 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
101 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
102}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 97965c430c1f..99e397f16384 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -27,7 +27,7 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include "drmP.h" 29#include "drmP.h"
30#include "rv515r.h" 30#include "rv515d.h"
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_share.h" 32#include "radeon_share.h"
33 33
diff --git a/drivers/gpu/drm/radeon/rv515r.h b/drivers/gpu/drm/radeon/rv515d.h
index f3cf84039906..a65e17ec1c08 100644
--- a/drivers/gpu/drm/radeon/rv515r.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -25,10 +25,12 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#ifndef RV515R_H 28#ifndef __RV515D_H__
29#define RV515R_H 29#define __RV515D_H__
30 30
31/* RV515 registers */ 31/*
32 * RV515 registers
33 */
32#define PCIE_INDEX 0x0030 34#define PCIE_INDEX 0x0030
33#define PCIE_DATA 0x0034 35#define PCIE_DATA 0x0034
34#define MC_IND_INDEX 0x0070 36#define MC_IND_INDEX 0x0070
@@ -166,5 +168,53 @@
166#define MC_GLOBW_INIT_LAT_MASK 0xF0000000 168#define MC_GLOBW_INIT_LAT_MASK 0xF0000000
167 169
168 170
171/*
172 * PM4 packet
173 */
174#define CP_PACKET0 0x00000000
175#define PACKET0_BASE_INDEX_SHIFT 0
176#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
177#define PACKET0_COUNT_SHIFT 16
178#define PACKET0_COUNT_MASK (0x3fff << 16)
179#define CP_PACKET1 0x40000000
180#define CP_PACKET2 0x80000000
181#define PACKET2_PAD_SHIFT 0
182#define PACKET2_PAD_MASK (0x3fffffff << 0)
183#define CP_PACKET3 0xC0000000
184#define PACKET3_IT_OPCODE_SHIFT 8
185#define PACKET3_IT_OPCODE_MASK (0xff << 8)
186#define PACKET3_COUNT_SHIFT 16
187#define PACKET3_COUNT_MASK (0x3fff << 16)
188/* PACKET3 op code */
189#define PACKET3_NOP 0x10
190#define PACKET3_3D_DRAW_VBUF 0x28
191#define PACKET3_3D_DRAW_IMMD 0x29
192#define PACKET3_3D_DRAW_INDX 0x2A
193#define PACKET3_3D_LOAD_VBPNTR 0x2F
194#define PACKET3_INDX_BUFFER 0x33
195#define PACKET3_3D_DRAW_VBUF_2 0x34
196#define PACKET3_3D_DRAW_IMMD_2 0x35
197#define PACKET3_3D_DRAW_INDX_2 0x36
198#define PACKET3_BITBLT_MULTI 0x9B
199
200#define PACKET0(reg, n) (CP_PACKET0 | \
201 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
202 REG_SET(PACKET0_COUNT, (n)))
203#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
204#define PACKET3(op, n) (CP_PACKET3 | \
205 REG_SET(PACKET3_IT_OPCODE, (op)) | \
206 REG_SET(PACKET3_COUNT, (n)))
207
208#define PACKET_TYPE0 0
209#define PACKET_TYPE1 1
210#define PACKET_TYPE2 2
211#define PACKET_TYPE3 3
212
213#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
214#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
215#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
218
169#endif 219#endif
170 220
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 21d8ffd57308..57765f6d5b20 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -25,100 +25,975 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/firmware.h>
29#include <linux/platform_device.h>
28#include "drmP.h" 30#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 31#include "radeon.h"
32#include "radeon_share.h"
33#include "rv770d.h"
34#include "avivod.h"
35#include "atom.h"
31 36
32/* rv770,rv730,rv710 depends on : */ 37#define R700_PFP_UCODE_SIZE 848
33void rs600_mc_disable_clients(struct radeon_device *rdev); 38#define R700_PM4_UCODE_SIZE 1360
34 39
35/* This files gather functions specifics to: 40static void rv770_gpu_init(struct radeon_device *rdev);
36 * rv770,rv730,rv710 41void rv770_fini(struct radeon_device *rdev);
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int rv770_mc_wait_for_idle(struct radeon_device *rdev);
41void rv770_gpu_init(struct radeon_device *rdev);
42 42
43 43
44/* 44/*
45 * MC 45 * GART
46 */ 46 */
47int rv770_mc_init(struct radeon_device *rdev) 47int rv770_pcie_gart_enable(struct radeon_device *rdev)
48{ 48{
49 uint32_t tmp; 49 u32 tmp;
50 int r, i;
50 51
51 rv770_gpu_init(rdev); 52 /* Initialize common gart structure */
53 r = radeon_gart_init(rdev);
54 if (r) {
55 return r;
56 }
57 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
58 r = radeon_gart_table_vram_alloc(rdev);
59 if (r) {
60 return r;
61 }
62 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
63 r600_gart_clear_page(rdev, i);
64 /* Setup L2 cache */
65 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
66 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
67 EFFECTIVE_L2_QUEUE_SIZE(7));
68 WREG32(VM_L2_CNTL2, 0);
69 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
70 /* Setup TLB control */
71 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
72 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
73 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
74 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
75 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
76 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
77 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
78 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
79 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
80 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
81 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
82 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
83 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
84 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
85 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
86 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
87 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
88 (u32)(rdev->dummy_page.addr >> 12));
89 for (i = 1; i < 7; i++)
90 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
52 91
53 /* setup the gart before changing location so we can ask to 92 r600_pcie_gart_tlb_flush(rdev);
54 * discard unmapped mc request 93 rdev->gart.ready = true;
55 */
56 /* FIXME: disable out of gart access */
57 tmp = rdev->mc.gtt_location / 4096;
58 tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
59 WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
60 tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
61 tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
62 WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
63
64 rs600_mc_disable_clients(rdev);
65 if (rv770_mc_wait_for_idle(rdev)) {
66 printk(KERN_WARNING "Failed to wait MC idle while "
67 "programming pipes. Bad things might happen.\n");
68 }
69
70 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
71 tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R700_MC_VM_FB_LOCATION, tmp);
74 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
75 tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
76 WREG32(R700_MC_VM_AGP_TOP, tmp);
77 tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
78 WREG32(R700_MC_VM_AGP_BOT, tmp);
79 return 0; 94 return 0;
80} 95}
81 96
82void rv770_mc_fini(struct radeon_device *rdev) 97void rv770_pcie_gart_disable(struct radeon_device *rdev)
83{ 98{
84 /* FIXME: implement */ 99 u32 tmp;
100 int i;
101
102 /* Clear ptes*/
103 for (i = 0; i < rdev->gart.num_gpu_pages; i++)
104 r600_gart_clear_page(rdev, i);
105 r600_pcie_gart_tlb_flush(rdev);
106 /* Disable all tables */
107 for (i = 0; i < 7; i++)
108 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
109
110 /* Setup L2 cache */
111 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
112 EFFECTIVE_L2_QUEUE_SIZE(7));
113 WREG32(VM_L2_CNTL2, 0);
114 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
115 /* Setup TLB control */
116 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
117 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
118 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
119 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
120 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
121 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
122 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
123 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
85} 124}
86 125
87 126
88/* 127/*
89 * Global GPU functions 128 * MC
90 */ 129 */
91void rv770_errata(struct radeon_device *rdev) 130static void rv770_mc_resume(struct radeon_device *rdev)
92{ 131{
93 rdev->pll_errata = 0; 132 u32 d1vga_control, d2vga_control;
133 u32 vga_render_control, vga_hdp_control;
134 u32 d1crtc_control, d2crtc_control;
135 u32 new_d1grph_primary, new_d1grph_secondary;
136 u32 new_d2grph_primary, new_d2grph_secondary;
137 u64 old_vram_start;
138 u32 tmp;
139 int i, j;
140
141 /* Initialize HDP */
142 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
143 WREG32((0x2c14 + j), 0x00000000);
144 WREG32((0x2c18 + j), 0x00000000);
145 WREG32((0x2c1c + j), 0x00000000);
146 WREG32((0x2c20 + j), 0x00000000);
147 WREG32((0x2c24 + j), 0x00000000);
148 }
149 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
150
151 d1vga_control = RREG32(D1VGA_CONTROL);
152 d2vga_control = RREG32(D2VGA_CONTROL);
153 vga_render_control = RREG32(VGA_RENDER_CONTROL);
154 vga_hdp_control = RREG32(VGA_HDP_CONTROL);
155 d1crtc_control = RREG32(D1CRTC_CONTROL);
156 d2crtc_control = RREG32(D2CRTC_CONTROL);
157 old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
158 new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
159 new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
160 new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
161 new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
162 new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
163 new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
164 new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
165 new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
166
167 /* Stop all video */
168 WREG32(D1VGA_CONTROL, 0);
169 WREG32(D2VGA_CONTROL, 0);
170 WREG32(VGA_RENDER_CONTROL, 0);
171 WREG32(D1CRTC_UPDATE_LOCK, 1);
172 WREG32(D2CRTC_UPDATE_LOCK, 1);
173 WREG32(D1CRTC_CONTROL, 0);
174 WREG32(D2CRTC_CONTROL, 0);
175 WREG32(D1CRTC_UPDATE_LOCK, 0);
176 WREG32(D2CRTC_UPDATE_LOCK, 0);
177
178 mdelay(1);
179 if (r600_mc_wait_for_idle(rdev)) {
180 printk(KERN_WARNING "[drm] MC not idle !\n");
181 }
182
183 /* Lockout access through VGA aperture*/
184 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
185
186 /* Update configuration */
187 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
188 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
189 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
190 tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
191 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
192 WREG32(MC_VM_FB_LOCATION, tmp);
193 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
194 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
195 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
196 if (rdev->flags & RADEON_IS_AGP) {
197 WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
198 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
199 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
200 } else {
201 WREG32(MC_VM_AGP_BASE, 0);
202 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
203 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
204 }
205 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
206 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
207 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
208 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
209 WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
210
211 /* Unlock host access */
212 WREG32(VGA_HDP_CONTROL, vga_hdp_control);
213
214 mdelay(1);
215 if (r600_mc_wait_for_idle(rdev)) {
216 printk(KERN_WARNING "[drm] MC not idle !\n");
217 }
218
219 /* Restore video state */
220 WREG32(D1CRTC_UPDATE_LOCK, 1);
221 WREG32(D2CRTC_UPDATE_LOCK, 1);
222 WREG32(D1CRTC_CONTROL, d1crtc_control);
223 WREG32(D2CRTC_CONTROL, d2crtc_control);
224 WREG32(D1CRTC_UPDATE_LOCK, 0);
225 WREG32(D2CRTC_UPDATE_LOCK, 0);
226 WREG32(D1VGA_CONTROL, d1vga_control);
227 WREG32(D2VGA_CONTROL, d2vga_control);
228 WREG32(VGA_RENDER_CONTROL, vga_render_control);
94} 229}
95 230
96int rv770_mc_wait_for_idle(struct radeon_device *rdev) 231
232/*
233 * CP.
234 */
235void r700_cp_stop(struct radeon_device *rdev)
97{ 236{
98 /* FIXME: implement */ 237 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
99 return 0;
100} 238}
101 239
102void rv770_gpu_init(struct radeon_device *rdev) 240
241static int rv770_cp_load_microcode(struct radeon_device *rdev)
103{ 242{
104 /* FIXME: implement */ 243 const __be32 *fw_data;
244 int i;
245
246 if (!rdev->me_fw || !rdev->pfp_fw)
247 return -EINVAL;
248
249 r700_cp_stop(rdev);
250 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
251
252 /* Reset cp */
253 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
254 RREG32(GRBM_SOFT_RESET);
255 mdelay(15);
256 WREG32(GRBM_SOFT_RESET, 0);
257
258 fw_data = (const __be32 *)rdev->pfp_fw->data;
259 WREG32(CP_PFP_UCODE_ADDR, 0);
260 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
261 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
262 WREG32(CP_PFP_UCODE_ADDR, 0);
263
264 fw_data = (const __be32 *)rdev->me_fw->data;
265 WREG32(CP_ME_RAM_WADDR, 0);
266 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
267 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
268
269 WREG32(CP_PFP_UCODE_ADDR, 0);
270 WREG32(CP_ME_RAM_WADDR, 0);
271 WREG32(CP_ME_RAM_RADDR, 0);
272 return 0;
105} 273}
106 274
107 275
108/* 276/*
109 * VRAM info 277 * Core functions
110 */ 278 */
111void rv770_vram_get_type(struct radeon_device *rdev) 279static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
280 u32 num_backends,
281 u32 backend_disable_mask)
112{ 282{
113 /* FIXME: implement */ 283 u32 backend_map = 0;
284 u32 enabled_backends_mask;
285 u32 enabled_backends_count;
286 u32 cur_pipe;
287 u32 swizzle_pipe[R7XX_MAX_PIPES];
288 u32 cur_backend;
289 u32 i;
290
291 if (num_tile_pipes > R7XX_MAX_PIPES)
292 num_tile_pipes = R7XX_MAX_PIPES;
293 if (num_tile_pipes < 1)
294 num_tile_pipes = 1;
295 if (num_backends > R7XX_MAX_BACKENDS)
296 num_backends = R7XX_MAX_BACKENDS;
297 if (num_backends < 1)
298 num_backends = 1;
299
300 enabled_backends_mask = 0;
301 enabled_backends_count = 0;
302 for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
303 if (((backend_disable_mask >> i) & 1) == 0) {
304 enabled_backends_mask |= (1 << i);
305 ++enabled_backends_count;
306 }
307 if (enabled_backends_count == num_backends)
308 break;
309 }
310
311 if (enabled_backends_count == 0) {
312 enabled_backends_mask = 1;
313 enabled_backends_count = 1;
314 }
315
316 if (enabled_backends_count != num_backends)
317 num_backends = enabled_backends_count;
318
319 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
320 switch (num_tile_pipes) {
321 case 1:
322 swizzle_pipe[0] = 0;
323 break;
324 case 2:
325 swizzle_pipe[0] = 0;
326 swizzle_pipe[1] = 1;
327 break;
328 case 3:
329 swizzle_pipe[0] = 0;
330 swizzle_pipe[1] = 2;
331 swizzle_pipe[2] = 1;
332 break;
333 case 4:
334 swizzle_pipe[0] = 0;
335 swizzle_pipe[1] = 2;
336 swizzle_pipe[2] = 3;
337 swizzle_pipe[3] = 1;
338 break;
339 case 5:
340 swizzle_pipe[0] = 0;
341 swizzle_pipe[1] = 2;
342 swizzle_pipe[2] = 4;
343 swizzle_pipe[3] = 1;
344 swizzle_pipe[4] = 3;
345 break;
346 case 6:
347 swizzle_pipe[0] = 0;
348 swizzle_pipe[1] = 2;
349 swizzle_pipe[2] = 4;
350 swizzle_pipe[3] = 5;
351 swizzle_pipe[4] = 3;
352 swizzle_pipe[5] = 1;
353 break;
354 case 7:
355 swizzle_pipe[0] = 0;
356 swizzle_pipe[1] = 2;
357 swizzle_pipe[2] = 4;
358 swizzle_pipe[3] = 6;
359 swizzle_pipe[4] = 3;
360 swizzle_pipe[5] = 1;
361 swizzle_pipe[6] = 5;
362 break;
363 case 8:
364 swizzle_pipe[0] = 0;
365 swizzle_pipe[1] = 2;
366 swizzle_pipe[2] = 4;
367 swizzle_pipe[3] = 6;
368 swizzle_pipe[4] = 3;
369 swizzle_pipe[5] = 1;
370 swizzle_pipe[6] = 7;
371 swizzle_pipe[7] = 5;
372 break;
373 }
374
375 cur_backend = 0;
376 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
377 while (((1 << cur_backend) & enabled_backends_mask) == 0)
378 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
379
380 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
381
382 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
383 }
384
385 return backend_map;
114} 386}
115 387
116void rv770_vram_info(struct radeon_device *rdev) 388static void rv770_gpu_init(struct radeon_device *rdev)
117{ 389{
118 rv770_vram_get_type(rdev); 390 int i, j, num_qd_pipes;
391 u32 sx_debug_1;
392 u32 smx_dc_ctl0;
393 u32 num_gs_verts_per_thread;
394 u32 vgt_gs_per_es;
395 u32 gs_prim_buffer_depth = 0;
396 u32 sq_ms_fifo_sizes;
397 u32 sq_config;
398 u32 sq_thread_resource_mgmt;
399 u32 hdp_host_path_cntl;
400 u32 sq_dyn_gpr_size_simd_ab_0;
401 u32 backend_map;
402 u32 gb_tiling_config = 0;
403 u32 cc_rb_backend_disable = 0;
404 u32 cc_gc_shader_pipe_config = 0;
405 u32 mc_arb_ramcfg;
406 u32 db_debug4;
119 407
120 /* FIXME: implement */ 408 /* setup chip specs */
409 switch (rdev->family) {
410 case CHIP_RV770:
411 rdev->config.rv770.max_pipes = 4;
412 rdev->config.rv770.max_tile_pipes = 8;
413 rdev->config.rv770.max_simds = 10;
414 rdev->config.rv770.max_backends = 4;
415 rdev->config.rv770.max_gprs = 256;
416 rdev->config.rv770.max_threads = 248;
417 rdev->config.rv770.max_stack_entries = 512;
418 rdev->config.rv770.max_hw_contexts = 8;
419 rdev->config.rv770.max_gs_threads = 16 * 2;
420 rdev->config.rv770.sx_max_export_size = 128;
421 rdev->config.rv770.sx_max_export_pos_size = 16;
422 rdev->config.rv770.sx_max_export_smx_size = 112;
423 rdev->config.rv770.sq_num_cf_insts = 2;
424
425 rdev->config.rv770.sx_num_of_sets = 7;
426 rdev->config.rv770.sc_prim_fifo_size = 0xF9;
427 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
428 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
429 break;
430 case CHIP_RV730:
431 rdev->config.rv770.max_pipes = 2;
432 rdev->config.rv770.max_tile_pipes = 4;
433 rdev->config.rv770.max_simds = 8;
434 rdev->config.rv770.max_backends = 2;
435 rdev->config.rv770.max_gprs = 128;
436 rdev->config.rv770.max_threads = 248;
437 rdev->config.rv770.max_stack_entries = 256;
438 rdev->config.rv770.max_hw_contexts = 8;
439 rdev->config.rv770.max_gs_threads = 16 * 2;
440 rdev->config.rv770.sx_max_export_size = 256;
441 rdev->config.rv770.sx_max_export_pos_size = 32;
442 rdev->config.rv770.sx_max_export_smx_size = 224;
443 rdev->config.rv770.sq_num_cf_insts = 2;
444
445 rdev->config.rv770.sx_num_of_sets = 7;
446 rdev->config.rv770.sc_prim_fifo_size = 0xf9;
447 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
448 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
449 if (rdev->config.rv770.sx_max_export_pos_size > 16) {
450 rdev->config.rv770.sx_max_export_pos_size -= 16;
451 rdev->config.rv770.sx_max_export_smx_size += 16;
452 }
453 break;
454 case CHIP_RV710:
455 rdev->config.rv770.max_pipes = 2;
456 rdev->config.rv770.max_tile_pipes = 2;
457 rdev->config.rv770.max_simds = 2;
458 rdev->config.rv770.max_backends = 1;
459 rdev->config.rv770.max_gprs = 256;
460 rdev->config.rv770.max_threads = 192;
461 rdev->config.rv770.max_stack_entries = 256;
462 rdev->config.rv770.max_hw_contexts = 4;
463 rdev->config.rv770.max_gs_threads = 8 * 2;
464 rdev->config.rv770.sx_max_export_size = 128;
465 rdev->config.rv770.sx_max_export_pos_size = 16;
466 rdev->config.rv770.sx_max_export_smx_size = 112;
467 rdev->config.rv770.sq_num_cf_insts = 1;
468
469 rdev->config.rv770.sx_num_of_sets = 7;
470 rdev->config.rv770.sc_prim_fifo_size = 0x40;
471 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
472 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
473 break;
474 case CHIP_RV740:
475 rdev->config.rv770.max_pipes = 4;
476 rdev->config.rv770.max_tile_pipes = 4;
477 rdev->config.rv770.max_simds = 8;
478 rdev->config.rv770.max_backends = 4;
479 rdev->config.rv770.max_gprs = 256;
480 rdev->config.rv770.max_threads = 248;
481 rdev->config.rv770.max_stack_entries = 512;
482 rdev->config.rv770.max_hw_contexts = 8;
483 rdev->config.rv770.max_gs_threads = 16 * 2;
484 rdev->config.rv770.sx_max_export_size = 256;
485 rdev->config.rv770.sx_max_export_pos_size = 32;
486 rdev->config.rv770.sx_max_export_smx_size = 224;
487 rdev->config.rv770.sq_num_cf_insts = 2;
488
489 rdev->config.rv770.sx_num_of_sets = 7;
490 rdev->config.rv770.sc_prim_fifo_size = 0x100;
491 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
492 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
493
494 if (rdev->config.rv770.sx_max_export_pos_size > 16) {
495 rdev->config.rv770.sx_max_export_pos_size -= 16;
496 rdev->config.rv770.sx_max_export_smx_size += 16;
497 }
498 break;
499 default:
500 break;
501 }
502
503 /* Initialize HDP */
504 j = 0;
505 for (i = 0; i < 32; i++) {
506 WREG32((0x2c14 + j), 0x00000000);
507 WREG32((0x2c18 + j), 0x00000000);
508 WREG32((0x2c1c + j), 0x00000000);
509 WREG32((0x2c20 + j), 0x00000000);
510 WREG32((0x2c24 + j), 0x00000000);
511 j += 0x18;
512 }
513
514 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
515
516 /* setup tiling, simd, pipe config */
517 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
518
519 switch (rdev->config.rv770.max_tile_pipes) {
520 case 1:
521 gb_tiling_config |= PIPE_TILING(0);
522 break;
523 case 2:
524 gb_tiling_config |= PIPE_TILING(1);
525 break;
526 case 4:
527 gb_tiling_config |= PIPE_TILING(2);
528 break;
529 case 8:
530 gb_tiling_config |= PIPE_TILING(3);
531 break;
532 default:
533 break;
534 }
535
536 if (rdev->family == CHIP_RV770)
537 gb_tiling_config |= BANK_TILING(1);
538 else
539 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK);
540
541 gb_tiling_config |= GROUP_SIZE(0);
542
543 if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) {
544 gb_tiling_config |= ROW_TILING(3);
545 gb_tiling_config |= SAMPLE_SPLIT(3);
546 } else {
547 gb_tiling_config |=
548 ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
549 gb_tiling_config |=
550 SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
551 }
552
553 gb_tiling_config |= BANK_SWAPS(1);
554
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
558 gb_tiling_config |= BACKEND_MAP(backend_map);
559
560 cc_gc_shader_pipe_config =
561 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
562 cc_gc_shader_pipe_config |=
563 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
564
565 cc_rb_backend_disable =
566 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
567
568 WREG32(GB_TILING_CONFIG, gb_tiling_config);
569 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
570 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
571
572 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
573 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
574 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
575
576 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
577 WREG32(CGTS_SYS_TCC_DISABLE, 0);
578 WREG32(CGTS_TCC_DISABLE, 0);
579 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
580 WREG32(CGTS_USER_TCC_DISABLE, 0);
581
582 num_qd_pipes =
583 R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
584 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
585 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
586
587 /* set HW defaults for 3D engine */
588 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
589 ROQ_IB2_START(0x2b)));
590
591 WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
592
593 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
594 SYNC_GRADIENT |
595 SYNC_WALKER |
596 SYNC_ALIGNER));
597
598 sx_debug_1 = RREG32(SX_DEBUG_1);
599 sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
600 WREG32(SX_DEBUG_1, sx_debug_1);
601
602 smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
603 smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
604 smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
605 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
606
607 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
608 GS_FLUSH_CTL(4) |
609 ACK_FLUSH_CTL(3) |
610 SYNC_FLUSH_CTL));
611
612 if (rdev->family == CHIP_RV770)
613 WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
614 else {
615 db_debug4 = RREG32(DB_DEBUG4);
616 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
617 WREG32(DB_DEBUG4, db_debug4);
618 }
619
620 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
621 POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
622 SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
623
624 WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
625 SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
626 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
627
628 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
629
630 WREG32(VGT_NUM_INSTANCES, 1);
631
632 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
633
634 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
635
636 WREG32(CP_PERFMON_CNTL, 0);
637
638 sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
639 DONE_FIFO_HIWATER(0xe0) |
640 ALU_UPDATE_FIFO_HIWATER(0x8));
641 switch (rdev->family) {
642 case CHIP_RV770:
643 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
644 break;
645 case CHIP_RV730:
646 case CHIP_RV710:
647 case CHIP_RV740:
648 default:
649 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
650 break;
651 }
652 WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
653
654 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
655 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
656 */
657 sq_config = RREG32(SQ_CONFIG);
658 sq_config &= ~(PS_PRIO(3) |
659 VS_PRIO(3) |
660 GS_PRIO(3) |
661 ES_PRIO(3));
662 sq_config |= (DX9_CONSTS |
663 VC_ENABLE |
664 EXPORT_SRC_C |
665 PS_PRIO(0) |
666 VS_PRIO(1) |
667 GS_PRIO(2) |
668 ES_PRIO(3));
669 if (rdev->family == CHIP_RV710)
670 /* no vertex cache */
671 sq_config &= ~VC_ENABLE;
672
673 WREG32(SQ_CONFIG, sq_config);
674
675 WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
676 NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
677 NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
678
679 WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
680 NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
681
682 sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
683 NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
684 NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
685 if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
686 sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
687 else
688 sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
689 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
690
691 WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
692 NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
693
694 WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
695 NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
696
697 sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
698 SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
699 SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
700 SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
701
702 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
703 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
704 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
705 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
706 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
707 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
708 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
709 WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
710
711 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
712 FORCE_EOV_MAX_REZ_CNT(255)));
713
714 if (rdev->family == CHIP_RV710)
715 WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
716 AUTO_INVLD_EN(ES_AND_GS_AUTO)));
717 else
718 WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
719 AUTO_INVLD_EN(ES_AND_GS_AUTO)));
720
721 switch (rdev->family) {
722 case CHIP_RV770:
723 case CHIP_RV730:
724 case CHIP_RV740:
725 gs_prim_buffer_depth = 384;
726 break;
727 case CHIP_RV710:
728 gs_prim_buffer_depth = 128;
729 break;
730 default:
731 break;
732 }
733
734 num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
735 vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
736 /* Max value for this is 256 */
737 if (vgt_gs_per_es > 256)
738 vgt_gs_per_es = 256;
739
740 WREG32(VGT_ES_PER_GS, 128);
741 WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
742 WREG32(VGT_GS_PER_VS, 2);
743
744 /* more default values. 2D/3D driver should adjust as needed */
745 WREG32(VGT_GS_VERTEX_REUSE, 16);
746 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
747 WREG32(VGT_STRMOUT_EN, 0);
748 WREG32(SX_MISC, 0);
749 WREG32(PA_SC_MODE_CNTL, 0);
750 WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
751 WREG32(PA_SC_AA_CONFIG, 0);
752 WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
753 WREG32(PA_SC_LINE_STIPPLE, 0);
754 WREG32(SPI_INPUT_Z, 0);
755 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
756 WREG32(CB_COLOR7_FRAG, 0);
757
758 /* clear render buffer base addresses */
759 WREG32(CB_COLOR0_BASE, 0);
760 WREG32(CB_COLOR1_BASE, 0);
761 WREG32(CB_COLOR2_BASE, 0);
762 WREG32(CB_COLOR3_BASE, 0);
763 WREG32(CB_COLOR4_BASE, 0);
764 WREG32(CB_COLOR5_BASE, 0);
765 WREG32(CB_COLOR6_BASE, 0);
766 WREG32(CB_COLOR7_BASE, 0);
767
768 WREG32(TCP_CNTL, 0);
769
770 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
771 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
772
773 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
774
775 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
776 NUM_CLIP_SEQ(3)));
777
778}
779
780int rv770_mc_init(struct radeon_device *rdev)
781{
782 fixed20_12 a;
783 u32 tmp;
784 int r;
785
786 /* Get VRAM informations */
787 /* FIXME: Don't know how to determine vram width, need to check
788 * vram_width usage
789 */
790 rdev->mc.vram_width = 128;
791 rdev->mc.vram_is_ddr = true;
121 /* Could aper size report 0 ? */ 792 /* Could aper size report 0 ? */
122 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 793 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
123 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 794 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
795 /* Setup GPU memory space */
796 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
797 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
798 if (rdev->flags & RADEON_IS_AGP) {
799 r = radeon_agp_init(rdev);
800 if (r)
801 return r;
802 /* gtt_size is setup by radeon_agp_init */
803 rdev->mc.gtt_location = rdev->mc.agp_base;
804 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
805 /* Try to put vram before or after AGP because we
806 * we want SYSTEM_APERTURE to cover both VRAM and
807 * AGP so that GPU can catch out of VRAM/AGP access
808 */
809 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
810 /* Enought place before */
811 rdev->mc.vram_location = rdev->mc.gtt_location -
812 rdev->mc.mc_vram_size;
813 } else if (tmp > rdev->mc.mc_vram_size) {
814 /* Enought place after */
815 rdev->mc.vram_location = rdev->mc.gtt_location +
816 rdev->mc.gtt_size;
817 } else {
818 /* Try to setup VRAM then AGP might not
819 * not work on some card
820 */
821 rdev->mc.vram_location = 0x00000000UL;
822 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
823 }
824 } else {
825 rdev->mc.vram_location = 0x00000000UL;
826 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
827 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
828 }
829 rdev->mc.vram_start = rdev->mc.vram_location;
830 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
831 rdev->mc.gtt_start = rdev->mc.gtt_location;
832 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
833 /* FIXME: we should enforce default clock in case GPU is not in
834 * default setup
835 */
836 a.full = rfixed_const(100);
837 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
838 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
839 return 0;
840}
/*
 * rv770_gpu_reset() - reset the graphics engine.
 *
 * Not implemented for r7xx yet; reports success so callers can proceed.
 */
int rv770_gpu_reset(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}
846
847int rv770_resume(struct radeon_device *rdev)
848{
849 int r;
850
851 rv770_mc_resume(rdev);
852 r = rv770_pcie_gart_enable(rdev);
853 if (r)
854 return r;
855 rv770_gpu_init(rdev);
856 r = radeon_ring_init(rdev, rdev->cp.ring_size);
857 if (r)
858 return r;
859 r = rv770_cp_load_microcode(rdev);
860 if (r)
861 return r;
862 r = r600_cp_resume(rdev);
863 if (r)
864 return r;
865 r = r600_wb_init(rdev);
866 if (r)
867 return r;
868 return 0;
869}
870
/*
 * rv770_suspend() - quiesce the ASIC before suspend.
 *
 * Currently only halts the command processor.
 */
int rv770_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	return 0;
}
877
878/* Plan is to move initialization in that function and use
879 * helper function so that radeon_device_init pretty much
880 * do nothing more than calling asic specific function. This
881 * should also allow to remove a bunch of callback function
882 * like vram_info.
883 */
884int rv770_init(struct radeon_device *rdev)
885{
886 int r;
887
888 rdev->new_init_path = true;
889 r = radeon_dummy_page_init(rdev);
890 if (r)
891 return r;
892 /* This don't do much */
893 r = radeon_gem_init(rdev);
894 if (r)
895 return r;
896 /* Read BIOS */
897 if (!radeon_get_bios(rdev)) {
898 if (ASIC_IS_AVIVO(rdev))
899 return -EINVAL;
900 }
901 /* Must be an ATOMBIOS */
902 if (!rdev->is_atom_bios)
903 return -EINVAL;
904 r = radeon_atombios_init(rdev);
905 if (r)
906 return r;
907 /* Post card if necessary */
908 if (!r600_card_posted(rdev) && rdev->bios) {
909 DRM_INFO("GPU not posted. posting now...\n");
910 atom_asic_init(rdev->mode_info.atom_context);
911 }
912 /* Initialize scratch registers */
913 r600_scratch_init(rdev);
914 /* Initialize surface registers */
915 radeon_surface_init(rdev);
916 r = radeon_clocks_init(rdev);
917 if (r)
918 return r;
919 /* Fence driver */
920 r = radeon_fence_driver_init(rdev);
921 if (r)
922 return r;
923 r = rv770_mc_init(rdev);
924 if (r) {
925 if (rdev->flags & RADEON_IS_AGP) {
926 /* Retry with disabling AGP */
927 rv770_fini(rdev);
928 rdev->flags &= ~RADEON_IS_AGP;
929 return rv770_init(rdev);
930 }
931 return r;
932 }
933 /* Memory manager */
934 r = radeon_object_init(rdev);
935 if (r)
936 return r;
937 rdev->cp.ring_obj = NULL;
938 r600_ring_init(rdev, 1024 * 1024);
939
940 if (!rdev->me_fw || !rdev->pfp_fw) {
941 r = r600_cp_init_microcode(rdev);
942 if (r) {
943 DRM_ERROR("Failed to load firmware!\n");
944 return r;
945 }
946 }
947
948 r = rv770_resume(rdev);
949 if (r) {
950 if (rdev->flags & RADEON_IS_AGP) {
951 /* Retry with disabling AGP */
952 rv770_fini(rdev);
953 rdev->flags &= ~RADEON_IS_AGP;
954 return rv770_init(rdev);
955 }
956 return r;
957 }
958 r = r600_blit_init(rdev);
959 if (r) {
960 DRM_ERROR("radeon: failled blitter (%d).\n", r);
961 return r;
962 }
963 r = radeon_ib_pool_init(rdev);
964 if (r) {
965 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
966 return r;
967 }
968 r = radeon_ib_test(rdev);
969 if (r) {
970 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
971 return r;
972 }
973 return 0;
974}
975
976void rv770_fini(struct radeon_device *rdev)
977{
978 r600_blit_fini(rdev);
979 radeon_ring_fini(rdev);
980 rv770_pcie_gart_disable(rdev);
981 radeon_gart_table_vram_free(rdev);
982 radeon_gart_fini(rdev);
983 radeon_gem_fini(rdev);
984 radeon_fence_driver_fini(rdev);
985 radeon_clocks_fini(rdev);
986#if __OS_HAS_AGP
987 if (rdev->flags & RADEON_IS_AGP)
988 radeon_agp_fini(rdev);
989#endif
990 radeon_object_fini(rdev);
991 if (rdev->is_atom_bios) {
992 radeon_atombios_fini(rdev);
993 } else {
994 radeon_combios_fini(rdev);
995 }
996 kfree(rdev->bios);
997 rdev->bios = NULL;
998 radeon_dummy_page_fini(rdev);
124} 999}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
new file mode 100644
index 000000000000..4b9c3d6396ff
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -0,0 +1,341 @@
/*
 * Copyright 2009 Advanced Micro Devices, Inc.
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef RV770_H
#define RV770_H

/* r7xx family hardware limits */
#define R7XX_MAX_SH_GPRS           256
#define R7XX_MAX_TEMP_GPRS         16
#define R7XX_MAX_SH_THREADS        256
#define R7XX_MAX_SH_STACK_ENTRIES  4096
#define R7XX_MAX_BACKENDS          8
#define R7XX_MAX_BACKENDS_MASK     0xff
#define R7XX_MAX_SIMDS             16
#define R7XX_MAX_SIMDS_MASK        0xffff
#define R7XX_MAX_PIPES             8
#define R7XX_MAX_PIPES_MASK        0xff

/* Registers */
#define CB_COLOR0_BASE                         0x28040
#define CB_COLOR1_BASE                         0x28044
#define CB_COLOR2_BASE                         0x28048
#define CB_COLOR3_BASE                         0x2804C
#define CB_COLOR4_BASE                         0x28050
#define CB_COLOR5_BASE                         0x28054
#define CB_COLOR6_BASE                         0x28058
#define CB_COLOR7_BASE                         0x2805C
#define CB_COLOR7_FRAG                         0x280FC

#define CC_GC_SHADER_PIPE_CONFIG               0x8950
#define CC_RB_BACKEND_DISABLE                  0x98F4
#define  BACKEND_DISABLE(x)                    ((x) << 16)
#define CC_SYS_RB_BACKEND_DISABLE              0x3F88

#define CGTS_SYS_TCC_DISABLE                   0x3F90
#define CGTS_TCC_DISABLE                       0x9148
#define CGTS_USER_SYS_TCC_DISABLE              0x3F94
#define CGTS_USER_TCC_DISABLE                  0x914C

#define CONFIG_MEMSIZE                         0x5428

#define CP_ME_CNTL                             0x86D8
#define  CP_ME_HALT                            (1 << 28)
#define  CP_PFP_HALT                           (1 << 26)
#define CP_ME_RAM_DATA                         0xC160
#define CP_ME_RAM_RADDR                        0xC158
#define CP_ME_RAM_WADDR                        0xC15C
#define CP_MEQ_THRESHOLDS                      0x8764
#define  STQ_SPLIT(x)                          ((x) << 0)
#define CP_PERFMON_CNTL                        0x87FC
#define CP_PFP_UCODE_ADDR                      0xC150
#define CP_PFP_UCODE_DATA                      0xC154
#define CP_QUEUE_THRESHOLDS                    0x8760
#define  ROQ_IB1_START(x)                      ((x) << 0)
#define  ROQ_IB2_START(x)                      ((x) << 8)
#define CP_RB_CNTL                             0xC104
#define  RB_BUFSZ(x)                           ((x) << 0)
#define  RB_BLKSZ(x)                           ((x) << 8)
#define  RB_NO_UPDATE                          (1 << 27)
#define  RB_RPTR_WR_ENA                        (1 << 31)
#define  BUF_SWAP_32BIT                        (2 << 16)
#define CP_RB_RPTR                             0x8700
#define CP_RB_RPTR_ADDR                        0xC10C
#define CP_RB_RPTR_ADDR_HI                     0xC110
#define CP_RB_RPTR_WR                          0xC108
#define CP_RB_WPTR                             0xC114
#define CP_RB_WPTR_ADDR                        0xC118
#define CP_RB_WPTR_ADDR_HI                     0xC11C
#define CP_RB_WPTR_DELAY                       0x8704
#define CP_SEM_WAIT_TIMER                      0x85BC

#define DB_DEBUG3                              0x98B0
#define  DB_CLK_OFF_DELAY(x)                   ((x) << 11)
#define DB_DEBUG4                              0x9B8C
#define  DISABLE_TILE_COVERED_FOR_PS_ITER      (1 << 6)

#define DCP_TILING_CONFIG                      0x6CA0
#define  PIPE_TILING(x)                        ((x) << 1)
#define  BANK_TILING(x)                        ((x) << 4)
#define  GROUP_SIZE(x)                         ((x) << 6)
#define  ROW_TILING(x)                         ((x) << 8)
#define  BANK_SWAPS(x)                         ((x) << 11)
#define  SAMPLE_SPLIT(x)                       ((x) << 14)
#define  BACKEND_MAP(x)                        ((x) << 16)

#define GB_TILING_CONFIG                       0x98F0

#define GC_USER_SHADER_PIPE_CONFIG             0x8954
#define  INACTIVE_QD_PIPES(x)                  ((x) << 8)
#define  INACTIVE_QD_PIPES_MASK                0x0000FF00
#define  INACTIVE_SIMDS(x)                     ((x) << 16)
#define  INACTIVE_SIMDS_MASK                   0x00FF0000

#define GRBM_CNTL                              0x8000
#define  GRBM_READ_TIMEOUT(x)                  ((x) << 0)
#define GRBM_SOFT_RESET                        0x8020
#define  SOFT_RESET_CP                         (1 << 0)
#define GRBM_STATUS                            0x8010
#define  CMDFIFO_AVAIL_MASK                    0x0000000F
#define  GUI_ACTIVE                            (1 << 31)
#define GRBM_STATUS2                           0x8014

#define HDP_HOST_PATH_CNTL                     0x2C00
#define HDP_NONSURFACE_BASE                    0x2C04
#define HDP_NONSURFACE_INFO                    0x2C08
#define HDP_NONSURFACE_SIZE                    0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL           0x54A0
#define HDP_TILING_CONFIG                      0x2F3C

#define MC_ARB_RAMCFG                          0x2760
#define  NOOFBANK_SHIFT                        0
#define  NOOFBANK_MASK                         0x00000003
#define  NOOFRANK_SHIFT                        2
#define  NOOFRANK_MASK                         0x00000004
#define  NOOFROWS_SHIFT                        3
#define  NOOFROWS_MASK                         0x00000038
#define  NOOFCOLS_SHIFT                        6
#define  NOOFCOLS_MASK                         0x000000C0
#define  CHANSIZE_SHIFT                        8
#define  CHANSIZE_MASK                         0x00000100
#define  BURSTLENGTH_SHIFT                     9
#define  BURSTLENGTH_MASK                      0x00000200
#define MC_VM_AGP_TOP                          0x2028
#define MC_VM_AGP_BOT                          0x202C
#define MC_VM_AGP_BASE                         0x2030
#define MC_VM_FB_LOCATION                      0x2024
#define MC_VM_MB_L1_TLB0_CNTL                  0x2234
#define MC_VM_MB_L1_TLB1_CNTL                  0x2238
#define MC_VM_MB_L1_TLB2_CNTL                  0x223C
#define MC_VM_MB_L1_TLB3_CNTL                  0x2240
#define  ENABLE_L1_TLB                         (1 << 0)
#define  ENABLE_L1_FRAGMENT_PROCESSING         (1 << 1)
#define  SYSTEM_ACCESS_MODE_PA_ONLY            (0 << 3)
#define  SYSTEM_ACCESS_MODE_USE_SYS_MAP        (1 << 3)
#define  SYSTEM_ACCESS_MODE_IN_SYS             (2 << 3)
#define  SYSTEM_ACCESS_MODE_NOT_IN_SYS         (3 << 3)
#define  SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
#define  EFFECTIVE_L1_TLB_SIZE(x)              ((x) << 15)
#define  EFFECTIVE_L1_QUEUE_SIZE(x)            ((x) << 18)
#define MC_VM_MD_L1_TLB0_CNTL                  0x2654
#define MC_VM_MD_L1_TLB1_CNTL                  0x2658
#define MC_VM_MD_L1_TLB2_CNTL                  0x265C
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR     0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR        0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR         0x2034

#define PA_CL_ENHANCE                          0x8A14
#define  CLIP_VTX_REORDER_ENA                  (1 << 0)
#define  NUM_CLIP_SEQ(x)                       ((x) << 1)
#define PA_SC_AA_CONFIG                        0x28C04
#define PA_SC_CLIPRECT_RULE                    0x2820C
#define PA_SC_EDGERULE                         0x28230
#define PA_SC_FIFO_SIZE                        0x8BCC
#define  SC_PRIM_FIFO_SIZE(x)                  ((x) << 0)
#define  SC_HIZ_TILE_FIFO_SIZE(x)              ((x) << 12)
#define PA_SC_FORCE_EOV_MAX_CNTS               0x8B24
#define  FORCE_EOV_MAX_CLK_CNT(x)              ((x) << 0)
#define  FORCE_EOV_MAX_REZ_CNT(x)              ((x) << 16)
#define PA_SC_LINE_STIPPLE                     0x28A0C
#define PA_SC_LINE_STIPPLE_STATE               0x8B10
#define PA_SC_MODE_CNTL                        0x28A4C
#define PA_SC_MULTI_CHIP_CNTL                  0x8B20
#define  SC_EARLYZ_TILE_FIFO_SIZE(x)           ((x) << 20)

#define SCRATCH_REG0                           0x8500
#define SCRATCH_REG1                           0x8504
#define SCRATCH_REG2                           0x8508
#define SCRATCH_REG3                           0x850C
#define SCRATCH_REG4                           0x8510
#define SCRATCH_REG5                           0x8514
#define SCRATCH_REG6                           0x8518
#define SCRATCH_REG7                           0x851C
#define SCRATCH_UMSK                           0x8540
#define SCRATCH_ADDR                           0x8544

#define SMX_DC_CTL0                            0xA020
#define  USE_HASH_FUNCTION                     (1 << 0)
#define  CACHE_DEPTH(x)                        ((x) << 1)
#define  FLUSH_ALL_ON_EVENT                    (1 << 10)
#define  STALL_ON_EVENT                        (1 << 11)
#define SMX_EVENT_CTL                          0xA02C
#define  ES_FLUSH_CTL(x)                       ((x) << 0)
#define  GS_FLUSH_CTL(x)                       ((x) << 3)
#define  ACK_FLUSH_CTL(x)                      ((x) << 6)
#define  SYNC_FLUSH_CTL                        (1 << 8)

#define SPI_CONFIG_CNTL                        0x9100
#define  GPR_WRITE_PRIORITY(x)                 ((x) << 0)
#define  DISABLE_INTERP_1                      (1 << 5)
#define SPI_CONFIG_CNTL_1                      0x913C
#define  VTX_DONE_DELAY(x)                     ((x) << 0)
#define  INTERP_ONE_PRIM_PER_ROW               (1 << 4)
#define SPI_INPUT_Z                            0x286D8
#define SPI_PS_IN_CONTROL_0                    0x286CC
#define  NUM_INTERP(x)                         ((x) << 0)
#define  POSITION_ENA                          (1 << 8)
#define  POSITION_CENTROID                     (1 << 9)
#define  POSITION_ADDR(x)                      ((x) << 10)
#define  PARAM_GEN(x)                          ((x) << 15)
#define  PARAM_GEN_ADDR(x)                     ((x) << 19)
#define  BARYC_SAMPLE_CNTL(x)                  ((x) << 26)
#define  PERSP_GRADIENT_ENA                    (1 << 28)
#define  LINEAR_GRADIENT_ENA                   (1 << 29)
#define  POSITION_SAMPLE                       (1 << 30)
#define  BARYC_AT_SAMPLE_ENA                   (1 << 31)

#define SQ_CONFIG                              0x8C00
#define  VC_ENABLE                             (1 << 0)
#define  EXPORT_SRC_C                          (1 << 1)
#define  DX9_CONSTS                            (1 << 2)
#define  ALU_INST_PREFER_VECTOR                (1 << 3)
#define  DX10_CLAMP                            (1 << 4)
#define  CLAUSE_SEQ_PRIO(x)                    ((x) << 8)
#define  PS_PRIO(x)                            ((x) << 24)
#define  VS_PRIO(x)                            ((x) << 26)
#define  GS_PRIO(x)                            ((x) << 28)
#define SQ_DYN_GPR_SIZE_SIMD_AB_0              0x8DB0
#define  SIMDA_RING0(x)                        ((x) << 0)
#define  SIMDA_RING1(x)                        ((x) << 8)
#define  SIMDB_RING0(x)                        ((x) << 16)
#define  SIMDB_RING1(x)                        ((x) << 24)
#define SQ_DYN_GPR_SIZE_SIMD_AB_1              0x8DB4
#define SQ_DYN_GPR_SIZE_SIMD_AB_2              0x8DB8
#define SQ_DYN_GPR_SIZE_SIMD_AB_3              0x8DBC
#define SQ_DYN_GPR_SIZE_SIMD_AB_4              0x8DC0
#define SQ_DYN_GPR_SIZE_SIMD_AB_5              0x8DC4
#define SQ_DYN_GPR_SIZE_SIMD_AB_6              0x8DC8
#define SQ_DYN_GPR_SIZE_SIMD_AB_7              0x8DCC
/* ES_PRIO is a SQ_CONFIG field (kept here to match original layout) */
#define  ES_PRIO(x)                            ((x) << 30)
#define SQ_GPR_RESOURCE_MGMT_1                 0x8C04
#define  NUM_PS_GPRS(x)                        ((x) << 0)
#define  NUM_VS_GPRS(x)                        ((x) << 16)
#define  DYN_GPR_ENABLE                        (1 << 27)
#define  NUM_CLAUSE_TEMP_GPRS(x)               ((x) << 28)
#define SQ_GPR_RESOURCE_MGMT_2                 0x8C08
#define  NUM_GS_GPRS(x)                        ((x) << 0)
#define  NUM_ES_GPRS(x)                        ((x) << 16)
#define SQ_MS_FIFO_SIZES                       0x8CF0
#define  CACHE_FIFO_SIZE(x)                    ((x) << 0)
#define  FETCH_FIFO_HIWATER(x)                 ((x) << 8)
#define  DONE_FIFO_HIWATER(x)                  ((x) << 16)
#define  ALU_UPDATE_FIFO_HIWATER(x)            ((x) << 24)
#define SQ_STACK_RESOURCE_MGMT_1               0x8C10
#define  NUM_PS_STACK_ENTRIES(x)               ((x) << 0)
#define  NUM_VS_STACK_ENTRIES(x)               ((x) << 16)
#define SQ_STACK_RESOURCE_MGMT_2               0x8C14
#define  NUM_GS_STACK_ENTRIES(x)               ((x) << 0)
#define  NUM_ES_STACK_ENTRIES(x)               ((x) << 16)
#define SQ_THREAD_RESOURCE_MGMT                0x8C0C
#define  NUM_PS_THREADS(x)                     ((x) << 0)
#define  NUM_VS_THREADS(x)                     ((x) << 8)
#define  NUM_GS_THREADS(x)                     ((x) << 16)
#define  NUM_ES_THREADS(x)                     ((x) << 24)

#define SX_DEBUG_1                             0x9058
#define  ENABLE_NEW_SMX_ADDRESS                (1 << 16)
#define SX_EXPORT_BUFFER_SIZES                 0x900C
#define  COLOR_BUFFER_SIZE(x)                  ((x) << 0)
#define  POSITION_BUFFER_SIZE(x)               ((x) << 8)
#define  SMX_BUFFER_SIZE(x)                    ((x) << 16)
#define SX_MISC                                0x28350

#define TA_CNTL_AUX                            0x9508
#define  DISABLE_CUBE_WRAP                     (1 << 0)
#define  DISABLE_CUBE_ANISO                    (1 << 1)
#define  SYNC_GRADIENT                         (1 << 24)
#define  SYNC_WALKER                           (1 << 25)
#define  SYNC_ALIGNER                          (1 << 26)
#define  BILINEAR_PRECISION_6_BIT              (0 << 31)
#define  BILINEAR_PRECISION_8_BIT              (1 << 31)

#define TCP_CNTL                               0x9610

#define VGT_CACHE_INVALIDATION                 0x88C4
#define  CACHE_INVALIDATION(x)                 ((x) << 0)
#define   VC_ONLY                              0
#define   TC_ONLY                              1
#define   VC_AND_TC                            2
#define  AUTO_INVLD_EN(x)                      ((x) << 6)
#define   NO_AUTO                              0
#define   ES_AUTO                              1
#define   GS_AUTO                              2
#define   ES_AND_GS_AUTO                       3
#define VGT_ES_PER_GS                          0x88CC
#define VGT_GS_PER_ES                          0x88C8
#define VGT_GS_PER_VS                          0x88E8
#define VGT_GS_VERTEX_REUSE                    0x88D4
#define VGT_NUM_INSTANCES                      0x8974
#define VGT_OUT_DEALLOC_CNTL                   0x28C5C
#define  DEALLOC_DIST_MASK                     0x0000007F
#define VGT_STRMOUT_EN                         0x28AB0
#define VGT_VERTEX_REUSE_BLOCK_CNTL            0x28C58
#define  VTX_REUSE_DEPTH_MASK                  0x000000FF

#define VM_CONTEXT0_CNTL                       0x1410
#define  ENABLE_CONTEXT                        (1 << 0)
#define  PAGE_TABLE_DEPTH(x)                   (((x) & 3) << 1)
#define  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR       0x153C
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR        0x157C
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR      0x155C
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_L2_CNTL                             0x1400
#define  ENABLE_L2_CACHE                       (1 << 0)
#define  ENABLE_L2_FRAGMENT_PROCESSING         (1 << 1)
#define  ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
#define  EFFECTIVE_L2_QUEUE_SIZE(x)            (((x) & 7) << 14)
#define VM_L2_CNTL2                            0x1404
#define  INVALIDATE_ALL_L1_TLBS                (1 << 0)
#define  INVALIDATE_L2_CACHE                   (1 << 1)
#define VM_L2_CNTL3                            0x1408
#define  BANK_SELECT(x)                        ((x) << 0)
#define  CACHE_UPDATE_MODE(x)                  ((x) << 6)
#define VM_L2_STATUS                           0x140C
#define  L2_BUSY                               (1 << 0)

#define WAIT_UNTIL                             0x8040

#endif