-rw-r--r--  drivers/gpu/drm/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 34
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 12
-rw-r--r--  drivers/gpu/drm/radeon/ObjectID.h | 578
-rw-r--r--  drivers/gpu/drm/radeon/atom-bits.h | 48
-rw-r--r--  drivers/gpu/drm/radeon/atom-names.h | 100
-rw-r--r--  drivers/gpu/drm/radeon/atom-types.h | 42
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 1215
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 149
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 4785
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 695
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 1524
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 1116
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 52
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 223
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 749
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 234
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 169
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 114
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 793
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 249
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 405
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 1298
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 133
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 390
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 833
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 2481
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 603
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 249
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 252
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 813
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 692
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 217
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 1708
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 825
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 387
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fixed.h | 50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 233
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 287
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 209
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 158
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 295
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1276
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1288
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 398
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 511
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 3570
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 485
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 653
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 411
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 324
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 181
-rw-r--r--  drivers/gpu/drm/radeon/rs780.c | 102
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 504
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 124
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 150
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 1698
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 561
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 454
-rw-r--r--  drivers/gpu/drm/ttm/ttm_global.c | 114
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 234
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 50
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 635
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  include/drm/drm_pciids.h | 8
-rw-r--r--  include/drm/radeon_drm.h | 130
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 618
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 867
-rw-r--r--  include/drm/ttm/ttm_memory.h | 153
-rw-r--r--  include/drm/ttm/ttm_module.h | 58
-rw-r--r--  include/drm/ttm/ttm_placement.h | 92
75 files changed, 41092 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5d46e7199d4..c961fe415aef 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,14 @@ menuconfig DRM
           details. You should also select and configure AGP
           (/dev/agpgart) support.
 
+config DRM_TTM
+        tristate
+        depends on DRM
+        help
+          GPU memory management subsystem for devices with multiple
+          GPU memory types. Will be enabled automatically if a device driver
+          uses it.
+
 config DRM_TDFX
         tristate "3dfx Banshee/Voodoo3+"
         depends on DRM && PCI
@@ -36,6 +44,11 @@ config DRM_R128
 config DRM_RADEON
         tristate "ATI Radeon"
         depends on DRM && PCI
+        select FB_CFB_FILLRECT
+        select FB_CFB_COPYAREA
+        select FB_CFB_IMAGEBLIT
+        select FB
+        select FRAMEBUFFER_CONSOLE if !EMBEDDED
         help
           Choose this option if you have an ATI Radeon graphics card. There
           are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4ec5061fa584..4e89ab08b7b8 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VIA)   +=via/
-
+obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7819fd930a51..a912a0ff11cc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -188,36 +188,34 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 
 
 
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
                                      unsigned long size, unsigned alignment)
 {
 
         struct drm_mm_node *align_splitoff = NULL;
-        struct drm_mm_node *child;
         unsigned tmp = 0;
 
         if (alignment)
-                tmp = parent->start % alignment;
+                tmp = node->start % alignment;
 
         if (tmp) {
                 align_splitoff =
-                    drm_mm_split_at_start(parent, alignment - tmp, 0);
+                    drm_mm_split_at_start(node, alignment - tmp, 0);
                 if (unlikely(align_splitoff == NULL))
                         return NULL;
         }
 
-        if (parent->size == size) {
-                list_del_init(&parent->fl_entry);
-                parent->free = 0;
-                return parent;
+        if (node->size == size) {
+                list_del_init(&node->fl_entry);
+                node->free = 0;
         } else {
-                child = drm_mm_split_at_start(parent, size, 0);
+                node = drm_mm_split_at_start(node, size, 0);
         }
 
         if (align_splitoff)
                 drm_mm_put_block(align_splitoff);
 
-        return child;
+        return node;
 }
 
 EXPORT_SYMBOL(drm_mm_get_block);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
new file mode 100644
index 000000000000..2168d67f09a6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,34 @@
+config DRM_RADEON_KMS
+        bool "Enable modesetting on radeon by default"
+        depends on DRM_RADEON
+        select DRM_TTM
+        help
+          Choose this option if you want kernel modesetting enabled by default,
+          and you have a new enough userspace to support this. Running old
+          userspaces with this enabled will cause pain.
+
+          When kernel modesetting is enabled, the IOCTLs of the radeon/drm
+          driver are considered invalid: an error message is printed
+          in the log and they return failure.
+
+          KMS-enabled userspace uses a new API to talk to the radeon/drm
+          driver. The new API provides functions to create/destroy/share/mmap
+          buffer objects, which are then managed by the kernel memory manager
+          (here TTM). To submit commands to the GPU, userspace provides a
+          buffer holding the command stream; along with this buffer, userspace
+          has to provide a list of the buffer objects used by the command
+          stream. The kernel radeon driver will then place the buffers in
+          GPU-accessible memory and update the command stream to reflect
+          the position of the different buffers.
+
+          The kernel will also perform security checks on the command stream
+          provided by the user; we want to catch and forbid any illegal use
+          of the GPU, such as DMA into random system memory or into memory
+          not owned by the process supplying the command stream. This part
+          of the code is still incomplete, which is why we propose this patch
+          as a staging driver addition; future security checks might forbid
+          the current experimental userspace from running.
+
+          This code supports the following hardware: R1XX, R2XX, R3XX, R4XX,
+          R5XX (radeon up to X1950). Work is underway to provide support for
+          R6XX, R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 52ce439a0f2e..5fae1e074b4b 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -3,7 +3,17 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o r600_cp.o
+radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
+        radeon_irq.o r300_cmdbuf.o r600_cp.o
+
+radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
+        radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
+        atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
+        radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
+        radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
+        radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
+        radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+        rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644
index 000000000000..6d0183c61d3b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,578 @@
1/*
2* Copyright 2006-2007 Advanced Micro Devices, Inc.
3*
4* Permission is hereby granted, free of charge, to any person obtaining a
5* copy of this software and associated documentation files (the "Software"),
6* to deal in the Software without restriction, including without limitation
7* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8* and/or sell copies of the Software, and to permit persons to whom the
9* Software is furnished to do so, subject to the following conditions:
10*
11* The above copyright notice and this permission notice shall be included in
12* all copies or substantial portions of the Software.
13*
14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20* OTHER DEALINGS IN THE SOFTWARE.
21*/
22/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
23
24#ifndef _OBJECTID_H
25#define _OBJECTID_H
26
27#if defined(_X86_)
28#pragma pack(1)
29#endif
30
31/****************************************************/
32/* Graphics Object Type Definition */
33/****************************************************/
34#define GRAPH_OBJECT_TYPE_NONE 0x0
35#define GRAPH_OBJECT_TYPE_GPU 0x1
36#define GRAPH_OBJECT_TYPE_ENCODER 0x2
37#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
38#define GRAPH_OBJECT_TYPE_ROUTER 0x4
39/* deleted */
40
41/****************************************************/
42/* Encoder Object ID Definition */
43/****************************************************/
44#define ENCODER_OBJECT_ID_NONE 0x00
45
46/* Radeon Class Display Hardware */
47#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
48#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
49#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
50#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
51#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
52#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
53#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
54
55/* External Third Party Encoders */
56#define ENCODER_OBJECT_ID_SI170B 0x08
57#define ENCODER_OBJECT_ID_CH7303 0x09
58#define ENCODER_OBJECT_ID_CH7301 0x0A
59#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
60#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
61#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
62#define ENCODER_OBJECT_ID_TITFP513 0x0E
63#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
64#define ENCODER_OBJECT_ID_VT1623 0x10
65#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
66#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
67/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
68#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
69#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
70#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
71#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
72#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
73#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
74#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
75#define ENCODER_OBJECT_ID_VT1625 0x1A
76#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
77#define ENCODER_OBJECT_ID_DP_AN9801 0x1C
78#define ENCODER_OBJECT_ID_DP_DP501 0x1D
79#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY 0x1E
80#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
81#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
82#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
83
84#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
85
86/****************************************************/
87/* Connector Object ID Definition */
88/****************************************************/
89#define CONNECTOR_OBJECT_ID_NONE 0x00
90#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
91#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
92#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
93#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 0x04
94#define CONNECTOR_OBJECT_ID_VGA 0x05
95#define CONNECTOR_OBJECT_ID_COMPOSITE 0x06
96#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
97#define CONNECTOR_OBJECT_ID_YPbPr 0x08
98#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
99#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
100#define CONNECTOR_OBJECT_ID_SCART 0x0B
101#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
102#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
103#define CONNECTOR_OBJECT_ID_LVDS 0x0E
104#define CONNECTOR_OBJECT_ID_7PIN_DIN 0x0F
105#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR 0x10
106#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
107#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
108#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
109
110/* deleted */
111
112/****************************************************/
113/* Router Object ID Definition */
114/****************************************************/
115#define ROUTER_OBJECT_ID_NONE 0x00
116#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
117
118/****************************************************/
119/* Graphics Object ENUM ID Definition */
120/****************************************************/
121#define GRAPH_OBJECT_ENUM_ID1 0x01
122#define GRAPH_OBJECT_ENUM_ID2 0x02
123#define GRAPH_OBJECT_ENUM_ID3 0x03
124#define GRAPH_OBJECT_ENUM_ID4 0x04
125#define GRAPH_OBJECT_ENUM_ID5 0x05
126#define GRAPH_OBJECT_ENUM_ID6 0x06
127
128/****************************************************/
129/* Graphics Object ID Bit definition */
130/****************************************************/
131#define OBJECT_ID_MASK 0x00FF
132#define ENUM_ID_MASK 0x0700
133#define RESERVED1_ID_MASK 0x0800
134#define OBJECT_TYPE_MASK 0x7000
135#define RESERVED2_ID_MASK 0x8000
136
137#define OBJECT_ID_SHIFT 0x00
138#define ENUM_ID_SHIFT 0x08
139#define OBJECT_TYPE_SHIFT 0x0C
140
141/****************************************************/
142/* Graphics Object family definition */
143/****************************************************/
144#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
145 (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
146 GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
147/****************************************************/
148/* GPU Object ID definition - Shared with BIOS */
149/****************************************************/
150#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
151 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
152
153/****************************************************/
154/* Encoder Object ID definition - Shared with BIOS */
155/****************************************************/
156/*
157#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
158#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
159#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
160#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
161#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
162#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
163#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
164#define ENCODER_SIL170B_ENUM_ID1 0x2108
165#define ENCODER_CH7303_ENUM_ID1 0x2109
166#define ENCODER_CH7301_ENUM_ID1 0x210A
167#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
168#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 0x210C
169#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 0x210D
170#define ENCODER_TITFP513_ENUM_ID1 0x210E
171#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 0x210F
172#define ENCODER_VT1623_ENUM_ID1 0x2110
173#define ENCODER_HDMI_SI1930_ENUM_ID1 0x2111
174#define ENCODER_HDMI_INTERNAL_ENUM_ID1 0x2112
175#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
176#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
177#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
178#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
179#define ENCODER_SI178_ENUM_ID1 0x2117
180#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
181#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
182#define ENCODER_VT1625_ENUM_ID1 0x211A
183#define ENCODER_HDMI_SI1932_ENUM_ID1 0x211B
184#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1 0x211C
185#define ENCODER_DP_DP501_ENUM_ID1 0x211D
186#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
187*/
188#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
189 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
190 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
191 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
192
193#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
194 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
195 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
196 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
197
198#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
199 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
200 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
201 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
202
203#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
204 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
205 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
206 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
207
208#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
209 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
210 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
211 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
212
213#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
214 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
215 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
216 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
217
218#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
219 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
220 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
221 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
222
223#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
224 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
225 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
226 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
227
228#define ENCODER_SIL170B_ENUM_ID1 \
229 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
230 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
231 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
232
233#define ENCODER_CH7303_ENUM_ID1 \
234 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
235 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
236 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
237
238#define ENCODER_CH7301_ENUM_ID1 \
239 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
240 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
241 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
242
243#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
244 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
245 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
246 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
247
248#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
249 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
250 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
251 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
252
253#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
254 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
255 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
256 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
257
258#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
259 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
260 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
261 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
262
263#define ENCODER_TITFP513_ENUM_ID1 \
264 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
265 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
266 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
267
268#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
269 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
270 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
271 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
272
273#define ENCODER_VT1623_ENUM_ID1 \
274 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
275 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
276 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
277
278#define ENCODER_HDMI_SI1930_ENUM_ID1 \
279 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
280 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
281 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
282
283#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
284 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
285 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
286 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
287
288#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
289 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
290 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
291 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
292
293#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
294 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
295 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
296 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
297
298#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
299 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
300 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
301 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
302
303#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
304 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
305 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
306 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
307
308#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
309 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
310 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
311 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
312
313#define ENCODER_SI178_ENUM_ID1 \
314 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
315 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
316 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
317
318#define ENCODER_MVPU_FPGA_ENUM_ID1 \
319 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
320 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
321 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
322
323#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
324 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
325 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
326 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
327
328#define ENCODER_VT1625_ENUM_ID1 \
329 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
330 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
331 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
332
333#define ENCODER_HDMI_SI1932_ENUM_ID1 \
334 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
335 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
336 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
337
338#define ENCODER_DP_DP501_ENUM_ID1 \
339 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
340 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
341 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
342
343#define ENCODER_DP_AN9801_ENUM_ID1 \
344 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
345 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
346 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
347
348#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
349 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
350 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
351 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
352
353#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
354 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
355 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
356 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
357
358#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
359 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
361 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
362
363#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
364 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
365 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
366 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
367
368#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
369 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
370 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
371 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
372
373#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
374 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
375 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
376 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
377
378#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
379 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
380 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
381 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
382
383#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
384 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
385 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
386 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
387
388/****************************************************/
389/* Connector Object ID definition - Shared with BIOS */
390/****************************************************/
391/*
392#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 0x3101
393#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 0x3102
394#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 0x3103
395#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 0x3104
396#define CONNECTOR_VGA_ENUM_ID1 0x3105
397#define CONNECTOR_COMPOSITE_ENUM_ID1 0x3106
398#define CONNECTOR_SVIDEO_ENUM_ID1 0x3107
399#define CONNECTOR_YPbPr_ENUM_ID1 0x3108
400#define CONNECTOR_D_CONNECTORE_ENUM_ID1 0x3109
401#define CONNECTOR_9PIN_DIN_ENUM_ID1 0x310A
402#define CONNECTOR_SCART_ENUM_ID1 0x310B
403#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 0x310C
404#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 0x310D
405#define CONNECTOR_LVDS_ENUM_ID1 0x310E
406#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
407#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
408*/
409#define CONNECTOR_LVDS_ENUM_ID1 \
410 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
411 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
412 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
413
414#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
415 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
416 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
417 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
418
419#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
420 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
423
424#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
425 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
426 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
427 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
428
429#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
430 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
431 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
432 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
433
434#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
435 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
436 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
437 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
438
439#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
440 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
441 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
442 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
443
444#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
445 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
446 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
447 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
448
449#define CONNECTOR_VGA_ENUM_ID1 \
450 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
451 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
452 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
453
454#define CONNECTOR_VGA_ENUM_ID2 \
455 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
456 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
457 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
458
459#define CONNECTOR_COMPOSITE_ENUM_ID1 \
460 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
461 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
462 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
463
464#define CONNECTOR_SVIDEO_ENUM_ID1 \
465 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
466 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
467 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
468
469#define CONNECTOR_YPbPr_ENUM_ID1 \
470 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
471 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
472 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
473
474#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
475 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
476 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
477 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
478
479#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
480 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
481 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
482 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
483
484#define CONNECTOR_SCART_ENUM_ID1 \
485 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
486 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
487 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
488
489#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
490 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
491 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
492 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
493
494#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
495 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
496 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
497 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
498
499#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
500 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
501 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
502 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
503
504#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
505 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
506 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
507 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
508
509#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
510 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
511 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
512 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
513
514#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
515 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
516 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
517 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
518
519#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
520 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
521 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
522 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
523
524#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
525 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
526 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
527 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
528
529#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
530 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
531 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
532 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
533
534#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
535 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
536 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
537 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
538
539#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
540 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
541 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
543
544#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
545 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
546 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
547 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
548
549#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
550 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
551 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
552 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
553
554/****************************************************/
555/* Router Object ID definition - Shared with BIOS */
556/****************************************************/
557#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
558 (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
559 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
560 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
561
562/* deleted */
563
564/****************************************************/
565/* Object Cap definition - Shared with BIOS */
566/****************************************************/
567#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
568#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
569
570#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
571#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
572#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
573
574#if defined(_X86_)
575#pragma pack()
576#endif
577
578#endif /*GRAPHICTYPE */
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644
index 000000000000..e8fae5c77514
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#ifndef ATOM_BITS_H
26#define ATOM_BITS_H
27
28static inline uint8_t get_u8(void *bios, int ptr)
29{
30 return ((unsigned char *)bios)[ptr];
31}
32#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
33#define CU8(ptr) get_u8(ctx->bios, (ptr))
34static inline uint16_t get_u16(void *bios, int ptr)
35{
36 return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
37}
38#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
39#define CU16(ptr) get_u16(ctx->bios, (ptr))
40static inline uint32_t get_u32(void *bios, int ptr)
41{
42 return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
43}
44#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
45#define CU32(ptr) get_u32(ctx->bios, (ptr))
46#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
47
48#endif
diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644
index 000000000000..6f907a5ffa5f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#ifndef ATOM_NAMES_H
26#define ATOM_NAMES_H
27
28#include "atom.h"
29
30#ifdef ATOM_DEBUG
31
32#define ATOM_OP_NAMES_CNT 123
33static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
34"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
35"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
36"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
37"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
38"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
39"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
40"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
41"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
42"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
43"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
44"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
45"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
46"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
47"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
48"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
49"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
50"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
51"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
52"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
53"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
54"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
55"DEBUG", "CTB_DS",
56};
57
58#define ATOM_TABLE_NAMES_CNT 74
59static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
60"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
61"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
62"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
63"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
64"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
65"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
66"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
67"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
68"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
69"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
70"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
71"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
72"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
73"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
74"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
75"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
76"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
77"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
78"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
79"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
80"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
81"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
82"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
83"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
84"MemoryDeviceInit", "EnableYUV",
85};
86
87#define ATOM_IO_NAMES_CNT 5
88static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
89"MM", "PLL", "MC", "PCIE", "PCIE PORT",
90};
91
92#else
93
94#define ATOM_OP_NAMES_CNT 0
95#define ATOM_TABLE_NAMES_CNT 0
96#define ATOM_IO_NAMES_CNT 0
97
98#endif
99
100#endif
diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644
index 000000000000..1125b866cdb0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright 2008 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Dave Airlie
23 */
24
25#ifndef ATOM_TYPES_H
26#define ATOM_TYPES_H
27
28/* sync atom types to kernel types */
29
30typedef uint16_t USHORT;
31typedef uint32_t ULONG;
32typedef uint8_t UCHAR;
33
34
35#ifndef ATOM_BIG_ENDIAN
36#if defined(__BIG_ENDIAN)
37#define ATOM_BIG_ENDIAN 1
38#else
39#define ATOM_BIG_ENDIAN 0
40#endif
41#endif
42#endif
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
new file mode 100644
index 000000000000..901befe03da2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -0,0 +1,1215 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27
28#define ATOM_DEBUG
29
30#include "atom.h"
31#include "atom-names.h"
32#include "atom-bits.h"
33
34#define ATOM_COND_ABOVE 0
35#define ATOM_COND_ABOVEOREQUAL 1
36#define ATOM_COND_ALWAYS 2
37#define ATOM_COND_BELOW 3
38#define ATOM_COND_BELOWOREQUAL 4
39#define ATOM_COND_EQUAL 5
40#define ATOM_COND_NOTEQUAL 6
41
42#define ATOM_PORT_ATI 0
43#define ATOM_PORT_PCI 1
44#define ATOM_PORT_SYSIO 2
45
46#define ATOM_UNIT_MICROSEC 0
47#define ATOM_UNIT_MILLISEC 1
48
49#define PLL_INDEX 2
50#define PLL_DATA 3
51
52typedef struct {
53 struct atom_context *ctx;
54
55 uint32_t *ps, *ws;
56 int ps_shift;
57 uint16_t start;
58} atom_exec_context;
59
60int atom_debug = 0;
61void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
62
63static uint32_t atom_arg_mask[8] =
64 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
650xFF000000 };
66static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
67
68static int atom_dst_to_src[8][4] = {
69 /* translate destination alignment field to the source alignment encoding */
70 {0, 0, 0, 0},
71 {1, 2, 3, 0},
72 {1, 2, 3, 0},
73 {1, 2, 3, 0},
74 {4, 5, 6, 7},
75 {4, 5, 6, 7},
76 {4, 5, 6, 7},
77 {4, 5, 6, 7},
78};
79static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
80
81static int debug_depth = 0;
82#ifdef ATOM_DEBUG
83static void debug_print_spaces(int n)
84{
85 while (n--)
86 printk(" ");
87}
88
89#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
90#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
91#else
92#define DEBUG(...) do { } while (0)
93#define SDEBUG(...) do { } while (0)
94#endif
95
96static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
97 uint32_t index, uint32_t data)
98{
99 uint32_t temp = 0xCDCDCDCD;
100 while (1)
101 switch (CU8(base)) {
102 case ATOM_IIO_NOP:
103 base++;
104 break;
105 case ATOM_IIO_READ:
106 temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
107 base += 3;
108 break;
109 case ATOM_IIO_WRITE:
110 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
111 base += 3;
112 break;
113 case ATOM_IIO_CLEAR:
114 temp &=
115 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
116 CU8(base + 2));
117 base += 3;
118 break;
119 case ATOM_IIO_SET:
120 temp |=
121 (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
122 2);
123 base += 3;
124 break;
125 case ATOM_IIO_MOVE_INDEX:
126 temp &=
127 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
128 CU8(base + 2));
129 temp |=
130 ((index >> CU8(base + 2)) &
131 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
132 3);
133 base += 4;
134 break;
135 case ATOM_IIO_MOVE_DATA:
136 temp &=
137 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
138 CU8(base + 2));
139 temp |=
140 ((data >> CU8(base + 2)) &
141 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
142 3);
143 base += 4;
144 break;
145 case ATOM_IIO_MOVE_ATTR:
146 temp &=
147 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
148 CU8(base + 2));
149 temp |=
150 ((ctx->
151 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
152 CU8
153 (base
154 +
155 1))))
156 << CU8(base + 3);
157 base += 4;
158 break;
159 case ATOM_IIO_END:
160 return temp;
161 default:
162 printk(KERN_INFO "Unknown IIO opcode.\n");
163 return 0;
164 }
165}
166
167static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
168 int *ptr, uint32_t *saved, int print)
169{
170 uint32_t idx, val = 0xCDCDCDCD, align, arg;
171 struct atom_context *gctx = ctx->ctx;
172 arg = attr & 7;
173 align = (attr >> 3) & 7;
174 switch (arg) {
175 case ATOM_ARG_REG:
176 idx = U16(*ptr);
177 (*ptr) += 2;
178 if (print)
179 DEBUG("REG[0x%04X]", idx);
180 idx += gctx->reg_block;
181 switch (gctx->io_mode) {
182 case ATOM_IO_MM:
183 val = gctx->card->reg_read(gctx->card, idx);
184 break;
185 case ATOM_IO_PCI:
186 printk(KERN_INFO
187 "PCI registers are not implemented.\n");
188 return 0;
189 case ATOM_IO_SYSIO:
190 printk(KERN_INFO
191 "SYSIO registers are not implemented.\n");
192 return 0;
193 default:
194 if (!(gctx->io_mode & 0x80)) {
195 printk(KERN_INFO "Bad IO mode.\n");
196 return 0;
197 }
198 if (!gctx->iio[gctx->io_mode & 0x7F]) {
199 printk(KERN_INFO
200 "Undefined indirect IO read method %d.\n",
201 gctx->io_mode & 0x7F);
202 return 0;
203 }
204 val =
205 atom_iio_execute(gctx,
206 gctx->iio[gctx->io_mode & 0x7F],
207 idx, 0);
208 }
209 break;
210 case ATOM_ARG_PS:
211 idx = U8(*ptr);
212 (*ptr)++;
213 val = le32_to_cpu(ctx->ps[idx]);
214 if (print)
215 DEBUG("PS[0x%02X,0x%04X]", idx, val);
216 break;
217 case ATOM_ARG_WS:
218 idx = U8(*ptr);
219 (*ptr)++;
220 if (print)
221 DEBUG("WS[0x%02X]", idx);
222 switch (idx) {
223 case ATOM_WS_QUOTIENT:
224 val = gctx->divmul[0];
225 break;
226 case ATOM_WS_REMAINDER:
227 val = gctx->divmul[1];
228 break;
229 case ATOM_WS_DATAPTR:
230 val = gctx->data_block;
231 break;
232 case ATOM_WS_SHIFT:
233 val = gctx->shift;
234 break;
235 case ATOM_WS_OR_MASK:
236 val = 1 << gctx->shift;
237 break;
238 case ATOM_WS_AND_MASK:
239 val = ~(1 << gctx->shift);
240 break;
241 case ATOM_WS_FB_WINDOW:
242 val = gctx->fb_base;
243 break;
244 case ATOM_WS_ATTRIBUTES:
245 val = gctx->io_attr;
246 break;
247 default:
248 val = ctx->ws[idx];
249 }
250 break;
251 case ATOM_ARG_ID:
252 idx = U16(*ptr);
253 (*ptr) += 2;
254 if (print) {
255 if (gctx->data_block)
256 DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
257 else
258 DEBUG("ID[0x%04X]", idx);
259 }
260 val = U32(idx + gctx->data_block);
261 break;
262 case ATOM_ARG_FB:
263 idx = U8(*ptr);
264 (*ptr)++;
265 if (print)
266 DEBUG("FB[0x%02X]", idx);
267 printk(KERN_INFO "FB access is not implemented.\n");
268 return 0;
269 case ATOM_ARG_IMM:
270 switch (align) {
271 case ATOM_SRC_DWORD:
272 val = U32(*ptr);
273 (*ptr) += 4;
274 if (print)
275 DEBUG("IMM 0x%08X\n", val);
276 return val;
277 case ATOM_SRC_WORD0:
278 case ATOM_SRC_WORD8:
279 case ATOM_SRC_WORD16:
280 val = U16(*ptr);
281 (*ptr) += 2;
282 if (print)
283 DEBUG("IMM 0x%04X\n", val);
284 return val;
285 case ATOM_SRC_BYTE0:
286 case ATOM_SRC_BYTE8:
287 case ATOM_SRC_BYTE16:
288 case ATOM_SRC_BYTE24:
289 val = U8(*ptr);
290 (*ptr)++;
291 if (print)
292 DEBUG("IMM 0x%02X\n", val);
293 return val;
294 }
295 return 0;
296 case ATOM_ARG_PLL:
297 idx = U8(*ptr);
298 (*ptr)++;
299 if (print)
300 DEBUG("PLL[0x%02X]", idx);
301 val = gctx->card->pll_read(gctx->card, idx);
302 break;
303 case ATOM_ARG_MC:
304 idx = U8(*ptr);
305 (*ptr)++;
306 if (print)
307 DEBUG("MC[0x%02X]", idx);
308 val = gctx->card->mc_read(gctx->card, idx);
309 break;
310 }
311 if (saved)
312 *saved = val;
313 val &= atom_arg_mask[align];
314 val >>= atom_arg_shift[align];
315 if (print)
316 switch (align) {
317 case ATOM_SRC_DWORD:
318 DEBUG(".[31:0] -> 0x%08X\n", val);
319 break;
320 case ATOM_SRC_WORD0:
321 DEBUG(".[15:0] -> 0x%04X\n", val);
322 break;
323 case ATOM_SRC_WORD8:
324 DEBUG(".[23:8] -> 0x%04X\n", val);
325 break;
326 case ATOM_SRC_WORD16:
327 DEBUG(".[31:16] -> 0x%04X\n", val);
328 break;
329 case ATOM_SRC_BYTE0:
330 DEBUG(".[7:0] -> 0x%02X\n", val);
331 break;
332 case ATOM_SRC_BYTE8:
333 DEBUG(".[15:8] -> 0x%02X\n", val);
334 break;
335 case ATOM_SRC_BYTE16:
336 DEBUG(".[23:16] -> 0x%02X\n", val);
337 break;
338 case ATOM_SRC_BYTE24:
339 DEBUG(".[31:24] -> 0x%02X\n", val);
340 break;
341 }
342 return val;
343}
344
345static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
346{
347 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
348 switch (arg) {
349 case ATOM_ARG_REG:
350 case ATOM_ARG_ID:
351 (*ptr) += 2;
352 break;
353 case ATOM_ARG_PLL:
354 case ATOM_ARG_MC:
355 case ATOM_ARG_PS:
356 case ATOM_ARG_WS:
357 case ATOM_ARG_FB:
358 (*ptr)++;
359 break;
360 case ATOM_ARG_IMM:
361 switch (align) {
362 case ATOM_SRC_DWORD:
363 (*ptr) += 4;
364 return;
365 case ATOM_SRC_WORD0:
366 case ATOM_SRC_WORD8:
367 case ATOM_SRC_WORD16:
368 (*ptr) += 2;
369 return;
370 case ATOM_SRC_BYTE0:
371 case ATOM_SRC_BYTE8:
372 case ATOM_SRC_BYTE16:
373 case ATOM_SRC_BYTE24:
374 (*ptr)++;
375 return;
376 }
377 return;
378 }
379}
380
381static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
382{
383 return atom_get_src_int(ctx, attr, ptr, NULL, 1);
384}
385
386static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
387 int *ptr, uint32_t *saved, int print)
388{
389 return atom_get_src_int(ctx,
390 arg | atom_dst_to_src[(attr >> 3) &
391 7][(attr >> 6) & 3] << 3,
392 ptr, saved, print);
393}
394
395static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
396{
397 atom_skip_src_int(ctx,
398 arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
399 3] << 3, ptr);
400}
401
402static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
403 int *ptr, uint32_t val, uint32_t saved)
404{
405 uint32_t align =
406 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
407 val, idx;
408 struct atom_context *gctx = ctx->ctx;
409 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
410 val <<= atom_arg_shift[align];
411 val &= atom_arg_mask[align];
412 saved &= ~atom_arg_mask[align];
413 val |= saved;
414 switch (arg) {
415 case ATOM_ARG_REG:
416 idx = U16(*ptr);
417 (*ptr) += 2;
418 DEBUG("REG[0x%04X]", idx);
419 idx += gctx->reg_block;
420 switch (gctx->io_mode) {
421 case ATOM_IO_MM:
422 if (idx == 0)
423 gctx->card->reg_write(gctx->card, idx,
424 val << 2);
425 else
426 gctx->card->reg_write(gctx->card, idx, val);
427 break;
428 case ATOM_IO_PCI:
429 printk(KERN_INFO
430 "PCI registers are not implemented.\n");
431 return;
432 case ATOM_IO_SYSIO:
433 printk(KERN_INFO
434 "SYSIO registers are not implemented.\n");
435 return;
436 default:
437 if (!(gctx->io_mode & 0x80)) {
438 printk(KERN_INFO "Bad IO mode.\n");
439 return;
440 }
441 if (!gctx->iio[gctx->io_mode & 0xFF]) {
442 printk(KERN_INFO
443 "Undefined indirect IO write method %d.\n",
444 gctx->io_mode & 0x7F);
445 return;
446 }
447 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
448 idx, val);
449 }
450 break;
451 case ATOM_ARG_PS:
452 idx = U8(*ptr);
453 (*ptr)++;
454 DEBUG("PS[0x%02X]", idx);
455 ctx->ps[idx] = cpu_to_le32(val);
456 break;
457 case ATOM_ARG_WS:
458 idx = U8(*ptr);
459 (*ptr)++;
460 DEBUG("WS[0x%02X]", idx);
461 switch (idx) {
462 case ATOM_WS_QUOTIENT:
463 gctx->divmul[0] = val;
464 break;
465 case ATOM_WS_REMAINDER:
466 gctx->divmul[1] = val;
467 break;
468 case ATOM_WS_DATAPTR:
469 gctx->data_block = val;
470 break;
471 case ATOM_WS_SHIFT:
472 gctx->shift = val;
473 break;
474 case ATOM_WS_OR_MASK:
475 case ATOM_WS_AND_MASK:
476 break;
477 case ATOM_WS_FB_WINDOW:
478 gctx->fb_base = val;
479 break;
480 case ATOM_WS_ATTRIBUTES:
481 gctx->io_attr = val;
482 break;
483 default:
484 ctx->ws[idx] = val;
485 }
486 break;
487 case ATOM_ARG_FB:
488 idx = U8(*ptr);
489 (*ptr)++;
490 DEBUG("FB[0x%02X]", idx);
491 printk(KERN_INFO "FB access is not implemented.\n");
492 return;
493 case ATOM_ARG_PLL:
494 idx = U8(*ptr);
495 (*ptr)++;
496 DEBUG("PLL[0x%02X]", idx);
497 gctx->card->pll_write(gctx->card, idx, val);
498 break;
499 case ATOM_ARG_MC:
500 idx = U8(*ptr);
501 (*ptr)++;
502 DEBUG("MC[0x%02X]", idx);
503 gctx->card->mc_write(gctx->card, idx, val);
504 return;
505 }
506 switch (align) {
507 case ATOM_SRC_DWORD:
508 DEBUG(".[31:0] <- 0x%08X\n", old_val);
509 break;
510 case ATOM_SRC_WORD0:
511 DEBUG(".[15:0] <- 0x%04X\n", old_val);
512 break;
513 case ATOM_SRC_WORD8:
514 DEBUG(".[23:8] <- 0x%04X\n", old_val);
515 break;
516 case ATOM_SRC_WORD16:
517 DEBUG(".[31:16] <- 0x%04X\n", old_val);
518 break;
519 case ATOM_SRC_BYTE0:
520 DEBUG(".[7:0] <- 0x%02X\n", old_val);
521 break;
522 case ATOM_SRC_BYTE8:
523 DEBUG(".[15:8] <- 0x%02X\n", old_val);
524 break;
525 case ATOM_SRC_BYTE16:
526 DEBUG(".[23:16] <- 0x%02X\n", old_val);
527 break;
528 case ATOM_SRC_BYTE24:
529 DEBUG(".[31:24] <- 0x%02X\n", old_val);
530 break;
531 }
532}
533
534static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
535{
536 uint8_t attr = U8((*ptr)++);
537 uint32_t dst, src, saved;
538 int dptr = *ptr;
539 SDEBUG(" dst: ");
540 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
541 SDEBUG(" src: ");
542 src = atom_get_src(ctx, attr, ptr);
543 dst += src;
544 SDEBUG(" dst: ");
545 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
546}
547
548static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
549{
550 uint8_t attr = U8((*ptr)++);
551 uint32_t dst, src, saved;
552 int dptr = *ptr;
553 SDEBUG(" dst: ");
554 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
555 SDEBUG(" src: ");
556 src = atom_get_src(ctx, attr, ptr);
557 dst &= src;
558 SDEBUG(" dst: ");
559 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
560}
561
562static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
563{
564 printk("ATOM BIOS beeped!\n");
565}
566
567static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
568{
569 int idx = U8((*ptr)++);
570 if (idx < ATOM_TABLE_NAMES_CNT)
571 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
572 else
573 SDEBUG(" table: %d\n", idx);
574 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
575 atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
576}
577
578static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
579{
580 uint8_t attr = U8((*ptr)++);
581 uint32_t saved;
582 int dptr = *ptr;
583 attr &= 0x38;
584 attr |= atom_def_dst[attr >> 3] << 6;
585 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
586 SDEBUG(" dst: ");
587 atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
588}
589
590static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
591{
592 uint8_t attr = U8((*ptr)++);
593 uint32_t dst, src;
594 SDEBUG(" src1: ");
595 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
596 SDEBUG(" src2: ");
597 src = atom_get_src(ctx, attr, ptr);
598 ctx->ctx->cs_equal = (dst == src);
599 ctx->ctx->cs_above = (dst > src);
600 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
601 ctx->ctx->cs_above ? "GT" : "LE");
602}
603
604static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
605{
606 uint8_t count = U8((*ptr)++);
607 SDEBUG(" count: %d\n", count);
608 if (arg == ATOM_UNIT_MICROSEC)
609 schedule_timeout_uninterruptible(usecs_to_jiffies(count));
610 else
611 schedule_timeout_uninterruptible(msecs_to_jiffies(count));
612}
613
614static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
615{
616 uint8_t attr = U8((*ptr)++);
617 uint32_t dst, src;
618 SDEBUG(" src1: ");
619 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
620 SDEBUG(" src2: ");
621 src = atom_get_src(ctx, attr, ptr);
622 if (src != 0) {
623 ctx->ctx->divmul[0] = dst / src;
624 ctx->ctx->divmul[1] = dst % src;
625 } else {
626 ctx->ctx->divmul[0] = 0;
627 ctx->ctx->divmul[1] = 0;
628 }
629}
630
631static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
632{
633 /* functionally, a nop */
634}
635
636static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
637{
638 int execute = 0, target = U16(*ptr);
639 (*ptr) += 2;
640 switch (arg) {
641 case ATOM_COND_ABOVE:
642 execute = ctx->ctx->cs_above;
643 break;
644 case ATOM_COND_ABOVEOREQUAL:
645 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
646 break;
647 case ATOM_COND_ALWAYS:
648 execute = 1;
649 break;
650 case ATOM_COND_BELOW:
651 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
652 break;
653 case ATOM_COND_BELOWOREQUAL:
654 execute = !ctx->ctx->cs_above;
655 break;
656 case ATOM_COND_EQUAL:
657 execute = ctx->ctx->cs_equal;
658 break;
659 case ATOM_COND_NOTEQUAL:
660 execute = !ctx->ctx->cs_equal;
661 break;
662 }
663 if (arg != ATOM_COND_ALWAYS)
664 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
665 SDEBUG(" target: 0x%04X\n", target);
666 if (execute)
667 *ptr = ctx->start + target;
668}
669
670static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
671{
672 uint8_t attr = U8((*ptr)++);
673 uint32_t dst, src1, src2, saved;
674 int dptr = *ptr;
675 SDEBUG(" dst: ");
676 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
677 SDEBUG(" src1: ");
678 src1 = atom_get_src(ctx, attr, ptr);
679 SDEBUG(" src2: ");
680 src2 = atom_get_src(ctx, attr, ptr);
681 dst &= src1;
682 dst |= src2;
683 SDEBUG(" dst: ");
684 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
685}
686
687static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
688{
689 uint8_t attr = U8((*ptr)++);
690 uint32_t src, saved;
691 int dptr = *ptr;
692 if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
693 atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
694 else {
695 atom_skip_dst(ctx, arg, attr, ptr);
696 saved = 0xCDCDCDCD;
697 }
698 SDEBUG(" src: ");
699 src = atom_get_src(ctx, attr, ptr);
700 SDEBUG(" dst: ");
701 atom_put_dst(ctx, arg, attr, &dptr, src, saved);
702}
703
704static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
705{
706 uint8_t attr = U8((*ptr)++);
707 uint32_t dst, src;
708 SDEBUG(" src1: ");
709 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
710 SDEBUG(" src2: ");
711 src = atom_get_src(ctx, attr, ptr);
712 ctx->ctx->divmul[0] = dst * src;
713}
714
715static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
716{
717 /* nothing */
718}
719
720static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
721{
722 uint8_t attr = U8((*ptr)++);
723 uint32_t dst, src, saved;
724 int dptr = *ptr;
725 SDEBUG(" dst: ");
726 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
727 SDEBUG(" src: ");
728 src = atom_get_src(ctx, attr, ptr);
729 dst |= src;
730 SDEBUG(" dst: ");
731 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
732}
733
734static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
735{
736 uint8_t val = U8((*ptr)++);
737 SDEBUG("POST card output: 0x%02X\n", val);
738}
739
740static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
741{
742 printk(KERN_INFO "unimplemented!\n");
743}
744
745static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
746{
747 printk(KERN_INFO "unimplemented!\n");
748}
749
750static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
751{
752 printk(KERN_INFO "unimplemented!\n");
753}
754
755static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
756{
757 int idx = U8(*ptr);
758 (*ptr)++;
759 SDEBUG(" block: %d\n", idx);
760 if (!idx)
761 ctx->ctx->data_block = 0;
762 else if (idx == 255)
763 ctx->ctx->data_block = ctx->start;
764 else
765 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
766 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
767}
768
769static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
770{
771 uint8_t attr = U8((*ptr)++);
772 SDEBUG(" fb_base: ");
773 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
774}
775
776static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
777{
778 int port;
779 switch (arg) {
780 case ATOM_PORT_ATI:
781 port = U16(*ptr);
782 if (port < ATOM_IO_NAMES_CNT)
783 SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
784 else
785 SDEBUG(" port: %d\n", port);
786 if (!port)
787 ctx->ctx->io_mode = ATOM_IO_MM;
788 else
789 ctx->ctx->io_mode = ATOM_IO_IIO | port;
790 (*ptr) += 2;
791 break;
792 case ATOM_PORT_PCI:
793 ctx->ctx->io_mode = ATOM_IO_PCI;
794 (*ptr)++;
795 break;
796 case ATOM_PORT_SYSIO:
797 ctx->ctx->io_mode = ATOM_IO_SYSIO;
798 (*ptr)++;
799 break;
800 }
801}
802
803static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
804{
805 ctx->ctx->reg_block = U16(*ptr);
806 (*ptr) += 2;
807 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
808}
809
810static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
811{
812 uint8_t attr = U8((*ptr)++), shift;
813 uint32_t saved, dst;
814 int dptr = *ptr;
815 attr &= 0x38;
816 attr |= atom_def_dst[attr >> 3] << 6;
817 SDEBUG(" dst: ");
818 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
819 shift = U8((*ptr)++);
820 SDEBUG(" shift: %d\n", shift);
821 dst <<= shift;
822 SDEBUG(" dst: ");
823 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
824}
825
826static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
827{
828 uint8_t attr = U8((*ptr)++), shift;
829 uint32_t saved, dst;
830 int dptr = *ptr;
831 attr &= 0x38;
832 attr |= atom_def_dst[attr >> 3] << 6;
833 SDEBUG(" dst: ");
834 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
835 shift = U8((*ptr)++);
836 SDEBUG(" shift: %d\n", shift);
837 dst >>= shift;
838 SDEBUG(" dst: ");
839 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
840}
841
842static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
843{
844 uint8_t attr = U8((*ptr)++);
845 uint32_t dst, src, saved;
846 int dptr = *ptr;
847 SDEBUG(" dst: ");
848 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
849 SDEBUG(" src: ");
850 src = atom_get_src(ctx, attr, ptr);
851 dst -= src;
852 SDEBUG(" dst: ");
853 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
854}
855
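/*
 * SWITCH bytecode layout: a source operand is followed by a list of cases,
 * each introduced by ATOM_CASE_MAGIC, an immediate compare value read with
 * the source's alignment, and a 16-bit jump target relative to the start of
 * the current command table.  The list ends with the 16-bit ATOM_CASE_END
 * marker; if no case matches, execution continues after that terminator.
 */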
856static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
857{
858 uint8_t attr = U8((*ptr)++);
859 uint32_t src, val, target;
860 SDEBUG(" switch: ");
861 src = atom_get_src(ctx, attr, ptr);
862 while (U16(*ptr) != ATOM_CASE_END)
863 if (U8(*ptr) == ATOM_CASE_MAGIC) {
864 (*ptr)++;
865 SDEBUG(" case: ");
866 val =
867 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
868 ptr);
869 target = U16(*ptr);
870 if (val == src) {
871 SDEBUG(" target: %04X\n", target);
872 *ptr = ctx->start + target;
873 return;
874 }
875 (*ptr) += 2;
876 } else {
877 printk(KERN_INFO "Bad case.\n");
878 return;
879 }
880 (*ptr) += 2;
881}
882
883static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
884{
885 uint8_t attr = U8((*ptr)++);
886 uint32_t dst, src;
887 SDEBUG(" src1: ");
888 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
889 SDEBUG(" src2: ");
890 src = atom_get_src(ctx, attr, ptr);
891 ctx->ctx->cs_equal = ((dst & src) == 0);
892 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
893}
894
895static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
896{
897 uint8_t attr = U8((*ptr)++);
898 uint32_t dst, src, saved;
899 int dptr = *ptr;
900 SDEBUG(" dst: ");
901 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
902 SDEBUG(" src: ");
903 src = atom_get_src(ctx, attr, ptr);
904 dst ^= src;
905 SDEBUG(" dst: ");
906 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
907}
908
909static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
910{
911 printk(KERN_INFO "unimplemented!\n");
912}
913
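/*
 * Opcode dispatch table: ATOM opcodes 1..ATOM_OP_CNT-1 index directly into
 * this array.  Most operations come in six variants that differ only in the
 * destination address space (REG/PS/WS/FB/PLL/MC), which is passed to the
 * handler through the 'arg' field.
 */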
914static struct {
915 void (*func) (atom_exec_context *, int *, int);
916 int arg;
917} opcode_table[ATOM_OP_CNT] = {
918 {
919 NULL, 0}, {
920 atom_op_move, ATOM_ARG_REG}, {
921 atom_op_move, ATOM_ARG_PS}, {
922 atom_op_move, ATOM_ARG_WS}, {
923 atom_op_move, ATOM_ARG_FB}, {
924 atom_op_move, ATOM_ARG_PLL}, {
925 atom_op_move, ATOM_ARG_MC}, {
926 atom_op_and, ATOM_ARG_REG}, {
927 atom_op_and, ATOM_ARG_PS}, {
928 atom_op_and, ATOM_ARG_WS}, {
929 atom_op_and, ATOM_ARG_FB}, {
930 atom_op_and, ATOM_ARG_PLL}, {
931 atom_op_and, ATOM_ARG_MC}, {
932 atom_op_or, ATOM_ARG_REG}, {
933 atom_op_or, ATOM_ARG_PS}, {
934 atom_op_or, ATOM_ARG_WS}, {
935 atom_op_or, ATOM_ARG_FB}, {
936 atom_op_or, ATOM_ARG_PLL}, {
937 atom_op_or, ATOM_ARG_MC}, {
938 atom_op_shl, ATOM_ARG_REG}, {
939 atom_op_shl, ATOM_ARG_PS}, {
940 atom_op_shl, ATOM_ARG_WS}, {
941 atom_op_shl, ATOM_ARG_FB}, {
942 atom_op_shl, ATOM_ARG_PLL}, {
943 atom_op_shl, ATOM_ARG_MC}, {
944 atom_op_shr, ATOM_ARG_REG}, {
945 atom_op_shr, ATOM_ARG_PS}, {
946 atom_op_shr, ATOM_ARG_WS}, {
947 atom_op_shr, ATOM_ARG_FB}, {
948 atom_op_shr, ATOM_ARG_PLL}, {
949 atom_op_shr, ATOM_ARG_MC}, {
950 atom_op_mul, ATOM_ARG_REG}, {
951 atom_op_mul, ATOM_ARG_PS}, {
952 atom_op_mul, ATOM_ARG_WS}, {
953 atom_op_mul, ATOM_ARG_FB}, {
954 atom_op_mul, ATOM_ARG_PLL}, {
955 atom_op_mul, ATOM_ARG_MC}, {
956 atom_op_div, ATOM_ARG_REG}, {
957 atom_op_div, ATOM_ARG_PS}, {
958 atom_op_div, ATOM_ARG_WS}, {
959 atom_op_div, ATOM_ARG_FB}, {
960 atom_op_div, ATOM_ARG_PLL}, {
961 atom_op_div, ATOM_ARG_MC}, {
962 atom_op_add, ATOM_ARG_REG}, {
963 atom_op_add, ATOM_ARG_PS}, {
964 atom_op_add, ATOM_ARG_WS}, {
965 atom_op_add, ATOM_ARG_FB}, {
966 atom_op_add, ATOM_ARG_PLL}, {
967 atom_op_add, ATOM_ARG_MC}, {
968 atom_op_sub, ATOM_ARG_REG}, {
969 atom_op_sub, ATOM_ARG_PS}, {
970 atom_op_sub, ATOM_ARG_WS}, {
971 atom_op_sub, ATOM_ARG_FB}, {
972 atom_op_sub, ATOM_ARG_PLL}, {
973 atom_op_sub, ATOM_ARG_MC}, {
974 atom_op_setport, ATOM_PORT_ATI}, {
975 atom_op_setport, ATOM_PORT_PCI}, {
976 atom_op_setport, ATOM_PORT_SYSIO}, {
977 atom_op_setregblock, 0}, {
978 atom_op_setfbbase, 0}, {
979 atom_op_compare, ATOM_ARG_REG}, {
980 atom_op_compare, ATOM_ARG_PS}, {
981 atom_op_compare, ATOM_ARG_WS}, {
982 atom_op_compare, ATOM_ARG_FB}, {
983 atom_op_compare, ATOM_ARG_PLL}, {
984 atom_op_compare, ATOM_ARG_MC}, {
985 atom_op_switch, 0}, {
986 atom_op_jump, ATOM_COND_ALWAYS}, {
987 atom_op_jump, ATOM_COND_EQUAL}, {
988 atom_op_jump, ATOM_COND_BELOW}, {
989 atom_op_jump, ATOM_COND_ABOVE}, {
990 atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
991 atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
992 atom_op_jump, ATOM_COND_NOTEQUAL}, {
993 atom_op_test, ATOM_ARG_REG}, {
994 atom_op_test, ATOM_ARG_PS}, {
995 atom_op_test, ATOM_ARG_WS}, {
996 atom_op_test, ATOM_ARG_FB}, {
997 atom_op_test, ATOM_ARG_PLL}, {
998 atom_op_test, ATOM_ARG_MC}, {
999 atom_op_delay, ATOM_UNIT_MILLISEC}, {
1000 atom_op_delay, ATOM_UNIT_MICROSEC}, {
1001 atom_op_calltable, 0}, {
1002 atom_op_repeat, 0}, {
1003 atom_op_clear, ATOM_ARG_REG}, {
1004 atom_op_clear, ATOM_ARG_PS}, {
1005 atom_op_clear, ATOM_ARG_WS}, {
1006 atom_op_clear, ATOM_ARG_FB}, {
1007 atom_op_clear, ATOM_ARG_PLL}, {
1008 atom_op_clear, ATOM_ARG_MC}, {
1009 atom_op_nop, 0}, {
1010 atom_op_eot, 0}, {
1011 atom_op_mask, ATOM_ARG_REG}, {
1012 atom_op_mask, ATOM_ARG_PS}, {
1013 atom_op_mask, ATOM_ARG_WS}, {
1014 atom_op_mask, ATOM_ARG_FB}, {
1015 atom_op_mask, ATOM_ARG_PLL}, {
1016 atom_op_mask, ATOM_ARG_MC}, {
1017 atom_op_postcard, 0}, {
1018 atom_op_beep, 0}, {
1019 atom_op_savereg, 0}, {
1020 atom_op_restorereg, 0}, {
1021 atom_op_setdatablock, 0}, {
1022 atom_op_xor, ATOM_ARG_REG}, {
1023 atom_op_xor, ATOM_ARG_PS}, {
1024 atom_op_xor, ATOM_ARG_WS}, {
1025 atom_op_xor, ATOM_ARG_FB}, {
1026 atom_op_xor, ATOM_ARG_PLL}, {
1027 atom_op_xor, ATOM_ARG_MC}, {
1028 atom_op_shl, ATOM_ARG_REG}, {
1029 atom_op_shl, ATOM_ARG_PS}, {
1030 atom_op_shl, ATOM_ARG_WS}, {
1031 atom_op_shl, ATOM_ARG_FB}, {
1032 atom_op_shl, ATOM_ARG_PLL}, {
1033 atom_op_shl, ATOM_ARG_MC}, {
1034 atom_op_shr, ATOM_ARG_REG}, {
1035 atom_op_shr, ATOM_ARG_PS}, {
1036 atom_op_shr, ATOM_ARG_WS}, {
1037 atom_op_shr, ATOM_ARG_FB}, {
1038 atom_op_shr, ATOM_ARG_PLL}, {
1039 atom_op_shr, ATOM_ARG_MC}, {
1040atom_op_debug, 0},};
1041
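/*
 * Table interpreter: look up the command table offset, read its header
 * (size, workspace and parameter-space sizes), allocate a scratch workspace
 * if the table asks for one, then decode and dispatch opcodes until an EOT
 * (or unknown) opcode ends the loop.
 */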
1042void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1043{
1044 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1045 int len, ws, ps, ptr;
1046 unsigned char op;
1047 atom_exec_context ectx;
1048
1049 if (!base)
1050 return;
1051
1052 len = CU16(base + ATOM_CT_SIZE_PTR);
1053 ws = CU8(base + ATOM_CT_WS_PTR);
1054 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1055 ptr = base + ATOM_CT_CODE_PTR;
1056
1057 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1058
1059 /* reset reg block */
1060 ctx->reg_block = 0;
1061 ectx.ctx = ctx;
1062 ectx.ps_shift = ps / 4;
1063 ectx.start = base;
1064 ectx.ps = params;
1065 if (ws)
1066 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1067 else
1068 ectx.ws = NULL;
1069
1070 debug_depth++;
1071 while (1) {
1072 op = CU8(ptr++);
1073 if (op < ATOM_OP_NAMES_CNT)
1074 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1075 else
1076 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1077
1078 if (op < ATOM_OP_CNT && op > 0)
1079 opcode_table[op].func(&ectx, &ptr,
1080 opcode_table[op].arg);
1081 else
1082 break;
1083
1084 if (op == ATOM_OP_EOT)
1085 break;
1086 }
1087 debug_depth--;
1088 SDEBUG("<<\n");
1089
1090 if (ws)
1091 kfree(ectx.ws);
1092}
1093
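/*
 * Pre-index the indirect IO (IIO) programs: each ATOM_IIO_START record names
 * a port id, and the bytes that follow (lengths given by atom_iio_len[]) form
 * the program executed for reads and writes to that port.  The offset of each
 * program is cached in ctx->iio[] so atom_iio_execute() can find it quickly.
 */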
1094static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1095
1096static void atom_index_iio(struct atom_context *ctx, int base)
1097{
1098 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1099 while (CU8(base) == ATOM_IIO_START) {
1100 ctx->iio[CU8(base + 1)] = base + 2;
1101 base += 2;
1102 while (CU8(base) != ATOM_IIO_END)
1103 base += atom_iio_len[CU8(base)];
1104 base += 3;
1105 }
1106}
1107
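/*
 * Parse and validate the video BIOS image: check the BIOS, ATI and ATOM ROM
 * magic values, follow the ROM header to the master command and data tables,
 * index the indirect IO programs and print the BIOS message string (which is
 * not always NUL-terminated).
 */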
1108struct atom_context *atom_parse(struct card_info *card, void *bios)
1109{
1110 int base;
1111 struct atom_context *ctx =
1112 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1113 char *str;
1114 char name[512];
1115 int i;
1116
1117 ctx->card = card;
1118 ctx->bios = bios;
1119
1120 if (CU16(0) != ATOM_BIOS_MAGIC) {
1121 printk(KERN_INFO "Invalid BIOS magic.\n");
1122 kfree(ctx);
1123 return NULL;
1124 }
1125 if (strncmp
1126 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1127 strlen(ATOM_ATI_MAGIC))) {
1128 printk(KERN_INFO "Invalid ATI magic.\n");
1129 kfree(ctx);
1130 return NULL;
1131 }
1132
1133 base = CU16(ATOM_ROM_TABLE_PTR);
1134 if (strncmp
1135 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1136 strlen(ATOM_ROM_MAGIC))) {
1137 printk(KERN_INFO "Invalid ATOM magic.\n");
1138 kfree(ctx);
1139 return NULL;
1140 }
1141
1142 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1143 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1144 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1145
1146 str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
1147 while (*str && ((*str == '\n') || (*str == '\r')))
1148 str++;
1149 /* name string isn't always 0 terminated */
1150 for (i = 0; i < 511; i++) {
1151 name[i] = str[i];
1152 if (name[i] < '.' || name[i] > 'z') {
1153 name[i] = 0;
1154 break;
1155 }
1156 }
1157 printk(KERN_INFO "ATOM BIOS: %s\n", name);
1158
1159 return ctx;
1160}
1161
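/*
 * Run the ASIC_Init command table with the default engine and memory clocks
 * read from the firmware info data table; returns nonzero if either clock or
 * the init command table is missing.
 */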
1162int atom_asic_init(struct atom_context *ctx)
1163{
1164 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1165 uint32_t ps[16];
1166 memset(ps, 0, 64);
1167
1168 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1169 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1170 if (!ps[0] || !ps[1])
1171 return 1;
1172
1173 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1174 return 1;
1175 atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1176
1177 return 0;
1178}
1179
1180void atom_destroy(struct atom_context *ctx)
1181{
1182 if (ctx->iio)
1183 kfree(ctx->iio);
1184 kfree(ctx);
1185}
1186
1187void atom_parse_data_header(struct atom_context *ctx, int index,
1188 uint16_t * size, uint8_t * frev, uint8_t * crev,
1189 uint16_t * data_start)
1190{
1191 int offset = index * 2 + 4;
1192 int idx = CU16(ctx->data_table + offset);
1193
1194 if (size)
1195 *size = CU16(idx);
1196 if (frev)
1197 *frev = CU8(idx + 2);
1198 if (crev)
1199 *crev = CU8(idx + 3);
1200 *data_start = idx;
1201 return;
1202}
1203
1204void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1205 uint8_t * crev)
1206{
1207 int offset = index * 2 + 4;
1208 int idx = CU16(ctx->cmd_table + offset);
1209
1210 if (frev)
1211 *frev = CU8(idx + 2);
1212 if (crev)
1213 *crev = CU8(idx + 3);
1214 return;
1215}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 000000000000..e6eb38f2bcae
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#ifndef ATOM_H
26#define ATOM_H
27
28#include <linux/types.h>
29#include "drmP.h"
30
31#define ATOM_BIOS_MAGIC 0xAA55
32#define ATOM_ATI_MAGIC_PTR 0x30
33#define ATOM_ATI_MAGIC " 761295520"
34#define ATOM_ROM_TABLE_PTR 0x48
35
36#define ATOM_ROM_MAGIC "ATOM"
37#define ATOM_ROM_MAGIC_PTR 4
38
39#define ATOM_ROM_MSG_PTR 0x10
40#define ATOM_ROM_CMD_PTR 0x1E
41#define ATOM_ROM_DATA_PTR 0x20
42
43#define ATOM_CMD_INIT 0
44#define ATOM_CMD_SETSCLK 0x0A
45#define ATOM_CMD_SETMCLK 0x0B
46#define ATOM_CMD_SETPCLK 0x0C
47
48#define ATOM_DATA_FWI_PTR 0xC
49#define ATOM_DATA_IIO_PTR 0x32
50
51#define ATOM_FWI_DEFSCLK_PTR 8
52#define ATOM_FWI_DEFMCLK_PTR 0xC
53#define ATOM_FWI_MAXSCLK_PTR 0x24
54#define ATOM_FWI_MAXMCLK_PTR 0x28
55
56#define ATOM_CT_SIZE_PTR 0
57#define ATOM_CT_WS_PTR 4
58#define ATOM_CT_PS_PTR 5
59#define ATOM_CT_PS_MASK 0x7F
60#define ATOM_CT_CODE_PTR 6
61
62#define ATOM_OP_CNT 123
63#define ATOM_OP_EOT 91
64
65#define ATOM_CASE_MAGIC 0x63
66#define ATOM_CASE_END 0x5A5A
67
68#define ATOM_ARG_REG 0
69#define ATOM_ARG_PS 1
70#define ATOM_ARG_WS 2
71#define ATOM_ARG_FB 3
72#define ATOM_ARG_ID 4
73#define ATOM_ARG_IMM 5
74#define ATOM_ARG_PLL 6
75#define ATOM_ARG_MC 7
76
77#define ATOM_SRC_DWORD 0
78#define ATOM_SRC_WORD0 1
79#define ATOM_SRC_WORD8 2
80#define ATOM_SRC_WORD16 3
81#define ATOM_SRC_BYTE0 4
82#define ATOM_SRC_BYTE8 5
83#define ATOM_SRC_BYTE16 6
84#define ATOM_SRC_BYTE24 7
85
86#define ATOM_WS_QUOTIENT 0x40
87#define ATOM_WS_REMAINDER 0x41
88#define ATOM_WS_DATAPTR 0x42
89#define ATOM_WS_SHIFT 0x43
90#define ATOM_WS_OR_MASK 0x44
91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47
94
95#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1
97#define ATOM_IIO_READ 2
98#define ATOM_IIO_WRITE 3
99#define ATOM_IIO_CLEAR 4
100#define ATOM_IIO_SET 5
101#define ATOM_IIO_MOVE_INDEX 6
102#define ATOM_IIO_MOVE_ATTR 7
103#define ATOM_IIO_MOVE_DATA 8
104#define ATOM_IIO_END 9
105
106#define ATOM_IO_MM 0
107#define ATOM_IO_PCI 1
108#define ATOM_IO_SYSIO 2
109#define ATOM_IO_IIO 0x80
110
111struct card_info {
112 struct drm_device *dev;
113 void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
114 uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
115 void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
116 uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
117 void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
118 uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
119};
120
121struct atom_context {
122 struct card_info *card;
123 void *bios;
124 uint32_t cmd_table, data_table;
125 uint16_t *iio;
126
127 uint16_t data_block;
128 uint32_t fb_base;
129 uint32_t divmul[2];
130 uint16_t io_attr;
131 uint16_t reg_block;
132 uint8_t shift;
133 int cs_equal, cs_above;
134 int io_mode;
135};
136
137extern int atom_debug;
138
139struct atom_context *atom_parse(struct card_info *, void *);
140void atom_execute_table(struct atom_context *, int, uint32_t *);
141int atom_asic_init(struct atom_context *);
142void atom_destroy(struct atom_context *);
143void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
144void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
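/*
 * Illustrative usage sketch (not part of this interface): a driver fills a
 * struct card_info with its own register/MC/PLL accessors and hands the BIOS
 * image to atom_parse().  The my_* helpers and bios_image below are
 * hypothetical names used only for illustration.
 *
 *	static struct card_info info = {
 *		.reg_write = my_reg_write,
 *		.reg_read  = my_reg_read,
 *		.mc_write  = my_mc_write,
 *		.mc_read   = my_mc_read,
 *		.pll_write = my_pll_write,
 *		.pll_read  = my_pll_read,
 *	};
 *	struct atom_context *actx = atom_parse(&info, bios_image);
 *
 *	if (actx && atom_asic_init(actx) == 0)
 *		;	// further tables can be run via atom_execute_table()
 *	atom_destroy(actx);
 */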
145#include "atom-types.h"
146#include "atombios.h"
147#include "ObjectID.h"
148
149#endif
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 000000000000..cf67928abbc8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,4785 @@
1/*
2 * Copyright 2006-2007 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23/****************************************************************************/
24/*Portion I: Definitions shared between VBIOS and Driver */
25/****************************************************************************/
26
27#ifndef _ATOMBIOS_H
28#define _ATOMBIOS_H
29
30#define ATOM_VERSION_MAJOR 0x00020000
31#define ATOM_VERSION_MINOR 0x00000002
32
33#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
34
35/* Endianness (ATOM_BIG_ENDIAN) must be defined before this header is
36 * included; the build errors out below if it is not
37 */
38#ifndef ATOM_BIG_ENDIAN
39#error Endian not specified
40#endif
41
42#ifdef _H2INC
43#ifndef ULONG
44typedef unsigned long ULONG;
45#endif
46
47#ifndef UCHAR
48typedef unsigned char UCHAR;
49#endif
50
51#ifndef USHORT
52typedef unsigned short USHORT;
53#endif
54#endif
55
56#define ATOM_DAC_A 0
57#define ATOM_DAC_B 1
58#define ATOM_EXT_DAC 2
59
60#define ATOM_CRTC1 0
61#define ATOM_CRTC2 1
62
63#define ATOM_DIGA 0
64#define ATOM_DIGB 1
65
66#define ATOM_PPLL1 0
67#define ATOM_PPLL2 1
68
69#define ATOM_SCALER1 0
70#define ATOM_SCALER2 1
71
72#define ATOM_SCALER_DISABLE 0
73#define ATOM_SCALER_CENTER 1
74#define ATOM_SCALER_EXPANSION 2
75#define ATOM_SCALER_MULTI_EX 3
76
77#define ATOM_DISABLE 0
78#define ATOM_ENABLE 1
79#define ATOM_LCD_BLOFF (ATOM_DISABLE+2)
80#define ATOM_LCD_BLON (ATOM_ENABLE+2)
81#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3)
82#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
83#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
84#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
85
86#define ATOM_BLANKING 1
87#define ATOM_BLANKING_OFF 0
88
89#define ATOM_CURSOR1 0
90#define ATOM_CURSOR2 1
91
92#define ATOM_ICON1 0
93#define ATOM_ICON2 1
94
95#define ATOM_CRT1 0
96#define ATOM_CRT2 1
97
98#define ATOM_TV_NTSC 1
99#define ATOM_TV_NTSCJ 2
100#define ATOM_TV_PAL 3
101#define ATOM_TV_PALM 4
102#define ATOM_TV_PALCN 5
103#define ATOM_TV_PALN 6
104#define ATOM_TV_PAL60 7
105#define ATOM_TV_SECAM 8
106#define ATOM_TV_CV 16
107
108#define ATOM_DAC1_PS2 1
109#define ATOM_DAC1_CV 2
110#define ATOM_DAC1_NTSC 3
111#define ATOM_DAC1_PAL 4
112
113#define ATOM_DAC2_PS2 ATOM_DAC1_PS2
114#define ATOM_DAC2_CV ATOM_DAC1_CV
115#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
116#define ATOM_DAC2_PAL ATOM_DAC1_PAL
117
118#define ATOM_PM_ON 0
119#define ATOM_PM_STANDBY 1
120#define ATOM_PM_SUSPEND 2
121#define ATOM_PM_OFF 3
122
123/* Bit0:{=0:single, =1:dual},
124 Bit1 {=0:666RGB, =1:888RGB},
125 Bit2:3:{Grey level}
126 Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
127
128#define ATOM_PANEL_MISC_DUAL 0x00000001
129#define ATOM_PANEL_MISC_888RGB 0x00000002
130#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C
131#define ATOM_PANEL_MISC_FPDI 0x00000010
132#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2
133#define ATOM_PANEL_MISC_SPATIAL 0x00000020
134#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
135#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
136
137#define MEMTYPE_DDR1 "DDR1"
138#define MEMTYPE_DDR2 "DDR2"
139#define MEMTYPE_DDR3 "DDR3"
140#define MEMTYPE_DDR4 "DDR4"
141
142#define ASIC_BUS_TYPE_PCI "PCI"
143#define ASIC_BUS_TYPE_AGP "AGP"
144#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS"
145
146/* Maximum size of that FireGL flag string */
147
148#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */
149#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
150
151#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */
152#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
153
154#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */
155#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
156
157#define HW_ASSISTED_I2C_STATUS_FAILURE 2
158#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
159
160#pragma pack(1)	/* BIOS data must use byte alignment */
161
162/* Define offset to location of ROM header. */
163
164#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L
165#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
166
167#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
168#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
169#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
170#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
171
172/* Common header for all ROM Data tables.
173 Every table pointed by _ATOM_MASTER_DATA_TABLE has this common header.
174 And the pointer actually points to this header. */
175
176typedef struct _ATOM_COMMON_TABLE_HEADER {
177 USHORT usStructureSize;
178 UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
179 UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
180 /*Image can't be updated, while Driver needs to carry the new table! */
181} ATOM_COMMON_TABLE_HEADER;
182
183typedef struct _ATOM_ROM_HEADER {
184 ATOM_COMMON_TABLE_HEADER sHeader;
185 UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
186 atombios should init it as "ATOM", don't change the position */
187 USHORT usBiosRuntimeSegmentAddress;
188 USHORT usProtectedModeInfoOffset;
189 USHORT usConfigFilenameOffset;
190 USHORT usCRC_BlockOffset;
191 USHORT usBIOS_BootupMessageOffset;
192 USHORT usInt10Offset;
193 USHORT usPciBusDevInitCode;
194 USHORT usIoBaseAddress;
195 USHORT usSubsystemVendorID;
196 USHORT usSubsystemID;
197 USHORT usPCI_InfoOffset;
198 USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
199 USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
200 UCHAR ucExtendedFunctionCode;
201 UCHAR ucReserved;
202} ATOM_ROM_HEADER;
203
204/*==============================Command Table Portion==================================== */
205
206#ifdef UEFI_BUILD
207#define UTEMP USHORT
208#define USHORT void*
209#endif
210
211typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
212 USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */
213 USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */
214 USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
215 USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */
216 USHORT DIGxEncoderControl; /* Only used by Bios */
217 USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
218 USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */
219 USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */
220 USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */
221 USHORT GPIOPinControl; /* Atomic Table, only used by Bios */
222 USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */
223 USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */
224 USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */
225 USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
226 USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
227 USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
228 USHORT MemoryPLLInit;
229 USHORT AdjustDisplayPll; /* only used by Bios */
230 USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
231 USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */
232 USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */
233 USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */
234 USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */
235 USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
236 USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
237 USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
238 USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
239 USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
240 USHORT GetConditionalGoldenSetting; /* only used by Bios */
241 USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */
242 USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
243 USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
244 USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
245 USHORT EnableScaler; /* Atomic Table, used only by Bios */
246 USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
247 USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
248 USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
249 USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */
250 USHORT EnableVGA_Access; /* Obsolete , only used by Bios */
251 USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */
252 USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */
253 USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */
254 USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */
255 USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */
256 USHORT UpdateCRTC_DoubleBufferRegisters;
257 USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */
258 USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */
259 USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
260 USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
261 USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */
262 USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */
263 USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
264 USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */
265 USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */
266 USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */
267 USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */
268 USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */
269 USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
270 USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */
271 USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
272 USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */
273 USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */
274 USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */
275 USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
276 USHORT MemoryTraining; /* Atomic Table, used only by Bios */
277 USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */
278 USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
279 USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
280 USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
281 USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
282	USHORT SetupHWAssistedI2CStatus;	/* Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C" */
283 USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
284 USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
285 USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */
286 USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
287 USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
288 USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
289 USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
290 USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */
291 USHORT DPEncoderService; /* Function Table,only used by Bios */
292} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
293
294/* For backward compatibility */
295#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
296#define UNIPHYTransmitterControl DIG1TransmitterControl
297#define LVTMATransmitterControl DIG2TransmitterControl
298#define SetCRTC_DPM_State GetConditionalGoldenSetting
299#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
300
301typedef struct _ATOM_MASTER_COMMAND_TABLE {
302 ATOM_COMMON_TABLE_HEADER sHeader;
303 ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
304} ATOM_MASTER_COMMAND_TABLE;
305
306/****************************************************************************/
307/* Structures used in every command table */
308/****************************************************************************/
309typedef struct _ATOM_TABLE_ATTRIBUTE {
310#if ATOM_BIG_ENDIAN
311 USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
312 USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
313 USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
314#else
315 USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
316 USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
317 USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
318#endif
319} ATOM_TABLE_ATTRIBUTE;
320
321typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
322 ATOM_TABLE_ATTRIBUTE sbfAccess;
323 USHORT susAccess;
324} ATOM_TABLE_ATTRIBUTE_ACCESS;
325
326/****************************************************************************/
327/* Common header for all command tables. */
328/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
329/* And the pointer actually points to this header. */
330/****************************************************************************/
331typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
332 ATOM_COMMON_TABLE_HEADER CommonHeader;
333 ATOM_TABLE_ATTRIBUTE TableAttribute;
334} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
335
336/****************************************************************************/
337/* Structures used by ComputeMemoryEnginePLLTable */
338/****************************************************************************/
339#define COMPUTE_MEMORY_PLL_PARAM 1
340#define COMPUTE_ENGINE_PLL_PARAM 2
341
342typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
343	ULONG ulClock;		/* When returned, it's the re-calculated clock based on the given Fb_div, Post_Div and ref_div */
344 UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */
345 UCHAR ucReserved; /* may expand to return larger Fbdiv later */
346 UCHAR ucFbDiv; /* return value */
347 UCHAR ucPostDiv; /* return value */
348} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
349
350typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
351	ULONG ulClock;		/* On return, [23:0] holds the real clock */
352	UCHAR ucAction;		/* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. On return it holds the ref_div to be written to the register */
353 USHORT usFbDiv; /* return Feedback value to be written to register */
354 UCHAR ucPostDiv; /* return post div to be written to register */
355} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
356#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
357
358#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */
359#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
360#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
361#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
362#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
363#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
364#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
365
366#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
367#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
368#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
369#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
370#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
371
372typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
373#if ATOM_BIG_ENDIAN
374 ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
375 ULONG ulClockFreq:24; /* in unit of 10kHz */
376#else
377 ULONG ulClockFreq:24; /* in unit of 10kHz */
378 ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
379#endif
380} ATOM_COMPUTE_CLOCK_FREQ;
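/* Example: a 400 MHz engine clock request would be encoded as
   ulClockFreq = 40000 (10 kHz units) with ulComputeClockFlag set to
   COMPUTE_ENGINE_PLL_PARAM (2). */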
381
382typedef struct _ATOM_S_MPLL_FB_DIVIDER {
383 USHORT usFbDivFrac;
384 USHORT usFbDiv;
385} ATOM_S_MPLL_FB_DIVIDER;
386
387typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
388 union {
389 ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */
390 ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */
391 };
392 UCHAR ucRefDiv; /* Output Parameter */
393 UCHAR ucPostDiv; /* Output Parameter */
394 UCHAR ucCntlFlag; /* Output Parameter */
395 UCHAR ucReserved;
396} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
397
398/* ucCntlFlag */
399#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
400#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
401#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
402
403typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
404 ATOM_COMPUTE_CLOCK_FREQ ulClock;
405 ULONG ulReserved[2];
406} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
407
408typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
409 ATOM_COMPUTE_CLOCK_FREQ ulClock;
410 ULONG ulMemoryClock;
411 ULONG ulReserved;
412} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
413
414/****************************************************************************/
415/* Structures used by SetEngineClockTable */
416/****************************************************************************/
417typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
418 ULONG ulTargetEngineClock; /* In 10Khz unit */
419} SET_ENGINE_CLOCK_PARAMETERS;
420
421typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
422 ULONG ulTargetEngineClock; /* In 10Khz unit */
423 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
424} SET_ENGINE_CLOCK_PS_ALLOCATION;
425
426/****************************************************************************/
427/* Structures used by SetMemoryClockTable */
428/****************************************************************************/
429typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
430 ULONG ulTargetMemoryClock; /* In 10Khz unit */
431} SET_MEMORY_CLOCK_PARAMETERS;
432
433typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
434 ULONG ulTargetMemoryClock; /* In 10Khz unit */
435 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
436} SET_MEMORY_CLOCK_PS_ALLOCATION;
437
438/****************************************************************************/
439/* Structures used by ASIC_Init.ctb */
440/****************************************************************************/
441typedef struct _ASIC_INIT_PARAMETERS {
442 ULONG ulDefaultEngineClock; /* In 10Khz unit */
443 ULONG ulDefaultMemoryClock; /* In 10Khz unit */
444} ASIC_INIT_PARAMETERS;
445
446typedef struct _ASIC_INIT_PS_ALLOCATION {
447 ASIC_INIT_PARAMETERS sASICInitClocks;
448 SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */
449} ASIC_INIT_PS_ALLOCATION;
450
451/****************************************************************************/
452/* Structure used by DynamicClockGatingTable.ctb */
453/****************************************************************************/
454typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
455 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
456 UCHAR ucPadding[3];
457} DYNAMIC_CLOCK_GATING_PARAMETERS;
458#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
459
460/****************************************************************************/
461/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */
462/****************************************************************************/
463typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
464 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
465 UCHAR ucPadding[3];
466} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
467#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
468
469/****************************************************************************/
470/* Structures used by DAC_LoadDetectionTable.ctb */
471/****************************************************************************/
472typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
473 USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
474 UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
475 UCHAR ucMisc; /* Valid only when table revision =1.3 and above */
476} DAC_LOAD_DETECTION_PARAMETERS;
477
478/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
479#define DAC_LOAD_MISC_YPrPb 0x01
480
481typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
482 DAC_LOAD_DETECTION_PARAMETERS sDacload;
483 ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */
484} DAC_LOAD_DETECTION_PS_ALLOCATION;
485
486/****************************************************************************/
487/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
488/****************************************************************************/
489typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
490	USHORT usPixelClock;	/* in 10KHz; for bios convenience */
491 UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
492 UCHAR ucAction; /* 0: turn off encoder */
493 /* 1: setup and turn on encoder */
494 /* 7: ATOM_ENCODER_INIT Initialize DAC */
495} DAC_ENCODER_CONTROL_PARAMETERS;
496
497#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
498
499/****************************************************************************/
500/* Structures used by DIG1EncoderControlTable */
501/* DIG2EncoderControlTable */
502/* ExternalEncoderControlTable */
503/****************************************************************************/
504typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
505	USHORT usPixelClock;	/* in 10KHz; for bios convenience */
506 UCHAR ucConfig;
507 /* [2] Link Select: */
508 /* =0: PHY linkA if bfLane<3 */
509 /* =1: PHY linkB if bfLanes<3 */
510 /* =0: PHY linkA+B if bfLanes=3 */
511 /* [3] Transmitter Sel */
512 /* =0: UNIPHY or PCIEPHY */
513 /* =1: LVTMA */
514 UCHAR ucAction; /* =0: turn off encoder */
515 /* =1: turn on encoder */
516 UCHAR ucEncoderMode;
517 /* =0: DP encoder */
518 /* =1: LVDS encoder */
519 /* =2: DVI encoder */
520 /* =3: HDMI encoder */
521 /* =4: SDVO encoder */
522 UCHAR ucLaneNum; /* how many lanes to enable */
523 UCHAR ucReserved[2];
524} DIG_ENCODER_CONTROL_PARAMETERS;
525#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
526#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
527
528/* ucConfig */
529#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
530#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
531#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
532#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
533#define ATOM_ENCODER_CONFIG_LINKA 0x00
534#define ATOM_ENCODER_CONFIG_LINKB 0x04
535#define ATOM_ENCODER_CONFIG_LINKA_B ATOM_TRANSMITTER_CONFIG_LINKA
536#define ATOM_ENCODER_CONFIG_LINKB_A ATOM_ENCODER_CONFIG_LINKB
537#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK 0x08
538#define ATOM_ENCODER_CONFIG_UNIPHY 0x00
539#define ATOM_ENCODER_CONFIG_LVTMA 0x08
540#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
541#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
542#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */
543/* ucAction */
544/* ATOM_ENABLE: Enable Encoder */
545/* ATOM_DISABLE: Disable Encoder */
546
547/* ucEncoderMode */
548#define ATOM_ENCODER_MODE_DP 0
549#define ATOM_ENCODER_MODE_LVDS 1
550#define ATOM_ENCODER_MODE_DVI 2
551#define ATOM_ENCODER_MODE_HDMI 3
552#define ATOM_ENCODER_MODE_SDVO 4
553#define ATOM_ENCODER_MODE_TV 13
554#define ATOM_ENCODER_MODE_CV 14
555#define ATOM_ENCODER_MODE_CRT 15
556
557typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
558#if ATOM_BIG_ENDIAN
559 UCHAR ucReserved1:2;
560 UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
561 UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
562 UCHAR ucReserved:1;
563 UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
564#else
565 UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
566 UCHAR ucReserved:1;
567 UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
568 UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
569 UCHAR ucReserved1:2;
570#endif
571} ATOM_DIG_ENCODER_CONFIG_V2;
572
573typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
574	USHORT usPixelClock;	/* in 10KHz; for bios convenience */
575 ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
576 UCHAR ucAction;
577 UCHAR ucEncoderMode;
578 /* =0: DP encoder */
579 /* =1: LVDS encoder */
580 /* =2: DVI encoder */
581 /* =3: HDMI encoder */
582 /* =4: SDVO encoder */
583 UCHAR ucLaneNum; /* how many lanes to enable */
584 UCHAR ucReserved[2];
585} DIG_ENCODER_CONTROL_PARAMETERS_V2;
586
587/* ucConfig */
588#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
589#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
590#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
591#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK 0x04
592#define ATOM_ENCODER_CONFIG_V2_LINKA 0x00
593#define ATOM_ENCODER_CONFIG_V2_LINKB 0x04
594#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK 0x18
595#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1 0x00
596#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
597#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
598
599/****************************************************************************/
600/* Structures used by UNIPHYTransmitterControlTable */
601/* LVTMATransmitterControlTable */
602/* DVOOutputControlTable */
603/****************************************************************************/
604typedef struct _ATOM_DP_VS_MODE {
605 UCHAR ucLaneSel;
606 UCHAR ucLaneSet;
607} ATOM_DP_VS_MODE;
608
609typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
610 union {
611		USHORT usPixelClock;	/* in 10KHz; for bios convenience */
612		USHORT usInitInfo;	/* when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h */
613 ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
614 };
615 UCHAR ucConfig;
616 /* [0]=0: 4 lane Link, */
617 /* =1: 8 lane Link ( Dual Links TMDS ) */
618 /* [1]=0: InCoherent mode */
619 /* =1: Coherent Mode */
620 /* [2] Link Select: */
621 /* =0: PHY linkA if bfLane<3 */
622 /* =1: PHY linkB if bfLanes<3 */
623 /* =0: PHY linkA+B if bfLanes=3 */
624 /* [5:4]PCIE lane Sel */
625 /* =0: lane 0~3 or 0~7 */
626 /* =1: lane 4~7 */
627 /* =2: lane 8~11 or 8~15 */
628 /* =3: lane 12~15 */
629 UCHAR ucAction; /* =0: turn off encoder */
630 /* =1: turn on encoder */
631 UCHAR ucReserved[4];
632} DIG_TRANSMITTER_CONTROL_PARAMETERS;
633
634#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
635
636/* ucInitInfo */
637#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
638
639/* ucConfig */
640#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
641#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
642#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
643#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
644#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
645#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
646#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
647
648#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
649#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
650#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
651
652#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
653#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
654#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE 0x20
655#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN 0x30
656#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK 0xc0
657#define ATOM_TRANSMITTER_CONFIG_LANE_0_3 0x00
658#define ATOM_TRANSMITTER_CONFIG_LANE_0_7 0x00
659#define ATOM_TRANSMITTER_CONFIG_LANE_4_7 0x40
660#define ATOM_TRANSMITTER_CONFIG_LANE_8_11 0x80
661#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
662#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
663
664/* ucAction */
665#define ATOM_TRANSMITTER_ACTION_DISABLE 0
666#define ATOM_TRANSMITTER_ACTION_ENABLE 1
667#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
668#define ATOM_TRANSMITTER_ACTION_LCD_BLON 3
669#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL 4
670#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START 5
671#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP 6
672#define ATOM_TRANSMITTER_ACTION_INIT 7
673#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT 8
674#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
675#define ATOM_TRANSMITTER_ACTION_SETUP 10
676#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
677
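/* Illustrative sketch: composing the v1 transmitter ucConfig byte from the
 * masks above for an 8-lane (dual-link TMDS) coherent path on link A,
 * clocked from the PPLL and fed from the DIG1 encoder. This is a usage
 * example only, not part of the original interface. */
static UCHAR example_transmitter_v1_config(void)
{
        return ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
               ATOM_TRANSMITTER_CONFIG_COHERENT |
               ATOM_TRANSMITTER_CONFIG_LINKA |
               ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER |
               ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL |
               ATOM_TRANSMITTER_CONFIG_LANE_0_7;
}
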
678/* Following are used for DigTransmitterControlTable ver1.2 */
679typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
680#if ATOM_BIG_ENDIAN
681 UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
682 /* =1 Dig Transmitter 2 ( Uniphy CD ) */
683 /* =2 Dig Transmitter 3 ( Uniphy EF ) */
684 UCHAR ucReserved:1;
685 UCHAR fDPConnector:1; /* bit4=0: DP connector =1: non-DP connector */
686 UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
687 UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
688 /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
689
690 UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
691 UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
692#else
693 UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
694 UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
695 UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
696 /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
697 UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
698 UCHAR fDPConnector:1; /* bit4=0: DP connector =1: non-DP connector */
699 UCHAR ucReserved:1;
700 UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
701 /* =1 Dig Transmitter 2 ( Uniphy CD ) */
702 /* =2 Dig Transmitter 3 ( Uniphy EF ) */
703#endif
704} ATOM_DIG_TRANSMITTER_CONFIG_V2;
705
706/* ucConfig */
707/* Bit0 */
708#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
709
710/* Bit1 */
711#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
712
713/* Bit2 */
714#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
715#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
716#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
717
718/* Bit3 */
719#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
720#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
721#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
722
723/* Bit4 */
724#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
725
726/* Bit7:6 */
727#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
728#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */
729#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */
730#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */
731
732typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
733 union {
734 USHORT usPixelClock; /* in 10KHz; for BIOS convenience */
735 USHORT usInitInfo; /* when initializing UNIPHY, the lower 8 bits carry the connector type defined in ObjectID.h */
736 ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
737 };
738 ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
739 UCHAR ucAction; /* defined as ATOM_TRANSMITTER_ACTION_XXX */
740 UCHAR ucReserved[4];
741} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
742
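/* Illustrative sketch (usage assumption, not part of the original interface):
 * filling the v1.2 transmitter parameters for a coherent dual-link DVI
 * connector on master link A of UNIPHY AB, data/clk path sourced from DIG1.
 * The structure is assumed to be zero-initialized by the caller. */
static void example_fill_transmitter_v2(DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 *p,
                                        USHORT pixel_clock_10khz)
{
        p->usPixelClock = pixel_clock_10khz;          /* in 10KHz units */
        p->acConfig.fDualLinkConnector = 1;           /* dual-link DVI */
        p->acConfig.fCoherentMode = 1;                /* coherent DVI/HDMI */
        p->acConfig.ucLinkSel = 0;                    /* master link A */
        p->acConfig.ucEncoderSel = 0;                 /* source from DIG1 */
        p->acConfig.ucTransmitterSel = 0;             /* Dig Transmitter 1 (Uniphy AB) */
        p->ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
}
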
743/****************************************************************************/
744/* Structures used by DAC1OutputControlTable */
745/* DAC2OutputControlTable */
746/* LVTMAOutputControlTable (Before DCE30) */
747/* TMDSAOutputControlTable (Before DCE30) */
748/****************************************************************************/
749typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
750 UCHAR ucAction; /* Possible input: ATOM_ENABLE || ATOM_DISABLE */
751 /* When the display is LCD, in addition to above: */
752 /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
753 /* ATOM_LCD_SELFTEST_STOP */
754
755 UCHAR aucPadding[3]; /* padding to DWORD aligned */
756} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
757
758#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
759
760#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
761#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
762
763#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
764#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
765
766#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
767#define CV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
768
769#define TV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
770#define TV1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
771
772#define DFP1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
773#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
774
775#define DFP2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
776#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
777
778#define LCD1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
779#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
780
781#define DVO_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
782#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
783#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
784
785/****************************************************************************/
786/* Structures used by BlankCRTCTable */
787/****************************************************************************/
788typedef struct _BLANK_CRTC_PARAMETERS {
789 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
790 UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */
791 USHORT usBlackColorRCr;
792 USHORT usBlackColorGY;
793 USHORT usBlackColorBCb;
794} BLANK_CRTC_PARAMETERS;
795#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
796
797/****************************************************************************/
798/* Structures used by EnableCRTCTable */
799/* EnableCRTCMemReqTable */
800/* UpdateCRTC_DoubleBufferRegistersTable */
801/****************************************************************************/
802typedef struct _ENABLE_CRTC_PARAMETERS {
803 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
804 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
805 UCHAR ucPadding[2];
806} ENABLE_CRTC_PARAMETERS;
807#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
808
809/****************************************************************************/
810/* Structures used by SetCRTC_OverScanTable */
811/****************************************************************************/
812typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
813 USHORT usOverscanRight; /* right */
814 USHORT usOverscanLeft; /* left */
815 USHORT usOverscanBottom; /* bottom */
816 USHORT usOverscanTop; /* top */
817 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
818 UCHAR ucPadding[3];
819} SET_CRTC_OVERSCAN_PARAMETERS;
820#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
821
822/****************************************************************************/
823/* Structures used by SetCRTC_ReplicationTable */
824/****************************************************************************/
825typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
826 UCHAR ucH_Replication; /* horizontal replication */
827 UCHAR ucV_Replication; /* vertical replication */
828 UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
829 UCHAR ucPadding;
830} SET_CRTC_REPLICATION_PARAMETERS;
831#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
832
833/****************************************************************************/
834/* Structures used by SelectCRTC_SourceTable */
835/****************************************************************************/
836typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
837 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
838 UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
839 UCHAR ucPadding[2];
840} SELECT_CRTC_SOURCE_PARAMETERS;
841#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
842
843typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
844 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
845 UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */
846 UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */
847 UCHAR ucPadding;
848} SELECT_CRTC_SOURCE_PARAMETERS_V2;
849
850/* ucEncoderID */
851/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */
852/* #define ASIC_INT_TV_ENCODER_ID 0x02 */
853/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */
854/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */
855/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */
856/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */
857/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */
858/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */
859
860/* ucEncodeMode */
861/* #define ATOM_ENCODER_MODE_DP 0 */
862/* #define ATOM_ENCODER_MODE_LVDS 1 */
863/* #define ATOM_ENCODER_MODE_DVI 2 */
864/* #define ATOM_ENCODER_MODE_HDMI 3 */
865/* #define ATOM_ENCODER_MODE_SDVO 4 */
866/* #define ATOM_ENCODER_MODE_TV 13 */
867/* #define ATOM_ENCODER_MODE_CV 14 */
868/* #define ATOM_ENCODER_MODE_CRT 15 */
869
870/****************************************************************************/
871/* Structures used by SetPixelClockTable */
872/* GetPixelClockTable */
873/****************************************************************************/
874/* Major revision=1, Minor revision=1 */
875typedef struct _PIXEL_CLOCK_PARAMETERS {
876 USHORT usPixelClock; /* in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
877 /* 0 means disable PPLL */
878 USHORT usRefDiv; /* Reference divider */
879 USHORT usFbDiv; /* feedback divider */
880 UCHAR ucPostDiv; /* post divider */
881 UCHAR ucFracFbDiv; /* fractional feedback divider */
882 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPLL2 */
883 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATOM_NONPJITTER */
884 UCHAR ucCRTC; /* Which CRTC uses this Ppll */
885 UCHAR ucPadding;
886} PIXEL_CLOCK_PARAMETERS;
887
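/* Worked example of the divider relationship noted above (a sketch, not part
 * of the interface): with the reference clock expressed in the same 10kHz
 * units, usPixelClock = (RefClk*FB_Div)/(Ref_Div*Post_Div). The fractional
 * feedback divider is assumed here to count tenths of a divider step. */
static ULONG example_pixel_clock_10khz(ULONG ref_clk_10khz,
                                       USHORT fb_div, UCHAR frac_fb_div,
                                       USHORT ref_div, UCHAR post_div)
{
        ULONG fb_div_x10 = (ULONG)fb_div * 10 + frac_fb_div;

        return (ref_clk_10khz * fb_div_x10) / ((ULONG)ref_div * post_div * 10);
}
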
888/* Major revision=1, Minor revision=2, adds ucMiscInfo */
889/* ucMiscInfo: */
890#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
891#define MISC_DEVICE_INDEX_MASK 0xF0
892#define MISC_DEVICE_INDEX_SHIFT 4
893
894typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
895 USHORT usPixelClock; /* in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
896 /* 0 means disable PPLL */
897 USHORT usRefDiv; /* Reference divider */
898 USHORT usFbDiv; /* feedback divider */
899 UCHAR ucPostDiv; /* post divider */
900 UCHAR ucFracFbDiv; /* fractional feedback divider */
901 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPLL2 */
902 UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATOM_NONPJITTER */
903 UCHAR ucCRTC; /* Which CRTC uses this Ppll */
904 UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
905} PIXEL_CLOCK_PARAMETERS_V2;
906
907/* Major revision=1, Minor revision=3, structure/definition change */
908/* ucEncoderMode: */
909/* ATOM_ENCODER_MODE_DP */
910/* ATOM_ENCODER_MODE_LVDS */
911/* ATOM_ENCODER_MODE_DVI */
912/* ATOM_ENCODER_MODE_HDMI */
913/* ATOM_ENCODER_MODE_SDVO */
914/* ATOM_ENCODER_MODE_TV 13 */
915/* ATOM_ENCODER_MODE_CV 14 */
916/* ATOM_ENCODER_MODE_CRT 15 */
917
918/* ucDVOConfig */
919/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */
920/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */
921/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */
922/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */
923/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */
924/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */
925/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */
926
927/* ucMiscInfo: also changed, see below */
928#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
929#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
930#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
931#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
932#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
933#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
934
935typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
936 USHORT usPixelClock; /* in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
937 /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */
938 USHORT usRefDiv; /* Reference divider */
939 USHORT usFbDiv; /* feedback divider */
940 UCHAR ucPostDiv; /* post divider */
941 UCHAR ucFracFbDiv; /* fractional feedback divider */
942 UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPLL2 */
943 UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */
944 union {
945 UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
946 UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */
947 };
948 UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
949 /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
950} PIXEL_CLOCK_PARAMETERS_V3;
951
952#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
953#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
954
955/****************************************************************************/
956/* Structures used by AdjustDisplayPllTable */
957/****************************************************************************/
958typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
959 USHORT usPixelClock;
960 UCHAR ucTransmitterID;
961 UCHAR ucEncodeMode;
962 union {
963 UCHAR ucDVOConfig; /* if DVO, need to pass the link rate and output width (low 12-bit or 24-bit) */
964 UCHAR ucConfig; /* if not DVO, not defined yet */
965 };
966 UCHAR ucReserved[3];
967} ADJUST_DISPLAY_PLL_PARAMETERS;
968
969#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
970
971#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
972
973/****************************************************************************/
974/* Structures used by EnableYUVTable */
975/****************************************************************************/
976typedef struct _ENABLE_YUV_PARAMETERS {
977 UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
978 UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */
979 UCHAR ucPadding[2];
980} ENABLE_YUV_PARAMETERS;
981#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
982
983/****************************************************************************/
984/* Structures used by GetMemoryClockTable */
985/****************************************************************************/
986typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
987 ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */
988} GET_MEMORY_CLOCK_PARAMETERS;
989#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
990
991/****************************************************************************/
992/* Structures used by GetEngineClockTable */
993/****************************************************************************/
994typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
995 ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */
996} GET_ENGINE_CLOCK_PARAMETERS;
997#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
998
999/****************************************************************************/
1000/* Following Structures and constant may be obsolete */
1001/****************************************************************************/
1002/* Maximum 8 bytes; the data read in will be placed in the parameter space. */
1003/* The read operation is successful when the parameter space is non-zero, otherwise the read operation failed */
1004typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
1005 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
1006 USHORT usVRAMAddress; /* Address in frame buffer where to place the raw EDID */
1007 USHORT usStatus; /* When used as output: lower byte EDID checksum, high byte hardware status */
1008 /* When used as input: lower byte as 'bytes to read': currently limited to 128 bytes or 1 byte */
1009 UCHAR ucSlaveAddr; /* Read from which slave */
1010 UCHAR ucLineNumber; /* Read from which HW assisted line */
1011} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
1012#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
1013
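/* Illustrative sketch of filling the EDID-read parameter block according to
 * the field comments above. 0xA0 is the usual DDC slave address for EDID;
 * the prescale and line-number values are board-specific placeholders. */
static void example_fill_edid_read(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *p)
{
        p->usPrescale = 0x7f;     /* placeholder engine-clock/I2C ratio */
        p->usVRAMAddress = 0;     /* frame-buffer offset for the raw EDID */
        p->usStatus = 128;        /* on input: number of bytes to read */
        p->ucSlaveAddr = 0xA0;    /* DDC EDID slave address */
        p->ucLineNumber = 0;      /* board-specific HW-assisted I2C line */
}
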
1014#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
1015#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
1016#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
1017#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
1018#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
1019
1020typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
1021 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
1022 USHORT usByteOffset; /* Write to which byte */
1023 /* Upper portion of usByteOffset is Format of data */
1024 /* 1bytePS+offsetPS */
1025 /* 2bytesPS+offsetPS */
1026 /* blockID+offsetPS */
1027 /* blockID+offsetID */
1028 /* blockID+counterID+offsetID */
1029 UCHAR ucData; /* PS data1 */
1030 UCHAR ucStatus; /* Status byte 1=success, 2=failure; also used as PS data2 */
1031 UCHAR ucSlaveAddr; /* Write to which slave */
1032 UCHAR ucLineNumber; /* Write from which HW assisted line */
1033} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
1034
1035#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1036
1037typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
1038 USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
1039 UCHAR ucSlaveAddr; /* Write to which slave */
1040 UCHAR ucLineNumber; /* Write from which HW assisted line */
1041} SET_UP_HW_I2C_DATA_PARAMETERS;
1042
1043/**************************************************************************/
1044#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1045
1046/****************************************************************************/
1047/* Structures used by PowerConnectorDetectionTable */
1048/****************************************************************************/
1049typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
1050 UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
1051 UCHAR ucPwrBehaviorId;
1052 USHORT usPwrBudget; /* how much power the board currently boots up to, in watts */
1053} POWER_CONNECTOR_DETECTION_PARAMETERS;
1054
1055typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
1056 UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
1057 UCHAR ucReserved;
1058 USHORT usPwrBudget; /* how much power the board currently boots up to, in watts */
1059 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1060} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
1061
1062/****************************LVDS SS Command Table Definitions**********************/
1063
1064/****************************************************************************/
1065/* Structures used by EnableSpreadSpectrumOnPPLLTable */
1066/****************************************************************************/
1067typedef struct _ENABLE_LVDS_SS_PARAMETERS {
1068 USHORT usSpreadSpectrumPercentage;
1069 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
1070 UCHAR ucSpreadSpectrumStepSize_Delay; /* bits 3:2 SS_STEP_SIZE; bits 6:4 SS_DELAY */
1071 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
1072 UCHAR ucPadding[3];
1073} ENABLE_LVDS_SS_PARAMETERS;
1074
1075/* ucTableFormatRevision=1,ucTableContentRevision=2 */
1076typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
1077 USHORT usSpreadSpectrumPercentage;
1078 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
1079 UCHAR ucSpreadSpectrumStep; /* */
1080 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
1081 UCHAR ucSpreadSpectrumDelay;
1082 UCHAR ucSpreadSpectrumRange;
1083 UCHAR ucPadding;
1084} ENABLE_LVDS_SS_PARAMETERS_V2;
1085
1086/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
1087typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
1088 USHORT usSpreadSpectrumPercentage;
1089 UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
1090 UCHAR ucSpreadSpectrumStep; /* */
1091 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
1092 UCHAR ucSpreadSpectrumDelay;
1093 UCHAR ucSpreadSpectrumRange;
1094 UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */
1095} ENABLE_SPREAD_SPECTRUM_ON_PPLL;
1096
1097#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
1098
1099/**************************************************************************/
1100
1101typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
1102 PIXEL_CLOCK_PARAMETERS sPCLKInput;
1103 ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */
1104} SET_PIXEL_CLOCK_PS_ALLOCATION;
1105
1106#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
1107
1108/****************************************************************************/
1109/* Structures used by ### */
1110/****************************************************************************/
1111typedef struct _MEMORY_TRAINING_PARAMETERS {
1112 ULONG ulTargetMemoryClock; /* In 10Khz unit */
1113} MEMORY_TRAINING_PARAMETERS;
1114#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
1115
1116/****************************LVDS and other encoder command table definitions **********************/
1117
1118/****************************************************************************/
1119/* Structures used by LVDSEncoderControlTable (Before DCE30) */
1120/* LVTMAEncoderControlTable (Before DCE30) */
1121/* TMDSAEncoderControlTable (Before DCE30) */
1122/****************************************************************************/
1123typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
1124 USHORT usPixelClock; /* in 10KHz; for BIOS convenience */
1125 UCHAR ucMisc; /* bit0=0: Enable single link */
1126 /* =1: Enable dual link */
1127 /* Bit1=0: 666RGB */
1128 /* =1: 888RGB */
1129 UCHAR ucAction; /* 0: turn off encoder */
1130 /* 1: setup and turn on encoder */
1131} LVDS_ENCODER_CONTROL_PARAMETERS;
1132
1133#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
1134
1135#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
1136#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
1137
1138#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
1139#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
1140
1141/* ucTableFormatRevision=1,ucTableContentRevision=2 */
1142typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
1143 USHORT usPixelClock; /* in 10KHz; for BIOS convenience */
1144 UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
1145 UCHAR ucAction; /* 0: turn off encoder */
1146 /* 1: setup and turn on encoder */
1147 UCHAR ucTruncate; /* bit0=0: Disable truncate */
1148 /* =1: Enable truncate */
1149 /* bit4=0: 666RGB */
1150 /* =1: 888RGB */
1151 UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
1152 /* =1: Enable spatial dithering */
1153 /* bit4=0: 666RGB */
1154 /* =1: 888RGB */
1155 UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
1156 /* =1: Enable temporal dithering */
1157 /* bit4=0: 666RGB */
1158 /* =1: 888RGB */
1159 /* bit5=0: Gray level 2 */
1160 /* =1: Gray level 4 */
1161 UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
1162 /* =1: 25FRC_SEL pattern F */
1163 /* bit6:5=0: 50FRC_SEL pattern A */
1164 /* =1: 50FRC_SEL pattern B */
1165 /* =2: 50FRC_SEL pattern C */
1166 /* =3: 50FRC_SEL pattern D */
1167 /* bit7=0: 75FRC_SEL pattern E */
1168 /* =1: 75FRC_SEL pattern F */
1169} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
1170
1171#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1172
1173#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1174#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
1175
1176#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
1177#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
1178
1179#define LVDS_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1180#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
1181
1182#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
1183#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3
1184
1185#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
1186#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
1187
1188/****************************************************************************/
1189/* Structures used by ### */
1190/****************************************************************************/
1191typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
1192 UCHAR ucEnable; /* Enable or Disable External TMDS encoder */
1193 UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
1194 UCHAR ucPadding[2];
1195} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
1196
1197typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
1198 ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
1199 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
1200} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
1201
1202#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
1203
1204typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
1205 ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
1206 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
1207} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
1208
1209typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
1210 DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
1211 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1212} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
1213
1214/****************************************************************************/
1215/* Structures used by DVOEncoderControlTable */
1216/****************************************************************************/
1217/* ucTableFormatRevision=1,ucTableContentRevision=3 */
1218
1219/* ucDVOConfig: */
1220#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
1221#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
1222#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
1223#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
1224#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
1225#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
1226#define DVO_ENCODER_CONFIG_24BIT 0x08
1227
1228typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
1229 USHORT usPixelClock;
1230 UCHAR ucDVOConfig;
1231 UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
1232 UCHAR ucReseved[4];
1233} DVO_ENCODER_CONTROL_PARAMETERS_V3;
1234#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
1235
1236/* ucTableFormatRevision=1 */
1237/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */
1238/* bit1=0: non-coherent mode */
1239/* =1: coherent mode */
1240
1241/* ========================================================================================== */
1242/* When the encoder parameter definitions change again, only this place needs to change! */
1243#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
1244#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
1245
1246#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
1247#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST
1248
1249#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
1250#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST
1251
1252#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
1253#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
1254
1255/* ========================================================================================== */
1256#define PANEL_ENCODER_MISC_DUAL 0x01
1257#define PANEL_ENCODER_MISC_COHERENT 0x02
1258#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
1259#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08
1260
1261#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE
1262#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE
1263#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1)
1264
1265#define PANEL_ENCODER_TRUNCATE_EN 0x01
1266#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10
1267#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01
1268#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10
1269#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01
1270#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10
1271#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20
1272#define PANEL_ENCODER_25FRC_MASK 0x10
1273#define PANEL_ENCODER_25FRC_E 0x00
1274#define PANEL_ENCODER_25FRC_F 0x10
1275#define PANEL_ENCODER_50FRC_MASK 0x60
1276#define PANEL_ENCODER_50FRC_A 0x00
1277#define PANEL_ENCODER_50FRC_B 0x20
1278#define PANEL_ENCODER_50FRC_C 0x40
1279#define PANEL_ENCODER_50FRC_D 0x60
1280#define PANEL_ENCODER_75FRC_MASK 0x80
1281#define PANEL_ENCODER_75FRC_E 0x00
1282#define PANEL_ENCODER_75FRC_F 0x80
1283
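/* Illustrative sketch: composing the dithering/FRC bytes of
 * LVDS_ENCODER_CONTROL_PARAMETERS_V2 from the PANEL_ENCODER_* masks above for
 * an 18bpp (666RGB) panel using temporal dithering at gray level 4 and 50FRC
 * pattern B. Usage example only, not part of the original interface. */
static void example_fill_lvds_dither(LVDS_ENCODER_CONTROL_PARAMETERS_V2 *p)
{
        p->ucTruncate = 0;                               /* no truncation */
        p->ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;  /* bit4=0: 666RGB */
        p->ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN |
                        PANEL_ENCODER_TEMPORAL_LEVEL_4;  /* gray level 4 */
        p->ucFRC = PANEL_ENCODER_50FRC_B;                /* 50FRC pattern B */
}
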
1284/****************************************************************************/
1285/* Structures used by SetVoltageTable */
1286/****************************************************************************/
1287#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
1288#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
1289#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
1290#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
1291#define SET_VOLTAGE_INIT_MODE 5
1292#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */
1293
1294#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
1295#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
1296#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
1297
1298#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
1299#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
1300#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
1301
1302typedef struct _SET_VOLTAGE_PARAMETERS {
1303 UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
1304 UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... */
1305 UCHAR ucVoltageIndex; /* An index to tell which voltage level */
1306 UCHAR ucReserved;
1307} SET_VOLTAGE_PARAMETERS;
1308
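/* Illustrative sketch (usage assumption): requesting a VDDC level on all
 * sources through the v1 parameter block, using the constants defined above. */
static void example_fill_set_voltage(SET_VOLTAGE_PARAMETERS *p, UCHAR level)
{
        p->ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
        p->ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
        p->ucVoltageIndex = level;    /* index into the voltage table */
        p->ucReserved = 0;
}
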
1309typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
1310 UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
1311 UCHAR ucVoltageMode; /* Not used; may be used as a state machine for different power modes */
1312 USHORT usVoltageLevel; /* real voltage level */
1313} SET_VOLTAGE_PARAMETERS_V2;
1314
1315typedef struct _SET_VOLTAGE_PS_ALLOCATION {
1316 SET_VOLTAGE_PARAMETERS sASICSetVoltage;
1317 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
1318} SET_VOLTAGE_PS_ALLOCATION;
1319
1320/****************************************************************************/
1321/* Structures used by TVEncoderControlTable */
1322/****************************************************************************/
1323typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
1324 USHORT usPixelClock; /* in 10KHz; for BIOS convenience */
1325 UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */
1326 UCHAR ucAction; /* 0: turn off encoder */
1327 /* 1: setup and turn on encoder */
1328} TV_ENCODER_CONTROL_PARAMETERS;
1329
1330typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
1331 TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
1332 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */
1333} TV_ENCODER_CONTROL_PS_ALLOCATION;
1334
1335/* ==============================Data Table Portion==================================== */
1336
1337#ifdef UEFI_BUILD
1338#define UTEMP USHORT
1339#define USHORT void*
1340#endif
1341
1342/****************************************************************************/
1343/* Structure used in Data.mtb */
1344/****************************************************************************/
1345typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
1346 USHORT UtilityPipeLine; /* Offset for the utility to get parser info. Don't change this position! */
1347 USHORT MultimediaCapabilityInfo; /* Only used by MM Lib, latest version 1.1, not configurable from Bios, need to include the table to build Bios */
1348 USHORT MultimediaConfigInfo; /* Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios */
1349 USHORT StandardVESA_Timing; /* Only used by Bios */
1350 USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */
1351 USHORT DAC_Info; /* Will be obsolete from R600 */
1352 USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */
1353 USHORT TMDS_Info; /* Will be obsolete from R600 */
1354 USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */
1355 USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */
1356 USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */
1357 USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */
1358 USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */
1359 USHORT VESA_ToInternalModeLUT; /* Only used by Bios */
1360 USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */
1361 USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */
1362 USHORT CompassionateData; /* Will be obsolete from R600 */
1363 USHORT SaveRestoreInfo; /* Only used by Bios */
1364 USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to be called SS_Info; renamed because of internal ASIC SS info */
1365 USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */
1366 USHORT XTMDS_Info; /* Will be obsolete from R600 */
1367 USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
1368 USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */
1369 USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */
1370 USHORT MC_InitParameter; /* Only used by command table */
1371 USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */
1372 USHORT ASIC_InternalSS_Info; /* New table name from R600, used to be called "ASIC_MVDDC_Info" */
1373 USHORT TV_VideoMode; /* Only used by command table */
1374 USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */
1375 USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purposes since R600; the new table rev starts from 2.1 */
1376 USHORT IntegratedSystemInfo; /* Shared by various SW components */
1377 USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
1378 USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */
1379 USHORT PowerSourceInfo; /* Shared by various SW components, latest version 1.1 */
1380} ATOM_MASTER_LIST_OF_DATA_TABLES;
1381
1382#ifdef UEFI_BUILD
1383#define USHORT UTEMP
1384#endif
1385
1386typedef struct _ATOM_MASTER_DATA_TABLE {
1387 ATOM_COMMON_TABLE_HEADER sHeader;
1388 ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
1389} ATOM_MASTER_DATA_TABLE;
1390
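/* Illustrative sketch (an assumption about how the list is consumed, not
 * something defined by this header): each USHORT entry in
 * ATOM_MASTER_LIST_OF_DATA_TABLES is treated as a byte offset from the start
 * of the VBIOS image to that table's ATOM_COMMON_TABLE_HEADER; a zero entry
 * means the table is not present. */
static ATOM_COMMON_TABLE_HEADER *
example_get_data_table(UCHAR *bios_image, USHORT table_offset)
{
        if (table_offset == 0)
                return NULL;
        return (ATOM_COMMON_TABLE_HEADER *)(bios_image + table_offset);
}
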
1391/****************************************************************************/
1392/* Structure used in MultimediaCapabilityInfoTable */
1393/****************************************************************************/
1394typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
1395 ATOM_COMMON_TABLE_HEADER sHeader;
1396 ULONG ulSignature; /* HW info table signature string "$ATI" */
1397 UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
1398 UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
1399 UCHAR ucVideoPortInfo; /* Provides the video port capabilities */
1400 UCHAR ucHostPortInfo; /* Provides host port configuration information */
1401} ATOM_MULTIMEDIA_CAPABILITY_INFO;
1402
1403/****************************************************************************/
1404/* Structure used in MultimediaConfigInfoTable */
1405/****************************************************************************/
1406typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
1407 ATOM_COMMON_TABLE_HEADER sHeader;
1408 ULONG ulSignature; /* MM info table signature string "$MMT" */
1409 UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
1410 UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
1411 UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */
1412 UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
1413 UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
1414 UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
1415 UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */
1416 UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1417 UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1418 UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1419 UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1420 UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
1421} ATOM_MULTIMEDIA_CONFIG_INFO;
1422
1423/****************************************************************************/
1424/* Structures used in FirmwareInfoTable */
1425/****************************************************************************/
1426
1427/* usBIOSCapability Definition: */
1428/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
1429/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
1430/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
1431/* Others: Reserved */
1432#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
1433#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
1434#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
1435#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008
1436#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010
1437#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
1438#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
1439#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
1440#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT 0x0100
1441#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
1442#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
1443#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
1444
1445#ifndef _H2INC
1446
1447/* Please don't add to or expand this bitfield structure below; this one will retire soon! */
1448typedef struct _ATOM_FIRMWARE_CAPABILITY {
1449#if ATOM_BIG_ENDIAN
1450 USHORT Reserved:3;
1451 USHORT HyperMemory_Size:4;
1452 USHORT HyperMemory_Support:1;
1453 USHORT PPMode_Assigned:1;
1454 USHORT WMI_SUPPORT:1;
1455 USHORT GPUControlsBL:1;
1456 USHORT EngineClockSS_Support:1;
1457 USHORT MemoryClockSS_Support:1;
1458 USHORT ExtendedDesktopSupport:1;
1459 USHORT DualCRTC_Support:1;
1460 USHORT FirmwarePosted:1;
1461#else
1462 USHORT FirmwarePosted:1;
1463 USHORT DualCRTC_Support:1;
1464 USHORT ExtendedDesktopSupport:1;
1465 USHORT MemoryClockSS_Support:1;
1466 USHORT EngineClockSS_Support:1;
1467 USHORT GPUControlsBL:1;
1468 USHORT WMI_SUPPORT:1;
1469 USHORT PPMode_Assigned:1;
1470 USHORT HyperMemory_Support:1;
1471 USHORT HyperMemory_Size:4;
1472 USHORT Reserved:3;
1473#endif
1474} ATOM_FIRMWARE_CAPABILITY;
1475
1476typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
1477 ATOM_FIRMWARE_CAPABILITY sbfAccess;
1478 USHORT susAccess;
1479} ATOM_FIRMWARE_CAPABILITY_ACCESS;
1480
1481#else
1482
1483typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
1484 USHORT susAccess;
1485} ATOM_FIRMWARE_CAPABILITY_ACCESS;
1486
1487#endif
1488
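/* Illustrative sketch: the union above allows the capability word to be read
 * either through the bitfield view or through the ATOM_BIOS_INFO_* masks
 * defined earlier; both views are equivalent. Usage example only. */
static int example_gpu_controls_backlight(ATOM_FIRMWARE_CAPABILITY_ACCESS cap)
{
        /* with the bitfield view this is cap.sbfAccess.GPUControlsBL */
        return (cap.susAccess & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU) != 0;
}
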
1489typedef struct _ATOM_FIRMWARE_INFO {
1490 ATOM_COMMON_TABLE_HEADER sHeader;
1491 ULONG ulFirmwareRevision;
1492 ULONG ulDefaultEngineClock; /* In 10Khz unit */
1493 ULONG ulDefaultMemoryClock; /* In 10Khz unit */
1494 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
1495 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
1496 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
1497 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
1498 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
1499 ULONG ulASICMaxEngineClock; /* In 10Khz unit */
1500 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
1501 UCHAR ucASICMaxTemperature;
1502 UCHAR ucPadding[3]; /* Don't use them */
1503 ULONG aulReservedForBIOS[3]; /* Don't use them */
1504 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
1505 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
1506 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
1507 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
1508 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
1509 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
1510 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
1511 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
1512 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
1513 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */
1514 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1515 USHORT usReferenceClock; /* In 10Khz unit */
1516 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
1517 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
1518 UCHAR ucDesign_ID; /* Indicate what is the board design */
1519 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
1520} ATOM_FIRMWARE_INFO;
1521
1522typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
1523 ATOM_COMMON_TABLE_HEADER sHeader;
1524 ULONG ulFirmwareRevision;
1525 ULONG ulDefaultEngineClock; /* In 10Khz unit */
1526 ULONG ulDefaultMemoryClock; /* In 10Khz unit */
1527 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
1528 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
1529 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
1530 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
1531 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
1532 ULONG ulASICMaxEngineClock; /* In 10Khz unit */
1533 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
1534 UCHAR ucASICMaxTemperature;
1535 UCHAR ucMinAllowedBL_Level;
1536 UCHAR ucPadding[2]; /* Don't use them */
1537 ULONG aulReservedForBIOS[2]; /* Don't use them */
1538 ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
1539 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
1540 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
1541 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
1542 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
1543 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
1544 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
1545 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
1546 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
1547 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
1548 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
1549 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1550 USHORT usReferenceClock; /* In 10Khz unit */
1551 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
1552 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
1553 UCHAR ucDesign_ID; /* Indicate what is the board design */
1554 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
1555} ATOM_FIRMWARE_INFO_V1_2;
1556
1557typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
1558 ATOM_COMMON_TABLE_HEADER sHeader;
1559 ULONG ulFirmwareRevision;
1560 ULONG ulDefaultEngineClock; /* In 10Khz unit */
1561 ULONG ulDefaultMemoryClock; /* In 10Khz unit */
1562 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
1563 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
1564 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
1565 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
1566 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
1567 ULONG ulASICMaxEngineClock; /* In 10Khz unit */
1568 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
1569 UCHAR ucASICMaxTemperature;
1570 UCHAR ucMinAllowedBL_Level;
1571 UCHAR ucPadding[2]; /* Don't use them */
1572 ULONG aulReservedForBIOS; /* Don't use them */
1573 ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
1574 ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
1575 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
1576 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
1577 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
1578 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
1579 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
1580 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
1581 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
1582 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
1583 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
1584 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
1585 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1586 USHORT usReferenceClock; /* In 10Khz unit */
1587 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
1588 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
1589 UCHAR ucDesign_ID; /* Indicate what is the board design */
1590 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
1591} ATOM_FIRMWARE_INFO_V1_3;
1592
1593typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
1594 ATOM_COMMON_TABLE_HEADER sHeader;
1595 ULONG ulFirmwareRevision;
1596 ULONG ulDefaultEngineClock; /* In 10Khz unit */
1597 ULONG ulDefaultMemoryClock; /* In 10Khz unit */
1598 ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
1599 ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
1600 ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
1601 ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
1602 ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
1603 ULONG ulASICMaxEngineClock; /* In 10Khz unit */
1604 ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
1605 UCHAR ucASICMaxTemperature;
1606 UCHAR ucMinAllowedBL_Level;
1607 USHORT usBootUpVDDCVoltage; /* In MV unit */
1608 USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */
1609 USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */
1610 ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
1611 ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
1612 USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
1613 USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
1614 USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
1615 USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
1616 USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
1617 USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
1618 USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
1619 USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
1620 USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
1621 USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
1622 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
1623 USHORT usReferenceClock; /* In 10Khz unit */
1624 USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
1625 UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
1626 UCHAR ucDesign_ID; /* Indicate what is the board design */
1627 UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
1628} ATOM_FIRMWARE_INFO_V1_4;
1629
1630#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4
1631
1632/****************************************************************************/
1633/* Structures used in IntegratedSystemInfoTable */
1634/****************************************************************************/
1635#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
1636#define IGP_CAP_FLAG_AC_CARD 0x4
1637#define IGP_CAP_FLAG_SDVO_CARD 0x8
1638#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
1639
1640typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
1641 ATOM_COMMON_TABLE_HEADER sHeader;
1642 ULONG ulBootUpEngineClock; /* in 10kHz unit */
1643 ULONG ulBootUpMemoryClock; /* in 10kHz unit */
1644 ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */
1645 ULONG ulMinSystemMemoryClock; /* in 10kHz unit */
1646 UCHAR ucNumberOfCyclesInPeriodHi;
1647 UCHAR ucLCDTimingSel; /* =0: not valid. !=0: select this timing descriptor from the LCD EDID. */
1648 USHORT usReserved1;
1649 USHORT usInterNBVoltageLow; /* An intermediate PWM value to set the voltage */
1650 USHORT usInterNBVoltageHigh; /* Another intermediate PWM value to set the voltage */
1651 ULONG ulReserved[2];
1652
1653 USHORT usFSBClock; /* In MHz unit */
1654 USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */
1655 /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
1656 /* Bit[4]==1: P/2 mode, ==0: P/1 mode */
1657 USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
1658 USHORT usK8MemoryClock; /* in MHz unit */
1659 USHORT usK8SyncStartDelay; /* in 0.01 us unit */
1660 USHORT usK8DataReturnTime; /* in 0.01 us unit */
1661 UCHAR ucMaxNBVoltage;
1662 UCHAR ucMinNBVoltage;
1663 UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
1664 UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
1665 UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
1666 UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */
1667 UCHAR ucMaxNBVoltageHigh;
1668 UCHAR ucMinNBVoltageHigh;
1669} ATOM_INTEGRATED_SYSTEM_INFO;
1670
1671/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
1672ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
1673 For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
1674ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
1675 For AMD IGP,for now this can be 0
1676ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
1677 For AMD IGP,for now this can be 0
1678
1679usFSBClock: For Intel IGP,it's FSB Freq
1680 For AMD IGP,it's HT Link Speed
1681
1682usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
1683usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation
1684usK8DataReturnTime: For AMD IGP only. Memory access latency in K8, required for watermark calculation
1685
1686VC:Voltage Control
1687ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
1688ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
1689
1690ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
1691ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
1692
1693ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
1694ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
1695
1696usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
1697usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
1698*/
1699
1700/*
1701The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
1702Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
1703Enough space is reserved so that the table revision should never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
1704
1705SW components can access the IGP system info structure in the same way as before.
1706*/
1707
1708typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
1709 ATOM_COMMON_TABLE_HEADER sHeader;
1710 ULONG ulBootUpEngineClock; /* in 10kHz unit */
1711 ULONG ulReserved1[2]; /* must be 0x0 for the reserved */
1712 ULONG ulBootUpUMAClock; /* in 10kHz unit */
1713 ULONG ulBootUpSidePortClock; /* in 10kHz unit */
1714 ULONG ulMinSidePortClock; /* in 10kHz unit */
1715 ULONG ulReserved2[6]; /* must be 0x0 for the reserved */
1716 ULONG ulSystemConfig; /* see explanation below */
1717 ULONG ulBootUpReqDisplayVector;
1718 ULONG ulOtherDisplayMisc;
1719 ULONG ulDDISlot1Config;
1720 ULONG ulDDISlot2Config;
1721 UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
1722 UCHAR ucUMAChannelNumber;
1723 UCHAR ucDockingPinBit;
1724 UCHAR ucDockingPinPolarity;
1725 ULONG ulDockingPinCFGInfo;
1726 ULONG ulCPUCapInfo;
1727 USHORT usNumberOfCyclesInPeriod;
1728 USHORT usMaxNBVoltage;
1729 USHORT usMinNBVoltage;
1730 USHORT usBootUpNBVoltage;
1731 ULONG ulHTLinkFreq; /* in 10Khz */
1732 USHORT usMinHTLinkWidth;
1733 USHORT usMaxHTLinkWidth;
1734 USHORT usUMASyncStartDelay;
1735 USHORT usUMADataReturnTime;
1736 USHORT usLinkStatusZeroTime;
1737 USHORT usReserved;
1738 ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */
1739 ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */
1740 USHORT usMaxUpStreamHTLinkWidth;
1741 USHORT usMaxDownStreamHTLinkWidth;
1742 USHORT usMinUpStreamHTLinkWidth;
1743 USHORT usMinDownStreamHTLinkWidth;
1744 ULONG ulReserved3[97]; /* must be 0x0 */
1745} ATOM_INTEGRATED_SYSTEM_INFO_V2;
1746
1747/*
1748ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
1749ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
1750ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
1751
1752ulSystemConfig:
1753Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
1754Bit[1]=1: system boots up at AMD overdriven state or user customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay state
1755 =0: system boots up at driver control state. Power state depends on PowerPlay table.
1756Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
1757Bit[3]=1: Only one power state(Performance) will be supported.
1758 =0: Multiple power states supported from PowerPlay table.
1759Bit[4]=1: CLMC is supported and enabled on current system.
1760 =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through the ATIF interface.
1761Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
1762 =0: CDLW is disabled. If CLMC is enabled, Min HT width will be set equal to Max HT width. If CLMC is disabled, Max HT width will be applied.
1763Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
1764 =0: Voltage settings are determined by the powerplay table.
1765Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is a workaround for a CPU (Griffin) performance issue.
1766 =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
1767
1768ulBootUpReqDisplayVector: This dword is a bit vector that indicates which display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
1769
1770ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
1771 [7:0] - Bootup TV standard selection; this is a bit vector to indicate which TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
1772
1773ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
1774 [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
1775 [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
1776 [15:8] - Lane configuration attribute;
1777 [23:16]- Connector type, possible value:
1778 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
1779 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
1780 CONNECTOR_OBJECT_ID_HDMI_TYPE_A
1781 CONNECTOR_OBJECT_ID_DISPLAYPORT
1782 [31:24]- Reserved
1783
1784ulDDISlot2Config: Same as Slot1.
1785ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
1786For IGP, Hypermemory is the only memory type shown in CCC.
1787
1788ucUMAChannelNumber: how many channels for the UMA;
1789
1790ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
1791ucDockingPinBit: which bit in this register to read the pin status;
1792ucDockingPinPolarity:Polarity of the pin when docked;
1793
1794ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
1795
1796usNumberOfCyclesInPeriod:Indicates how many cycles when PWM duty is 100%.
1797usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
1798usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
1799 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
1800 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value when ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
1801 GPU SW doesn't control mode: usMaxNBVoltage & usMinNBVoltage=0, and ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE is don't-care
1802usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
1803
1804ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
1805usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
1806 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1807usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
1808 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1809
1810usUMASyncStartDelay: Memory access latency, required for watermark calculation
1811usUMADataReturnTime: Memory access latency, required for watermark calculation
1812usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us
1813for Griffin or Greyhound. SBIOS needs to convert to the actual time by:
1814 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
1815 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
1816 if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
1817 if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
1818
1819ulHighVoltageHTLinkFreq: HT link frequency for power state with high voltage. If boot up runs in HT1, this must be 0.
1820 This must be less than or equal to ulHTLinkFreq(bootup frequency).
1821ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
1822 This must be less than or equal to ulHighVoltageHTLinkFreq.
1823
1824usMaxUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
1825usMaxDownStreamHTLinkWidth: same as above.
1826usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
1827usMinDownStreamHTLinkWidth: same as above.
1828*/
1829
1830#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
1831#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
1832#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
1833#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
1834#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
1835#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
1836#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
1837#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
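
/*
  Illustrative sketch only, not part of the original ATOM definitions: how GPU SW
  might test a couple of the ulSystemConfig bits documented above. The helper
  names and the int-as-bool return values are assumptions made for this example.
*/
static inline int atom_igp_uses_pwm_voltage(ULONG ulSystemConfig)
{
	/* Bit[2]: 1 = PWM method on NB voltage control, 0 = GPIO method */
	return (ulSystemConfig & SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE) != 0;
}

static inline int atom_igp_clmc_enabled(ULONG ulSystemConfig)
{
	/* Bit[4]: 1 = CLMC is supported and enabled on the current system */
	return (ulSystemConfig & SYSTEM_CONFIG_CLMC_ENABLED) != 0;
}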
1838
1839#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
1840
1841#define b0IGP_DDI_SLOT_LANE_MAP_MASK 0x0F
1842#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK 0xF0
1843#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3 0x01
1844#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7 0x02
1845#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11 0x04
1846#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15 0x08
1847
1848#define IGP_DDI_SLOT_ATTRIBUTE_MASK 0x0000FF00
1849#define IGP_DDI_SLOT_CONFIG_REVERSED 0x00000100
1850#define b1IGP_DDI_SLOT_CONFIG_REVERSED 0x01
1851
1852#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
1853
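/*
  Illustrative sketch only, not part of the original ATOM definitions: extracting
  the chassis/docking lane maps, the reversed attribute and the connector object
  id from ulDDISlot1Config (or ulDDISlot2Config) with the masks above. Helper
  names are assumptions made for this example.
*/
static inline UCHAR atom_ddi_slot_chassis_lane_map(ULONG ulDDISlotConfig)
{
	/* [3:0]: lane map of the DDI slot/connector on the chassis */
	return (UCHAR)(ulDDISlotConfig & b0IGP_DDI_SLOT_LANE_MAP_MASK);
}

static inline UCHAR atom_ddi_slot_docking_lane_map(ULONG ulDDISlotConfig)
{
	/* [7:4]: lane map of the same DDI slot/connector on the docking station */
	return (UCHAR)((ulDDISlotConfig & b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK) >> 4);
}

static inline int atom_ddi_slot_is_reversed(ULONG ulDDISlotConfig)
{
	/* bit 8 of the attribute byte [15:8]: lane order reversed */
	return (ulDDISlotConfig & IGP_DDI_SLOT_CONFIG_REVERSED) != 0;
}

static inline UCHAR atom_ddi_slot_connector_type(ULONG ulDDISlotConfig)
{
	/* [23:16]: connector object id, e.g. CONNECTOR_OBJECT_ID_HDMI_TYPE_A */
	return (UCHAR)((ulDDISlotConfig & IGP_DDI_SLOT_CONNECTOR_TYPE_MASK) >> 16);
}
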
1854#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
1855#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
1856#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
1857#define ATOM_DFP_INT_ENCODER1_INDEX 0x00000003
1858#define ATOM_CRT_INT_ENCODER2_INDEX 0x00000004
1859#define ATOM_LCD_EXT_ENCODER1_INDEX 0x00000005
1860#define ATOM_TV_EXT_ENCODER1_INDEX 0x00000006
1861#define ATOM_DFP_EXT_ENCODER1_INDEX 0x00000007
1862#define ATOM_CV_INT_ENCODER1_INDEX 0x00000008
1863#define ATOM_DFP_INT_ENCODER2_INDEX 0x00000009
1864#define ATOM_CRT_EXT_ENCODER1_INDEX 0x0000000A
1865#define ATOM_CV_EXT_ENCODER1_INDEX 0x0000000B
1866#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
1867#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
1868
1869/* define ASIC internal encoder id ( bit vector ) */
1870#define ASIC_INT_DAC1_ENCODER_ID 0x00
1871#define ASIC_INT_TV_ENCODER_ID 0x02
1872#define ASIC_INT_DIG1_ENCODER_ID 0x03
1873#define ASIC_INT_DAC2_ENCODER_ID 0x04
1874#define ASIC_EXT_TV_ENCODER_ID 0x06
1875#define ASIC_INT_DVO_ENCODER_ID 0x07
1876#define ASIC_INT_DIG2_ENCODER_ID 0x09
1877#define ASIC_EXT_DIG_ENCODER_ID 0x05
1878
1879/* define Encoder attribute */
1880#define ATOM_ANALOG_ENCODER 0
1881#define ATOM_DIGITAL_ENCODER 1
1882
1883#define ATOM_DEVICE_CRT1_INDEX 0x00000000
1884#define ATOM_DEVICE_LCD1_INDEX 0x00000001
1885#define ATOM_DEVICE_TV1_INDEX 0x00000002
1886#define ATOM_DEVICE_DFP1_INDEX 0x00000003
1887#define ATOM_DEVICE_CRT2_INDEX 0x00000004
1888#define ATOM_DEVICE_LCD2_INDEX 0x00000005
1889#define ATOM_DEVICE_TV2_INDEX 0x00000006
1890#define ATOM_DEVICE_DFP2_INDEX 0x00000007
1891#define ATOM_DEVICE_CV_INDEX 0x00000008
1892#define ATOM_DEVICE_DFP3_INDEX 0x00000009
1893#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
1894#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
1895#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
1896#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
1897#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
1898#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
1899#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
1900#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
1901#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1)
1902
1903#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
1904
1905#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
1906#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
1907#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX)
1908#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
1909#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX)
1910#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX)
1911#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
1912#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX)
1913#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX)
1914#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX)
1915#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
1916#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX)
1917
1918#define ATOM_DEVICE_CRT_SUPPORT \
1919 (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
1920#define ATOM_DEVICE_DFP_SUPPORT \
1921 (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
1922 ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
1923 ATOM_DEVICE_DFP5_SUPPORT)
1924#define ATOM_DEVICE_TV_SUPPORT \
1925 (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
1926#define ATOM_DEVICE_LCD_SUPPORT \
1927 (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
1928
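/*
  Illustrative sketch only, not part of the original ATOM definitions: testing a
  device bit vector (e.g. usDeviceSupport or ulBootUpReqDisplayVector) against
  the ATOM_DEVICE_xxx_SUPPORT masks above. The helper name and the example field
  access are assumptions made for this example.
*/
static inline int atom_device_requested(ULONG ulDeviceVector, ULONG ulDeviceSupportMask)
{
	return (ulDeviceVector & ulDeviceSupportMask) != 0;
}

/*
  Example use (assumed variable name):
      if (atom_device_requested(info->ulBootUpReqDisplayVector, ATOM_DEVICE_LCD1_SUPPORT))
              ... bring up the internal panel path ...
*/
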
1929#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
1930#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
1931#define ATOM_DEVICE_CONNECTOR_VGA 0x00000001
1932#define ATOM_DEVICE_CONNECTOR_DVI_I 0x00000002
1933#define ATOM_DEVICE_CONNECTOR_DVI_D 0x00000003
1934#define ATOM_DEVICE_CONNECTOR_DVI_A 0x00000004
1935#define ATOM_DEVICE_CONNECTOR_SVIDEO 0x00000005
1936#define ATOM_DEVICE_CONNECTOR_COMPOSITE 0x00000006
1937#define ATOM_DEVICE_CONNECTOR_LVDS 0x00000007
1938#define ATOM_DEVICE_CONNECTOR_DIGI_LINK 0x00000008
1939#define ATOM_DEVICE_CONNECTOR_SCART 0x00000009
1940#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A
1941#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B
1942#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
1943#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
1944
1945#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
1946#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
1947#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
1948#define ATOM_DEVICE_DAC_INFO_DACA 0x00000001
1949#define ATOM_DEVICE_DAC_INFO_DACB 0x00000002
1950#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003
1951
1952#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000
1953
1954#define ATOM_DEVICE_I2C_LINEMUX_MASK 0x0000000F
1955#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000
1956
1957#define ATOM_DEVICE_I2C_ID_MASK 0x00000070
1958#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
1959#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
1960#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
1961#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */
1962#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */
1963
1964#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
1965#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
1966#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
1967#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
1968
1969/* usDeviceSupport: */
1970/* Bit 0 = 0 - no CRT1 support= 1- CRT1 is supported */
1971/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */
1972/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */
1973/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */
1974/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */
1975/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */
1976/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */
1977/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */
1978/* Bit 8 = 0 - no CV support= 1- CV is supported */
1979/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */
1980/* Byte1 (Supported Device Info) */
1981/* Bit 0 = 0 - no CV support= 1- CV is supported */
1982/* */
1983/* */
1984
1985/* ucI2C_ConfigID */
1986/* [7:0] - I2C LINE Associate ID */
1987/* = 0 - no I2C */
1988/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
1989/* = 0, [6:0]=SW assisted I2C ID */
1990/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
1991/* = 2, HW engine for Multimedia use */
1992/* = 3-7 Reserved for future I2C engines */
1993/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
1994
1995typedef struct _ATOM_I2C_ID_CONFIG {
1996#if ATOM_BIG_ENDIAN
1997 UCHAR bfHW_Capable:1;
1998 UCHAR bfHW_EngineID:3;
1999 UCHAR bfI2C_LineMux:4;
2000#else
2001 UCHAR bfI2C_LineMux:4;
2002 UCHAR bfHW_EngineID:3;
2003 UCHAR bfHW_Capable:1;
2004#endif
2005} ATOM_I2C_ID_CONFIG;
2006
2007typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
2008 ATOM_I2C_ID_CONFIG sbfAccess;
2009 UCHAR ucAccess;
2010} ATOM_I2C_ID_CONFIG_ACCESS;
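
/*
  Illustrative sketch only, not part of the original ATOM definitions: unpacking
  an ucI2C_ConfigID byte through the ATOM_I2C_ID_CONFIG_ACCESS union above. The
  bitfield layout follows the ATOM_BIG_ENDIAN setting; on little-endian hosts
  bfI2C_LineMux occupies bits [3:0], bfHW_EngineID [6:4] and bfHW_Capable bit [7].
  The helper name and the output-pointer style are assumptions for this example.
*/
static inline void atom_decode_i2c_id(UCHAR ucConfigId,
				      UCHAR *pHwCapable,
				      UCHAR *pEngineId,
				      UCHAR *pLineMux)
{
	ATOM_I2C_ID_CONFIG_ACCESS i2cId;

	i2cId.ucAccess = ucConfigId;
	*pHwCapable = i2cId.sbfAccess.bfHW_Capable;   /* 1: HW assisted I2C */
	*pEngineId  = i2cId.sbfAccess.bfHW_EngineID;  /* 1: non-MM, 2: MM engine */
	*pLineMux   = i2cId.sbfAccess.bfI2C_LineMux;  /* mux number, or SW GPIO id */
}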
2011
2012/****************************************************************************/
2013/* Structure used in GPIO_I2C_InfoTable */
2014/****************************************************************************/
2015typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
2016 USHORT usClkMaskRegisterIndex;
2017 USHORT usClkEnRegisterIndex;
2018 USHORT usClkY_RegisterIndex;
2019 USHORT usClkA_RegisterIndex;
2020 USHORT usDataMaskRegisterIndex;
2021 USHORT usDataEnRegisterIndex;
2022 USHORT usDataY_RegisterIndex;
2023 USHORT usDataA_RegisterIndex;
2024 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
2025 UCHAR ucClkMaskShift;
2026 UCHAR ucClkEnShift;
2027 UCHAR ucClkY_Shift;
2028 UCHAR ucClkA_Shift;
2029 UCHAR ucDataMaskShift;
2030 UCHAR ucDataEnShift;
2031 UCHAR ucDataY_Shift;
2032 UCHAR ucDataA_Shift;
2033 UCHAR ucReserved1;
2034 UCHAR ucReserved2;
2035} ATOM_GPIO_I2C_ASSIGMENT;
2036
2037typedef struct _ATOM_GPIO_I2C_INFO {
2038 ATOM_COMMON_TABLE_HEADER sHeader;
2039 ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
2040} ATOM_GPIO_I2C_INFO;
2041
2042/****************************************************************************/
2043/* Common Structure used in other structures */
2044/****************************************************************************/
2045
2046#ifndef _H2INC
2047
2048/* Please don't add or expand this bitfield structure below, this one will retire soon! */
2049typedef struct _ATOM_MODE_MISC_INFO {
2050#if ATOM_BIG_ENDIAN
2051 USHORT Reserved:6;
2052 USHORT RGB888:1;
2053 USHORT DoubleClock:1;
2054 USHORT Interlace:1;
2055 USHORT CompositeSync:1;
2056 USHORT V_ReplicationBy2:1;
2057 USHORT H_ReplicationBy2:1;
2058 USHORT VerticalCutOff:1;
2059 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
2060 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
2061 USHORT HorizontalCutOff:1;
2062#else
2063 USHORT HorizontalCutOff:1;
2064 USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
2065 USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
2066 USHORT VerticalCutOff:1;
2067 USHORT H_ReplicationBy2:1;
2068 USHORT V_ReplicationBy2:1;
2069 USHORT CompositeSync:1;
2070 USHORT Interlace:1;
2071 USHORT DoubleClock:1;
2072 USHORT RGB888:1;
2073 USHORT Reserved:6;
2074#endif
2075} ATOM_MODE_MISC_INFO;
2076
2077typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2078 ATOM_MODE_MISC_INFO sbfAccess;
2079 USHORT usAccess;
2080} ATOM_MODE_MISC_INFO_ACCESS;
2081
2082#else
2083
2084typedef union _ATOM_MODE_MISC_INFO_ACCESS {
2085 USHORT usAccess;
2086} ATOM_MODE_MISC_INFO_ACCESS;
2087
2088#endif
2089
2090/* usModeMiscInfo- */
2091#define ATOM_H_CUTOFF 0x01
2092#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */
2093#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */
2094#define ATOM_V_CUTOFF 0x08
2095#define ATOM_H_REPLICATIONBY2 0x10
2096#define ATOM_V_REPLICATIONBY2 0x20
2097#define ATOM_COMPOSITESYNC 0x40
2098#define ATOM_INTERLACE 0x80
2099#define ATOM_DOUBLE_CLOCK_MODE 0x100
2100#define ATOM_RGB888_MODE 0x200
2101
2102/* usRefreshRate- */
2103#define ATOM_REFRESH_43 43
2104#define ATOM_REFRESH_47 47
2105#define ATOM_REFRESH_56 56
2106#define ATOM_REFRESH_60 60
2107#define ATOM_REFRESH_65 65
2108#define ATOM_REFRESH_70 70
2109#define ATOM_REFRESH_72 72
2110#define ATOM_REFRESH_75 75
2111#define ATOM_REFRESH_85 85
2112
2113/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
2114/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */
2115/* */
2116/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */
2117/* = EDID_HA + EDID_HBL */
2118/* VESA_HDISP = VESA_ACTIVE = EDID_HA */
2119/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
2120/* = EDID_HA + EDID_HSO */
2121/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
2122/* VESA_BORDER = EDID_BORDER */
2123
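/*
  Illustrative sketch only, not part of the original ATOM definitions: the
  EDID-to-VESA translation above applied to the horizontal timing. Field and
  helper names are assumptions; e.g. a 1920-wide mode with EDID_HA=1920,
  EDID_HBL=280 and EDID_HSO=88 gives VESA_HTOTAL=2200 and VESA_HSYNC_START=2008.
*/
static inline void atom_edid_to_vesa_h(USHORT usEdidHA, USHORT usEdidHBL,
				       USHORT usEdidHSO,
				       USHORT *pHTotal, USHORT *pHSyncStart)
{
	*pHTotal = usEdidHA + usEdidHBL;     /* VESA_HTOTAL = EDID_HA + EDID_HBL */
	*pHSyncStart = usEdidHA + usEdidHSO; /* VESA_HSYNC_START = EDID_HA + EDID_HSO */
}
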
2124/****************************************************************************/
2125/* Structure used in SetCRTC_UsingDTDTimingTable */
2126/****************************************************************************/
2127typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
2128 USHORT usH_Size;
2129 USHORT usH_Blanking_Time;
2130 USHORT usV_Size;
2131 USHORT usV_Blanking_Time;
2132 USHORT usH_SyncOffset;
2133 USHORT usH_SyncWidth;
2134 USHORT usV_SyncOffset;
2135 USHORT usV_SyncWidth;
2136 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2137 UCHAR ucH_Border; /* From DFP EDID */
2138 UCHAR ucV_Border;
2139 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
2140 UCHAR ucPadding[3];
2141} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
2142
2143/****************************************************************************/
2144/* Structure used in SetCRTC_TimingTable */
2145/****************************************************************************/
2146typedef struct _SET_CRTC_TIMING_PARAMETERS {
2147 USHORT usH_Total; /* horizontal total */
2148 USHORT usH_Disp; /* horizontal display */
2149 USHORT usH_SyncStart; /* horizontal Sync start */
2150 USHORT usH_SyncWidth; /* horizontal Sync width */
2151 USHORT usV_Total; /* vertical total */
2152 USHORT usV_Disp; /* vertical display */
2153 USHORT usV_SyncStart; /* vertical Sync start */
2154 USHORT usV_SyncWidth; /* vertical Sync width */
2155 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2156 UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
2157 UCHAR ucOverscanRight; /* right */
2158 UCHAR ucOverscanLeft; /* left */
2159 UCHAR ucOverscanBottom; /* bottom */
2160 UCHAR ucOverscanTop; /* top */
2161 UCHAR ucReserved;
2162} SET_CRTC_TIMING_PARAMETERS;
2163#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
2164
2165/****************************************************************************/
2166/* Structure used in StandardVESA_TimingTable */
2167/* AnalogTV_InfoTable */
2168/* ComponentVideoInfoTable */
2169/****************************************************************************/
2170typedef struct _ATOM_MODE_TIMING {
2171 USHORT usCRTC_H_Total;
2172 USHORT usCRTC_H_Disp;
2173 USHORT usCRTC_H_SyncStart;
2174 USHORT usCRTC_H_SyncWidth;
2175 USHORT usCRTC_V_Total;
2176 USHORT usCRTC_V_Disp;
2177 USHORT usCRTC_V_SyncStart;
2178 USHORT usCRTC_V_SyncWidth;
2179 USHORT usPixelClock; /* in 10Khz unit */
2180 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2181 USHORT usCRTC_OverscanRight;
2182 USHORT usCRTC_OverscanLeft;
2183 USHORT usCRTC_OverscanBottom;
2184 USHORT usCRTC_OverscanTop;
2185 USHORT usReserve;
2186 UCHAR ucInternalModeNumber;
2187 UCHAR ucRefreshRate;
2188} ATOM_MODE_TIMING;
2189
2190typedef struct _ATOM_DTD_FORMAT {
2191 USHORT usPixClk;
2192 USHORT usHActive;
2193 USHORT usHBlanking_Time;
2194 USHORT usVActive;
2195 USHORT usVBlanking_Time;
2196 USHORT usHSyncOffset;
2197 USHORT usHSyncWidth;
2198 USHORT usVSyncOffset;
2199 USHORT usVSyncWidth;
2200 USHORT usImageHSize;
2201 USHORT usImageVSize;
2202 UCHAR ucHBorder;
2203 UCHAR ucVBorder;
2204 ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
2205 UCHAR ucInternalModeNumber;
2206 UCHAR ucRefreshRate;
2207} ATOM_DTD_FORMAT;
2208
2209/****************************************************************************/
2210/* Structure used in LVDS_InfoTable */
2211/* * Need a document to describe this table */
2212/****************************************************************************/
2213#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
2214#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
2215#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
2216#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
2217
2218/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
2219/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
2220#define LCDPANEL_CAP_READ_EDID 0x1
2221
2222/* ucTableFormatRevision=1 */
2223/* ucTableContentRevision=1 */
2224typedef struct _ATOM_LVDS_INFO {
2225 ATOM_COMMON_TABLE_HEADER sHeader;
2226 ATOM_DTD_FORMAT sLCDTiming;
2227 USHORT usModePatchTableOffset;
2228 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
2229 USHORT usOffDelayInMs;
2230 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2231 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2232 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
2233 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
2234 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
2235 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
2236 UCHAR ucPanelDefaultRefreshRate;
2237 UCHAR ucPanelIdentification;
2238 UCHAR ucSS_Id;
2239} ATOM_LVDS_INFO;
2240
2241/* ucTableFormatRevision=1 */
2242/* ucTableContentRevision=2 */
2243typedef struct _ATOM_LVDS_INFO_V12 {
2244 ATOM_COMMON_TABLE_HEADER sHeader;
2245 ATOM_DTD_FORMAT sLCDTiming;
2246 USHORT usExtInfoTableOffset;
2247 USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
2248 USHORT usOffDelayInMs;
2249 UCHAR ucPowerSequenceDigOntoDEin10Ms;
2250 UCHAR ucPowerSequenceDEtoBLOnin10Ms;
2251 UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
2252 /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
2253 /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
2254 /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
2255 UCHAR ucPanelDefaultRefreshRate;
2256 UCHAR ucPanelIdentification;
2257 UCHAR ucSS_Id;
2258 USHORT usLCDVenderID;
2259 USHORT usLCDProductID;
2260 UCHAR ucLCDPanel_SpecialHandlingCap;
2261 UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */
2262 UCHAR ucReserved[2];
2263} ATOM_LVDS_INFO_V12;
2264
2265#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
2266
2267typedef struct _ATOM_PATCH_RECORD_MODE {
2268 UCHAR ucRecordType;
2269 USHORT usHDisp;
2270 USHORT usVDisp;
2271} ATOM_PATCH_RECORD_MODE;
2272
2273typedef struct _ATOM_LCD_RTS_RECORD {
2274 UCHAR ucRecordType;
2275 UCHAR ucRTSValue;
2276} ATOM_LCD_RTS_RECORD;
2277
2278/* !! If the record below exists, it should always be the first record for easy use in command table!!! */
2279typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
2280 UCHAR ucRecordType;
2281 USHORT usLCDCap;
2282} ATOM_LCD_MODE_CONTROL_CAP;
2283
2284#define LCD_MODE_CAP_BL_OFF 1
2285#define LCD_MODE_CAP_CRTC_OFF 2
2286#define LCD_MODE_CAP_PANEL_OFF 4
2287
2288typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
2289 UCHAR ucRecordType;
2290 UCHAR ucFakeEDIDLength;
2291 UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEDIDLength elements. */
2292} ATOM_FAKE_EDID_PATCH_RECORD;
2293
2294typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
2295 UCHAR ucRecordType;
2296 USHORT usHSize;
2297 USHORT usVSize;
2298} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
2299
2300#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
2301#define LCD_RTS_RECORD_TYPE 2
2302#define LCD_CAP_RECORD_TYPE 3
2303#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
2304#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
2305#define ATOM_RECORD_END_TYPE 0xFF
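
/*
  Illustrative sketch only, not part of the original ATOM definitions: walking
  the variable-size LCD patch records that follow usModePatchTableOffset /
  usExtInfoTableOffset until ATOM_RECORD_END_TYPE. Sizes assume the byte packing
  normally in force for these BIOS tables; the helper name is an assumption.
*/
static inline const UCHAR *atom_next_lcd_patch_record(const UCHAR *pRecord)
{
	switch (pRecord[0]) {	/* the first byte of every record is its type */
	case LCD_MODE_PATCH_RECORD_MODE_TYPE:
		return pRecord + sizeof(ATOM_PATCH_RECORD_MODE);
	case LCD_RTS_RECORD_TYPE:
		return pRecord + sizeof(ATOM_LCD_RTS_RECORD);
	case LCD_CAP_RECORD_TYPE:
		return pRecord + sizeof(ATOM_LCD_MODE_CONTROL_CAP);
	case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
		/* variable length: 2 header bytes plus ucFakeEDIDLength EDID bytes */
		return pRecord + 2 + pRecord[1];
	case LCD_PANEL_RESOLUTION_RECORD_TYPE:
		return pRecord + sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
	default:
		return NULL;	/* ATOM_RECORD_END_TYPE or unknown record */
	}
}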
2306
2307/****************************Spread Spectrum Info Table Definitions **********************/
2308
2309/* ucTableFormatRevision=1 */
2310/* ucTableContentRevision=2 */
2311typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
2312 USHORT usSpreadSpectrumPercentage;
2313 UCHAR ucSpreadSpectrumType; /* Bit0=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
2314 UCHAR ucSS_Step;
2315 UCHAR ucSS_Delay;
2316 UCHAR ucSS_Id;
2317 UCHAR ucRecommandedRef_Div;
2318 UCHAR ucSS_Range; /* it was reserved for V11 */
2319} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
2320
2321#define ATOM_MAX_SS_ENTRY 16
2322#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */
2323#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */
2324
2325#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
2326#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
2327#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
2328#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
2329#define ATOM_INTERNAL_SS_MASK 0x00000000
2330#define ATOM_EXTERNAL_SS_MASK 0x00000002
2331#define EXEC_SS_STEP_SIZE_SHIFT 2
2332#define EXEC_SS_DELAY_SHIFT 4
2333#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
2334
2335typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
2336 ATOM_COMMON_TABLE_HEADER sHeader;
2337 ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
2338} ATOM_SPREAD_SPECTRUM_INFO;
2339
2340/****************************************************************************/
2341/* Structure used in AnalogTV_InfoTable (Top level) */
2342/****************************************************************************/
2343/* ucTVBootUpDefaultStd definition: */
2344
2345/* ATOM_TV_NTSC 1 */
2346/* ATOM_TV_NTSCJ 2 */
2347/* ATOM_TV_PAL 3 */
2348/* ATOM_TV_PALM 4 */
2349/* ATOM_TV_PALCN 5 */
2350/* ATOM_TV_PALN 6 */
2351/* ATOM_TV_PAL60 7 */
2352/* ATOM_TV_SECAM 8 */
2353
2354/* ucTVSuppportedStd definition: */
2355#define NTSC_SUPPORT 0x1
2356#define NTSCJ_SUPPORT 0x2
2357
2358#define PAL_SUPPORT 0x4
2359#define PALM_SUPPORT 0x8
2360#define PALCN_SUPPORT 0x10
2361#define PALN_SUPPORT 0x20
2362#define PAL60_SUPPORT 0x40
2363#define SECAM_SUPPORT 0x80
2364
2365#define MAX_SUPPORTED_TV_TIMING 2
2366
2367typedef struct _ATOM_ANALOG_TV_INFO {
2368 ATOM_COMMON_TABLE_HEADER sHeader;
2369 UCHAR ucTV_SupportedStandard;
2370 UCHAR ucTV_BootUpDefaultStandard;
2371 UCHAR ucExt_TV_ASIC_ID;
2372 UCHAR ucExt_TV_ASIC_SlaveAddr;
2373 /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
2374 ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
2375} ATOM_ANALOG_TV_INFO;
2376
2377/**************************************************************************/
2378/* VRAM usage and their definitions */
2379
2380/* One chunk of VRAM used by Bios is for HWICON surfaces, EDID data. */
2381/* Current Mode timing and Detail Timing and/or STD timing data for EACH device. They can be broken down as below. */
2382/* All the addresses below are the offsets from the frame buffer start. They all MUST be Dword aligned! */
2383/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
2384/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
2385
2386#ifndef VESA_MEMORY_IN_64K_BLOCK
2387#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */
2388#endif
2389
2390#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */
2391#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */
2392#define ATOM_HWICON_INFOTABLE_SIZE 32
2393#define MAX_DTD_MODE_IN_VRAM 6
2394#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */
2395#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
2396#define DFP_ENCODER_TYPE_OFFSET 0x80
2397#define DP_ENCODER_LANE_NUM_OFFSET 0x84
2398#define DP_ENCODER_LINK_RATE_OFFSET 0x88
2399
2400#define ATOM_HWICON1_SURFACE_ADDR 0
2401#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
2402#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
2403#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
2404#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2405#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2406
2407#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2408#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2409#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2410
2411#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2412
2413#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2414#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2415#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2416
2417#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2418#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2419#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2420
2421#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2422#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2423#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2424
2425#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2426#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2427#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2428
2429#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2430#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2431#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2432
2433#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2434#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2435#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2436
2437#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2438#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2439#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2440
2441#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2442#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2443#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2444
2445#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2446#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2447#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2448
2449#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2450
2451#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256)
2452#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
2453
2454/* The size below is in Kb! */
2455#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
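
/*
  Illustrative worked example only, not part of the original ATOM definitions:
  the ATOM_VRAM_RESERVE_SIZE expression above takes the byte span of the
  BIOS-used region, converts it to KB (>>10), pads by 4 and rounds down to a
  multiple of 4 KB. For instance a span of 0x2A00 bytes (10.5 KB):
      (0x2A00 >> 10) = 10;  10 + 4 = 14;  14 & 0xFFFC = 12
  so 12 KB would be reserved. The helper name below is an assumption.
*/
static inline ULONG atom_vram_reserve_kb(ULONG ulSpanInBytes)
{
	return ((ulSpanInBytes >> 10) + 4) & 0xFFFC;
}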
2456
2457#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
2458#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
2459#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
2460#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
2461
2462/***********************************************************************************/
2463/* Structure used in VRAM_UsageByFirmwareTable */
2464/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
2465/* at run time. */
2466/* note2: From RV770, the memory is more than 32-bit addressable, so we will change */
2467/* ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains */
2468/* exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
2469/* (in offset to start of memory address) is KB aligned instead of byte aligned. */
2470/***********************************************************************************/
2471#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
2472
2473typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
2474 ULONG ulStartAddrUsedByFirmware;
2475 USHORT usFirmwareUseInKb;
2476 USHORT usReserved;
2477} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
2478
2479typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
2480 ATOM_COMMON_TABLE_HEADER sHeader;
2481 ATOM_FIRMWARE_VRAM_RESERVE_INFO
2482 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
2483} ATOM_VRAM_USAGE_BY_FIRMWARE;
2484
2485/****************************************************************************/
2486/* Structure used in GPIO_Pin_LUTTable */
2487/****************************************************************************/
2488typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
2489 USHORT usGpioPin_AIndex;
2490 UCHAR ucGpioPinBitShift;
2491 UCHAR ucGPIO_ID;
2492} ATOM_GPIO_PIN_ASSIGNMENT;
2493
2494typedef struct _ATOM_GPIO_PIN_LUT {
2495 ATOM_COMMON_TABLE_HEADER sHeader;
2496 ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
2497} ATOM_GPIO_PIN_LUT;
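
/*
  Illustrative sketch only, not part of the original ATOM definitions: looking up
  a pin assignment by ucGPIO_ID in the GPIO_Pin_LUT table above. The entry count
  is not stored in the table, so it is derived here from the total table size,
  assuming the common table header carries it in usStructureSize; the helper name
  is an assumption.
*/
static inline const ATOM_GPIO_PIN_ASSIGNMENT *
atom_find_gpio_pin(const ATOM_GPIO_PIN_LUT *pLut, UCHAR ucGpioId)
{
	int i;
	int num = (pLut->sHeader.usStructureSize -
		   sizeof(ATOM_COMMON_TABLE_HEADER)) /
		  sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

	for (i = 0; i < num; i++) {
		if (pLut->asGPIO_Pin[i].ucGPIO_ID == ucGpioId)
			return &pLut->asGPIO_Pin[i];
	}
	return NULL;	/* no assignment with that GPIO id */
}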
2498
2499/****************************************************************************/
2500/* Structure used in ComponentVideoInfoTable */
2501/****************************************************************************/
2502#define GPIO_PIN_ACTIVE_HIGH 0x1
2503
2504#define MAX_SUPPORTED_CV_STANDARDS 5
2505
2506/* definitions for ATOM_GPIO_INFO.ucSettings */
2507#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */
2508#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */
2509#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */
2510
2511typedef struct _ATOM_GPIO_INFO {
2512 USHORT usAOffset;
2513 UCHAR ucSettings;
2514 UCHAR ucReserved;
2515} ATOM_GPIO_INFO;
2516
2517/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
2518#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
2519
2520/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
2521#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */
2522#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */
2523
2524/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
2525/* Line 3 outputs 5V. */
2526#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */
2527#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */
2528#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
2529
2530/* Line 3 outputs 2.2V */
2531#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */
2532#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */
2533#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
2534
2535/* Line 3 outputs 0V */
2536#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */
2537#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */
2538#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
2539
2540#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */
2541
2542#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */
2543
2544/* GPIO bit index in gpio setting per mode value, also represents the block number in gpio blocks. */
2545#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. */
2546#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode. */
2547
2548typedef struct _ATOM_COMPONENT_VIDEO_INFO {
2549 ATOM_COMMON_TABLE_HEADER sHeader;
2550 USHORT usMask_PinRegisterIndex;
2551 USHORT usEN_PinRegisterIndex;
2552 USHORT usY_PinRegisterIndex;
2553 USHORT usA_PinRegisterIndex;
2554 UCHAR ucBitShift;
2555 UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */
2556 ATOM_DTD_FORMAT sReserved; /* must be zeroed out */
2557 UCHAR ucMiscInfo;
2558 UCHAR uc480i;
2559 UCHAR uc480p;
2560 UCHAR uc720p;
2561 UCHAR uc1080i;
2562 UCHAR ucLetterBoxMode;
2563 UCHAR ucReserved[3];
2564 UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */
2565 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
2566 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
2567} ATOM_COMPONENT_VIDEO_INFO;
2568
2569/* ucTableFormatRevision=2 */
2570/* ucTableContentRevision=1 */
2571typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
2572 ATOM_COMMON_TABLE_HEADER sHeader;
2573 UCHAR ucMiscInfo;
2574 UCHAR uc480i;
2575 UCHAR uc480p;
2576 UCHAR uc720p;
2577 UCHAR uc1080i;
2578 UCHAR ucReserved;
2579 UCHAR ucLetterBoxMode;
2580 UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */
2581 ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
2582 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
2583} ATOM_COMPONENT_VIDEO_INFO_V21;
2584
2585#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
2586
2587/****************************************************************************/
2588/* Structure used in object_InfoTable */
2589/****************************************************************************/
2590typedef struct _ATOM_OBJECT_HEADER {
2591 ATOM_COMMON_TABLE_HEADER sHeader;
2592 USHORT usDeviceSupport;
2593 USHORT usConnectorObjectTableOffset;
2594 USHORT usRouterObjectTableOffset;
2595 USHORT usEncoderObjectTableOffset;
2596 USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */
2597 USHORT usDisplayPathTableOffset;
2598} ATOM_OBJECT_HEADER;
2599
2600typedef struct _ATOM_DISPLAY_OBJECT_PATH {
2601 USHORT usDeviceTag; /* supported device */
2602 USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */
2603 USHORT usConnObjectId; /* Connector Object ID */
2604 USHORT usGPUObjectId; /* GPU ID */
2605 USHORT usGraphicObjIds[1]; /* 1st Encoder Obj sourced from GPU to last Graphic Obj destined to connector. */
2606} ATOM_DISPLAY_OBJECT_PATH;
2607
2608typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
2609 UCHAR ucNumOfDispPath;
2610 UCHAR ucVersion;
2611 UCHAR ucPadding[2];
2612 ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
2613} ATOM_DISPLAY_OBJECT_PATH_TABLE;
2614
2615typedef struct _ATOM_OBJECT /* each object has this structure */
2616{
2617 USHORT usObjectID;
2618 USHORT usSrcDstTableOffset;
2619 USHORT usRecordOffset; /* this pointing to a bunch of records defined below */
2620 USHORT usReserved;
2621} ATOM_OBJECT;
2622
2623typedef struct _ATOM_OBJECT_TABLE /* Each of the 4 object table offsets above points to a bunch of objects that all have this structure */
2624{
2625 UCHAR ucNumberOfObjects;
2626 UCHAR ucPadding[3];
2627 ATOM_OBJECT asObjects[1];
2628} ATOM_OBJECT_TABLE;
2629
2630typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset points to this structure */
2631{
2632 UCHAR ucNumberOfSrc;
2633 USHORT usSrcObjectID[1];
2634 UCHAR ucNumberOfDst;
2635 USHORT usDstObjectID[1];
2636} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
2637
2638/* Related definitions, all records are different but they have a common header */
2639typedef struct _ATOM_COMMON_RECORD_HEADER {
2640 UCHAR ucRecordType; /* An enum to indicate the record type */
2641 UCHAR ucRecordSize; /* The size of the whole record in bytes */
2642} ATOM_COMMON_RECORD_HEADER;
2643
2644#define ATOM_I2C_RECORD_TYPE 1
2645#define ATOM_HPD_INT_RECORD_TYPE 2
2646#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
2647#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
2648#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
2649#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
2650#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
2651#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
2652#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
2653#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
2654#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
2655#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
2656#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
2657#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
2658#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
2659
2660/* Must be updated when a new record type is added; equal to that record definition! */
2661#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE
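
/*
  Illustrative sketch only, not part of the original ATOM definitions: because
  every record starts with ATOM_COMMON_RECORD_HEADER, the record list attached
  to an ATOM_OBJECT via usRecordOffset can be walked generically with
  ucRecordSize. The termination conditions (record type 0, ATOM_RECORD_END_TYPE
  or a value above ATOM_MAX_OBJECT_RECORD_NUMBER) and the helper name are
  assumptions made for this example.
*/
static inline const ATOM_COMMON_RECORD_HEADER *
atom_find_object_record(const UCHAR *pRecordStart, UCHAR ucWantedType)
{
	const ATOM_COMMON_RECORD_HEADER *pRecord =
		(const ATOM_COMMON_RECORD_HEADER *)pRecordStart;

	while (pRecord->ucRecordType > 0 &&
	       pRecord->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER &&
	       pRecord->ucRecordSize != 0) {
		if (pRecord->ucRecordType == ucWantedType)
			return pRecord;
		pRecord = (const ATOM_COMMON_RECORD_HEADER *)
			((const UCHAR *)pRecord + pRecord->ucRecordSize);
	}
	return NULL;	/* not found before the end marker */
}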
2662
2663typedef struct _ATOM_I2C_RECORD {
2664 ATOM_COMMON_RECORD_HEADER sheader;
2665 ATOM_I2C_ID_CONFIG sucI2cId;
2666 UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */
2667} ATOM_I2C_RECORD;
2668
2669typedef struct _ATOM_HPD_INT_RECORD {
2670 ATOM_COMMON_RECORD_HEADER sheader;
2671 UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
2672 UCHAR ucPluggged_PinState;
2673} ATOM_HPD_INT_RECORD;
2674
2675typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
2676 ATOM_COMMON_RECORD_HEADER sheader;
2677 UCHAR ucProtectionFlag;
2678 UCHAR ucReserved;
2679} ATOM_OUTPUT_PROTECTION_RECORD;
2680
2681typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
2682 ULONG ulACPIDeviceEnum; /* Reserved for now */
2683 USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
2684 USHORT usPadding;
2685} ATOM_CONNECTOR_DEVICE_TAG;
2686
2687typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
2688 ATOM_COMMON_RECORD_HEADER sheader;
2689 UCHAR ucNumberOfDevice;
2690 UCHAR ucReserved;
2691 ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
2692} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
2693
2694typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
2695 ATOM_COMMON_RECORD_HEADER sheader;
2696 UCHAR ucConfigGPIOID;
2697 UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */
2698 UCHAR ucFlowinGPIPID;
2699 UCHAR ucExtInGPIPID;
2700} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
2701
2702typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
2703 ATOM_COMMON_RECORD_HEADER sheader;
2704 UCHAR ucCTL1GPIO_ID;
2705 UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */
2706 UCHAR ucCTL2GPIO_ID;
2707 UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */
2708 UCHAR ucCTL3GPIO_ID;
2709 UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */
2710 UCHAR ucCTLFPGA_IN_ID;
2711 UCHAR ucPadding[3];
2712} ATOM_ENCODER_FPGA_CONTROL_RECORD;
2713
2714typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
2715 ATOM_COMMON_RECORD_HEADER sheader;
2716 UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
2717 UCHAR ucTVActiveState; /* Indicates whether the pin is 0 or 1 when TV is connected */
2718} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
2719
2720typedef struct _ATOM_JTAG_RECORD {
2721 ATOM_COMMON_RECORD_HEADER sheader;
2722 UCHAR ucTMSGPIO_ID;
2723 UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */
2724 UCHAR ucTCKGPIO_ID;
2725 UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */
2726 UCHAR ucTDOGPIO_ID;
2727 UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */
2728 UCHAR ucTDIGPIO_ID;
2729 UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */
2730 UCHAR ucPadding[2];
2731} ATOM_JTAG_RECORD;
2732
2733/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
2734typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
2735 UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
2736 UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */
2737} ATOM_GPIO_PIN_CONTROL_PAIR;
2738
2739typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
2740 ATOM_COMMON_RECORD_HEADER sheader;
2741 UCHAR ucFlags; /* Future expandability */
2742 UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */
2743 ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real number of gpio pin pairs is determined by ucNumberOfPins */
2744} ATOM_OBJECT_GPIO_CNTL_RECORD;
2745
2746/* Definitions for GPIO pin state */
2747#define GPIO_PIN_TYPE_INPUT 0x00
2748#define GPIO_PIN_TYPE_OUTPUT 0x10
2749#define GPIO_PIN_TYPE_HW_CONTROL 0x20
2750
2751/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
2752#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
2753#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
2754#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
2755#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
2756
2757typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
2758 ATOM_COMMON_RECORD_HEADER sheader;
2759 ULONG ulStrengthControl; /* DVOA strength control for CF */
2760 UCHAR ucPadding[2];
2761} ATOM_ENCODER_DVO_CF_RECORD;
2762
2763/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
2764#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
2765#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
2766
2767typedef struct _ATOM_CONNECTOR_CF_RECORD {
2768 ATOM_COMMON_RECORD_HEADER sheader;
2769 USHORT usMaxPixClk;
2770 UCHAR ucFlowCntlGpioId;
2771 UCHAR ucSwapCntlGpioId;
2772 UCHAR ucConnectedDvoBundle;
2773 UCHAR ucPadding;
2774} ATOM_CONNECTOR_CF_RECORD;
2775
2776typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
2777 ATOM_COMMON_RECORD_HEADER sheader;
2778 ATOM_DTD_FORMAT asTiming;
2779} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
2780
2781typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
2782 ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
2783 UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
2784 UCHAR ucReserved;
2785} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
2786
2787typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
2788 ATOM_COMMON_RECORD_HEADER sheader;
2789 UCHAR ucMuxType; /* decides the number of ucMuxState entries: =0, no pin state; =1: single state with complement; >1: multiple states */
2790 UCHAR ucMuxControlPin;
2791 UCHAR ucMuxState[2]; /* for alignment purposes */
2792} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
2793
2794typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
2795 ATOM_COMMON_RECORD_HEADER sheader;
2796 UCHAR ucMuxType;
2797 UCHAR ucMuxControlPin;
2798 UCHAR ucMuxState[2]; /* for alignment purposes */
2799} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
2800
2801/* define ucMuxType */
2802#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
2803#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
2804
2805/****************************************************************************/
2806/* ASIC voltage data table */
2807/****************************************************************************/
2808typedef struct _ATOM_VOLTAGE_INFO_HEADER {
2809 USHORT usVDDCBaseLevel; /* In number of 50mv unit */
2810 USHORT usReserved; /* For possible extension table offset */
2811 UCHAR ucNumOfVoltageEntries;
2812 UCHAR ucBytesPerVoltageEntry;
2813 UCHAR ucVoltageStep; /* Indicates by how many mV one step increments, in 0.5mV units */
2814 UCHAR ucDefaultVoltageEntry;
2815 UCHAR ucVoltageControlI2cLine;
2816 UCHAR ucVoltageControlAddress;
2817 UCHAR ucVoltageControlOffset;
2818} ATOM_VOLTAGE_INFO_HEADER;
2819
2820typedef struct _ATOM_VOLTAGE_INFO {
2821 ATOM_COMMON_TABLE_HEADER sHeader;
2822 ATOM_VOLTAGE_INFO_HEADER viHeader;
2823 UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entries is ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
2824} ATOM_VOLTAGE_INFO;
2825
2826typedef struct _ATOM_VOLTAGE_FORMULA {
2827 USHORT usVoltageBaseLevel; /* In number of 1mv unit */
2828 USHORT usVoltageStep; /* Indicates by how many mV one step increments, 1mV unit */
2829 UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entries, which indicates the max Voltage */
2830 UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */
2831 UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevel ) /VoltageStep */
2832 UCHAR ucReserved;
2833 UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entries is ucNumOfVoltageEntries */
2834} ATOM_VOLTAGE_FORMULA;
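
/*
  Illustrative sketch only, not part of the original ATOM definitions: the VID
  computation described in the ucBaseVID comment above. This sketch ignores the
  0.5 mV ucFlag case and the optional ucVIDAdjustEntries lookup, and it truncates
  rather than rounds; the helper name and those simplifications are assumptions.
*/
static inline UCHAR atom_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *pFormula,
					USHORT usVoltageInMv)
{
	USHORT usStep = pFormula->usVoltageStep;

	if (usStep == 0 || usVoltageInMv < pFormula->usVoltageBaseLevel)
		return pFormula->ucBaseVID;

	/* VID = BaseVID + (Vol - BaseLevel) / VoltageStep */
	return (UCHAR)(pFormula->ucBaseVID +
		       (usVoltageInMv - pFormula->usVoltageBaseLevel) / usStep);
}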
2835
2836typedef struct _ATOM_VOLTAGE_CONTROL {
2837 UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */
2838 UCHAR ucVoltageControlI2cLine;
2839 UCHAR ucVoltageControlAddress;
2840 UCHAR ucVoltageControlOffset;
2841 USHORT usGpioPin_AIndex; /* GPIO_PAD register index */
2842 UCHAR ucGpioPinBitShift[9]; /* at most 8 pins support 255 VIDs, terminate with 0xff */
2843 UCHAR ucReserved;
2844} ATOM_VOLTAGE_CONTROL;
2845
2846/* Define ucVoltageControlId */
2847#define VOLTAGE_CONTROLLED_BY_HW 0x00
2848#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
2849#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
2850#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
2851#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
2852#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
2853#define VOLTAGE_CONTROL_ID_DS4402 0x04
2854
2855typedef struct _ATOM_VOLTAGE_OBJECT {
2856 UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
2857 UCHAR ucSize; /* Size of Object */
2858 ATOM_VOLTAGE_CONTROL asControl; /* describes how to control */
2859 ATOM_VOLTAGE_FORMULA asFormula; /* Indicates how to convert real Voltage to VID */
2860} ATOM_VOLTAGE_OBJECT;
2861
2862typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
2863 ATOM_COMMON_TABLE_HEADER sHeader;
2864 ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */
2865} ATOM_VOLTAGE_OBJECT_INFO;
2866
2867typedef struct _ATOM_LEAKID_VOLTAGE {
2868 UCHAR ucLeakageId;
2869 UCHAR ucReserved;
2870 USHORT usVoltage;
2871} ATOM_LEAKID_VOLTAGE;
2872
2873typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
2874 UCHAR ucProfileId;
2875 UCHAR ucReserved;
2876 USHORT usSize;
2877 USHORT usEfuseSpareStartAddr;
2878 USHORT usFuseIndex[8]; /* from LSB to MSB, max 8 bits; ends with 0xffff if there are fewer than 8 efuse ids */
2879 ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakage id and related voltage */
2880} ATOM_ASIC_PROFILE_VOLTAGE;
2881
2882/* ucProfileId */
2883#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
2884#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
2885#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
2886
2887typedef struct _ATOM_ASIC_PROFILING_INFO {
2888 ATOM_COMMON_TABLE_HEADER asHeader;
2889 ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
2890} ATOM_ASIC_PROFILING_INFO;
2891
2892typedef struct _ATOM_POWER_SOURCE_OBJECT {
2893 UCHAR ucPwrSrcId; /* Power source */
2894 UCHAR ucPwrSensorType; /* GPIO, I2C or none */
2895 UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
2896 UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */
2897 UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */
2898 UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */
2899 UCHAR ucPwrSensActiveState; /* high active or low active */
2900 UCHAR ucReserve[3]; /* reserve */
2901 USHORT usSensPwr; /* in unit of watt */
2902} ATOM_POWER_SOURCE_OBJECT;
2903
2904typedef struct _ATOM_POWER_SOURCE_INFO {
2905 ATOM_COMMON_TABLE_HEADER asHeader;
2906 UCHAR asPwrbehave[16];
2907 ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
2908} ATOM_POWER_SOURCE_INFO;
2909
2910/* Define ucPwrSrcId */
2911#define POWERSOURCE_PCIE_ID1 0x00
2912#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
2913#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
2914#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
2915#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
2916
2917/* define ucPwrSensorId */
2918#define POWER_SENSOR_ALWAYS 0x00
2919#define POWER_SENSOR_GPIO 0x01
2920#define POWER_SENSOR_I2C 0x02
2921
2922/**************************************************************************/
2923/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
2924/* Memory SS Info Table */
2925/* Define Memory Clock SS chip ID */
2926#define ICS91719 1
2927#define ICS91720 2
2928
2929/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */
2930typedef struct _ATOM_I2C_DATA_RECORD {
2931 UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides "Start" and "Stop" */
2932 UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */
2933} ATOM_I2C_DATA_RECORD;
2934
2935/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */
2936typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
2937 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */
2938 UCHAR ucSSChipID; /* SS chip being used */
2939 UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */
2940 UCHAR ucNumOfI2CDataRecords; /* number of data block */
2941 ATOM_I2C_DATA_RECORD asI2CData[1];
2942} ATOM_I2C_DEVICE_SETUP_INFO;
2943
2944/* ========================================================================================== */
2945typedef struct _ATOM_ASIC_MVDD_INFO {
2946 ATOM_COMMON_TABLE_HEADER sHeader;
2947 ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
2948} ATOM_ASIC_MVDD_INFO;
2949
2950/* ========================================================================================== */
2951#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
2952
2953/* ========================================================================================== */
2954/**************************************************************************/
2955
2956typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
2957 ULONG ulTargetClockRange; /* Clock Out frequency (VCO), in units of 10kHz */
2958 USHORT usSpreadSpectrumPercentage; /* in units of 0.01% */
2959 USHORT usSpreadRateInKhz; /* in units of kHz, modulation freq */
2960 UCHAR ucClockIndication; /* Indicates which clock source needs SS */
2961 UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */
2962 UCHAR ucReserved[2];
2963} ATOM_ASIC_SS_ASSIGNMENT;
2964
2965/* Define ucSpreadSpectrumType */
2966#define ASIC_INTERNAL_MEMORY_SS 1
2967#define ASIC_INTERNAL_ENGINE_SS 2
2968#define ASIC_INTERNAL_UVD_SS 3
2969
2970typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
2971 ATOM_COMMON_TABLE_HEADER sHeader;
2972 ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
2973} ATOM_ASIC_INTERNAL_SS_INFO;
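
/*
  Illustrative sketch only, not part of the original ATOM definitions: searching
  the asSpreadSpectrum table above for the entry matching a given clock
  indication (e.g. ASIC_INTERNAL_ENGINE_SS). The fixed entry count of 4 follows
  the array size in ATOM_ASIC_INTERNAL_SS_INFO; the helper name is an assumption.
*/
static inline const ATOM_ASIC_SS_ASSIGNMENT *
atom_find_asic_ss(const ATOM_ASIC_INTERNAL_SS_INFO *pSSInfo, UCHAR ucClock)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (pSSInfo->asSpreadSpectrum[i].ucClockIndication == ucClock)
			return &pSSInfo->asSpreadSpectrum[i];
	}
	return NULL;	/* no SS requested for that clock source */
}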
2974
2975/* ==============================Scratch Pad Definition Portion=============================== */
2976#define ATOM_DEVICE_CONNECT_INFO_DEF 0
2977#define ATOM_ROM_LOCATION_DEF 1
2978#define ATOM_TV_STANDARD_DEF 2
2979#define ATOM_ACTIVE_INFO_DEF 3
2980#define ATOM_LCD_INFO_DEF 4
2981#define ATOM_DOS_REQ_INFO_DEF 5
2982#define ATOM_ACC_CHANGE_INFO_DEF 6
2983#define ATOM_DOS_MODE_INFO_DEF 7
2984#define ATOM_I2C_CHANNEL_STATUS_DEF 8
2985#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
2986
2987/* BIOS_0_SCRATCH Definition */
2988#define ATOM_S0_CRT1_MONO 0x00000001L
2989#define ATOM_S0_CRT1_COLOR 0x00000002L
2990#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
2991
2992#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L
2993#define ATOM_S0_TV1_SVIDEO_A 0x00000008L
2994#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
2995
2996#define ATOM_S0_CV_A 0x00000010L
2997#define ATOM_S0_CV_DIN_A 0x00000020L
2998#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
2999
3000#define ATOM_S0_CRT2_MONO 0x00000100L
3001#define ATOM_S0_CRT2_COLOR 0x00000200L
3002#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
3003
3004#define ATOM_S0_TV1_COMPOSITE 0x00000400L
3005#define ATOM_S0_TV1_SVIDEO 0x00000800L
3006#define ATOM_S0_TV1_SCART 0x00004000L
3007#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
3008
3009#define ATOM_S0_CV 0x00001000L
3010#define ATOM_S0_CV_DIN 0x00002000L
3011#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN)
3012
3013#define ATOM_S0_DFP1 0x00010000L
3014#define ATOM_S0_DFP2 0x00020000L
3015#define ATOM_S0_LCD1 0x00040000L
3016#define ATOM_S0_LCD2 0x00080000L
3017#define ATOM_S0_TV2 0x00100000L
3018#define ATOM_S0_DFP3 0x00200000L
3019#define ATOM_S0_DFP4 0x00400000L
3020#define ATOM_S0_DFP5 0x00800000L
3021
3022#define ATOM_S0_DFP_MASK \
3023 (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
3024
3025#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
3026 /* the FAD/HDP reg access bug. Bit is read by DAL */
3027
3028#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
3029#define ATOM_S0_THERMAL_STATE_SHIFT 26
3030
3031#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
3032#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
3033
3034#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
3035#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
3036#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
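/*
 * Illustrative sketch (not part of the AtomBIOS interface): decoding the
 * thermal and system power state fields from a BIOS_0_SCRATCH value that the
 * caller has already read; how the register is read is outside this header.
 */
static inline UCHAR atom_example_s0_thermal_state(ULONG bios_0_scratch)
{
	return (UCHAR)((bios_0_scratch & ATOM_S0_THERMAL_STATE_MASK) >>
		       ATOM_S0_THERMAL_STATE_SHIFT);
}

static inline UCHAR atom_example_s0_power_state(ULONG bios_0_scratch)
{
	/* one of ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC/DC/LITEAC */
	return (UCHAR)((bios_0_scratch & ATOM_S0_SYSTEM_POWER_STATE_MASK) >>
		       ATOM_S0_SYSTEM_POWER_STATE_SHIFT);
}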
3037
3038/* Byte-aligned definition for BIOS usage */
3039#define ATOM_S0_CRT1_MONOb0 0x01
3040#define ATOM_S0_CRT1_COLORb0 0x02
3041#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
3042
3043#define ATOM_S0_TV1_COMPOSITEb0 0x04
3044#define ATOM_S0_TV1_SVIDEOb0 0x08
3045#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
3046
3047#define ATOM_S0_CVb0 0x10
3048#define ATOM_S0_CV_DINb0 0x20
3049#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
3050
3051#define ATOM_S0_CRT2_MONOb1 0x01
3052#define ATOM_S0_CRT2_COLORb1 0x02
3053#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
3054
3055#define ATOM_S0_TV1_COMPOSITEb1 0x04
3056#define ATOM_S0_TV1_SVIDEOb1 0x08
3057#define ATOM_S0_TV1_SCARTb1 0x40
3058#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
3059
3060#define ATOM_S0_CVb1 0x10
3061#define ATOM_S0_CV_DINb1 0x20
3062#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
3063
3064#define ATOM_S0_DFP1b2 0x01
3065#define ATOM_S0_DFP2b2 0x02
3066#define ATOM_S0_LCD1b2 0x04
3067#define ATOM_S0_LCD2b2 0x08
3068#define ATOM_S0_TV2b2 0x10
3069#define ATOM_S0_DFP3b2 0x20
3070
3071#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
3072#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
3073
3074#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
3075#define ATOM_S0_LCD1_SHIFT 18
3076
3077/* BIOS_1_SCRATCH Definition */
3078#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
3079#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
3080
3081/* BIOS_2_SCRATCH Definition */
3082#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
3083#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
3084#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
3085
3086#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
3087#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
3088#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
3089#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
3090#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
3091#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
3092#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
3093#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
3094#define ATOM_S2_CV_DPMS_STATE 0x01000000L
3095#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
3096#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
3097#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
3098
3099#define ATOM_S2_DFP_DPMS_STATE \
3100 (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
3101 ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
3102 ATOM_S2_DFP5_DPMS_STATE)
3103
3104#define ATOM_S2_DEVICE_DPMS_STATE \
3105 (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
3106 ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
3107 ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
3108 ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
3109
3110#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
3111#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
3112#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
3113
3114#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
3115
3116#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
3117#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1
3118#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2
3119#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3
3120#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
3121#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
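/*
 * Illustrative sketch (not part of the AtomBIOS interface): read-modify-write
 * of the backlight level and rotation fields in a caller-held copy of
 * BIOS_2_SCRATCH.  "rotation" is one of the ATOM_S2_DISPLAY_ROTATION_*_DEGREE
 * codes above.
 */
static inline ULONG atom_example_s2_pack(ULONG bios_2_scratch, UCHAR bl_level,
					 UCHAR rotation)
{
	bios_2_scratch &= ~(ULONG)(ATOM_S2_CURRENT_BL_LEVEL_MASK |
				   ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK);
	bios_2_scratch |= ((ULONG)bl_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
			  ATOM_S2_CURRENT_BL_LEVEL_MASK;
	bios_2_scratch |= (ULONG)rotation << ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT;
	return bios_2_scratch;
}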
3122
3123/* Byte-aligned definition for BIOS usage */
3124#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
3125#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
3126#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
3127#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
3128#define ATOM_S2_TV1_DPMS_STATEb2 0x04
3129#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
3130#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
3131#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
3132#define ATOM_S2_TV2_DPMS_STATEb2 0x40
3133#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
3134#define ATOM_S2_CV_DPMS_STATEb3 0x01
3135#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
3136#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
3137#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
3138
3139#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
3140#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
3141#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
3142#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
3143#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
3144
3145/* BIOS_3_SCRATCH Definition */
3146#define ATOM_S3_CRT1_ACTIVE 0x00000001L
3147#define ATOM_S3_LCD1_ACTIVE 0x00000002L
3148#define ATOM_S3_TV1_ACTIVE 0x00000004L
3149#define ATOM_S3_DFP1_ACTIVE 0x00000008L
3150#define ATOM_S3_CRT2_ACTIVE 0x00000010L
3151#define ATOM_S3_LCD2_ACTIVE 0x00000020L
3152#define ATOM_S3_TV2_ACTIVE 0x00000040L
3153#define ATOM_S3_DFP2_ACTIVE 0x00000080L
3154#define ATOM_S3_CV_ACTIVE 0x00000100L
3155#define ATOM_S3_DFP3_ACTIVE 0x00000200L
3156#define ATOM_S3_DFP4_ACTIVE 0x00000400L
3157#define ATOM_S3_DFP5_ACTIVE 0x00000800L
3158
3159#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
3160
3161#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
3162#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
3163
3164#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L
3165#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L
3166#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L
3167#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
3168#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
3169#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
3170#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
3171#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
3172#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
3173#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
3174#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
3175#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
3176
3177#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
3178#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
3179#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
3180#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
3181
3182/* Byte-aligned definition for BIOS usage */
3183#define ATOM_S3_CRT1_ACTIVEb0 0x01
3184#define ATOM_S3_LCD1_ACTIVEb0 0x02
3185#define ATOM_S3_TV1_ACTIVEb0 0x04
3186#define ATOM_S3_DFP1_ACTIVEb0 0x08
3187#define ATOM_S3_CRT2_ACTIVEb0 0x10
3188#define ATOM_S3_LCD2_ACTIVEb0 0x20
3189#define ATOM_S3_TV2_ACTIVEb0 0x40
3190#define ATOM_S3_DFP2_ACTIVEb0 0x80
3191#define ATOM_S3_CV_ACTIVEb1 0x01
3192#define ATOM_S3_DFP3_ACTIVEb1 0x02
3193#define ATOM_S3_DFP4_ACTIVEb1 0x04
3194#define ATOM_S3_DFP5_ACTIVEb1 0x08
3195
3196#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF
3197
3198#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01
3199#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02
3200#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04
3201#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
3202#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
3203#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
3204#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
3205#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
3206#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
3207#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
3208#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
3209#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
3210
3211#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
3212
3213#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
3214#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
3215#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
3216
3217/* BIOS_4_SCRATCH Definition */
3218#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
3219#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
3220#define ATOM_S4_LCD1_REFRESH_SHIFT 8
3221
3222/* Byte-aligned definition for BIOS usage */
3223#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
3224#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
3225#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
3226
3227/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
3228#define ATOM_S5_DOS_REQ_CRT1b0 0x01
3229#define ATOM_S5_DOS_REQ_LCD1b0 0x02
3230#define ATOM_S5_DOS_REQ_TV1b0 0x04
3231#define ATOM_S5_DOS_REQ_DFP1b0 0x08
3232#define ATOM_S5_DOS_REQ_CRT2b0 0x10
3233#define ATOM_S5_DOS_REQ_LCD2b0 0x20
3234#define ATOM_S5_DOS_REQ_TV2b0 0x40
3235#define ATOM_S5_DOS_REQ_DFP2b0 0x80
3236#define ATOM_S5_DOS_REQ_CVb1 0x01
3237#define ATOM_S5_DOS_REQ_DFP3b1 0x02
3238#define ATOM_S5_DOS_REQ_DFP4b1 0x04
3239#define ATOM_S5_DOS_REQ_DFP5b1 0x08
3240
3241#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
3242
3243#define ATOM_S5_DOS_REQ_CRT1 0x0001
3244#define ATOM_S5_DOS_REQ_LCD1 0x0002
3245#define ATOM_S5_DOS_REQ_TV1 0x0004
3246#define ATOM_S5_DOS_REQ_DFP1 0x0008
3247#define ATOM_S5_DOS_REQ_CRT2 0x0010
3248#define ATOM_S5_DOS_REQ_LCD2 0x0020
3249#define ATOM_S5_DOS_REQ_TV2 0x0040
3250#define ATOM_S5_DOS_REQ_DFP2 0x0080
3251#define ATOM_S5_DOS_REQ_CV 0x0100
3252#define ATOM_S5_DOS_REQ_DFP3 0x0200
3253#define ATOM_S5_DOS_REQ_DFP4 0x0400
3254#define ATOM_S5_DOS_REQ_DFP5 0x0800
3255
3256#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
3257#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
3258#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
3259#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
3260#define ATOM_S5_DOS_FORCE_DEVICEw1 \
3261 (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
3262 ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
3263
3264/* BIOS_6_SCRATCH Definition */
3265#define ATOM_S6_DEVICE_CHANGE 0x00000001L
3266#define ATOM_S6_SCALER_CHANGE 0x00000002L
3267#define ATOM_S6_LID_CHANGE 0x00000004L
3268#define ATOM_S6_DOCKING_CHANGE 0x00000008L
3269#define ATOM_S6_ACC_MODE 0x00000010L
3270#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L
3271#define ATOM_S6_LID_STATE 0x00000040L
3272#define ATOM_S6_DOCK_STATE 0x00000080L
3273#define ATOM_S6_CRITICAL_STATE 0x00000100L
3274#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
3275#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
3276#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
3277#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */
3278#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
3279
3280#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */
3281#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */
3282
3283#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
3284#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
3285#define ATOM_S6_ACC_REQ_TV1 0x00040000L
3286#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
3287#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
3288#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
3289#define ATOM_S6_ACC_REQ_TV2 0x00400000L
3290#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
3291#define ATOM_S6_ACC_REQ_CV 0x01000000L
3292#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
3293#define ATOM_S6_ACC_REQ_DFP4 0x04000000L
3294#define ATOM_S6_ACC_REQ_DFP5 0x08000000L
3295
3296#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L
3297#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L
3298#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L
3299#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
3300#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
3301
3302/* Byte-aligned definition for BIOS usage */
3303#define ATOM_S6_DEVICE_CHANGEb0 0x01
3304#define ATOM_S6_SCALER_CHANGEb0 0x02
3305#define ATOM_S6_LID_CHANGEb0 0x04
3306#define ATOM_S6_DOCKING_CHANGEb0 0x08
3307#define ATOM_S6_ACC_MODEb0 0x10
3308#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20
3309#define ATOM_S6_LID_STATEb0 0x40
3310#define ATOM_S6_DOCK_STATEb0 0x80
3311#define ATOM_S6_CRITICAL_STATEb1 0x01
3312#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
3313#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
3314#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
3315#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
3316#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
3317
3318#define ATOM_S6_ACC_REQ_CRT1b2 0x01
3319#define ATOM_S6_ACC_REQ_LCD1b2 0x02
3320#define ATOM_S6_ACC_REQ_TV1b2 0x04
3321#define ATOM_S6_ACC_REQ_DFP1b2 0x08
3322#define ATOM_S6_ACC_REQ_CRT2b2 0x10
3323#define ATOM_S6_ACC_REQ_LCD2b2 0x20
3324#define ATOM_S6_ACC_REQ_TV2b2 0x40
3325#define ATOM_S6_ACC_REQ_DFP2b2 0x80
3326#define ATOM_S6_ACC_REQ_CVb3 0x01
3327#define ATOM_S6_ACC_REQ_DFP3b3 0x02
3328#define ATOM_S6_ACC_REQ_DFP4b3 0x04
3329#define ATOM_S6_ACC_REQ_DFP5b3 0x08
3330
3331#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
3332#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
3333#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
3334#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40
3335#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80
3336
3337#define ATOM_S6_DEVICE_CHANGE_SHIFT 0
3338#define ATOM_S6_SCALER_CHANGE_SHIFT 1
3339#define ATOM_S6_LID_CHANGE_SHIFT 2
3340#define ATOM_S6_DOCKING_CHANGE_SHIFT 3
3341#define ATOM_S6_ACC_MODE_SHIFT 4
3342#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5
3343#define ATOM_S6_LID_STATE_SHIFT 6
3344#define ATOM_S6_DOCK_STATE_SHIFT 7
3345#define ATOM_S6_CRITICAL_STATE_SHIFT 8
3346#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9
3347#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10
3348#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11
3349#define ATOM_S6_REQ_SCALER_SHIFT 12
3350#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13
3351#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14
3352#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15
3353#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28
3354#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29
3355#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
3356#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
3357
3358/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
3359#define ATOM_S7_DOS_MODE_TYPEb0 0x03
3360#define ATOM_S7_DOS_MODE_VGAb0 0x00
3361#define ATOM_S7_DOS_MODE_VESAb0 0x01
3362#define ATOM_S7_DOS_MODE_EXTb0 0x02
3363#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
3364#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
3365#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
3366#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
3367
3368#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
3369
3370/* BIOS_8_SCRATCH Definition */
3371#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x0000FFFF
3372#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0xFFFF0000
3373
3374#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
3375#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
3376
3377/* BIOS_9_SCRATCH Definition */
3378#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
3379#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
3380#endif
3381#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
3382#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
3383#endif
3384#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
3385#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
3386#endif
3387#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
3388#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
3389#endif
3390
3391#define ATOM_FLAG_SET 0x20
3392#define ATOM_FLAG_CLEAR 0
3393#define CLEAR_ATOM_S6_ACC_MODE \
3394 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3395 ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
3396#define SET_ATOM_S6_DEVICE_CHANGE \
3397 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3398 ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
3399#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
3400 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3401 ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
3402#define SET_ATOM_S6_SCALER_CHANGE \
3403 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3404 ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
3405#define SET_ATOM_S6_LID_CHANGE \
3406 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3407 ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
3408
3409#define SET_ATOM_S6_LID_STATE \
3410 ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
3411 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
3412#define CLEAR_ATOM_S6_LID_STATE \
3413 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3414 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
3415
3416#define SET_ATOM_S6_DOCK_CHANGE \
3417 ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
3418 ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
3419#define SET_ATOM_S6_DOCK_STATE \
3420 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3421 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
3422#define CLEAR_ATOM_S6_DOCK_STATE \
3423 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3424 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
3425
3426#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
3427 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3428 ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
3429#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
3430 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3431 ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
3432#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
3433 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3434 ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
3435
3436#define SET_ATOM_S6_CRITICAL_STATE \
3437 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3438 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
3439#define CLEAR_ATOM_S6_CRITICAL_STATE \
3440 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3441 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
3442
3443#define SET_ATOM_S6_REQ_SCALER \
3444 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3445 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
3446#define CLEAR_ATOM_S6_REQ_SCALER \
3447 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3448 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
3449
3450#define SET_ATOM_S6_REQ_SCALER_ARATIO \
3451 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3452 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
3453#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
3454 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3455 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
3456
3457#define SET_ATOM_S6_I2C_STATE_CHANGE \
3458 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3459 ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3460
3461#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
3462 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3463 ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3464
3465#define SET_ATOM_S6_DEVICE_RECONFIG \
3466 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3467 ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
3468#define CLEAR_ATOM_S0_LCD1 \
3469 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
3470 ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
3471#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
3472 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3473 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
3474#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
3475 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3476 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
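/*
 * Illustrative sketch (not part of the AtomBIOS interface): the SET_/CLEAR_
 * values above pack a scratch register selector (an ATOM_*_INFO_DEF value,
 * bits [15:8]), a bit position within that register (bits [4:0]) and the
 * ATOM_FLAG_SET/ATOM_FLAG_CLEAR flag (bit 5) into one 16-bit request.
 */
static inline void atom_example_decode_scratch_request(USHORT request,
							UCHAR *scratch_def,
							UCHAR *bit_shift,
							UCHAR *set)
{
	*scratch_def = (UCHAR)(request >> 8);	  /* e.g. ATOM_ACC_CHANGE_INFO_DEF */
	*bit_shift = (UCHAR)(request & 0x1F);	  /* bit position inside the register */
	*set = (request & ATOM_FLAG_SET) ? 1 : 0; /* set vs. clear */
}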
3477
3478/****************************************************************************/
3479/* Portion II: Definitions only used in Driver */
3480/****************************************************************************/
3481
3482/* Macros used by driver */
3483
3484#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
3485
3486#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
3487#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
3488
3489#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
3490#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
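/*
 * Illustrative sketch (not part of the AtomBIOS interface): the macros above
 * are typically used together.  GetIndexIntoMasterTable() is an offsetof()
 * style trick - the byte offset of the named USHORT field in the master table
 * list divided by sizeof(USHORT) - and the revision macros pull the 6-bit
 * format/content revisions out of a table header.  "FirmwareInfo" below
 * assumes the master data table list defined earlier in this file has an
 * entry of that name.
 */
static inline void atom_example_table_query(void *table_header,
					    int *firmware_info_index,
					    UCHAR *major, UCHAR *minor)
{
	*firmware_info_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	*major = GET_DATA_TABLE_MAJOR_REVISION(table_header);
	*minor = GET_DATA_TABLE_MINOR_REVISION(table_header);
}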
3491
3492/****************************************************************************/
3493/* Portion III: Definitions only used in VBIOS */
3494/****************************************************************************/
3495#define ATOM_DAC_SRC 0x80
3496#define ATOM_SRC_DAC1 0
3497#define ATOM_SRC_DAC2 0x80
3498
3499#ifdef UEFI_BUILD
3500#define USHORT UTEMP
3501#endif
3502
3503typedef struct _MEMORY_PLLINIT_PARAMETERS {
3504 ULONG ulTargetMemoryClock; /* In 10Khz unit */
3505 UCHAR ucAction; /* not defined yet */
3506 UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */
3507 UCHAR ucFbDiv; /* FB value */
3508 UCHAR ucPostDiv; /* Post div */
3509} MEMORY_PLLINIT_PARAMETERS;
3510
3511#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
3512
3513#define GPIO_PIN_WRITE 0x01
3514#define GPIO_PIN_READ 0x00
3515
3516typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
3517 UCHAR ucGPIO_ID; /* return value, read from GPIO pins */
3518 UCHAR ucGPIOBitShift; /* defines which bit of ucGPIOBitVal needs to be updated */
3519 UCHAR ucGPIOBitVal; /* Set/Reset the bit selected by ucGPIOBitShift */
3520 UCHAR ucAction; /* =GPIO_PIN_WRITE: Write; =GPIO_PIN_READ: Read */
3521} GPIO_PIN_CONTROL_PARAMETERS;
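/*
 * Illustrative sketch (not part of the AtomBIOS interface): filling the
 * parameter block above for a single-bit GPIO write.  Handing the block to
 * the AtomBIOS command table interpreter is the caller's job and is outside
 * the scope of this header.
 */
static inline void atom_example_gpio_write(GPIO_PIN_CONTROL_PARAMETERS *args,
					   UCHAR gpio_id, UCHAR bit, UCHAR value)
{
	args->ucGPIO_ID = gpio_id;
	args->ucGPIOBitShift = bit; /* which bit of ucGPIOBitVal to use */
	args->ucGPIOBitVal = (UCHAR)((value & 1) << bit);
	args->ucAction = GPIO_PIN_WRITE;
}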
3522
3523typedef struct _ENABLE_SCALER_PARAMETERS {
3524 UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */
3525 UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
3526 UCHAR ucTVStandard; /* */
3527 UCHAR ucPadding[1];
3528} ENABLE_SCALER_PARAMETERS;
3529#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
3530
3531/* ucEnable: */
3532#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
3533#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
3534#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
3535#define SCALER_ENABLE_MULTITAP_MODE 3
3536
3537typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
3538 ULONG usHWIconHorzVertPosn; /* Hardware Icon Horizontal/Vertical position */
3539 UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */
3540 UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */
3541 UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
3542 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
3543} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
3544
3545typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
3546 ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
3547 ENABLE_CRTC_PARAMETERS sReserved;
3548} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
3549
3550typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
3551 USHORT usHight; /* Image Height */
3552 USHORT usWidth; /* Image Width */
3553 UCHAR ucSurface; /* Surface 1 or 2 */
3554 UCHAR ucPadding[3];
3555} ENABLE_GRAPH_SURFACE_PARAMETERS;
3556
3557typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
3558 USHORT usHight; /* Image Height */
3559 USHORT usWidth; /* Image Width */
3560 UCHAR ucSurface; /* Surface 1 or 2 */
3561 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
3562 UCHAR ucPadding[2];
3563} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
3564
3565typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
3566 ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
3567 ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */
3568} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
3569
3570typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
3571 USHORT usMemoryStart; /* on an 8KB boundary, offset from memory base address */
3572 USHORT usMemorySize; /* aligned to 8KB blocks */
3573} MEMORY_CLEAN_UP_PARAMETERS;
3574#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
3575
3576typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
3577 USHORT usX_Size; /* When used as an input parameter, usX_Size indicates which CRTC */
3578 USHORT usY_Size;
3579} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
3580
3581typedef struct _INDIRECT_IO_ACCESS {
3582 ATOM_COMMON_TABLE_HEADER sHeader;
3583 UCHAR IOAccessSequence[256];
3584} INDIRECT_IO_ACCESS;
3585
3586#define INDIRECT_READ 0x00
3587#define INDIRECT_WRITE 0x80
3588
3589#define INDIRECT_IO_MM 0
3590#define INDIRECT_IO_PLL 1
3591#define INDIRECT_IO_MC 2
3592#define INDIRECT_IO_PCIE 3
3593#define INDIRECT_IO_PCIEP 4
3594#define INDIRECT_IO_NBMISC 5
3595
3596#define INDIRECT_IO_PLL_READ (INDIRECT_IO_PLL | INDIRECT_READ)
3597#define INDIRECT_IO_PLL_WRITE (INDIRECT_IO_PLL | INDIRECT_WRITE)
3598#define INDIRECT_IO_MC_READ (INDIRECT_IO_MC | INDIRECT_READ)
3599#define INDIRECT_IO_MC_WRITE (INDIRECT_IO_MC | INDIRECT_WRITE)
3600#define INDIRECT_IO_PCIE_READ (INDIRECT_IO_PCIE | INDIRECT_READ)
3601#define INDIRECT_IO_PCIE_WRITE (INDIRECT_IO_PCIE | INDIRECT_WRITE)
3602#define INDIRECT_IO_PCIEP_READ (INDIRECT_IO_PCIEP | INDIRECT_READ)
3603#define INDIRECT_IO_PCIEP_WRITE (INDIRECT_IO_PCIEP | INDIRECT_WRITE)
3604#define INDIRECT_IO_NBMISC_READ (INDIRECT_IO_NBMISC | INDIRECT_READ)
3605#define INDIRECT_IO_NBMISC_WRITE (INDIRECT_IO_NBMISC | INDIRECT_WRITE)
3606
3607typedef struct _ATOM_OEM_INFO {
3608 ATOM_COMMON_TABLE_HEADER sHeader;
3609 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
3610} ATOM_OEM_INFO;
3611
3612typedef struct _ATOM_TV_MODE {
3613 UCHAR ucVMode_Num; /* Video mode number */
3614 UCHAR ucTV_Mode_Num; /* Internal TV mode number */
3615} ATOM_TV_MODE;
3616
3617typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
3618 ATOM_COMMON_TABLE_HEADER sHeader;
3619 USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */
3620 USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */
3621 USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */
3622 USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
3623 USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_CV table */
3624} ATOM_BIOS_INT_TVSTD_MODE;
3625
3626typedef struct _ATOM_TV_MODE_SCALER_PTR {
3627 USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */
3628 USHORT usFilter1_Offset; /* Pointer to filter format 1 coefficients */
3629 UCHAR ucTV_Mode_Num;
3630} ATOM_TV_MODE_SCALER_PTR;
3631
3632typedef struct _ATOM_STANDARD_VESA_TIMING {
3633 ATOM_COMMON_TABLE_HEADER sHeader;
3634 ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */
3635} ATOM_STANDARD_VESA_TIMING;
3636
3637typedef struct _ATOM_STD_FORMAT {
3638 USHORT usSTD_HDisp;
3639 USHORT usSTD_VDisp;
3640 USHORT usSTD_RefreshRate;
3641 USHORT usReserved;
3642} ATOM_STD_FORMAT;
3643
3644typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
3645 USHORT usVESA_ModeNumber;
3646 USHORT usExtendedModeNumber;
3647} ATOM_VESA_TO_EXTENDED_MODE;
3648
3649typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
3650 ATOM_COMMON_TABLE_HEADER sHeader;
3651 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
3652} ATOM_VESA_TO_INTENAL_MODE_LUT;
3653
3654/*************** ATOM Memory Related Data Structure ***********************/
3655typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
3656 UCHAR ucMemoryType;
3657 UCHAR ucMemoryVendor;
3658 UCHAR ucAdjMCId;
3659 UCHAR ucDynClkId;
3660 ULONG ulDllResetClkRange;
3661} ATOM_MEMORY_VENDOR_BLOCK;
3662
3663typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
3664#if ATOM_BIG_ENDIAN
3665 ULONG ucMemBlkId:8;
3666 ULONG ulMemClockRange:24;
3667#else
3668 ULONG ulMemClockRange:24;
3669 ULONG ucMemBlkId:8;
3670#endif
3671} ATOM_MEMORY_SETTING_ID_CONFIG;
3672
3673typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
3674 ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
3675 ULONG ulAccess;
3676} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
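/*
 * Illustrative sketch (not part of the AtomBIOS interface): the union above
 * lets the 24-bit memory clock range and the 8-bit memory block ID be handled
 * either as bitfields or as a single ULONG.  Packing through the bitfield
 * view keeps callers independent of the endian-specific layout.
 */
static inline ULONG atom_example_pack_mem_setting_id(ULONG mem_clock_range,
						     UCHAR mem_blk_id)
{
	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS access;

	access.ulAccess = 0;
	access.slAccess.ulMemClockRange = mem_clock_range; /* 24-bit field */
	access.slAccess.ucMemBlkId = mem_blk_id;
	return access.ulAccess;
}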
3677
3678typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
3679 ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
3680 ULONG aulMemData[1];
3681} ATOM_MEMORY_SETTING_DATA_BLOCK;
3682
3683typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
3684 USHORT usRegIndex; /* MC register index */
3685 UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
3686} ATOM_INIT_REG_INDEX_FORMAT;
3687
3688typedef struct _ATOM_INIT_REG_BLOCK {
3689 USHORT usRegIndexTblSize; /* size of asRegIndexBuf */
3690 USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
3691 ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
3692 ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
3693} ATOM_INIT_REG_BLOCK;
3694
3695#define END_OF_REG_INDEX_BLOCK 0x0ffff
3696#define END_OF_REG_DATA_BLOCK 0x00000000
3697#define ATOM_INIT_REG_MASK_FLAG 0x80
3698#define CLOCK_RANGE_HIGHEST 0x00ffffff
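/*
 * Illustrative sketch (not part of the AtomBIOS interface): counting the
 * register index entries of an ATOM_INIT_REG_BLOCK.  The index table is
 * terminated by an entry whose usRegIndex equals END_OF_REG_INDEX_BLOCK, and
 * usRegIndexTblSize bounds the walk.
 */
static inline int atom_example_count_reg_indices(const ATOM_INIT_REG_BLOCK *blk)
{
	int max = blk->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT);
	const ATOM_INIT_REG_INDEX_FORMAT *idx = blk->asRegIndexBuf;
	int count = 0;

	while (count < max && idx[count].usRegIndex != END_OF_REG_INDEX_BLOCK)
		count++;
	return count;
}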
3699
3700#define VALUE_DWORD sizeof(ULONG)
3701#define VALUE_SAME_AS_ABOVE 0
3702#define VALUE_MASK_DWORD 0x84
3703
3704#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
3705#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
3706#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
3707
3708typedef struct _ATOM_MC_INIT_PARAM_TABLE {
3709 ATOM_COMMON_TABLE_HEADER sHeader;
3710 USHORT usAdjustARB_SEQDataOffset;
3711 USHORT usMCInitMemTypeTblOffset;
3712 USHORT usMCInitCommonTblOffset;
3713 USHORT usMCInitPowerDownTblOffset;
3714 ULONG ulARB_SEQDataBuf[32];
3715 ATOM_INIT_REG_BLOCK asMCInitMemType;
3716 ATOM_INIT_REG_BLOCK asMCInitCommon;
3717} ATOM_MC_INIT_PARAM_TABLE;
3718
3719#define _4Mx16 0x2
3720#define _4Mx32 0x3
3721#define _8Mx16 0x12
3722#define _8Mx32 0x13
3723#define _16Mx16 0x22
3724#define _16Mx32 0x23
3725#define _32Mx16 0x32
3726#define _32Mx32 0x33
3727#define _64Mx8 0x41
3728#define _64Mx16 0x42
3729
3730#define SAMSUNG 0x1
3731#define INFINEON 0x2
3732#define ELPIDA 0x3
3733#define ETRON 0x4
3734#define NANYA 0x5
3735#define HYNIX 0x6
3736#define MOSEL 0x7
3737#define WINBOND 0x8
3738#define ESMT 0x9
3739#define MICRON 0xF
3740
3741#define QIMONDA INFINEON
3742#define PROMOS MOSEL
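/*
 * Illustrative sketch (not part of the AtomBIOS interface): the _xMxN codes
 * above pack the per-device density in the high nibble and the device width
 * in the low nibble (e.g. _8Mx16 == 0x12), matching the ucMemoryDeviceCfg and
 * ucDensity encodings used by the VRAM module structures further down.
 */
static inline UCHAR atom_example_density_code(UCHAR device_cfg)
{
	return (UCHAR)(device_cfg >> 4);   /* 0x0:4M, 0x1:8M, 0x2:16M, ... */
}

static inline UCHAR atom_example_width_code(UCHAR device_cfg)
{
	return (UCHAR)(device_cfg & 0x0F); /* 0x0:x4, 0x1:x8, 0x2:x16, 0x3:x32 */
}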
3743
3744/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
3745
3746#define UCODE_ROM_START_ADDRESS 0x1c000
3747#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */
3748
3749/* uCode block header for reference */
3750
3751typedef struct _MCuCodeHeader {
3752 ULONG ulSignature;
3753 UCHAR ucRevision;
3754 UCHAR ucChecksum;
3755 UCHAR ucReserved1;
3756 UCHAR ucReserved2;
3757 USHORT usParametersLength;
3758 USHORT usUCodeLength;
3759 USHORT usReserved1;
3760 USHORT usReserved2;
3761} MCuCodeHeader;
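/*
 * Illustrative sketch (not part of the AtomBIOS interface): validating an MC
 * uCode block by checking the signature at UCODE_ROM_START_ADDRESS.  How the
 * ROM image is obtained ("rom_base" below) is assumed to be the caller's
 * problem.
 */
static inline int atom_example_mc_ucode_valid(const UCHAR *rom_base)
{
	const MCuCodeHeader *hdr =
	    (const MCuCodeHeader *)(rom_base + UCODE_ROM_START_ADDRESS);

	return hdr->ulSignature == UCODE_SIGNATURE;
}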
3762
3763/* //////////////////////////////////////////////////////////////////////////////// */
3764
3765#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
3766
3767#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
3768typedef struct _ATOM_VRAM_MODULE_V1 {
3769 ULONG ulReserved;
3770 USHORT usEMRSValue;
3771 USHORT usMRSValue;
3772 USHORT usReserved;
3773 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3774 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
3775 UCHAR ucMemoryVenderID; /* Predefined, never changes across designs or memory type/vendor */
3776 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
3777 UCHAR ucRow; /* Number of Row,in power of 2; */
3778 UCHAR ucColumn; /* Number of Column,in power of 2; */
3779 UCHAR ucBank; /* Number of banks; */
3780 UCHAR ucRank; /* Number of Rank, in power of 2 */
3781 UCHAR ucChannelNum; /* Number of channel; */
3782 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
3783 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
3784 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
3785 UCHAR ucReserved[2];
3786} ATOM_VRAM_MODULE_V1;
3787
3788typedef struct _ATOM_VRAM_MODULE_V2 {
3789 ULONG ulReserved;
3790 ULONG ulFlags; /* To enable/disable functionalities based on memory type */
3791 ULONG ulEngineClock; /* Override of default engine clock for particular memory type */
3792 ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */
3793 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3794 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3795 USHORT usEMRSValue;
3796 USHORT usMRSValue;
3797 USHORT usReserved;
3798 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3799 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
3800 UCHAR ucMemoryVenderID; /* Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed */
3801 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
3802 UCHAR ucRow; /* Number of Row,in power of 2; */
3803 UCHAR ucColumn; /* Number of Column,in power of 2; */
3804 UCHAR ucBank; /* Number of banks; */
3805 UCHAR ucRank; /* Number of Rank, in power of 2 */
3806 UCHAR ucChannelNum; /* Number of channel; */
3807 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
3808 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
3809 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
3810 UCHAR ucRefreshRateFactor;
3811 UCHAR ucReserved[3];
3812} ATOM_VRAM_MODULE_V2;
3813
3814typedef struct _ATOM_MEMORY_TIMING_FORMAT {
3815 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
3816 union {
3817 USHORT usMRS; /* mode register */
3818 USHORT usDDR3_MR0;
3819 };
3820 union {
3821 USHORT usEMRS; /* extended mode register */
3822 USHORT usDDR3_MR1;
3823 };
3824 UCHAR ucCL; /* CAS latency */
3825 UCHAR ucWL; /* WRITE Latency */
3826 UCHAR uctRAS; /* tRAS */
3827 UCHAR uctRC; /* tRC */
3828 UCHAR uctRFC; /* tRFC */
3829 UCHAR uctRCDR; /* tRCDR */
3830 UCHAR uctRCDW; /* tRCDW */
3831 UCHAR uctRP; /* tRP */
3832 UCHAR uctRRD; /* tRRD */
3833 UCHAR uctWR; /* tWR */
3834 UCHAR uctWTR; /* tWTR */
3835 UCHAR uctPDIX; /* tPDIX */
3836 UCHAR uctFAW; /* tFAW */
3837 UCHAR uctAOND; /* tAOND */
3838 union {
3839 struct {
3840 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
3841 UCHAR ucReserved;
3842 };
3843 USHORT usDDR3_MR2;
3844 };
3845} ATOM_MEMORY_TIMING_FORMAT;
3846
3847typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
3848 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
3849 USHORT usMRS; /* mode register */
3850 USHORT usEMRS; /* extended mode register */
3851 UCHAR ucCL; /* CAS latency */
3852 UCHAR ucWL; /* WRITE Latency */
3853 UCHAR uctRAS; /* tRAS */
3854 UCHAR uctRC; /* tRC */
3855 UCHAR uctRFC; /* tRFC */
3856 UCHAR uctRCDR; /* tRCDR */
3857 UCHAR uctRCDW; /* tRCDW */
3858 UCHAR uctRP; /* tRP */
3859 UCHAR uctRRD; /* tRRD */
3860 UCHAR uctWR; /* tWR */
3861 UCHAR uctWTR; /* tWTR */
3862 UCHAR uctPDIX; /* tPDIX */
3863 UCHAR uctFAW; /* tFAW */
3864 UCHAR uctAOND; /* tAOND */
3865 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
3866/* ///////////////////////GDDR parameters/////////////////////////////////// */
3867 UCHAR uctCCDL; /* */
3868 UCHAR uctCRCRL; /* */
3869 UCHAR uctCRCWL; /* */
3870 UCHAR uctCKE; /* */
3871 UCHAR uctCKRSE; /* */
3872 UCHAR uctCKRSX; /* */
3873 UCHAR uctFAW32; /* */
3874 UCHAR ucReserved1; /* */
3875 UCHAR ucReserved2; /* */
3876 UCHAR ucTerminator;
3877} ATOM_MEMORY_TIMING_FORMAT_V1;
3878
3879typedef struct _ATOM_MEMORY_FORMAT {
3880 ULONG ulDllDisClock; /* memory DLL will be disabled when target memory clock is below this clock */
3881 union {
3882 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3883 USHORT usDDR3_Reserved; /* Not used for DDR3 memory */
3884 };
3885 union {
3886 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3887 USHORT usDDR3_MR3; /* Used for DDR3 memory */
3888 };
3889 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
3890 UCHAR ucMemoryVenderID; /* Predefined, never changes across designs or memory type/vendor. If not predefined, the vendor detection table gets executed */
3891 UCHAR ucRow; /* Number of Row,in power of 2; */
3892 UCHAR ucColumn; /* Number of Column,in power of 2; */
3893 UCHAR ucBank; /* Number of banks; */
3894 UCHAR ucRank; /* Number of Rank, in power of 2 */
3895 UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */
3896 UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
3897 UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */
3898 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
3899 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
3900 UCHAR ucMemAttrib; /* Memory device attribute, like RDBI/WDBI etc. */
3901 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
3902} ATOM_MEMORY_FORMAT;
3903
3904typedef struct _ATOM_VRAM_MODULE_V3 {
3905 ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
3906 USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */
3907 USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */
3908 USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */
3909 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3910 UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */
3911 UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */
3912 UCHAR ucVREFI; /* board dependent parameter: EXT or INT +160mv to -140mv */
3913 UCHAR ucNPL_RT; /* board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
3914 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
3915 ATOM_MEMORY_FORMAT asMemory; /* describes all video memory parameters from the memory spec */
3916} ATOM_VRAM_MODULE_V3;
3917
3918/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
3919#define NPL_RT_MASK 0x0f
3920#define BATTERY_ODT_MASK 0xc0
3921
3922#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
3923
3924typedef struct _ATOM_VRAM_MODULE_V4 {
3925 ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
3926 USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
3927 USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
3928 /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
3929 USHORT usReserved;
3930 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3931 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
3932 UCHAR ucChannelNum; /* Number of channels present in this module config */
3933 UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
3934 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
3935 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
3936 UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
3937 UCHAR ucVREFI; /* board dependent parameter */
3938 UCHAR ucNPL_RT; /* board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
3939 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
3940 UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
3941 /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
3942 UCHAR ucReserved[3];
3943
3944/* compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V4 at the same level */
3945 union {
3946 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3947 USHORT usDDR3_Reserved;
3948 };
3949 union {
3950 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3951 USHORT usDDR3_MR3; /* Used for DDR3 memory */
3952 };
3953 UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
3954 UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
3955 UCHAR ucReserved2[2];
3956 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
3957} ATOM_VRAM_MODULE_V4;
3958
3959#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
3960#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
3961#define VRAM_MODULE_V4_MISC_BL_MASK 0x4
3962#define VRAM_MODULE_V4_MISC_BL8 0x4
3963#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
3964
3965typedef struct _ATOM_VRAM_MODULE_V5 {
3966 ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
3967 USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V5, makes it easy for VBIOS to look for the next VRAM_MODULE entry */
3968 USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
3969 /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
3970 USHORT usReserved;
3971 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3972 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
3973 UCHAR ucChannelNum; /* Number of channels present in this module config */
3974 UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
3975 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
3976 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
3977 UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
3978 UCHAR ucVREFI; /* board dependent parameter */
3979 UCHAR ucNPL_RT; /* board dependent parameter: NPL round trip delay, used to calculate memory timing parameters */
3980 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
3981 UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
3982 /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
3983 UCHAR ucReserved[3];
3984
3985/* compared with V3, the struct is flattened by merging ATOM_MEMORY_FORMAT (as is) into V5 at the same level */
3986 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3987 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3988 UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
3989 UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
3990 UCHAR ucFIFODepth; /* FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth */
3991 UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
3992 ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
3993} ATOM_VRAM_MODULE_V5;
3994
3995typedef struct _ATOM_VRAM_INFO_V2 {
3996 ATOM_COMMON_TABLE_HEADER sHeader;
3997 UCHAR ucNumOfVRAMModule;
3998 ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
3999} ATOM_VRAM_INFO_V2;
4000
4001typedef struct _ATOM_VRAM_INFO_V3 {
4002 ATOM_COMMON_TABLE_HEADER sHeader;
4003 USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
4004 USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
4005 USHORT usRerseved;
4006 UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
4007 UCHAR ucNumOfVRAMModule;
4008 ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
4009 ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
4010 /* ATOM_INIT_REG_BLOCK aMemAdjust; */
4011} ATOM_VRAM_INFO_V3;
4012
4013#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
4014
4015typedef struct _ATOM_VRAM_INFO_V4 {
4016 ATOM_COMMON_TABLE_HEADER sHeader;
4017 USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
4018 USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
4019 USHORT usRerseved;
4020 UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
4021 ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
4022 UCHAR ucReservde[4];
4023 UCHAR ucNumOfVRAMModule;
4024 ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
4025 ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
4026 /* ATOM_INIT_REG_BLOCK aMemAdjust; */
4027} ATOM_VRAM_INFO_V4;
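/*
 * Illustrative sketch (not part of the AtomBIOS interface): as the
 * usModuleSize comments note, VRAM module entries are meant to be walked by
 * their stored size rather than by array index, since real entries need not
 * match sizeof(ATOM_VRAM_MODULE_V4).
 */
static inline const ATOM_VRAM_MODULE_V4 *
atom_example_vram_module(const ATOM_VRAM_INFO_V4 *info, UCHAR index)
{
	const UCHAR *p = (const UCHAR *)info->aVramInfo;
	UCHAR i;

	if (index >= info->ucNumOfVRAMModule)
		return (const ATOM_VRAM_MODULE_V4 *)0;
	for (i = 0; i < index; i++)
		p += ((const ATOM_VRAM_MODULE_V4 *)p)->usModuleSize;
	return (const ATOM_VRAM_MODULE_V4 *)p;
}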
4028
4029typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
4030 ATOM_COMMON_TABLE_HEADER sHeader;
4031 UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
4032} ATOM_VRAM_GPIO_DETECTION_INFO;
4033
4034typedef struct _ATOM_MEMORY_TRAINING_INFO {
4035 ATOM_COMMON_TABLE_HEADER sHeader;
4036 UCHAR ucTrainingLoop;
4037 UCHAR ucReserved[3];
4038 ATOM_INIT_REG_BLOCK asMemTrainingSetting;
4039} ATOM_MEMORY_TRAINING_INFO;
4040
4041typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
4042 UCHAR ucControl;
4043 UCHAR ucData;
4044 UCHAR ucSatus;
4045 UCHAR ucTemp;
4046} SW_I2C_CNTL_DATA_PARAMETERS;
4047
4048#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
4049
4050typedef struct _SW_I2C_IO_DATA_PARAMETERS {
4051 USHORT GPIO_Info;
4052 UCHAR ucAct;
4053 UCHAR ucData;
4054} SW_I2C_IO_DATA_PARAMETERS;
4055
4056#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
4057
4058/****************************SW I2C CNTL DEFINITIONS**********************/
4059#define SW_I2C_IO_RESET 0
4060#define SW_I2C_IO_GET 1
4061#define SW_I2C_IO_DRIVE 2
4062#define SW_I2C_IO_SET 3
4063#define SW_I2C_IO_START 4
4064
4065#define SW_I2C_IO_CLOCK 0
4066#define SW_I2C_IO_DATA 0x80
4067
4068#define SW_I2C_IO_ZERO 0
4069#define SW_I2C_IO_ONE 0x100
4070
4071#define SW_I2C_CNTL_READ 0
4072#define SW_I2C_CNTL_WRITE 1
4073#define SW_I2C_CNTL_START 2
4074#define SW_I2C_CNTL_STOP 3
4075#define SW_I2C_CNTL_OPEN 4
4076#define SW_I2C_CNTL_CLOSE 5
4077#define SW_I2C_CNTL_WRITE1BIT 6
4078
4079/* ==============================VESA definition Portion=============================== */
4080#define VESA_OEM_PRODUCT_REV '01.00'
4081#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */
4082#define VESA_MODE_WIN_ATTRIBUTE 7
4083#define VESA_WIN_SIZE 64
4084
4085typedef struct _PTR_32_BIT_STRUCTURE {
4086 USHORT Offset16;
4087 USHORT Segment16;
4088} PTR_32_BIT_STRUCTURE;
4089
4090typedef union _PTR_32_BIT_UNION {
4091 PTR_32_BIT_STRUCTURE SegmentOffset;
4092 ULONG Ptr32_Bit;
4093} PTR_32_BIT_UNION;
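/*
 * Illustrative sketch (not part of the AtomBIOS interface): the union above
 * carries a real-mode far pointer; the conventional conversion to a linear
 * address is segment * 16 + offset.
 */
static inline ULONG atom_example_far_ptr_to_linear(PTR_32_BIT_UNION ptr)
{
	return ((ULONG)ptr.SegmentOffset.Segment16 << 4) +
	       ptr.SegmentOffset.Offset16;
}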
4094
4095typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
4096 UCHAR VbeSignature[4];
4097 USHORT VbeVersion;
4098 PTR_32_BIT_UNION OemStringPtr;
4099 UCHAR Capabilities[4];
4100 PTR_32_BIT_UNION VideoModePtr;
4101 USHORT TotalMemory;
4102} VBE_1_2_INFO_BLOCK_UPDATABLE;
4103
4104typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
4105 VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
4106 USHORT OemSoftRev;
4107 PTR_32_BIT_UNION OemVendorNamePtr;
4108 PTR_32_BIT_UNION OemProductNamePtr;
4109 PTR_32_BIT_UNION OemProductRevPtr;
4110} VBE_2_0_INFO_BLOCK_UPDATABLE;
4111
4112typedef union _VBE_VERSION_UNION {
4113 VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
4114 VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
4115} VBE_VERSION_UNION;
4116
4117typedef struct _VBE_INFO_BLOCK {
4118 VBE_VERSION_UNION UpdatableVBE_Info;
4119 UCHAR Reserved[222];
4120 UCHAR OemData[256];
4121} VBE_INFO_BLOCK;
4122
4123typedef struct _VBE_FP_INFO {
4124 USHORT HSize;
4125 USHORT VSize;
4126 USHORT FPType;
4127 UCHAR RedBPP;
4128 UCHAR GreenBPP;
4129 UCHAR BlueBPP;
4130 UCHAR ReservedBPP;
4131 ULONG RsvdOffScrnMemSize;
4132 ULONG RsvdOffScrnMEmPtr;
4133 UCHAR Reserved[14];
4134} VBE_FP_INFO;
4135
4136typedef struct _VESA_MODE_INFO_BLOCK {
4137/* Mandatory information for all VBE revisions */
4138 USHORT ModeAttributes; /* dw ? ; mode attributes */
4139 UCHAR WinAAttributes; /* db ? ; window A attributes */
4140 UCHAR WinBAttributes; /* db ? ; window B attributes */
4141 USHORT WinGranularity; /* dw ? ; window granularity */
4142 USHORT WinSize; /* dw ? ; window size */
4143 USHORT WinASegment; /* dw ? ; window A start segment */
4144 USHORT WinBSegment; /* dw ? ; window B start segment */
4145 ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */
4146 USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */
4147
4148/* ; Mandatory information for VBE 1.2 and above */
4149 USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */
4150 USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */
4151 UCHAR XCharSize; /* db ? ; character cell width in pixels */
4152 UCHAR YCharSize; /* db ? ; character cell height in pixels */
4153 UCHAR NumberOfPlanes; /* db ? ; number of memory planes */
4154 UCHAR BitsPerPixel; /* db ? ; bits per pixel */
4155 UCHAR NumberOfBanks; /* db ? ; number of banks */
4156 UCHAR MemoryModel; /* db ? ; memory model type */
4157 UCHAR BankSize; /* db ? ; bank size in KB */
4158 UCHAR NumberOfImagePages; /* db ? ; number of images */
4159 UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */
4160
4161/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
4162 UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */
4163 UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */
4164 UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */
4165 UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */
4166 UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */
4167 UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */
4168 UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */
4169 UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */
4170 UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */
4171
4172/* ; Mandatory information for VBE 2.0 and above */
4173 ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */
4174 ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */
4175 USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */
4176
4177/* ; Mandatory information for VBE 3.0 and above */
4178 USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */
4179 UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */
4180 UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */
4181 UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */
4182 UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */
4183 UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */
4184 UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */
4185 UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */
4186 UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */
4187 UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */
4188 UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */
4189 ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */
4190 UCHAR Reserved; /* db 190 dup (0) */
4191} VESA_MODE_INFO_BLOCK;
4192
4193/* BIOS function CALLS */
4194#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */
4195#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
4196#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
4197#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
4198#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
4199#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
4200#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
4201#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
4202#define ATOM_BIOS_FUNCTION_STV_STD 0x16
4203#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17
4204#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18
4205
4206#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
4207#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
4208#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
4209#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
4210#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
4211#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */
4212#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 81 */
4213
4214#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
4215#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
4216#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
4217#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */
4218#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */
4219#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */
4220#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */
4221#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */
4222#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */
4223#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */
4224
4225#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */
4226#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */
4227#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */
4228#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */
4229#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */
4230#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */
4231#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */
4232#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */
4233
4234#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
4235#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
4236#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
4237
4238/* structure used for VBIOS only */
4239
4240/* DispOutInfoTable */
4241typedef struct _ASIC_TRANSMITTER_INFO {
4242 USHORT usTransmitterObjId;
4243 USHORT usSupportDevice;
4244 UCHAR ucTransmitterCmdTblId;
4245 UCHAR ucConfig;
4246 UCHAR ucEncoderID; /* available 1st encoder ( default ) */
4247 UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */
4248 UCHAR uc2ndEncoderID;
4249 UCHAR ucReserved;
4250} ASIC_TRANSMITTER_INFO;
4251
4252typedef struct _ASIC_ENCODER_INFO {
4253 UCHAR ucEncoderID;
4254 UCHAR ucEncoderConfig;
4255 USHORT usEncoderCmdTblId;
4256} ASIC_ENCODER_INFO;
4257
4258typedef struct _ATOM_DISP_OUT_INFO {
4259 ATOM_COMMON_TABLE_HEADER sHeader;
4260 USHORT ptrTransmitterInfo;
4261 USHORT ptrEncoderInfo;
4262 ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
4263 ASIC_ENCODER_INFO asEncoderInfo[1];
4264} ATOM_DISP_OUT_INFO;
4265
4266/* DispDevicePriorityInfo */
4267typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
4268 ATOM_COMMON_TABLE_HEADER sHeader;
4269 USHORT asDevicePriority[16];
4270} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
4271
4272/* ProcessAuxChannelTransactionTable */
4273typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
4274 USHORT lpAuxRequest;
4275 USHORT lpDataOut;
4276 UCHAR ucChannelID;
4277 union {
4278 UCHAR ucReplyStatus;
4279 UCHAR ucDelay;
4280 };
4281 UCHAR ucDataOutLen;
4282 UCHAR ucReserved;
4283} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
4284
4285#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
4286
4287/* GetSinkType */
4288
4289typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
4290 USHORT ucLinkClock;
4291 union {
4292 UCHAR ucConfig; /* for DP training command */
4293 UCHAR ucI2cId; /* use for GET_SINK_TYPE command */
4294 };
4295 UCHAR ucAction;
4296 UCHAR ucStatus;
4297 UCHAR ucLaneNum;
4298 UCHAR ucReserved[2];
4299} DP_ENCODER_SERVICE_PARAMETERS;
4300
4301/* ucAction */
4302#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
4303#define ATOM_DP_ACTION_TRAINING_START 0x02
4304#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
4305#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
4306#define ATOM_DP_ACTION_SET_VSWING_PREEMP 0x05
4307#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
4308#define ATOM_DP_ACTION_BLANKING 0x07
4309
4310/* ucConfig */
4311#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
4312#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
4313#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
4314#define ATOM_DP_CONFIG_EXTERNAL_ENCODER 0x02
4315#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
4316#define ATOM_DP_CONFIG_LINK_A 0x00
4317#define ATOM_DP_CONFIG_LINK_B 0x04
4318
4319#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
4320
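/*
 * Editor's note: an illustrative sketch, not part of the original patch,
 * showing how the ucAction and ucConfig values above might be combined for a
 * link-training request on the second DIG encoder driving link B. The helper
 * name and the 4-lane value are hypothetical.
 */
static inline void example_fill_dp_training(DP_ENCODER_SERVICE_PARAMETERS *args)
{
	args->ucAction = ATOM_DP_ACTION_TRAINING_START;
	args->ucConfig = ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
	args->ucLaneNum = 4;	/* illustrative: a 4-lane main link */
}
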
4321/* DP_TRAINING_TABLE */
4322#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
4323#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
4324#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
4325#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
4326#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
4327#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
4328#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
4329#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60)
4330#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
4331#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
4332#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
4333#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
4334
4335typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
4336 UCHAR ucI2CSpeed;
4337 union {
4338 UCHAR ucRegIndex;
4339 UCHAR ucStatus;
4340 };
4341 USHORT lpI2CDataOut;
4342 UCHAR ucFlag;
4343 UCHAR ucTransBytes;
4344 UCHAR ucSlaveAddr;
4345 UCHAR ucLineNumber;
4346} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
4347
4348#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
4349
4350/* ucFlag */
4351#define HW_I2C_WRITE 1
4352#define HW_I2C_READ 0
4353
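/*
 * Editor's note: a hedged sketch, not part of the original patch, of how the
 * HW_I2C_WRITE flag above pairs with the transaction parameters. The slave
 * address, byte count and engine number are illustrative only.
 */
static inline void example_fill_hw_i2c_write(PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS *args)
{
	args->ucFlag = HW_I2C_WRITE;
	args->ucSlaveAddr = 0xA0;	/* e.g. a DDC EEPROM address */
	args->ucTransBytes = 1;
	args->ucLineNumber = 0;		/* which hardware I2C engine to use */
}
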
4354/****************************************************************************/
4355/* Portion VI: Obsolete definitions */
4356/****************************************************************************/
4357
4358/* ========================================================================================== */
4359/* Remove the definitions below when driver is ready! */
4360typedef struct _ATOM_DAC_INFO {
4361 ATOM_COMMON_TABLE_HEADER sHeader;
4362 USHORT usMaxFrequency; /* in 10kHz unit */
4363 USHORT usReserved;
4364} ATOM_DAC_INFO;
4365
4366typedef struct _COMPASSIONATE_DATA {
4367 ATOM_COMMON_TABLE_HEADER sHeader;
4368
4369 /* ============================== DAC1 portion */
4370 UCHAR ucDAC1_BG_Adjustment;
4371 UCHAR ucDAC1_DAC_Adjustment;
4372 USHORT usDAC1_FORCE_Data;
4373 /* ============================== DAC2 portion */
4374 UCHAR ucDAC2_CRT2_BG_Adjustment;
4375 UCHAR ucDAC2_CRT2_DAC_Adjustment;
4376 USHORT usDAC2_CRT2_FORCE_Data;
4377 USHORT usDAC2_CRT2_MUX_RegisterIndex;
4378 UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
4379 UCHAR ucDAC2_NTSC_BG_Adjustment;
4380 UCHAR ucDAC2_NTSC_DAC_Adjustment;
4381 USHORT usDAC2_TV1_FORCE_Data;
4382 USHORT usDAC2_TV1_MUX_RegisterIndex;
4383 UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
4384 UCHAR ucDAC2_CV_BG_Adjustment;
4385 UCHAR ucDAC2_CV_DAC_Adjustment;
4386 USHORT usDAC2_CV_FORCE_Data;
4387 USHORT usDAC2_CV_MUX_RegisterIndex;
4388 UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
4389 UCHAR ucDAC2_PAL_BG_Adjustment;
4390 UCHAR ucDAC2_PAL_DAC_Adjustment;
4391 USHORT usDAC2_TV2_FORCE_Data;
4392} COMPASSIONATE_DATA;
4393
4394/****************************Supported Device Info Table Definitions**********************/
4395/* ucConnectInfo: */
4396/* [7:4] - connector type */
4397/* = 1 - VGA connector */
4398/* = 2 - DVI-I */
4399/* = 3 - DVI-D */
4400/* = 4 - DVI-A */
4401/* = 5 - SVIDEO */
4402/* = 6 - COMPOSITE */
4403/* = 7 - LVDS */
4404/* = 8 - DIGITAL LINK */
4405/* = 9 - SCART */
4406/* = 0xA - HDMI_type A */
4407/* = 0xB - HDMI_type B */
4408/* = 0xE - Special case1 (DVI+DIN) */
4409/* Others=TBD */
4410/* [3:0] - DAC Associated */
4411/* = 0 - no DAC */
4412/* = 1 - DACA */
4413/* = 2 - DACB */
4414/* = 3 - External DAC */
4415/* Others=TBD */
4416/* */
4417
4418typedef struct _ATOM_CONNECTOR_INFO {
4419#if ATOM_BIG_ENDIAN
4420 UCHAR bfConnectorType:4;
4421 UCHAR bfAssociatedDAC:4;
4422#else
4423 UCHAR bfAssociatedDAC:4;
4424 UCHAR bfConnectorType:4;
4425#endif
4426} ATOM_CONNECTOR_INFO;
4427
4428typedef union _ATOM_CONNECTOR_INFO_ACCESS {
4429 ATOM_CONNECTOR_INFO sbfAccess;
4430 UCHAR ucAccess;
4431} ATOM_CONNECTOR_INFO_ACCESS;
4432
4433typedef struct _ATOM_CONNECTOR_INFO_I2C {
4434 ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
4435 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
4436} ATOM_CONNECTOR_INFO_I2C;
4437
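/*
 * Editor's note: a minimal sketch, not part of the original patch, showing how
 * the ucConnectInfo byte documented above decodes, assuming the [7:4]
 * connector-type / [3:0] associated-DAC layout. The helper name is
 * hypothetical; the bitfields in ATOM_CONNECTOR_INFO give the same result.
 */
static inline void example_decode_connector_info(ATOM_CONNECTOR_INFO_ACCESS ci)
{
	UCHAR connector_type = (ci.ucAccess >> 4) & 0xF;	/* e.g. 1 = VGA, 7 = LVDS */
	UCHAR associated_dac = ci.ucAccess & 0xF;		/* 0 = none, 1 = DACA, 2 = DACB */
	(void)connector_type;
	(void)associated_dac;
}
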
4438typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
4439 ATOM_COMMON_TABLE_HEADER sHeader;
4440 USHORT usDeviceSupport;
4441 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
4442} ATOM_SUPPORTED_DEVICES_INFO;
4443
4444#define NO_INT_SRC_MAPPED 0xFF
4445
4446typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
4447 UCHAR ucIntSrcBitmap;
4448} ATOM_CONNECTOR_INC_SRC_BITMAP;
4449
4450typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
4451 ATOM_COMMON_TABLE_HEADER sHeader;
4452 USHORT usDeviceSupport;
4453 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
4454 ATOM_CONNECTOR_INC_SRC_BITMAP
4455 asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
4456} ATOM_SUPPORTED_DEVICES_INFO_2;
4457
4458typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
4459 ATOM_COMMON_TABLE_HEADER sHeader;
4460 USHORT usDeviceSupport;
4461 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
4462 ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
4463} ATOM_SUPPORTED_DEVICES_INFO_2d1;
4464
4465#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
4466
4467typedef struct _ATOM_MISC_CONTROL_INFO {
4468 USHORT usFrequency;
4469 UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */
4470 UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */
4471 UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */
4472 UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */
4473} ATOM_MISC_CONTROL_INFO;
4474
4475#define ATOM_MAX_MISC_INFO 4
4476
4477typedef struct _ATOM_TMDS_INFO {
4478 ATOM_COMMON_TABLE_HEADER sHeader;
4479 USHORT usMaxFrequency; /* in 10Khz */
4480 ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
4481} ATOM_TMDS_INFO;
4482
4483typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
4484 UCHAR ucTVStandard; /* Same as TV standards defined above, */
4485 UCHAR ucPadding[1];
4486} ATOM_ENCODER_ANALOG_ATTRIBUTE;
4487
4488typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
4489 UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */
4490 UCHAR ucPadding[1];
4491} ATOM_ENCODER_DIGITAL_ATTRIBUTE;
4492
4493typedef union _ATOM_ENCODER_ATTRIBUTE {
4494 ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
4495 ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
4496} ATOM_ENCODER_ATTRIBUTE;
4497
4498typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
4499 USHORT usPixelClock;
4500 USHORT usEncoderID;
4501 UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
4502 UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
4503 ATOM_ENCODER_ATTRIBUTE usDevAttr;
4504} DVO_ENCODER_CONTROL_PARAMETERS;
4505
4506typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
4507 DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
4508 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
4509} DVO_ENCODER_CONTROL_PS_ALLOCATION;
4510
4511#define ATOM_XTMDS_ASIC_SI164_ID 1
4512#define ATOM_XTMDS_ASIC_SI178_ID 2
4513#define ATOM_XTMDS_ASIC_TFP513_ID 3
4514#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
4515#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
4516#define ATOM_XTMDS_MVPU_FPGA 0x00000004
4517
4518typedef struct _ATOM_XTMDS_INFO {
4519 ATOM_COMMON_TABLE_HEADER sHeader;
4520 USHORT usSingleLinkMaxFrequency;
4521 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* ID of the I2C bus used to control the external chip */
4522 UCHAR ucXtransimitterID;
4523 UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */
4524 UCHAR ucSequnceAlterID; /* Even with the same external TMDS ASIC, the programming sequence may differ */
4525 /* due to board design. This ID alerts the driver that the sequence is not "standard"! */
4526 UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */
4527 UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */
4528} ATOM_XTMDS_INFO;
4529
4530typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
4531 UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */
4532 UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */
4533 UCHAR ucPadding[2];
4534} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
4535
4536/****************************Legacy Power Play Table Definitions **********************/
4537
4538/* Definitions for ulPowerPlayMiscInfo */
4539#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
4540#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
4541#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
4542
4543#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT 0x00000004L
4544#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH 0x00000008L
4545
4546#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN 0x00000010L
4547
4548#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
4549#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
4550#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit is set, ucVoltageDropIndex is not an index into the GPIO pin table but a voltage ID that SW needs to program */
4551
4552#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
4553#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
4554#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
4555#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN 0x00000800L
4556#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE 0x00001000L
4557#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
4558#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
4559
4560#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
4561#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
4562#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
4563#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
4564#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
4565
4566#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
4567#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
4568
4569#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
4570#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
4571#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
4572#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */
4573#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */
4574#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, this mode is for accelerated 3D */
4575
4576#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
4577#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
4578#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
4579
4580#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE 0x00000001L
4581#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT 0x00000002L
4582#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN 0x00000004L
4583#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
4584#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
4585#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
4586#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, the driver will pick the one with the lowest power consumption. */
4587 /* If it's not set in any pp mode, the driver uses its default logic to pick a pp mode for video playback */
4588#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
4589#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
4590#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
4591
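/*
 * Editor's note: an illustrative sketch, not part of the original patch, of
 * how the masked fields in ulPowerPlayMiscInfo are meant to be read using the
 * mask/shift pairs defined above. The helper names are hypothetical.
 */
static inline ULONG example_pp_settings_group(ULONG misc_info)
{
	/* 1 = Optimal Battery Life ... 5 = Optimal Performance */
	return (misc_info & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
		ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;
}

static inline ULONG example_pp_frame_modulation(ULONG misc_info)
{
	/* 0 = FM disabled, 1 = 2-level FM, 2 = 4-level FM */
	return (misc_info & ATOM_PM_MISCINFO_FRAME_MODULATION_MASK) >>
		ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT;
}
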
4592/* ucTableFormatRevision=1 */
4593/* ucTableContentRevision=1 */
4594typedef struct _ATOM_POWERMODE_INFO {
4595 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
4596 ULONG ulReserved1; /* must set to 0 */
4597 ULONG ulReserved2; /* must set to 0 */
4598 USHORT usEngineClock;
4599 USHORT usMemoryClock;
4600 UCHAR ucVoltageDropIndex; /* index to GPIO table */
4601 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
4602 UCHAR ucMinTemperature;
4603 UCHAR ucMaxTemperature;
4604 UCHAR ucNumPciELanes; /* number of PCIE lanes */
4605} ATOM_POWERMODE_INFO;
4606
4607/* ucTableFormatRevision=2 */
4608/* ucTableContentRevision=1 */
4609typedef struct _ATOM_POWERMODE_INFO_V2 {
4610 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
4611 ULONG ulMiscInfo2;
4612 ULONG ulEngineClock;
4613 ULONG ulMemoryClock;
4614 UCHAR ucVoltageDropIndex; /* index to GPIO table */
4615 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
4616 UCHAR ucMinTemperature;
4617 UCHAR ucMaxTemperature;
4618 UCHAR ucNumPciELanes; /* number of PCIE lanes */
4619} ATOM_POWERMODE_INFO_V2;
4620
4621/* ucTableFormatRevision=2 */
4622/* ucTableContentRevision=2 */
4623typedef struct _ATOM_POWERMODE_INFO_V3 {
4624 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
4625 ULONG ulMiscInfo2;
4626 ULONG ulEngineClock;
4627 ULONG ulMemoryClock;
4628 UCHAR ucVoltageDropIndex; /* index to Core (VDDC) voltage table */
4629 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
4630 UCHAR ucMinTemperature;
4631 UCHAR ucMaxTemperature;
4632 UCHAR ucNumPciELanes; /* number of PCIE lanes */
4633 UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI voltage table */
4634} ATOM_POWERMODE_INFO_V3;
4635
4636#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
4637
4638#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN 0x01
4639#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE 0x02
4640
4641#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63 0x01
4642#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032 0x02
4643#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030 0x03
4644#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
4645#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
4646#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
4647#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */
4648
4649typedef struct _ATOM_POWERPLAY_INFO {
4650 ATOM_COMMON_TABLE_HEADER sHeader;
4651 UCHAR ucOverdriveThermalController;
4652 UCHAR ucOverdriveI2cLine;
4653 UCHAR ucOverdriveIntBitmap;
4654 UCHAR ucOverdriveControllerAddress;
4655 UCHAR ucSizeOfPowerModeEntry;
4656 UCHAR ucNumOfPowerModeEntries;
4657 ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4658} ATOM_POWERPLAY_INFO;
4659
4660typedef struct _ATOM_POWERPLAY_INFO_V2 {
4661 ATOM_COMMON_TABLE_HEADER sHeader;
4662 UCHAR ucOverdriveThermalController;
4663 UCHAR ucOverdriveI2cLine;
4664 UCHAR ucOverdriveIntBitmap;
4665 UCHAR ucOverdriveControllerAddress;
4666 UCHAR ucSizeOfPowerModeEntry;
4667 UCHAR ucNumOfPowerModeEntries;
4668 ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4669} ATOM_POWERPLAY_INFO_V2;
4670
4671typedef struct _ATOM_POWERPLAY_INFO_V3 {
4672 ATOM_COMMON_TABLE_HEADER sHeader;
4673 UCHAR ucOverdriveThermalController;
4674 UCHAR ucOverdriveI2cLine;
4675 UCHAR ucOverdriveIntBitmap;
4676 UCHAR ucOverdriveControllerAddress;
4677 UCHAR ucSizeOfPowerModeEntry;
4678 UCHAR ucNumOfPowerModeEntries;
4679 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
4680} ATOM_POWERPLAY_INFO_V3;
4681
4682/**************************************************************************/
4683
4684/* The following definitions are for compatibility between different SW components. */
4685#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
4686#define Object_Info Object_Header
4687#define AdjustARB_SEQ MC_InitParameter
4688#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
4689#define ASIC_VDDCI_Info ASIC_ProfilingInfo
4690#define ASIC_MVDDQ_Info MemoryTrainingInfo
4691#define SS_Info PPLL_SS_Info
4692#define ASIC_MVDDC_Info ASIC_InternalSS_Info
4693#define DispDevicePriorityInfo SaveRestoreInfo
4694#define DispOutInfo TV_VideoMode
4695
4696#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
4697#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
4698
4699/* New device naming, remove them when both DAL/VBIOS is ready */
4700#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
4701#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
4702
4703#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
4704#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
4705
4706#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS
4707#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
4708
4709#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT
4710#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT
4711
4712#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
4713#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
4714
4715#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
4716#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
4717
4718#define ATOM_S0_DFP1I ATOM_S0_DFP1
4719#define ATOM_S0_DFP1X ATOM_S0_DFP2
4720
4721#define ATOM_S0_DFP2I 0x00200000L
4722#define ATOM_S0_DFP2Ib2 0x20
4723
4724#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE
4725#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE
4726
4727#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L
4728#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02
4729
4730#define ATOM_S3_DFP2I_ACTIVEb1 0x02
4731
4732#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
4733#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
4734
4735#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
4736
4737#define ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE
4738#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE
4739#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L
4740
4741#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02
4742#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02
4743
4744#define ATOM_S5_DOS_REQ_DFP2I 0x0200
4745#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1
4746#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2
4747
4748#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
4749#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
4750
4751#define TMDS1XEncoderControl DVOEncoderControl
4752#define DFP1XOutputControl DVOOutputControl
4753
4754#define ExternalDFPOutputControl DFP1XOutputControl
4755#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
4756
4757#define DFP1IOutputControl TMDSAOutputControl
4758#define DFP2IOutputControl LVTMAOutputControl
4759
4760#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
4761#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
4762
4763#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
4764#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
4765
4766#define ucDac1Standard ucDacStandard
4767#define ucDac2Standard ucDacStandard
4768
4769#define TMDS1EncoderControl TMDSAEncoderControl
4770#define TMDS2EncoderControl LVTMAEncoderControl
4771
4772#define DFP1OutputControl TMDSAOutputControl
4773#define DFP2OutputControl LVTMAOutputControl
4774#define CRT1OutputControl DAC1OutputControl
4775#define CRT2OutputControl DAC2OutputControl
4776
4777/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
4778#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
4779#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
4780
4781/*********************************************************************************/
4782
4783#pragma pack() /* BIOS data must use byte alignment */
4784
4785#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644
index 000000000000..c0080cc9bf8d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -0,0 +1,695 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h>
28#include <drm/radeon_drm.h>
29#include "radeon_fixed.h"
30#include "radeon.h"
31#include "atom.h"
32#include "atom-bits.h"
33
34static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
35{
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 struct drm_device *dev = crtc->dev;
38 struct radeon_device *rdev = dev->dev_private;
39 int index =
40 GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
41 ENABLE_CRTC_PS_ALLOCATION args;
42
43 memset(&args, 0, sizeof(args));
44
45 args.ucCRTC = radeon_crtc->crtc_id;
46 args.ucEnable = lock;
47
48 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
49}
50
51static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
52{
53 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
54 struct drm_device *dev = crtc->dev;
55 struct radeon_device *rdev = dev->dev_private;
56 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
57 ENABLE_CRTC_PS_ALLOCATION args;
58
59 memset(&args, 0, sizeof(args));
60
61 args.ucCRTC = radeon_crtc->crtc_id;
62 args.ucEnable = state;
63
64 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
65}
66
67static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
68{
69 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
70 struct drm_device *dev = crtc->dev;
71 struct radeon_device *rdev = dev->dev_private;
72 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
73 ENABLE_CRTC_PS_ALLOCATION args;
74
75 memset(&args, 0, sizeof(args));
76
77 args.ucCRTC = radeon_crtc->crtc_id;
78 args.ucEnable = state;
79
80 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
81}
82
83static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
84{
85 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
86 struct drm_device *dev = crtc->dev;
87 struct radeon_device *rdev = dev->dev_private;
88 int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
89 BLANK_CRTC_PS_ALLOCATION args;
90
91 memset(&args, 0, sizeof(args));
92
93 args.ucCRTC = radeon_crtc->crtc_id;
94 args.ucBlanking = state;
95
96 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
97}
98
99void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
100{
101 struct drm_device *dev = crtc->dev;
102 struct radeon_device *rdev = dev->dev_private;
103
104 switch (mode) {
105 case DRM_MODE_DPMS_ON:
106 if (ASIC_IS_DCE3(rdev))
107 atombios_enable_crtc_memreq(crtc, 1);
108 atombios_enable_crtc(crtc, 1);
109 atombios_blank_crtc(crtc, 0);
110 break;
111 case DRM_MODE_DPMS_STANDBY:
112 case DRM_MODE_DPMS_SUSPEND:
113 case DRM_MODE_DPMS_OFF:
114 atombios_blank_crtc(crtc, 1);
115 atombios_enable_crtc(crtc, 0);
116 if (ASIC_IS_DCE3(rdev))
117 atombios_enable_crtc_memreq(crtc, 0);
118 break;
119 }
120
121 if (mode != DRM_MODE_DPMS_OFF) {
122 radeon_crtc_load_lut(crtc);
123 }
124}
125
126static void
127atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
128 SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
129{
130 struct drm_device *dev = crtc->dev;
131 struct radeon_device *rdev = dev->dev_private;
132 SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
133 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
134
135 conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
136 conv_param.usH_Blanking_Time =
137 cpu_to_le16(crtc_param->usH_Blanking_Time);
138 conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
139 conv_param.usV_Blanking_Time =
140 cpu_to_le16(crtc_param->usV_Blanking_Time);
141 conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
142 conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
143 conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
144 conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
145 conv_param.susModeMiscInfo.usAccess =
146 cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
147 conv_param.ucCRTC = crtc_param->ucCRTC;
148
149 printk("executing set crtc dtd timing\n");
150 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
151}
152
153void atombios_crtc_set_timing(struct drm_crtc *crtc,
154 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
155 crtc_param)
156{
157 struct drm_device *dev = crtc->dev;
158 struct radeon_device *rdev = dev->dev_private;
159 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
160 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
161
162 conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
163 conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
164 conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
165 conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
166 conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
167 conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
168 conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
169 conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
170 conv_param.susModeMiscInfo.usAccess =
171 cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
172 conv_param.ucCRTC = crtc_param->ucCRTC;
173 conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
174 conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
175 conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
176 conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
177 conv_param.ucReserved = crtc_param->ucReserved;
178
179 printk("executing set crtc timing\n");
180 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
181}
182
183void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
184{
185 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
186 struct drm_device *dev = crtc->dev;
187 struct radeon_device *rdev = dev->dev_private;
188 struct drm_encoder *encoder = NULL;
189 struct radeon_encoder *radeon_encoder = NULL;
190 uint8_t frev, crev;
191 int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
192 SET_PIXEL_CLOCK_PS_ALLOCATION args;
193 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
194 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
195 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
196 uint32_t sclock = mode->clock;
197 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
198 struct radeon_pll *pll;
199 int pll_flags = 0;
200
201 memset(&args, 0, sizeof(args));
202
203 if (ASIC_IS_AVIVO(rdev)) {
204 uint32_t ss_cntl;
205
206 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
207 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
208 else
209 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
210
211 /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
212 if (radeon_crtc->crtc_id == 0) {
213 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
214 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
215 } else {
216 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
217 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
218 }
219 } else {
220 pll_flags |= RADEON_PLL_LEGACY;
221
222 if (mode->clock > 200000) /* range limits??? */
223 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
224 else
225 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
226
227 }
228
229 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
230 if (encoder->crtc == crtc) {
231 if (!ASIC_IS_AVIVO(rdev)) {
232 if (encoder->encoder_type !=
233 DRM_MODE_ENCODER_DAC)
234 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
235 if (!ASIC_IS_AVIVO(rdev)
236 && (encoder->encoder_type ==
237 DRM_MODE_ENCODER_LVDS))
238 pll_flags |= RADEON_PLL_USE_REF_DIV;
239 }
240 radeon_encoder = to_radeon_encoder(encoder);
241 }
242 }
243
244 if (radeon_crtc->crtc_id == 0)
245 pll = &rdev->clock.p1pll;
246 else
247 pll = &rdev->clock.p2pll;
248
249 radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
250 &ref_div, &post_div, pll_flags);
251
252 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
253 &crev);
254
255 switch (frev) {
256 case 1:
257 switch (crev) {
258 case 1:
259 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
260 spc1_ptr->usPixelClock = cpu_to_le16(sclock);
261 spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
262 spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
263 spc1_ptr->ucFracFbDiv = frac_fb_div;
264 spc1_ptr->ucPostDiv = post_div;
265 spc1_ptr->ucPpll =
266 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
267 spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
268 spc1_ptr->ucRefDivSrc = 1;
269 break;
270 case 2:
271 spc2_ptr =
272 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
273 spc2_ptr->usPixelClock = cpu_to_le16(sclock);
274 spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
275 spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
276 spc2_ptr->ucFracFbDiv = frac_fb_div;
277 spc2_ptr->ucPostDiv = post_div;
278 spc2_ptr->ucPpll =
279 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
280 spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
281 spc2_ptr->ucRefDivSrc = 1;
282 break;
283 case 3:
284 if (!encoder)
285 return;
286 spc3_ptr =
287 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
288 spc3_ptr->usPixelClock = cpu_to_le16(sclock);
289 spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
290 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
291 spc3_ptr->ucFracFbDiv = frac_fb_div;
292 spc3_ptr->ucPostDiv = post_div;
293 spc3_ptr->ucPpll =
294 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
295 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
296 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
297 spc3_ptr->ucEncoderMode =
298 atombios_get_encoder_mode(encoder);
299 break;
300 default:
301 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
302 return;
303 }
304 break;
305 default:
306 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
307 return;
308 }
309
310 printk("executing set pll\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312}
313
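/*
 * Editor's note: not part of the original patch. As a rough guide to the
 * divider values filled in above, radeon_compute_pll() is expected to pick
 * ref_div, fb_div and post_div so that, approximately:
 *
 *   pixel clock ~= reference clock * fb_div / (ref_div * post_div)
 *
 * e.g. a 27 MHz reference with fb_div = 60, ref_div = 6 and post_div = 2
 * gives about 135 MHz. These numbers are illustrative only; the fractional
 * feedback divider handling is ASIC specific.
 */
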
314int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
315 struct drm_framebuffer *old_fb)
316{
317 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
318 struct drm_device *dev = crtc->dev;
319 struct radeon_device *rdev = dev->dev_private;
320 struct radeon_framebuffer *radeon_fb;
321 struct drm_gem_object *obj;
322 struct drm_radeon_gem_object *obj_priv;
323 uint64_t fb_location;
324 uint32_t fb_format, fb_pitch_pixels;
325
326 if (!crtc->fb)
327 return -EINVAL;
328
329 radeon_fb = to_radeon_framebuffer(crtc->fb);
330
331 obj = radeon_fb->obj;
332 obj_priv = obj->driver_private;
333
334 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
335 return -EINVAL;
336 }
337
338 switch (crtc->fb->bits_per_pixel) {
339 case 15:
340 fb_format =
341 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
342 AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
343 break;
344 case 16:
345 fb_format =
346 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
347 AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
348 break;
349 case 24:
350 case 32:
351 fb_format =
352 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
353 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
354 break;
355 default:
356 DRM_ERROR("Unsupported screen depth %d\n",
357 crtc->fb->bits_per_pixel);
358 return -EINVAL;
359 }
360
361 /* TODO tiling */
362 if (radeon_crtc->crtc_id == 0)
363 WREG32(AVIVO_D1VGA_CONTROL, 0);
364 else
365 WREG32(AVIVO_D2VGA_CONTROL, 0);
366 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
367 (u32) fb_location);
368 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
369 radeon_crtc->crtc_offset, (u32) fb_location);
370 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
371
372 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
373 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
374 WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
375 WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
376 WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
377 WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
378
379 fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
380 WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
381 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
382
383 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
384 crtc->mode.vdisplay);
385 x &= ~3;
386 y &= ~1;
387 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
388 (x << 16) | y);
389 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
390 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
391
392 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
393 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
394 AVIVO_D1MODE_INTERLEAVE_EN);
395 else
396 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
397
398 if (old_fb && old_fb != crtc->fb) {
399 radeon_fb = to_radeon_framebuffer(old_fb);
400 radeon_gem_object_unpin(radeon_fb->obj);
401 }
402 return 0;
403}
404
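/*
 * Editor's note: not part of the original patch. The pitch programmed above is
 * in pixels, not bytes: for a 1920-wide ARGB8888 framebuffer with a byte pitch
 * of 1920 * 4 = 7680, fb_pitch_pixels = 7680 / (32 / 8) = 1920. Numbers are
 * illustrative only.
 */
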
405int atombios_crtc_mode_set(struct drm_crtc *crtc,
406 struct drm_display_mode *mode,
407 struct drm_display_mode *adjusted_mode,
408 int x, int y, struct drm_framebuffer *old_fb)
409{
410 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
411 struct drm_device *dev = crtc->dev;
412 struct radeon_device *rdev = dev->dev_private;
413 struct drm_encoder *encoder;
414 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
415
416 /* TODO color tiling */
417 memset(&crtc_timing, 0, sizeof(crtc_timing));
418
419 /* TODO tv */
420 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
421
422 }
423
424 crtc_timing.ucCRTC = radeon_crtc->crtc_id;
425 crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
426 crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
427 crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
428 crtc_timing.usH_SyncWidth =
429 adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
430
431 crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
432 crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
433 crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
434 crtc_timing.usV_SyncWidth =
435 adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
436
437 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
438 crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
439
440 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
441 crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
442
443 if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
444 crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
445
446 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
447 crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
448
449 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
450 crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
451
452 atombios_crtc_set_pll(crtc, adjusted_mode);
453 atombios_crtc_set_timing(crtc, &crtc_timing);
454
455 if (ASIC_IS_AVIVO(rdev))
456 atombios_crtc_set_base(crtc, x, y, old_fb);
457 else {
458 if (radeon_crtc->crtc_id == 0) {
459 SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
460 memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
461
462 /* setup FP shadow regs on R4xx */
463 crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
464 crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
465 crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
466 crtc_dtd_timing.usH_Blanking_Time =
467 adjusted_mode->crtc_hblank_end -
468 adjusted_mode->crtc_hdisplay;
469 crtc_dtd_timing.usV_Blanking_Time =
470 adjusted_mode->crtc_vblank_end -
471 adjusted_mode->crtc_vdisplay;
472 crtc_dtd_timing.usH_SyncOffset =
473 adjusted_mode->crtc_hsync_start -
474 adjusted_mode->crtc_hdisplay;
475 crtc_dtd_timing.usV_SyncOffset =
476 adjusted_mode->crtc_vsync_start -
477 adjusted_mode->crtc_vdisplay;
478 crtc_dtd_timing.usH_SyncWidth =
479 adjusted_mode->crtc_hsync_end -
480 adjusted_mode->crtc_hsync_start;
481 crtc_dtd_timing.usV_SyncWidth =
482 adjusted_mode->crtc_vsync_end -
483 adjusted_mode->crtc_vsync_start;
484 /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
485 /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
486
487 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
488 crtc_dtd_timing.susModeMiscInfo.usAccess |=
489 ATOM_VSYNC_POLARITY;
490
491 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
492 crtc_dtd_timing.susModeMiscInfo.usAccess |=
493 ATOM_HSYNC_POLARITY;
494
495 if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
496 crtc_dtd_timing.susModeMiscInfo.usAccess |=
497 ATOM_COMPOSITESYNC;
498
499 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
500 crtc_dtd_timing.susModeMiscInfo.usAccess |=
501 ATOM_INTERLACE;
502
503 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
504 crtc_dtd_timing.susModeMiscInfo.usAccess |=
505 ATOM_DOUBLE_CLOCK_MODE;
506
507 atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
508 }
509 radeon_crtc_set_base(crtc, x, y, old_fb);
510 radeon_legacy_atom_set_surface(crtc);
511 }
512 return 0;
513}
514
515static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
516 struct drm_display_mode *mode,
517 struct drm_display_mode *adjusted_mode)
518{
519 return true;
520}
521
522static void atombios_crtc_prepare(struct drm_crtc *crtc)
523{
524 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
525 atombios_lock_crtc(crtc, 1);
526}
527
528static void atombios_crtc_commit(struct drm_crtc *crtc)
529{
530 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
531 atombios_lock_crtc(crtc, 0);
532}
533
534static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
535 .dpms = atombios_crtc_dpms,
536 .mode_fixup = atombios_crtc_mode_fixup,
537 .mode_set = atombios_crtc_mode_set,
538 .mode_set_base = atombios_crtc_set_base,
539 .prepare = atombios_crtc_prepare,
540 .commit = atombios_crtc_commit,
541};
542
543void radeon_atombios_init_crtc(struct drm_device *dev,
544 struct radeon_crtc *radeon_crtc)
545{
546 if (radeon_crtc->crtc_id == 1)
547 radeon_crtc->crtc_offset =
548 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
549 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
550}
551
552void radeon_init_disp_bw_avivo(struct drm_device *dev,
553 struct drm_display_mode *mode1,
554 uint32_t pixel_bytes1,
555 struct drm_display_mode *mode2,
556 uint32_t pixel_bytes2)
557{
558 struct radeon_device *rdev = dev->dev_private;
559 fixed20_12 min_mem_eff;
560 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
561 fixed20_12 sclk_ff, mclk_ff;
562 uint32_t dc_lb_memory_split, temp;
563
564 min_mem_eff.full = rfixed_const_8(0);
565 if (rdev->disp_priority == 2) {
566 uint32_t mc_init_misc_lat_timer = 0;
567 if (rdev->family == CHIP_RV515)
568 mc_init_misc_lat_timer =
569 RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
570 else if (rdev->family == CHIP_RS690)
571 mc_init_misc_lat_timer =
572 RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
573
574 mc_init_misc_lat_timer &=
575 ~(R300_MC_DISP1R_INIT_LAT_MASK <<
576 R300_MC_DISP1R_INIT_LAT_SHIFT);
577 mc_init_misc_lat_timer &=
578 ~(R300_MC_DISP0R_INIT_LAT_MASK <<
579 R300_MC_DISP0R_INIT_LAT_SHIFT);
580
581 if (mode2)
582 mc_init_misc_lat_timer |=
583 (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
584 if (mode1)
585 mc_init_misc_lat_timer |=
586 (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
587
588 if (rdev->family == CHIP_RV515)
589 WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
590 mc_init_misc_lat_timer);
591 else if (rdev->family == CHIP_RS690)
592 WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
593 mc_init_misc_lat_timer);
594 }
595
596 /*
 597 * determine if there is enough bandwidth for the current mode
598 */
599 temp_ff.full = rfixed_const(100);
600 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
601 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
602 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
603 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
604
605 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
606 temp_ff.full = rfixed_const(temp);
607 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
608 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
609
610 pix_clk.full = 0;
611 pix_clk2.full = 0;
612 peak_disp_bw.full = 0;
613 if (mode1) {
614 temp_ff.full = rfixed_const(1000);
615 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
616 pix_clk.full = rfixed_div(pix_clk, temp_ff);
617 temp_ff.full = rfixed_const(pixel_bytes1);
618 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
619 }
620 if (mode2) {
621 temp_ff.full = rfixed_const(1000);
622 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
623 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
624 temp_ff.full = rfixed_const(pixel_bytes2);
625 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
626 }
627
628 if (peak_disp_bw.full >= mem_bw.full) {
629 DRM_ERROR
630 ("You may not have enough display bandwidth for current mode\n"
631 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
632 printk("peak disp bw %d, mem_bw %d\n",
633 rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
634 }
635
636 /*
637 * Line Buffer Setup
638 * There is a single line buffer shared by both display controllers.
639 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
 640 * controllers. The partitioning can either be done manually or via one of four
641 * preset allocations specified in bits 1:0:
642 * 0 - line buffer is divided in half and shared between each display controller
643 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
644 * 2 - D1 gets the whole buffer
645 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
 646 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual allocation mode.
647 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
648 * 14:4; D2 allocation follows D1.
649 */
650
651 /* is auto or manual better ? */
652 dc_lb_memory_split =
653 RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
654 dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
655#if 1
656 /* auto */
657 if (mode1 && mode2) {
658 if (mode1->hdisplay > mode2->hdisplay) {
659 if (mode1->hdisplay > 2560)
660 dc_lb_memory_split |=
661 AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
662 else
663 dc_lb_memory_split |=
664 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
665 } else if (mode2->hdisplay > mode1->hdisplay) {
666 if (mode2->hdisplay > 2560)
667 dc_lb_memory_split |=
668 AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
669 else
670 dc_lb_memory_split |=
671 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
672 } else
673 dc_lb_memory_split |=
674 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
675 } else if (mode1) {
676 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
677 } else if (mode2) {
678 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
679 }
680#else
681 /* manual */
682 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
683 dc_lb_memory_split &=
684 ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
685 AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
686 if (mode1) {
687 dc_lb_memory_split |=
688 ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
689 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
690 } else if (mode2) {
691 dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
692 }
693#endif
694 WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
695}
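
/*
 * Editor's note: an illustrative worked example, not part of the original
 * patch. With a 400 MHz DDR memory clock on a 128-bit bus, the bandwidth check
 * above compares roughly:
 *
 *   mem_bw  ~= 400 * (128 / 8) * 2 = 12800 MB/s (before the efficiency factor)
 *   disp_bw ~= 162 MHz * 4 bytes   =   648 MB/s for one 1600x1200@60 ARGB8888 head
 *
 * For the manual line-buffer split in the #else branch, a 1920-wide mode on D1
 * would program an end address of (1920 / 2) + 64 = 1024 into bits 14:4. All
 * numbers are illustrative only.
 */
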
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
new file mode 100644
index 000000000000..5225f5be7ea7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -0,0 +1,1524 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "drm.h"
31#include "radeon_drm.h"
32#include "radeon_microcode.h"
33#include "radeon_reg.h"
34#include "radeon.h"
35
 36/* This file gathers functions specific to:
37 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
38 *
39 * Some of these functions might be used by newer ASICs.
40 */
41void r100_hdp_reset(struct radeon_device *rdev);
42void r100_gpu_init(struct radeon_device *rdev);
43int r100_gui_wait_for_idle(struct radeon_device *rdev);
44int r100_mc_wait_for_idle(struct radeon_device *rdev);
45void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
46void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
47int r100_debugfs_mc_info_init(struct radeon_device *rdev);
48
49
50/*
51 * PCI GART
52 */
53void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
54{
 55 /* TODO: can we do something here? */
 56 /* The HW seems to cache only one entry, so we should discard this
 57 * entry; otherwise, if the first GPU GART read hits this entry it
 58 * could end up at the wrong address. */
59}
60
61int r100_pci_gart_enable(struct radeon_device *rdev)
62{
63 uint32_t tmp;
64 int r;
65
66 /* Initialize common gart structure */
67 r = radeon_gart_init(rdev);
68 if (r) {
69 return r;
70 }
71 if (rdev->gart.table.ram.ptr == NULL) {
72 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
73 r = radeon_gart_table_ram_alloc(rdev);
74 if (r) {
75 return r;
76 }
77 }
78 /* discard memory request outside of configured range */
79 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
80 WREG32(RADEON_AIC_CNTL, tmp);
81 /* set address range for PCI address translate */
82 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
83 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
84 WREG32(RADEON_AIC_HI_ADDR, tmp);
85 /* Enable bus mastering */
86 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
87 WREG32(RADEON_BUS_CNTL, tmp);
88 /* set PCI GART page-table base address */
89 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
90 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
91 WREG32(RADEON_AIC_CNTL, tmp);
92 r100_pci_gart_tlb_flush(rdev);
93 rdev->gart.ready = true;
94 return 0;
95}
96
97void r100_pci_gart_disable(struct radeon_device *rdev)
98{
99 uint32_t tmp;
100
101 /* discard memory request outside of configured range */
102 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
103 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
104 WREG32(RADEON_AIC_LO_ADDR, 0);
105 WREG32(RADEON_AIC_HI_ADDR, 0);
106}
107
108int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
109{
110 if (i < 0 || i > rdev->gart.num_gpu_pages) {
111 return -EINVAL;
112 }
113 rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
114 return 0;
115}
116
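/*
 * Editor's note: not part of the original patch. Each PCI GART entry written
 * above is a single little-endian 32-bit bus address, which is why
 * r100_pci_gart_enable() sizes the table as num_gpu_pages * 4 bytes. For
 * example, a 32 MB GTT of 4 KB pages needs 8192 entries, i.e. a 32 KB table.
 */
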
117int r100_gart_enable(struct radeon_device *rdev)
118{
119 if (rdev->flags & RADEON_IS_AGP) {
120 r100_pci_gart_disable(rdev);
121 return 0;
122 }
123 return r100_pci_gart_enable(rdev);
124}
125
126
127/*
128 * MC
129 */
130void r100_mc_disable_clients(struct radeon_device *rdev)
131{
132 uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
133
134 /* FIXME: is this function correct for rs100,rs200,rs300 ? */
135 if (r100_gui_wait_for_idle(rdev)) {
136 printk(KERN_WARNING "Failed to wait GUI idle while "
137 "programming pipes. Bad things might happen.\n");
138 }
139
140 /* stop display and memory access */
141 ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
142 WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
143 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
144 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
145 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
146
147 r100_gpu_wait_for_vsync(rdev);
148
149 WREG32(RADEON_CRTC_GEN_CNTL,
150 (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
151 RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
152
153 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
154 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
155
156 r100_gpu_wait_for_vsync2(rdev);
157 WREG32(RADEON_CRTC2_GEN_CNTL,
158 (crtc2_gen_cntl &
159 ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
160 RADEON_CRTC2_DISP_REQ_EN_B);
161 }
162
163 udelay(500);
164}
165
166void r100_mc_setup(struct radeon_device *rdev)
167{
168 uint32_t tmp;
169 int r;
170
171 r = r100_debugfs_mc_info_init(rdev);
172 if (r) {
173 DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
174 }
175 /* Write VRAM size in case we are limiting it */
176 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
177 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
178 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
179 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
180 WREG32(RADEON_MC_FB_LOCATION, tmp);
181
182 /* Enable bus mastering */
183 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
184 WREG32(RADEON_BUS_CNTL, tmp);
185
186 if (rdev->flags & RADEON_IS_AGP) {
187 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
188 tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
189 tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
190 WREG32(RADEON_MC_AGP_LOCATION, tmp);
191 WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
192 } else {
193 WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
194 WREG32(RADEON_AGP_BASE, 0);
195 }
196
197 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
198 tmp |= (7 << 28);
199 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
200 (void)RREG32(RADEON_HOST_PATH_CNTL);
201 WREG32(RADEON_HOST_PATH_CNTL, tmp);
202 (void)RREG32(RADEON_HOST_PATH_CNTL);
203}
204
205int r100_mc_init(struct radeon_device *rdev)
206{
207 int r;
208
209 if (r100_debugfs_rbbm_init(rdev)) {
210 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
211 }
212
213 r100_gpu_init(rdev);
214 /* Disable gart which also disable out of gart access */
215 r100_pci_gart_disable(rdev);
216
217 /* Setup GPU memory space */
218 rdev->mc.vram_location = 0xFFFFFFFFUL;
219 rdev->mc.gtt_location = 0xFFFFFFFFUL;
220 if (rdev->flags & RADEON_IS_AGP) {
221 r = radeon_agp_init(rdev);
222 if (r) {
223 printk(KERN_WARNING "[drm] Disabling AGP\n");
224 rdev->flags &= ~RADEON_IS_AGP;
225 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
226 } else {
227 rdev->mc.gtt_location = rdev->mc.agp_base;
228 }
229 }
230 r = radeon_mc_setup(rdev);
231 if (r) {
232 return r;
233 }
234
235 r100_mc_disable_clients(rdev);
236 if (r100_mc_wait_for_idle(rdev)) {
237 printk(KERN_WARNING "Failed to wait MC idle while "
238 "programming pipes. Bad things might happen.\n");
239 }
240
241 r100_mc_setup(rdev);
242 return 0;
243}
244
245void r100_mc_fini(struct radeon_device *rdev)
246{
247 r100_pci_gart_disable(rdev);
248 radeon_gart_table_ram_free(rdev);
249 radeon_gart_fini(rdev);
250}
251
252
253/*
254 * Fence emission
255 */
256void r100_fence_ring_emit(struct radeon_device *rdev,
257 struct radeon_fence *fence)
258{
 259 /* Whoever calls radeon_fence_emit should call ring_lock and ask
 260 * for enough space (today the callers are IB scheduling and buffer moves) */
261 /* Wait until IDLE & CLEAN */
262 radeon_ring_write(rdev, PACKET0(0x1720, 0));
263 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
264 /* Emit fence sequence & fire IRQ */
265 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
266 radeon_ring_write(rdev, fence->seq);
267 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
268 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
269}
270
271
272/*
273 * Writeback
274 */
275int r100_wb_init(struct radeon_device *rdev)
276{
277 int r;
278
279 if (rdev->wb.wb_obj == NULL) {
280 r = radeon_object_create(rdev, NULL, 4096,
281 true,
282 RADEON_GEM_DOMAIN_GTT,
283 false, &rdev->wb.wb_obj);
284 if (r) {
285 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
286 return r;
287 }
288 r = radeon_object_pin(rdev->wb.wb_obj,
289 RADEON_GEM_DOMAIN_GTT,
290 &rdev->wb.gpu_addr);
291 if (r) {
292 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
293 return r;
294 }
295 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
296 if (r) {
297 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
298 return r;
299 }
300 }
301 WREG32(0x774, rdev->wb.gpu_addr);
302 WREG32(0x70C, rdev->wb.gpu_addr + 1024);
303 WREG32(0x770, 0xff);
304 return 0;
305}
306
307void r100_wb_fini(struct radeon_device *rdev)
308{
309 if (rdev->wb.wb_obj) {
310 radeon_object_kunmap(rdev->wb.wb_obj);
311 radeon_object_unpin(rdev->wb.wb_obj);
312 radeon_object_unref(&rdev->wb.wb_obj);
313 rdev->wb.wb = NULL;
314 rdev->wb.wb_obj = NULL;
315 }
316}
317
318int r100_copy_blit(struct radeon_device *rdev,
319 uint64_t src_offset,
320 uint64_t dst_offset,
321 unsigned num_pages,
322 struct radeon_fence *fence)
323{
324 uint32_t cur_pages;
325 uint32_t stride_bytes = PAGE_SIZE;
326 uint32_t pitch;
327 uint32_t stride_pixels;
328 unsigned ndw;
329 int num_loops;
330 int r = 0;
331
332 /* radeon limited to 16k stride */
333 stride_bytes &= 0x3fff;
334 /* radeon pitch is /64 */
335 pitch = stride_bytes / 64;
336 stride_pixels = stride_bytes / 4;
337 num_loops = DIV_ROUND_UP(num_pages, 8191);
338
339 /* Ask for enough room for blit + flush + fence */
340 ndw = 64 + (10 * num_loops);
341 r = radeon_ring_lock(rdev, ndw);
342 if (r) {
343 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
344 return -EINVAL;
345 }
346 while (num_pages > 0) {
347 cur_pages = num_pages;
348 if (cur_pages > 8191) {
349 cur_pages = 8191;
350 }
351 num_pages -= cur_pages;
352
 353 /* pages are in the Y direction (height);
 354 page width is in the X direction (width) */
355 radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
356 radeon_ring_write(rdev,
357 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
358 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
359 RADEON_GMC_SRC_CLIPPING |
360 RADEON_GMC_DST_CLIPPING |
361 RADEON_GMC_BRUSH_NONE |
362 (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
363 RADEON_GMC_SRC_DATATYPE_COLOR |
364 RADEON_ROP3_S |
365 RADEON_DP_SRC_SOURCE_MEMORY |
366 RADEON_GMC_CLR_CMP_CNTL_DIS |
367 RADEON_GMC_WR_MSK_DIS);
368 radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
369 radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
370 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
371 radeon_ring_write(rdev, 0);
372 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
373 radeon_ring_write(rdev, num_pages);
374 radeon_ring_write(rdev, num_pages);
375 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
376 }
377 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
378 radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
379 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
380 radeon_ring_write(rdev,
381 RADEON_WAIT_2D_IDLECLEAN |
382 RADEON_WAIT_HOST_IDLECLEAN |
383 RADEON_WAIT_DMA_GUI_IDLE);
384 if (fence) {
385 r = radeon_fence_emit(rdev, fence);
386 }
387 radeon_ring_unlock_commit(rdev);
388 return r;
389}
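For reference, a small sketch (not part of the patch) of how the pitch/offset dwords written above are built; with stride_bytes = PAGE_SIZE = 4096 the pitch field is 4096 / 64 = 64 and offsets are expressed in 1 KiB units:

static inline u32 example_pitch_offset(u32 stride_bytes, u64 offset)
{
	u32 pitch = (stride_bytes & 0x3fff) / 64;	/* stride capped at 16k, in /64 units */

	return (pitch << 22) | (u32)(offset >> 10);	/* matches the src/dst ring writes above */
}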
390
391
392/*
393 * CP
394 */
395void r100_ring_start(struct radeon_device *rdev)
396{
397 int r;
398
399 r = radeon_ring_lock(rdev, 2);
400 if (r) {
401 return;
402 }
403 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
404 radeon_ring_write(rdev,
405 RADEON_ISYNC_ANY2D_IDLE3D |
406 RADEON_ISYNC_ANY3D_IDLE2D |
407 RADEON_ISYNC_WAIT_IDLEGUI |
408 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
409 radeon_ring_unlock_commit(rdev);
410}
411
412static void r100_cp_load_microcode(struct radeon_device *rdev)
413{
414 int i;
415
416 if (r100_gui_wait_for_idle(rdev)) {
417 printk(KERN_WARNING "Failed to wait GUI idle while "
418 "programming pipes. Bad things might happen.\n");
419 }
420
421 WREG32(RADEON_CP_ME_RAM_ADDR, 0);
422 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
423 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
424 (rdev->family == CHIP_RS200)) {
425 DRM_INFO("Loading R100 Microcode\n");
426 for (i = 0; i < 256; i++) {
427 WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
428 WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
429 }
430 } else if ((rdev->family == CHIP_R200) ||
431 (rdev->family == CHIP_RV250) ||
432 (rdev->family == CHIP_RV280) ||
433 (rdev->family == CHIP_RS300)) {
434 DRM_INFO("Loading R200 Microcode\n");
435 for (i = 0; i < 256; i++) {
436 WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
437 WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
438 }
439 } else if ((rdev->family == CHIP_R300) ||
440 (rdev->family == CHIP_R350) ||
441 (rdev->family == CHIP_RV350) ||
442 (rdev->family == CHIP_RV380) ||
443 (rdev->family == CHIP_RS400) ||
444 (rdev->family == CHIP_RS480)) {
445 DRM_INFO("Loading R300 Microcode\n");
446 for (i = 0; i < 256; i++) {
447 WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
448 WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
449 }
450 } else if ((rdev->family == CHIP_R420) ||
451 (rdev->family == CHIP_R423) ||
452 (rdev->family == CHIP_RV410)) {
453 DRM_INFO("Loading R400 Microcode\n");
454 for (i = 0; i < 256; i++) {
455 WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
456 WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
457 }
458 } else if ((rdev->family == CHIP_RS690) ||
459 (rdev->family == CHIP_RS740)) {
460 DRM_INFO("Loading RS690/RS740 Microcode\n");
461 for (i = 0; i < 256; i++) {
462 WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
463 WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
464 }
465 } else if (rdev->family == CHIP_RS600) {
466 DRM_INFO("Loading RS600 Microcode\n");
467 for (i = 0; i < 256; i++) {
468 WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
469 WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
470 }
471 } else if ((rdev->family == CHIP_RV515) ||
472 (rdev->family == CHIP_R520) ||
473 (rdev->family == CHIP_RV530) ||
474 (rdev->family == CHIP_R580) ||
475 (rdev->family == CHIP_RV560) ||
476 (rdev->family == CHIP_RV570)) {
477 DRM_INFO("Loading R500 Microcode\n");
478 for (i = 0; i < 256; i++) {
479 WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
480 WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
481 }
482 }
483}
484
485int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
486{
487 unsigned rb_bufsz;
488 unsigned rb_blksz;
489 unsigned max_fetch;
490 unsigned pre_write_timer;
491 unsigned pre_write_limit;
492 unsigned indirect2_start;
493 unsigned indirect1_start;
494 uint32_t tmp;
495 int r;
496
497 if (r100_debugfs_cp_init(rdev)) {
498 DRM_ERROR("Failed to register debugfs file for CP !\n");
499 }
500 /* Reset CP */
501 tmp = RREG32(RADEON_CP_CSQ_STAT);
502 if ((tmp & (1 << 31))) {
503 DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
504 WREG32(RADEON_CP_CSQ_MODE, 0);
505 WREG32(RADEON_CP_CSQ_CNTL, 0);
506 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
507 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
508 mdelay(2);
509 WREG32(RADEON_RBBM_SOFT_RESET, 0);
510 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
511 mdelay(2);
512 tmp = RREG32(RADEON_CP_CSQ_STAT);
513 if ((tmp & (1 << 31))) {
514 DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
515 }
516 } else {
517 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
518 }
519 /* Align ring size */
520 rb_bufsz = drm_order(ring_size / 8);
521 ring_size = (1 << (rb_bufsz + 1)) * 4;
522 r100_cp_load_microcode(rdev);
523 r = radeon_ring_init(rdev, ring_size);
524 if (r) {
525 return r;
526 }
527 /* Each time the cp reads 1024 bytes (16 dword/quadword), update
528 * the rptr copy in system ram */
529 rb_blksz = 9;
530 /* cp will read 128 bytes at a time (4 dwords) */
531 max_fetch = 1;
532 rdev->cp.align_mask = 16 - 1;
533 /* Writes to CP_RB_WPTR will be delayed for pre_write_timer clocks */
534 pre_write_timer = 64;
535 /* Force a CP_RB_WPTR write if it is written more than once before the
536 * delay expires
537 */
538 pre_write_limit = 0;
539 /* Set up the cp cache like this (cache size is 96 dwords):
540 * RING 0 to 15
541 * INDIRECT1 16 to 79
542 * INDIRECT2 80 to 95
543 * So the ring cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
544 * the indirect1 cache size is 64 dwords (> (2 * max_fetch = 2 * 4 dwords))
545 * the indirect2 cache size is 16 dwords (> (2 * max_fetch = 2 * 4 dwords))
546 * The idea is that most of the gpu commands go through the indirect1
547 * buffer, so it gets the bigger cache.
548 */
549 indirect2_start = 80;
550 indirect1_start = 16;
551 /* cp setup */
552 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
553 WREG32(RADEON_CP_RB_CNTL,
554 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
555 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
556 REG_SET(RADEON_MAX_FETCH, max_fetch) |
557 RADEON_RB_NO_UPDATE);
558 /* Set ring address */
559 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
560 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
561 /* Force read & write ptr to 0 */
562 tmp = RREG32(RADEON_CP_RB_CNTL);
563 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
564 WREG32(RADEON_CP_RB_RPTR_WR, 0);
565 WREG32(RADEON_CP_RB_WPTR, 0);
566 WREG32(RADEON_CP_RB_CNTL, tmp);
567 udelay(10);
568 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
569 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
570 /* Set cp mode to bus mastering & enable cp */
571 WREG32(RADEON_CP_CSQ_MODE,
572 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
573 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
574 WREG32(0x718, 0);
575 WREG32(0x744, 0x00004D4D);
576 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
577 radeon_ring_start(rdev);
578 r = radeon_ring_test(rdev);
579 if (r) {
580 DRM_ERROR("radeon: cp isn't working (%d).\n", r);
581 return r;
582 }
583 rdev->cp.ready = true;
584 return 0;
585}
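A sketch (not part of the patch) of the ring size alignment done at the top of r100_cp_init(), assuming drm_order() returns ceil(log2(size)) as in the DRM core; a requested 1 MiB ring gives rb_bufsz = drm_order(131072) = 17 and stays 1 MiB:

static unsigned example_align_ring_size(unsigned ring_size)
{
	unsigned rb_bufsz = drm_order(ring_size / 8);	/* 1 MiB request -> 17 */

	return (1 << (rb_bufsz + 1)) * 4;		/* (1 << 18) * 4 = 1 MiB */
}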
586
587void r100_cp_fini(struct radeon_device *rdev)
588{
589 /* Disable ring */
590 rdev->cp.ready = false;
591 WREG32(RADEON_CP_CSQ_CNTL, 0);
592 radeon_ring_fini(rdev);
593 DRM_INFO("radeon: cp finalized\n");
594}
595
596void r100_cp_disable(struct radeon_device *rdev)
597{
598 /* Disable ring */
599 rdev->cp.ready = false;
600 WREG32(RADEON_CP_CSQ_MODE, 0);
601 WREG32(RADEON_CP_CSQ_CNTL, 0);
602 if (r100_gui_wait_for_idle(rdev)) {
603 printk(KERN_WARNING "Failed to wait GUI idle while "
604 "programming pipes. Bad things might happen.\n");
605 }
606}
607
608int r100_cp_reset(struct radeon_device *rdev)
609{
610 uint32_t tmp;
611 bool reinit_cp;
612 int i;
613
614 reinit_cp = rdev->cp.ready;
615 rdev->cp.ready = false;
616 WREG32(RADEON_CP_CSQ_MODE, 0);
617 WREG32(RADEON_CP_CSQ_CNTL, 0);
618 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
619 (void)RREG32(RADEON_RBBM_SOFT_RESET);
620 udelay(200);
621 WREG32(RADEON_RBBM_SOFT_RESET, 0);
622 /* Wait to prevent race in RBBM_STATUS */
623 mdelay(1);
624 for (i = 0; i < rdev->usec_timeout; i++) {
625 tmp = RREG32(RADEON_RBBM_STATUS);
626 if (!(tmp & (1 << 16))) {
627 DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
628 tmp);
629 if (reinit_cp) {
630 return r100_cp_init(rdev, rdev->cp.ring_size);
631 }
632 return 0;
633 }
634 DRM_UDELAY(1);
635 }
636 tmp = RREG32(RADEON_RBBM_STATUS);
637 DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
638 return -1;
639}
640
641
642/*
643 * CS functions
644 */
645int r100_cs_parse_packet0(struct radeon_cs_parser *p,
646 struct radeon_cs_packet *pkt,
647 unsigned *auth, unsigned n,
648 radeon_packet0_check_t check)
649{
650 unsigned reg;
651 unsigned i, j, m;
652 unsigned idx;
653 int r;
654
655 idx = pkt->idx + 1;
656 reg = pkt->reg;
657 if (pkt->one_reg_wr) {
658 if ((reg >> 7) > n) {
659 return -EINVAL;
660 }
661 } else {
662 if (((reg + (pkt->count << 2)) >> 7) > n) {
663 return -EINVAL;
664 }
665 }
666 for (i = 0; i <= pkt->count; i++, idx++) {
667 j = (reg >> 7);
668 m = 1 << ((reg >> 2) & 31);
669 if (auth[j] & m) {
670 r = check(p, pkt, idx, reg);
671 if (r) {
672 return r;
673 }
674 }
675 if (pkt->one_reg_wr) {
676 if (!(auth[j] & m)) {
677 break;
678 }
679 } else {
680 reg += 4;
681 }
682 }
683 return 0;
684}
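The auth lookup above packs one bit per dword register. A sketch (not part of the patch) of the same mapping: each 32-bit word of the table covers 32 registers, i.e. 128 bytes of register space:

static bool example_reg_is_authorized(const unsigned *auth, unsigned reg)
{
	unsigned word = reg >> 7;		/* reg / 128 bytes, same as j above */
	unsigned bit = (reg >> 2) & 31;		/* (reg / 4) % 32, same as m above */

	return (auth[word] & (1u << bit)) != 0;
}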
685
686int r100_cs_parse_packet3(struct radeon_cs_parser *p,
687 struct radeon_cs_packet *pkt,
688 unsigned *auth, unsigned n,
689 radeon_packet3_check_t check)
690{
691 unsigned i, m;
692
693 if ((pkt->opcode >> 5) > n) {
694 return -EINVAL;
695 }
696 i = pkt->opcode >> 5;
697 m = 1 << (pkt->opcode & 31);
698 if (auth[i] & m) {
699 return check(p, pkt);
700 }
701 return 0;
702}
703
704void r100_cs_dump_packet(struct radeon_cs_parser *p,
705 struct radeon_cs_packet *pkt)
706{
707 struct radeon_cs_chunk *ib_chunk;
708 volatile uint32_t *ib;
709 unsigned i;
710 unsigned idx;
711
712 ib = p->ib->ptr;
713 ib_chunk = &p->chunks[p->chunk_ib_idx];
714 idx = pkt->idx;
715 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
716 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
717 }
718}
719
720/**
721 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
722 * @parser: parser structure holding parsing context.
723 * @pkt: where to store packet information
724 *
725 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
726 * packet is bigger than the remaining ib size, or if the packet type is unknown.
727 **/
728int r100_cs_packet_parse(struct radeon_cs_parser *p,
729 struct radeon_cs_packet *pkt,
730 unsigned idx)
731{
732 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
733 uint32_t header = ib_chunk->kdata[idx];
734
735 if (idx >= ib_chunk->length_dw) {
736 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
737 idx, ib_chunk->length_dw);
738 return -EINVAL;
739 }
740 pkt->idx = idx;
741 pkt->type = CP_PACKET_GET_TYPE(header);
742 pkt->count = CP_PACKET_GET_COUNT(header);
743 switch (pkt->type) {
744 case PACKET_TYPE0:
745 pkt->reg = CP_PACKET0_GET_REG(header);
746 pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
747 break;
748 case PACKET_TYPE3:
749 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
750 break;
751 case PACKET_TYPE2:
752 pkt->count = -1;
753 break;
754 default:
755 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
756 return -EINVAL;
757 }
758 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
759 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
760 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
761 return -EINVAL;
762 }
763 return 0;
764}
765
766/**
767 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
768 * @parser: parser structure holding parsing context.
769 * @data: pointer to relocation data
770 * @offset_start: starting offset
771 * @offset_mask: offset mask (to align start offset on)
772 * @reloc: reloc information
773 *
774 * Check that the next packet is a relocation packet3, do bo validation and
775 * compute the GPU offset using the provided start.
776 **/
777int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
778 struct radeon_cs_reloc **cs_reloc)
779{
780 struct radeon_cs_chunk *ib_chunk;
781 struct radeon_cs_chunk *relocs_chunk;
782 struct radeon_cs_packet p3reloc;
783 unsigned idx;
784 int r;
785
786 if (p->chunk_relocs_idx == -1) {
787 DRM_ERROR("No relocation chunk !\n");
788 return -EINVAL;
789 }
790 *cs_reloc = NULL;
791 ib_chunk = &p->chunks[p->chunk_ib_idx];
792 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
793 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
794 if (r) {
795 return r;
796 }
797 p->idx += p3reloc.count + 2;
798 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
799 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
800 p3reloc.idx);
801 r100_cs_dump_packet(p, &p3reloc);
802 return -EINVAL;
803 }
804 idx = ib_chunk->kdata[p3reloc.idx + 1];
805 if (idx >= relocs_chunk->length_dw) {
806 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
807 idx, relocs_chunk->length_dw);
808 r100_cs_dump_packet(p, &p3reloc);
809 return -EINVAL;
810 }
811 /* FIXME: we assume reloc size is 4 dwords */
812 *cs_reloc = p->relocs_ptr[(idx / 4)];
813 return 0;
814}
815
816static int r100_packet0_check(struct radeon_cs_parser *p,
817 struct radeon_cs_packet *pkt)
818{
819 struct radeon_cs_chunk *ib_chunk;
820 struct radeon_cs_reloc *reloc;
821 volatile uint32_t *ib;
822 uint32_t tmp;
823 unsigned reg;
824 unsigned i;
825 unsigned idx;
826 bool onereg;
827 int r;
828
829 ib = p->ib->ptr;
830 ib_chunk = &p->chunks[p->chunk_ib_idx];
831 idx = pkt->idx + 1;
832 reg = pkt->reg;
833 onereg = false;
834 if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
835 onereg = true;
836 }
837 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
838 switch (reg) {
839 /* FIXME: only allow PACKET3 blit? easier to check for out of
840 * range access */
841 case RADEON_DST_PITCH_OFFSET:
842 case RADEON_SRC_PITCH_OFFSET:
843 r = r100_cs_packet_next_reloc(p, &reloc);
844 if (r) {
845 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
846 idx, reg);
847 r100_cs_dump_packet(p, pkt);
848 return r;
849 }
850 tmp = ib_chunk->kdata[idx] & 0x003fffff;
851 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
852 ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
853 break;
854 case RADEON_RB3D_DEPTHOFFSET:
855 case RADEON_RB3D_COLOROFFSET:
856 case R300_RB3D_COLOROFFSET0:
857 case R300_ZB_DEPTHOFFSET:
858 case R200_PP_TXOFFSET_0:
859 case R200_PP_TXOFFSET_1:
860 case R200_PP_TXOFFSET_2:
861 case R200_PP_TXOFFSET_3:
862 case R200_PP_TXOFFSET_4:
863 case R200_PP_TXOFFSET_5:
864 case RADEON_PP_TXOFFSET_0:
865 case RADEON_PP_TXOFFSET_1:
866 case RADEON_PP_TXOFFSET_2:
867 case R300_TX_OFFSET_0:
868 case R300_TX_OFFSET_0+4:
869 case R300_TX_OFFSET_0+8:
870 case R300_TX_OFFSET_0+12:
871 case R300_TX_OFFSET_0+16:
872 case R300_TX_OFFSET_0+20:
873 case R300_TX_OFFSET_0+24:
874 case R300_TX_OFFSET_0+28:
875 case R300_TX_OFFSET_0+32:
876 case R300_TX_OFFSET_0+36:
877 case R300_TX_OFFSET_0+40:
878 case R300_TX_OFFSET_0+44:
879 case R300_TX_OFFSET_0+48:
880 case R300_TX_OFFSET_0+52:
881 case R300_TX_OFFSET_0+56:
882 case R300_TX_OFFSET_0+60:
883 r = r100_cs_packet_next_reloc(p, &reloc);
884 if (r) {
885 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
886 idx, reg);
887 r100_cs_dump_packet(p, pkt);
888 return r;
889 }
890 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
891 break;
892 default:
893 /* FIXME: we don't want to allow any other packets */
894 break;
895 }
896 if (onereg) {
897 /* FIXME: forbid onereg write to register on relocate */
898 break;
899 }
900 }
901 return 0;
902}
903
904static int r100_packet3_check(struct radeon_cs_parser *p,
905 struct radeon_cs_packet *pkt)
906{
907 struct radeon_cs_chunk *ib_chunk;
908 struct radeon_cs_reloc *reloc;
909 unsigned idx;
910 unsigned i, c;
911 volatile uint32_t *ib;
912 int r;
913
914 ib = p->ib->ptr;
915 ib_chunk = &p->chunks[p->chunk_ib_idx];
916 idx = pkt->idx + 1;
917 switch (pkt->opcode) {
918 case PACKET3_3D_LOAD_VBPNTR:
919 c = ib_chunk->kdata[idx++];
920 for (i = 0; i < (c - 1); i += 2, idx += 3) {
921 r = r100_cs_packet_next_reloc(p, &reloc);
922 if (r) {
923 DRM_ERROR("No reloc for packet3 %d\n",
924 pkt->opcode);
925 r100_cs_dump_packet(p, pkt);
926 return r;
927 }
928 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
929 r = r100_cs_packet_next_reloc(p, &reloc);
930 if (r) {
931 DRM_ERROR("No reloc for packet3 %d\n",
932 pkt->opcode);
933 r100_cs_dump_packet(p, pkt);
934 return r;
935 }
936 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
937 }
938 if (c & 1) {
939 r = r100_cs_packet_next_reloc(p, &reloc);
940 if (r) {
941 DRM_ERROR("No reloc for packet3 %d\n",
942 pkt->opcode);
943 r100_cs_dump_packet(p, pkt);
944 return r;
945 }
946 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
947 }
948 break;
949 case PACKET3_INDX_BUFFER:
950 r = r100_cs_packet_next_reloc(p, &reloc);
951 if (r) {
952 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
953 r100_cs_dump_packet(p, pkt);
954 return r;
955 }
956 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
957 break;
958 case 0x23:
959 /* FIXME: cleanup */
960 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
961 r = r100_cs_packet_next_reloc(p, &reloc);
962 if (r) {
963 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
964 r100_cs_dump_packet(p, pkt);
965 return r;
966 }
967 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
968 break;
969 case PACKET3_3D_DRAW_IMMD:
970 /* triggers drawing using in-packet vertex data */
971 case PACKET3_3D_DRAW_IMMD_2:
972 /* triggers drawing using in-packet vertex data */
973 case PACKET3_3D_DRAW_VBUF_2:
974 /* triggers drawing of vertex buffers setup elsewhere */
975 case PACKET3_3D_DRAW_INDX_2:
976 /* triggers drawing using indices to vertex buffer */
977 case PACKET3_3D_DRAW_VBUF:
978 /* triggers drawing of vertex buffers setup elsewhere */
979 case PACKET3_3D_DRAW_INDX:
980 /* triggers drawing using indices to vertex buffer */
981 case PACKET3_NOP:
982 break;
983 default:
984 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
985 return -EINVAL;
986 }
987 return 0;
988}
989
990int r100_cs_parse(struct radeon_cs_parser *p)
991{
992 struct radeon_cs_packet pkt;
993 int r;
994
995 do {
996 r = r100_cs_packet_parse(p, &pkt, p->idx);
997 if (r) {
998 return r;
999 }
1000 p->idx += pkt.count + 2;
1001 switch (pkt.type) {
1002 case PACKET_TYPE0:
1003 r = r100_packet0_check(p, &pkt);
1004 break;
1005 case PACKET_TYPE2:
1006 break;
1007 case PACKET_TYPE3:
1008 r = r100_packet3_check(p, &pkt);
1009 break;
1010 default:
1011 DRM_ERROR("Unknown packet type %d !\n",
1012 pkt.type);
1013 return -EINVAL;
1014 }
1015 if (r) {
1016 return r;
1017 }
1018 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1019 return 0;
1020}
1021
1022
1023/*
1024 * Global GPU functions
1025 */
1026void r100_errata(struct radeon_device *rdev)
1027{
1028 rdev->pll_errata = 0;
1029
1030 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1031 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1032 }
1033
1034 if (rdev->family == CHIP_RV100 ||
1035 rdev->family == CHIP_RS100 ||
1036 rdev->family == CHIP_RS200) {
1037 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1038 }
1039}
1040
1041/* Wait for vertical sync on primary CRTC */
1042void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1043{
1044 uint32_t crtc_gen_cntl, tmp;
1045 int i;
1046
1047 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1048 if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1049 !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1050 return;
1051 }
1052 /* Clear the CRTC_VBLANK_SAVE bit */
1053 WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1054 for (i = 0; i < rdev->usec_timeout; i++) {
1055 tmp = RREG32(RADEON_CRTC_STATUS);
1056 if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1057 return;
1058 }
1059 DRM_UDELAY(1);
1060 }
1061}
1062
1063/* Wait for vertical sync on secondary CRTC */
1064void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1065{
1066 uint32_t crtc2_gen_cntl, tmp;
1067 int i;
1068
1069 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1070 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1071 !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1072 return;
1073
1074 /* Clear the CRTC_VBLANK_SAVE bit */
1075 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1076 for (i = 0; i < rdev->usec_timeout; i++) {
1077 tmp = RREG32(RADEON_CRTC2_STATUS);
1078 if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1079 return;
1080 }
1081 DRM_UDELAY(1);
1082 }
1083}
1084
1085int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1086{
1087 unsigned i;
1088 uint32_t tmp;
1089
1090 for (i = 0; i < rdev->usec_timeout; i++) {
1091 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1092 if (tmp >= n) {
1093 return 0;
1094 }
1095 DRM_UDELAY(1);
1096 }
1097 return -1;
1098}
1099
1100int r100_gui_wait_for_idle(struct radeon_device *rdev)
1101{
1102 unsigned i;
1103 uint32_t tmp;
1104
1105 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1106 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1107 " Bad things might happen.\n");
1108 }
1109 for (i = 0; i < rdev->usec_timeout; i++) {
1110 tmp = RREG32(RADEON_RBBM_STATUS);
1111 if (!(tmp & (1 << 31))) {
1112 return 0;
1113 }
1114 DRM_UDELAY(1);
1115 }
1116 return -1;
1117}
1118
1119int r100_mc_wait_for_idle(struct radeon_device *rdev)
1120{
1121 unsigned i;
1122 uint32_t tmp;
1123
1124 for (i = 0; i < rdev->usec_timeout; i++) {
1125 /* read MC_STATUS */
1126 tmp = RREG32(0x0150);
1127 if (tmp & (1 << 2)) {
1128 return 0;
1129 }
1130 DRM_UDELAY(1);
1131 }
1132 return -1;
1133}
1134
1135void r100_gpu_init(struct radeon_device *rdev)
1136{
1137 /* TODO: anything to do here? pipes? */
1138 r100_hdp_reset(rdev);
1139}
1140
1141void r100_hdp_reset(struct radeon_device *rdev)
1142{
1143 uint32_t tmp;
1144
1145 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
1146 tmp |= (7 << 28);
1147 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
1148 (void)RREG32(RADEON_HOST_PATH_CNTL);
1149 udelay(200);
1150 WREG32(RADEON_RBBM_SOFT_RESET, 0);
1151 WREG32(RADEON_HOST_PATH_CNTL, tmp);
1152 (void)RREG32(RADEON_HOST_PATH_CNTL);
1153}
1154
1155int r100_rb2d_reset(struct radeon_device *rdev)
1156{
1157 uint32_t tmp;
1158 int i;
1159
1160 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
1161 (void)RREG32(RADEON_RBBM_SOFT_RESET);
1162 udelay(200);
1163 WREG32(RADEON_RBBM_SOFT_RESET, 0);
1164 /* Wait to prevent race in RBBM_STATUS */
1165 mdelay(1);
1166 for (i = 0; i < rdev->usec_timeout; i++) {
1167 tmp = RREG32(RADEON_RBBM_STATUS);
1168 if (!(tmp & (1 << 26))) {
1169 DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
1170 tmp);
1171 return 0;
1172 }
1173 DRM_UDELAY(1);
1174 }
1175 tmp = RREG32(RADEON_RBBM_STATUS);
1176 DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
1177 return -1;
1178}
1179
1180int r100_gpu_reset(struct radeon_device *rdev)
1181{
1182 uint32_t status;
1183
1184 /* reset order likely matters */
1185 status = RREG32(RADEON_RBBM_STATUS);
1186 /* reset HDP */
1187 r100_hdp_reset(rdev);
1188 /* reset rb2d */
1189 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
1190 r100_rb2d_reset(rdev);
1191 }
1192 /* TODO: reset 3D engine */
1193 /* reset CP */
1194 status = RREG32(RADEON_RBBM_STATUS);
1195 if (status & (1 << 16)) {
1196 r100_cp_reset(rdev);
1197 }
1198 /* Check if GPU is idle */
1199 status = RREG32(RADEON_RBBM_STATUS);
1200 if (status & (1 << 31)) {
1201 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
1202 return -1;
1203 }
1204 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
1205 return 0;
1206}
1207
1208
1209/*
1210 * VRAM info
1211 */
1212static void r100_vram_get_type(struct radeon_device *rdev)
1213{
1214 uint32_t tmp;
1215
1216 rdev->mc.vram_is_ddr = false;
1217 if (rdev->flags & RADEON_IS_IGP)
1218 rdev->mc.vram_is_ddr = true;
1219 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
1220 rdev->mc.vram_is_ddr = true;
1221 if ((rdev->family == CHIP_RV100) ||
1222 (rdev->family == CHIP_RS100) ||
1223 (rdev->family == CHIP_RS200)) {
1224 tmp = RREG32(RADEON_MEM_CNTL);
1225 if (tmp & RV100_HALF_MODE) {
1226 rdev->mc.vram_width = 32;
1227 } else {
1228 rdev->mc.vram_width = 64;
1229 }
1230 if (rdev->flags & RADEON_SINGLE_CRTC) {
1231 rdev->mc.vram_width /= 4;
1232 rdev->mc.vram_is_ddr = true;
1233 }
1234 } else if (rdev->family <= CHIP_RV280) {
1235 tmp = RREG32(RADEON_MEM_CNTL);
1236 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
1237 rdev->mc.vram_width = 128;
1238 } else {
1239 rdev->mc.vram_width = 64;
1240 }
1241 } else {
1242 /* newer IGPs */
1243 rdev->mc.vram_width = 128;
1244 }
1245}
1246
1247void r100_vram_info(struct radeon_device *rdev)
1248{
1249 r100_vram_get_type(rdev);
1250
1251 if (rdev->flags & RADEON_IS_IGP) {
1252 uint32_t tom;
1253 /* read NB_TOM to get the amount of ram stolen for the GPU */
1254 tom = RREG32(RADEON_NB_TOM);
1255 rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1256 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1257 } else {
1258 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
1259 /* Some production boards of m6 will report 0
1260 * if it's 8 MB
1261 */
1262 if (rdev->mc.vram_size == 0) {
1263 rdev->mc.vram_size = 8192 * 1024;
1264 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1265 }
1266 }
1267
1268 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1269 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1270 if (rdev->mc.aper_size > rdev->mc.vram_size) {
1271 /* Why do some boards not have CONFIG_MEMSIZE properly
1272 * set up? */
1273 rdev->mc.vram_size = rdev->mc.aper_size;
1274 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1275 }
1276}
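A worked example (not part of the patch) of the NB_TOM decode above: the top and bottom of the stolen region are stored in 64 KiB units, so a hypothetical tom = 0x03ff0000 gives ((0x3ff - 0x0) + 1) << 16 = 64 MiB:

static u32 example_igp_stolen_bytes(u32 tom)
{
	return ((tom >> 16) - (tom & 0xffff) + 1) << 16;	/* 0x03ff0000 -> 64 MiB */
}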
1277
1278
1279/*
1280 * Indirect registers accessor
1281 */
1282void r100_pll_errata_after_index(struct radeon_device *rdev)
1283{
1284 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1285 return;
1286 }
1287 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
1288 (void)RREG32(RADEON_CRTC_GEN_CNTL);
1289}
1290
1291static void r100_pll_errata_after_data(struct radeon_device *rdev)
1292{
1293 /* This workaround is necessary on RV100, RS100 and RS200 chips
1294 * or the chip could hang on a subsequent access
1295 */
1296 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
1297 udelay(5000);
1298 }
1299
1300 /* This function is required to work around a hardware bug in some (all?)
1301 * revisions of the R300. This workaround should be called after every
1302 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
1303 * may not be correct.
1304 */
1305 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
1306 uint32_t save, tmp;
1307
1308 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
1309 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
1310 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
1311 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
1312 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
1313 }
1314}
1315
1316uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
1317{
1318 uint32_t data;
1319
1320 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
1321 r100_pll_errata_after_index(rdev);
1322 data = RREG32(RADEON_CLOCK_CNTL_DATA);
1323 r100_pll_errata_after_data(rdev);
1324 return data;
1325}
1326
1327void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1328{
1329 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
1330 r100_pll_errata_after_index(rdev);
1331 WREG32(RADEON_CLOCK_CNTL_DATA, v);
1332 r100_pll_errata_after_data(rdev);
1333}
1334
1335uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
1336{
1337 if (reg < 0x10000)
1338 return readl(((void __iomem *)rdev->rmmio) + reg);
1339 else {
1340 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1341 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1342 }
1343}
1344
1345void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1346{
1347 if (reg < 0x10000)
1348 writel(v, ((void __iomem *)rdev->rmmio) + reg);
1349 else {
1350 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1351 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1352 }
1353}
1354
1355/*
1356 * Debugfs info
1357 */
1358#if defined(CONFIG_DEBUG_FS)
1359static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
1360{
1361 struct drm_info_node *node = (struct drm_info_node *) m->private;
1362 struct drm_device *dev = node->minor->dev;
1363 struct radeon_device *rdev = dev->dev_private;
1364 uint32_t reg, value;
1365 unsigned i;
1366
1367 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
1368 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
1369 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1370 for (i = 0; i < 64; i++) {
1371 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
1372 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
1373 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
1374 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
1375 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
1376 }
1377 return 0;
1378}
1379
1380static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
1381{
1382 struct drm_info_node *node = (struct drm_info_node *) m->private;
1383 struct drm_device *dev = node->minor->dev;
1384 struct radeon_device *rdev = dev->dev_private;
1385 uint32_t rdp, wdp;
1386 unsigned count, i, j;
1387
1388 radeon_ring_free_size(rdev);
1389 rdp = RREG32(RADEON_CP_RB_RPTR);
1390 wdp = RREG32(RADEON_CP_RB_WPTR);
1391 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1392 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1393 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
1394 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
1395 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1396 seq_printf(m, "%u dwords in ring\n", count);
1397 for (j = 0; j <= count; j++) {
1398 i = (rdp + j) & rdev->cp.ptr_mask;
1399 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
1400 }
1401 return 0;
1402}
1403
1404
1405static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
1406{
1407 struct drm_info_node *node = (struct drm_info_node *) m->private;
1408 struct drm_device *dev = node->minor->dev;
1409 struct radeon_device *rdev = dev->dev_private;
1410 uint32_t csq_stat, csq2_stat, tmp;
1411 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
1412 unsigned i;
1413
1414 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1415 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
1416 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
1417 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
1418 r_rptr = (csq_stat >> 0) & 0x3ff;
1419 r_wptr = (csq_stat >> 10) & 0x3ff;
1420 ib1_rptr = (csq_stat >> 20) & 0x3ff;
1421 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
1422 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
1423 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
1424 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
1425 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
1426 seq_printf(m, "Ring rptr %u\n", r_rptr);
1427 seq_printf(m, "Ring wptr %u\n", r_wptr);
1428 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
1429 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
1430 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
1431 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
1432 /* FIXME: 0, 128, 640 depend on the fifo setup, see cp_init_kms;
1433 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
1434 seq_printf(m, "Ring fifo:\n");
1435 for (i = 0; i < 256; i++) {
1436 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1437 tmp = RREG32(RADEON_CP_CSQ_DATA);
1438 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
1439 }
1440 seq_printf(m, "Indirect1 fifo:\n");
1441 for (i = 256; i <= 512; i++) {
1442 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1443 tmp = RREG32(RADEON_CP_CSQ_DATA);
1444 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
1445 }
1446 seq_printf(m, "Indirect2 fifo:\n");
1447 for (i = 640; i < ib1_wptr; i++) {
1448 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1449 tmp = RREG32(RADEON_CP_CSQ_DATA);
1450 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
1451 }
1452 return 0;
1453}
1454
1455static int r100_debugfs_mc_info(struct seq_file *m, void *data)
1456{
1457 struct drm_info_node *node = (struct drm_info_node *) m->private;
1458 struct drm_device *dev = node->minor->dev;
1459 struct radeon_device *rdev = dev->dev_private;
1460 uint32_t tmp;
1461
1462 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
1463 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
1464 tmp = RREG32(RADEON_MC_FB_LOCATION);
1465 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
1466 tmp = RREG32(RADEON_BUS_CNTL);
1467 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
1468 tmp = RREG32(RADEON_MC_AGP_LOCATION);
1469 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
1470 tmp = RREG32(RADEON_AGP_BASE);
1471 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
1472 tmp = RREG32(RADEON_HOST_PATH_CNTL);
1473 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
1474 tmp = RREG32(0x01D0);
1475 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
1476 tmp = RREG32(RADEON_AIC_LO_ADDR);
1477 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
1478 tmp = RREG32(RADEON_AIC_HI_ADDR);
1479 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
1480 tmp = RREG32(0x01E4);
1481 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
1482 return 0;
1483}
1484
1485static struct drm_info_list r100_debugfs_rbbm_list[] = {
1486 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
1487};
1488
1489static struct drm_info_list r100_debugfs_cp_list[] = {
1490 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
1491 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
1492};
1493
1494static struct drm_info_list r100_debugfs_mc_info_list[] = {
1495 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
1496};
1497#endif
1498
1499int r100_debugfs_rbbm_init(struct radeon_device *rdev)
1500{
1501#if defined(CONFIG_DEBUG_FS)
1502 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
1503#else
1504 return 0;
1505#endif
1506}
1507
1508int r100_debugfs_cp_init(struct radeon_device *rdev)
1509{
1510#if defined(CONFIG_DEBUG_FS)
1511 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
1512#else
1513 return 0;
1514#endif
1515}
1516
1517int r100_debugfs_mc_info_init(struct radeon_device *rdev)
1518{
1519#if defined(CONFIG_DEBUG_FS)
1520 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
1521#else
1522 return 0;
1523#endif
1524}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
new file mode 100644
index 000000000000..f5870a099d4f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -0,0 +1,1116 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "drm.h"
31#include "radeon_reg.h"
32#include "radeon.h"
33
34/* r300,r350,rv350,rv370,rv380 depends on : */
35void r100_hdp_reset(struct radeon_device *rdev);
36int r100_cp_reset(struct radeon_device *rdev);
37int r100_rb2d_reset(struct radeon_device *rdev);
38int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
39int r100_pci_gart_enable(struct radeon_device *rdev);
40void r100_pci_gart_disable(struct radeon_device *rdev);
41void r100_mc_setup(struct radeon_device *rdev);
42void r100_mc_disable_clients(struct radeon_device *rdev);
43int r100_gui_wait_for_idle(struct radeon_device *rdev);
44int r100_cs_packet_parse(struct radeon_cs_parser *p,
45 struct radeon_cs_packet *pkt,
46 unsigned idx);
47int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
48 struct radeon_cs_reloc **cs_reloc);
49int r100_cs_parse_packet0(struct radeon_cs_parser *p,
50 struct radeon_cs_packet *pkt,
51 unsigned *auth, unsigned n,
52 radeon_packet0_check_t check);
53int r100_cs_parse_packet3(struct radeon_cs_parser *p,
54 struct radeon_cs_packet *pkt,
55 unsigned *auth, unsigned n,
56 radeon_packet3_check_t check);
57void r100_cs_dump_packet(struct radeon_cs_parser *p,
58 struct radeon_cs_packet *pkt);
59
60/* This file gathers functions specific to:
61 * r300,r350,rv350,rv370,rv380
62 *
63 * Some of these functions might be used by newer ASICs.
64 */
65void r300_gpu_init(struct radeon_device *rdev);
66int r300_mc_wait_for_idle(struct radeon_device *rdev);
67int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
68
69
70/*
71 * rv370,rv380 PCIE GART
72 */
73void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
74{
75 uint32_t tmp;
76 int i;
77
78 /* Work around HW bug: do the flush 2 times */
79 for (i = 0; i < 2; i++) {
80 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
81 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
82 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
83 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
84 mb();
85 }
86}
87
88int rv370_pcie_gart_enable(struct radeon_device *rdev)
89{
90 uint32_t table_addr;
91 uint32_t tmp;
92 int r;
93
94 /* Initialize common gart structure */
95 r = radeon_gart_init(rdev);
96 if (r) {
97 return r;
98 }
99 r = rv370_debugfs_pcie_gart_info_init(rdev);
100 if (r) {
101 DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
102 }
103 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
104 r = radeon_gart_table_vram_alloc(rdev);
105 if (r) {
106 return r;
107 }
108 /* discard memory requests outside of the configured range */
109 tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
110 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
111 WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
112 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
113 WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
114 WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
115 WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
116 table_addr = rdev->gart.table_addr;
117 WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
118 /* FIXME: setup default page */
119 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
120 WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
121 /* Clear error */
122 WREG32_PCIE(0x18, 0);
123 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
124 tmp |= RADEON_PCIE_TX_GART_EN;
125 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
126 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
127 rv370_pcie_gart_tlb_flush(rdev);
128 DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
129 rdev->mc.gtt_size >> 20, table_addr);
130 rdev->gart.ready = true;
131 return 0;
132}
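The table sizing above allocates one 32-bit entry per GPU page. As a sketch (not part of the patch, and assuming the GART page size equals the 4 KiB CPU page size), a hypothetical 32 MiB GTT needs (32 << 20 >> PAGE_SHIFT) * 4 = 32 KiB of table memory in VRAM:

static unsigned example_pcie_gart_table_bytes(unsigned gtt_size)
{
	unsigned num_gpu_pages = gtt_size >> PAGE_SHIFT;	/* 32 MiB -> 8192 pages */

	return num_gpu_pages * 4;				/* -> 32 KiB of PTEs */
}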
133
134void rv370_pcie_gart_disable(struct radeon_device *rdev)
135{
136 uint32_t tmp;
137
138 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
139 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
140 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
141 if (rdev->gart.table.vram.robj) {
142 radeon_object_kunmap(rdev->gart.table.vram.robj);
143 radeon_object_unpin(rdev->gart.table.vram.robj);
144 }
145}
146
147int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
148{
149 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
150
151 if (i < 0 || i > rdev->gart.num_gpu_pages) {
152 return -EINVAL;
153 }
154 addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
155 writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
156 return 0;
157}
158
159int r300_gart_enable(struct radeon_device *rdev)
160{
161#if __OS_HAS_AGP
162 if (rdev->flags & RADEON_IS_AGP) {
163 if (rdev->family > CHIP_RV350) {
164 rv370_pcie_gart_disable(rdev);
165 } else {
166 r100_pci_gart_disable(rdev);
167 }
168 return 0;
169 }
170#endif
171 if (rdev->flags & RADEON_IS_PCIE) {
172 rdev->asic->gart_disable = &rv370_pcie_gart_disable;
173 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
174 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
175 return rv370_pcie_gart_enable(rdev);
176 }
177 return r100_pci_gart_enable(rdev);
178}
179
180
181/*
182 * MC
183 */
184int r300_mc_init(struct radeon_device *rdev)
185{
186 int r;
187
188 if (r100_debugfs_rbbm_init(rdev)) {
189 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
190 }
191
192 r300_gpu_init(rdev);
193 r100_pci_gart_disable(rdev);
194 if (rdev->flags & RADEON_IS_PCIE) {
195 rv370_pcie_gart_disable(rdev);
196 }
197
198 /* Setup GPU memory space */
199 rdev->mc.vram_location = 0xFFFFFFFFUL;
200 rdev->mc.gtt_location = 0xFFFFFFFFUL;
201 if (rdev->flags & RADEON_IS_AGP) {
202 r = radeon_agp_init(rdev);
203 if (r) {
204 printk(KERN_WARNING "[drm] Disabling AGP\n");
205 rdev->flags &= ~RADEON_IS_AGP;
206 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
207 } else {
208 rdev->mc.gtt_location = rdev->mc.agp_base;
209 }
210 }
211 r = radeon_mc_setup(rdev);
212 if (r) {
213 return r;
214 }
215
216 /* Program GPU memory space */
217 r100_mc_disable_clients(rdev);
218 if (r300_mc_wait_for_idle(rdev)) {
219 printk(KERN_WARNING "Failed to wait MC idle while "
220 "programming pipes. Bad things might happen.\n");
221 }
222 r100_mc_setup(rdev);
223 return 0;
224}
225
226void r300_mc_fini(struct radeon_device *rdev)
227{
228 if (rdev->flags & RADEON_IS_PCIE) {
229 rv370_pcie_gart_disable(rdev);
230 radeon_gart_table_vram_free(rdev);
231 } else {
232 r100_pci_gart_disable(rdev);
233 radeon_gart_table_ram_free(rdev);
234 }
235 radeon_gart_fini(rdev);
236}
237
238
239/*
240 * Fence emission
241 */
242void r300_fence_ring_emit(struct radeon_device *rdev,
243 struct radeon_fence *fence)
244{
245 /* Whoever calls radeon_fence_emit should call ring_lock and ask
246 * for enough space (today the callers are ib schedule and buffer move) */
247 /* Write SC register so SC & US assert idle */
248 radeon_ring_write(rdev, PACKET0(0x43E0, 0));
249 radeon_ring_write(rdev, 0);
250 radeon_ring_write(rdev, PACKET0(0x43E4, 0));
251 radeon_ring_write(rdev, 0);
252 /* Flush 3D cache */
253 radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
254 radeon_ring_write(rdev, (2 << 0));
255 radeon_ring_write(rdev, PACKET0(0x4F18, 0));
256 radeon_ring_write(rdev, (1 << 0));
257 /* Wait until IDLE & CLEAN */
258 radeon_ring_write(rdev, PACKET0(0x1720, 0));
259 radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
260 /* Emit fence sequence & fire IRQ */
261 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
262 radeon_ring_write(rdev, fence->seq);
263 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
264 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
265}
266
267
268/*
269 * Global GPU functions
270 */
271int r300_copy_dma(struct radeon_device *rdev,
272 uint64_t src_offset,
273 uint64_t dst_offset,
274 unsigned num_pages,
275 struct radeon_fence *fence)
276{
277 uint32_t size;
278 uint32_t cur_size;
279 int i, num_loops;
280 int r = 0;
281
282 /* copy size in bytes */
283 size = num_pages << PAGE_SHIFT;
284 num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
285 r = radeon_ring_lock(rdev, num_loops * 4 + 64);
286 if (r) {
287 DRM_ERROR("radeon: moving bo (%d).\n", r);
288 return r;
289 }
290 /* Must wait for 2D idle & clean before DMA or hangs might happen */
291 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
292 radeon_ring_write(rdev, (1 << 16));
293 for (i = 0; i < num_loops; i++) {
294 cur_size = size;
295 if (cur_size > 0x1FFFFF) {
296 cur_size = 0x1FFFFF;
297 }
298 size -= cur_size;
299 radeon_ring_write(rdev, PACKET0(0x720, 2));
300 radeon_ring_write(rdev, src_offset);
301 radeon_ring_write(rdev, dst_offset);
302 radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
303 src_offset += cur_size;
304 dst_offset += cur_size;
305 }
306 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
307 radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
308 if (fence) {
309 r = radeon_fence_emit(rdev, fence);
310 }
311 radeon_ring_unlock_commit(rdev);
312 return r;
313}
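Each DMA packet above moves at most 0x1FFFFF bytes. A sketch (not part of the patch) of the loop count computed above: copying 4096 pages of 4 KiB (16 MiB) takes DIV_ROUND_UP(16 << 20, 0x1FFFFF) = 9 packets:

static int example_dma_loop_count(unsigned num_pages)
{
	u32 size = num_pages << PAGE_SHIFT;	/* 4096 pages -> 16 MiB */

	return DIV_ROUND_UP(size, 0x1FFFFF);	/* -> 9 loops */
}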
314
315void r300_ring_start(struct radeon_device *rdev)
316{
317 unsigned gb_tile_config;
318 int r;
319
320 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
321 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
322 switch (rdev->num_gb_pipes) {
323 case 2:
324 gb_tile_config |= R300_PIPE_COUNT_R300;
325 break;
326 case 3:
327 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
328 break;
329 case 4:
330 gb_tile_config |= R300_PIPE_COUNT_R420;
331 break;
332 case 1:
333 default:
334 gb_tile_config |= R300_PIPE_COUNT_RV350;
335 break;
336 }
337
338 r = radeon_ring_lock(rdev, 64);
339 if (r) {
340 return;
341 }
342 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
343 radeon_ring_write(rdev,
344 RADEON_ISYNC_ANY2D_IDLE3D |
345 RADEON_ISYNC_ANY3D_IDLE2D |
346 RADEON_ISYNC_WAIT_IDLEGUI |
347 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
348 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
349 radeon_ring_write(rdev, gb_tile_config);
350 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
351 radeon_ring_write(rdev,
352 RADEON_WAIT_2D_IDLECLEAN |
353 RADEON_WAIT_3D_IDLECLEAN);
354 radeon_ring_write(rdev, PACKET0(0x170C, 0));
355 radeon_ring_write(rdev, 1 << 31);
356 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
357 radeon_ring_write(rdev, 0);
358 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
359 radeon_ring_write(rdev, 0);
360 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
361 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
362 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
363 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
364 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
365 radeon_ring_write(rdev,
366 RADEON_WAIT_2D_IDLECLEAN |
367 RADEON_WAIT_3D_IDLECLEAN);
368 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
369 radeon_ring_write(rdev, 0);
370 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
371 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
372 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
373 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
374 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
375 radeon_ring_write(rdev,
376 ((6 << R300_MS_X0_SHIFT) |
377 (6 << R300_MS_Y0_SHIFT) |
378 (6 << R300_MS_X1_SHIFT) |
379 (6 << R300_MS_Y1_SHIFT) |
380 (6 << R300_MS_X2_SHIFT) |
381 (6 << R300_MS_Y2_SHIFT) |
382 (6 << R300_MSBD0_Y_SHIFT) |
383 (6 << R300_MSBD0_X_SHIFT)));
384 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
385 radeon_ring_write(rdev,
386 ((6 << R300_MS_X3_SHIFT) |
387 (6 << R300_MS_Y3_SHIFT) |
388 (6 << R300_MS_X4_SHIFT) |
389 (6 << R300_MS_Y4_SHIFT) |
390 (6 << R300_MS_X5_SHIFT) |
391 (6 << R300_MS_Y5_SHIFT) |
392 (6 << R300_MSBD1_SHIFT)));
393 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
394 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
395 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
396 radeon_ring_write(rdev,
397 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
398 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
399 radeon_ring_write(rdev,
400 R300_GEOMETRY_ROUND_NEAREST |
401 R300_COLOR_ROUND_NEAREST);
402 radeon_ring_unlock_commit(rdev);
403}
404
405void r300_errata(struct radeon_device *rdev)
406{
407 rdev->pll_errata = 0;
408
409 if (rdev->family == CHIP_R300 &&
410 (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
411 rdev->pll_errata |= CHIP_ERRATA_R300_CG;
412 }
413}
414
415int r300_mc_wait_for_idle(struct radeon_device *rdev)
416{
417 unsigned i;
418 uint32_t tmp;
419
420 for (i = 0; i < rdev->usec_timeout; i++) {
421 /* read MC_STATUS */
422 tmp = RREG32(0x0150);
423 if (tmp & (1 << 4)) {
424 return 0;
425 }
426 DRM_UDELAY(1);
427 }
428 return -1;
429}
430
431void r300_gpu_init(struct radeon_device *rdev)
432{
433 uint32_t gb_tile_config, tmp;
434
435 r100_hdp_reset(rdev);
436 /* FIXME: does rv380 have one pipe? */
437 if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
438 /* r300,r350 */
439 rdev->num_gb_pipes = 2;
440 } else {
441 /* rv350,rv370,rv380 */
442 rdev->num_gb_pipes = 1;
443 }
444 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
445 switch (rdev->num_gb_pipes) {
446 case 2:
447 gb_tile_config |= R300_PIPE_COUNT_R300;
448 break;
449 case 3:
450 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
451 break;
452 case 4:
453 gb_tile_config |= R300_PIPE_COUNT_R420;
454 break;
455 case 1:
456 default:
457 gb_tile_config |= R300_PIPE_COUNT_RV350;
458 break;
459 }
460 WREG32(R300_GB_TILE_CONFIG, gb_tile_config);
461
462 if (r100_gui_wait_for_idle(rdev)) {
463 printk(KERN_WARNING "Failed to wait GUI idle while "
464 "programming pipes. Bad things might happen.\n");
465 }
466
467 tmp = RREG32(0x170C);
468 WREG32(0x170C, tmp | (1 << 31));
469
470 WREG32(R300_RB2D_DSTCACHE_MODE,
471 R300_DC_AUTOFLUSH_ENABLE |
472 R300_DC_DC_DISABLE_IGNORE_PE);
473
474 if (r100_gui_wait_for_idle(rdev)) {
475 printk(KERN_WARNING "Failed to wait GUI idle while "
476 "programming pipes. Bad things might happen.\n");
477 }
478 if (r300_mc_wait_for_idle(rdev)) {
479 printk(KERN_WARNING "Failed to wait MC idle while "
480 "programming pipes. Bad things might happen.\n");
481 }
482 DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
483}
484
485int r300_ga_reset(struct radeon_device *rdev)
486{
487 uint32_t tmp;
488 bool reinit_cp;
489 int i;
490
491 reinit_cp = rdev->cp.ready;
492 rdev->cp.ready = false;
493 for (i = 0; i < rdev->usec_timeout; i++) {
494 WREG32(RADEON_CP_CSQ_MODE, 0);
495 WREG32(RADEON_CP_CSQ_CNTL, 0);
496 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
497 (void)RREG32(RADEON_RBBM_SOFT_RESET);
498 udelay(200);
499 WREG32(RADEON_RBBM_SOFT_RESET, 0);
500 /* Wait to prevent race in RBBM_STATUS */
501 mdelay(1);
502 tmp = RREG32(RADEON_RBBM_STATUS);
503 if (tmp & ((1 << 20) | (1 << 26))) {
504 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
505 /* GA still busy, soft reset it */
506 WREG32(0x429C, 0x200);
507 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
508 WREG32(0x43E0, 0);
509 WREG32(0x43E4, 0);
510 WREG32(0x24AC, 0);
511 }
512 /* Wait to prevent race in RBBM_STATUS */
513 mdelay(1);
514 tmp = RREG32(RADEON_RBBM_STATUS);
515 if (!(tmp & ((1 << 20) | (1 << 26)))) {
516 break;
517 }
518 }
519 for (i = 0; i < rdev->usec_timeout; i++) {
520 tmp = RREG32(RADEON_RBBM_STATUS);
521 if (!(tmp & ((1 << 20) | (1 << 26)))) {
522 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
523 tmp);
524 if (reinit_cp) {
525 return r100_cp_init(rdev, rdev->cp.ring_size);
526 }
527 return 0;
528 }
529 DRM_UDELAY(1);
530 }
531 tmp = RREG32(RADEON_RBBM_STATUS);
532 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
533 return -1;
534}
535
536int r300_gpu_reset(struct radeon_device *rdev)
537{
538 uint32_t status;
539
540 /* reset order likely matters */
541 status = RREG32(RADEON_RBBM_STATUS);
542 /* reset HDP */
543 r100_hdp_reset(rdev);
544 /* reset rb2d */
545 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
546 r100_rb2d_reset(rdev);
547 }
548 /* reset GA */
549 if (status & ((1 << 20) | (1 << 26))) {
550 r300_ga_reset(rdev);
551 }
552 /* reset CP */
553 status = RREG32(RADEON_RBBM_STATUS);
554 if (status & (1 << 16)) {
555 r100_cp_reset(rdev);
556 }
557 /* Check if GPU is idle */
558 status = RREG32(RADEON_RBBM_STATUS);
559 if (status & (1 << 31)) {
560 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
561 return -1;
562 }
563 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
564 return 0;
565}
566
567
568/*
569 * r300,r350,rv350,rv380 VRAM info
570 */
571void r300_vram_info(struct radeon_device *rdev)
572{
573 uint32_t tmp;
574
575 /* DDR for all cards after R300 & IGP */
576 rdev->mc.vram_is_ddr = true;
577 tmp = RREG32(RADEON_MEM_CNTL);
578 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
579 rdev->mc.vram_width = 128;
580 } else {
581 rdev->mc.vram_width = 64;
582 }
583 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
584
585 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
586 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
587}
588
589
590/*
591 * Indirect registers accessor
592 */
593uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
594{
595 uint32_t r;
596
597 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
598 (void)RREG32(RADEON_PCIE_INDEX);
599 r = RREG32(RADEON_PCIE_DATA);
600 return r;
601}
602
603void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
604{
605 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
606 (void)RREG32(RADEON_PCIE_INDEX);
607 WREG32(RADEON_PCIE_DATA, (v));
608 (void)RREG32(RADEON_PCIE_DATA);
609}
610
611/*
612 * PCIE Lanes
613 */
614
615void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
616{
617 uint32_t link_width_cntl, mask;
618
619 if (rdev->flags & RADEON_IS_IGP)
620 return;
621
622 if (!(rdev->flags & RADEON_IS_PCIE))
623 return;
624
625 /* FIXME wait for idle */
626
627 switch (lanes) {
628 case 0:
629 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
630 break;
631 case 1:
632 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
633 break;
634 case 2:
635 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
636 break;
637 case 4:
638 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
639 break;
640 case 8:
641 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
642 break;
643 case 12:
644 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
645 break;
646 case 16:
647 default:
648 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
649 break;
650 }
651
652 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
653
654 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
655 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
656 return;
657
658 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
659 RADEON_PCIE_LC_RECONFIG_NOW |
660 RADEON_PCIE_LC_RECONFIG_LATER |
661 RADEON_PCIE_LC_SHORT_RECONFIG_EN);
662 link_width_cntl |= mask;
663 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
664 WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
665 RADEON_PCIE_LC_RECONFIG_NOW));
666
667 /* wait for lane set to complete */
668 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
669 while (link_width_cntl == 0xffffffff)
670 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
671
672}
673
674
675/*
676 * Debugfs info
677 */
678#if defined(CONFIG_DEBUG_FS)
679static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
680{
681 struct drm_info_node *node = (struct drm_info_node *) m->private;
682 struct drm_device *dev = node->minor->dev;
683 struct radeon_device *rdev = dev->dev_private;
684 uint32_t tmp;
685
686 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
687 seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
688 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
689 seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
690 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
691 seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
692 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
693 seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
694 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
695 seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
696 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
697 seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
698 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
699 seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
700 return 0;
701}
702
703static struct drm_info_list rv370_pcie_gart_info_list[] = {
704 {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
705};
706#endif
707
708int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
709{
710#if defined(CONFIG_DEBUG_FS)
711 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
712#else
713 return 0;
714#endif
715}
716
717
718/*
719 * CS functions
720 */
721struct r300_cs_track_cb {
722 struct radeon_object *robj;
723 unsigned pitch;
724 unsigned cpp;
725 unsigned offset;
726};
727
728struct r300_cs_track {
729 unsigned num_cb;
730 unsigned maxy;
731 struct r300_cs_track_cb cb[4];
732 struct r300_cs_track_cb zb;
733 bool z_enabled;
734};
735
736int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
737{
738 unsigned i;
739 unsigned long size;
740
741 for (i = 0; i < track->num_cb; i++) {
742 if (track->cb[i].robj == NULL) {
743 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
744 return -EINVAL;
745 }
746 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
747 size += track->cb[i].offset;
748 if (size > radeon_object_size(track->cb[i].robj)) {
749 DRM_ERROR("[drm] Buffer too small for color buffer %d "
750 "(need %lu have %lu) !\n", i, size,
751 radeon_object_size(track->cb[i].robj));
752 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
753 i, track->cb[i].pitch, track->cb[i].cpp,
754 track->cb[i].offset, track->maxy);
755 return -EINVAL;
756 }
757 }
758 if (track->z_enabled) {
759 if (track->zb.robj == NULL) {
760 DRM_ERROR("[drm] No buffer for z buffer !\n");
761 return -EINVAL;
762 }
763 size = track->zb.pitch * track->zb.cpp * track->maxy;
764 size += track->zb.offset;
765 if (size > radeon_object_size(track->zb.robj)) {
766 DRM_ERROR("[drm] Buffer too small for z buffer "
767 "(need %lu have %lu) !\n", size,
768 radeon_object_size(track->zb.robj));
769 return -EINVAL;
770 }
771 }
772 return 0;
773}
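r300_cs_track_check() bounds each buffer as pitch * cpp * maxy plus the programmed offset and rejects the submission if the relocated object is smaller. A small worked example with hypothetical surface parameters:

	/* Sketch: bytes required for a hypothetical 1024x768 ARGB8888 color buffer. */
	static unsigned long cb_bytes_needed(unsigned pitch, unsigned cpp,
					     unsigned maxy, unsigned offset)
	{
		/* Same formula as r300_cs_track_check(); pitch is in pixels. */
		return (unsigned long)pitch * cpp * maxy + offset;
	}
	/* cb_bytes_needed(1024, 4, 768, 0) == 3145728, so the object must be at least 3 MiB. */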
774
775static inline void r300_cs_track_clear(struct r300_cs_track *track)
776{
777 unsigned i;
778
779 track->num_cb = 4;
780 track->maxy = 4096;
781 for (i = 0; i < track->num_cb; i++) {
782 track->cb[i].robj = NULL;
783 track->cb[i].pitch = 8192;
784 track->cb[i].cpp = 16;
785 track->cb[i].offset = 0;
786 }
787 track->z_enabled = true;
788 track->zb.robj = NULL;
789 track->zb.pitch = 8192;
790 track->zb.cpp = 4;
791 track->zb.offset = 0;
792}
793
794static unsigned r300_auth_reg[] = {
795 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
796 0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
797 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
798 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
799 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
800 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
801 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
802 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
803 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
804 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
805 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
806 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
807 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
808 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
809 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
810 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
811 0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000,
812 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
813 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
814 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
815 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
816 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
817 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
818 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
819 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
820 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
821 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
822 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
823 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
824 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
825 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
826 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
827 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF,
828 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
829 0x00000000, 0x00000000, 0xFFFF0000, 0x00000000,
830 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
831 0x00000000, 0x00000000, 0x00000000, 0x00000000,
832 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
833 0x00000000, 0x00000000, 0x00000000, 0x00000000,
834 0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
835};
836
837static int r300_packet0_check(struct radeon_cs_parser *p,
838 struct radeon_cs_packet *pkt,
839 unsigned idx, unsigned reg)
840{
841 struct radeon_cs_chunk *ib_chunk;
842 struct radeon_cs_reloc *reloc;
843 struct r300_cs_track *track;
844 volatile uint32_t *ib;
845 uint32_t tmp;
846 unsigned i;
847 int r;
848
849 ib = p->ib->ptr;
850 ib_chunk = &p->chunks[p->chunk_ib_idx];
851 track = (struct r300_cs_track *)p->track;
852 switch (reg) {
853 case RADEON_DST_PITCH_OFFSET:
854 case RADEON_SRC_PITCH_OFFSET:
855 r = r100_cs_packet_next_reloc(p, &reloc);
856 if (r) {
857 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
858 idx, reg);
859 r100_cs_dump_packet(p, pkt);
860 return r;
861 }
862 tmp = ib_chunk->kdata[idx] & 0x003fffff;
863 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
864 ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
865 break;
866 case R300_RB3D_COLOROFFSET0:
867 case R300_RB3D_COLOROFFSET1:
868 case R300_RB3D_COLOROFFSET2:
869 case R300_RB3D_COLOROFFSET3:
870 i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
871 r = r100_cs_packet_next_reloc(p, &reloc);
872 if (r) {
873 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
874 idx, reg);
875 r100_cs_dump_packet(p, pkt);
876 return r;
877 }
878 track->cb[i].robj = reloc->robj;
879 track->cb[i].offset = ib_chunk->kdata[idx];
880 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
881 break;
882 case R300_ZB_DEPTHOFFSET:
883 r = r100_cs_packet_next_reloc(p, &reloc);
884 if (r) {
885 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
886 idx, reg);
887 r100_cs_dump_packet(p, pkt);
888 return r;
889 }
890 track->zb.robj = reloc->robj;
891 track->zb.offset = ib_chunk->kdata[idx];
892 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
893 break;
894 case R300_TX_OFFSET_0:
895 case R300_TX_OFFSET_0+4:
896 case R300_TX_OFFSET_0+8:
897 case R300_TX_OFFSET_0+12:
898 case R300_TX_OFFSET_0+16:
899 case R300_TX_OFFSET_0+20:
900 case R300_TX_OFFSET_0+24:
901 case R300_TX_OFFSET_0+28:
902 case R300_TX_OFFSET_0+32:
903 case R300_TX_OFFSET_0+36:
904 case R300_TX_OFFSET_0+40:
905 case R300_TX_OFFSET_0+44:
906 case R300_TX_OFFSET_0+48:
907 case R300_TX_OFFSET_0+52:
908 case R300_TX_OFFSET_0+56:
909 case R300_TX_OFFSET_0+60:
910 r = r100_cs_packet_next_reloc(p, &reloc);
911 if (r) {
912 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
913 idx, reg);
914 r100_cs_dump_packet(p, pkt);
915 return r;
916 }
917 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
918 break;
919 /* Tracked registers */
920 case 0x43E4:
921 /* SC_SCISSOR1 */
922
923 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
924 if (p->rdev->family < CHIP_RV515) {
925 track->maxy -= 1440;
926 }
927 break;
928 case 0x4E00:
929 /* RB3D_CCTL */
930 track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
931 break;
932 case 0x4E38:
933 case 0x4E3C:
934 case 0x4E40:
935 case 0x4E44:
936 /* RB3D_COLORPITCH0 */
937 /* RB3D_COLORPITCH1 */
938 /* RB3D_COLORPITCH2 */
939 /* RB3D_COLORPITCH3 */
940 i = (reg - 0x4E38) >> 2;
941 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
942 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
943 case 9:
944 case 11:
945 case 12:
946 track->cb[i].cpp = 1;
947 break;
948 case 3:
949 case 4:
950 case 13:
951 case 15:
952 track->cb[i].cpp = 2;
953 break;
954 case 6:
955 track->cb[i].cpp = 4;
956 break;
957 case 10:
958 track->cb[i].cpp = 8;
959 break;
960 case 7:
961 track->cb[i].cpp = 16;
962 break;
963 default:
964 DRM_ERROR("Invalid color buffer format (%d) !\n",
965 ((ib_chunk->kdata[idx] >> 21) & 0xF));
966 return -EINVAL;
967 }
968 break;
969 case 0x4F00:
970 /* ZB_CNTL */
971 if (ib_chunk->kdata[idx] & 2) {
972 track->z_enabled = true;
973 } else {
974 track->z_enabled = false;
975 }
976 break;
977 case 0x4F10:
978 /* ZB_FORMAT */
979 switch ((ib_chunk->kdata[idx] & 0xF)) {
980 case 0:
981 case 1:
982 track->zb.cpp = 2;
983 break;
984 case 2:
985 track->zb.cpp = 4;
986 break;
987 default:
988 DRM_ERROR("Invalid z buffer format (%d) !\n",
989 (ib_chunk->kdata[idx] & 0xF));
990 return -EINVAL;
991 }
992 break;
993 case 0x4F24:
994 /* ZB_DEPTHPITCH */
995 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
996 break;
997 default:
998 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx);
999 return -EINVAL;
1000 }
1001 return 0;
1002}
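For the blit pitch/offset registers handled above, the low 22 bits carry the surface offset in 1 KiB units (hence the gpu_offset >> 10), while the upper bits, which carry the pitch, are preserved. A sketch of that patching step, fed a hypothetical relocation address:

	/* Sketch: patching a hypothetical DST_PITCH_OFFSET dword after relocation. */
	static uint32_t patch_pitch_offset(uint32_t dword, uint64_t gpu_offset)
	{
		uint32_t off = (dword & 0x003fffff) + (uint32_t)(gpu_offset >> 10);

		return (dword & 0xffc00000) | off;	/* keep the upper (pitch) bits */
	}
	/* An object placed at 0x00400000 adds 0x1000 (4 MiB / 1 KiB) to the offset field. */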
1003
1004static int r300_packet3_check(struct radeon_cs_parser *p,
1005 struct radeon_cs_packet *pkt)
1006{
1007 struct radeon_cs_chunk *ib_chunk;
1008 struct radeon_cs_reloc *reloc;
1009 struct r300_cs_track *track;
1010 volatile uint32_t *ib;
1011 unsigned idx;
1012 unsigned i, c;
1013 int r;
1014
1015 ib = p->ib->ptr;
1016 ib_chunk = &p->chunks[p->chunk_ib_idx];
1017 idx = pkt->idx + 1;
1018 track = (struct r300_cs_track *)p->track;
1019 switch (pkt->opcode) {
1020 case PACKET3_3D_LOAD_VBPNTR:
1021 c = ib_chunk->kdata[idx++];
1022 for (i = 0; i < (c - 1); i += 2, idx += 3) {
1023 r = r100_cs_packet_next_reloc(p, &reloc);
1024 if (r) {
1025 DRM_ERROR("No reloc for packet3 %d\n",
1026 pkt->opcode);
1027 r100_cs_dump_packet(p, pkt);
1028 return r;
1029 }
1030 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1031 r = r100_cs_packet_next_reloc(p, &reloc);
1032 if (r) {
1033 DRM_ERROR("No reloc for packet3 %d\n",
1034 pkt->opcode);
1035 r100_cs_dump_packet(p, pkt);
1036 return r;
1037 }
1038 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1039 }
1040 if (c & 1) {
1041 r = r100_cs_packet_next_reloc(p, &reloc);
1042 if (r) {
1043 DRM_ERROR("No reloc for packet3 %d\n",
1044 pkt->opcode);
1045 r100_cs_dump_packet(p, pkt);
1046 return r;
1047 }
1048 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1049 }
1050 break;
1051 case PACKET3_INDX_BUFFER:
1052 r = r100_cs_packet_next_reloc(p, &reloc);
1053 if (r) {
1054 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1055 r100_cs_dump_packet(p, pkt);
1056 return r;
1057 }
1058 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1059 break;
1060 /* Draw packet */
1061 case PACKET3_3D_DRAW_VBUF:
1062 case PACKET3_3D_DRAW_IMMD:
1063 case PACKET3_3D_DRAW_INDX:
1064 case PACKET3_3D_DRAW_VBUF_2:
1065 case PACKET3_3D_DRAW_IMMD_2:
1066 case PACKET3_3D_DRAW_INDX_2:
1067 r = r300_cs_track_check(p->rdev, track);
1068 if (r) {
1069 return r;
1070 }
1071 break;
1072 case PACKET3_NOP:
1073 break;
1074 default:
1075 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1076 return -EINVAL;
1077 }
1078 return 0;
1079}
1080
1081int r300_cs_parse(struct radeon_cs_parser *p)
1082{
1083 struct radeon_cs_packet pkt;
1084 struct r300_cs_track track;
1085 int r;
1086
1087 r300_cs_track_clear(&track);
1088 p->track = &track;
1089 do {
1090 r = r100_cs_packet_parse(p, &pkt, p->idx);
1091 if (r) {
1092 return r;
1093 }
1094 p->idx += pkt.count + 2;
1095 switch (pkt.type) {
1096 case PACKET_TYPE0:
1097 r = r100_cs_parse_packet0(p, &pkt,
1098 r300_auth_reg,
1099 ARRAY_SIZE(r300_auth_reg),
1100 &r300_packet0_check);
1101 break;
1102 case PACKET_TYPE2:
1103 break;
1104 case PACKET_TYPE3:
1105 r = r300_packet3_check(p, &pkt);
1106 break;
1107 default:
1108 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1109 return -EINVAL;
1110 }
1111 if (r) {
1112 return r;
1113 }
1114 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1115 return 0;
1116}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index bdbc95fa6721..70f48609515e 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1,30 +1,34 @@
[hunk: the old free-form license text and the _R300_REG_H include guard (old lines 1-28) are replaced by the standard comment-style header shown below]
1/*
2 * Copyright 2005 Nicolai Haehnle et al.
3 * Copyright 2008 Advanced Micro Devices, Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Nicolai Haehnle
25 *          Jerome Glisse
26 */
27#ifndef _R300_REG_H_
28#define _R300_REG_H_
29
30
31
32
33#define R300_MC_INIT_MISC_LAT_TIMER		0x180
34# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT	0
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
new file mode 100644
index 000000000000..dea497a979f2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "radeon_reg.h"
31#include "radeon.h"
32
 33/* r420,r423,rv410 depend on: */
34void r100_pci_gart_disable(struct radeon_device *rdev);
35void r100_hdp_reset(struct radeon_device *rdev);
36void r100_mc_setup(struct radeon_device *rdev);
37int r100_gui_wait_for_idle(struct radeon_device *rdev);
38void r100_mc_disable_clients(struct radeon_device *rdev);
39void r300_vram_info(struct radeon_device *rdev);
40int r300_mc_wait_for_idle(struct radeon_device *rdev);
41int rv370_pcie_gart_enable(struct radeon_device *rdev);
42void rv370_pcie_gart_disable(struct radeon_device *rdev);
43
 44/* This file gathers functions specific to:
45 * r420,r423,rv410
46 *
47 * Some of these functions might be used by newer ASICs.
48 */
49void r420_gpu_init(struct radeon_device *rdev);
50int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
51
52
53/*
54 * MC
55 */
56int r420_mc_init(struct radeon_device *rdev)
57{
58 int r;
59
60 if (r100_debugfs_rbbm_init(rdev)) {
61 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
62 }
63 if (r420_debugfs_pipes_info_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for pipes !\n");
65 }
66
67 r420_gpu_init(rdev);
68 r100_pci_gart_disable(rdev);
69 if (rdev->flags & RADEON_IS_PCIE) {
70 rv370_pcie_gart_disable(rdev);
71 }
72
73 /* Setup GPU memory space */
74 rdev->mc.vram_location = 0xFFFFFFFFUL;
75 rdev->mc.gtt_location = 0xFFFFFFFFUL;
76 if (rdev->flags & RADEON_IS_AGP) {
77 r = radeon_agp_init(rdev);
78 if (r) {
79 printk(KERN_WARNING "[drm] Disabling AGP\n");
80 rdev->flags &= ~RADEON_IS_AGP;
81 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
82 } else {
83 rdev->mc.gtt_location = rdev->mc.agp_base;
84 }
85 }
86 r = radeon_mc_setup(rdev);
87 if (r) {
88 return r;
89 }
90
91 /* Program GPU memory space */
92 r100_mc_disable_clients(rdev);
93 if (r300_mc_wait_for_idle(rdev)) {
94 printk(KERN_WARNING "Failed to wait MC idle while "
95 "programming pipes. Bad things might happen.\n");
96 }
97 r100_mc_setup(rdev);
98 return 0;
99}
100
101void r420_mc_fini(struct radeon_device *rdev)
102{
103 rv370_pcie_gart_disable(rdev);
104 radeon_gart_table_vram_free(rdev);
105 radeon_gart_fini(rdev);
106}
107
108
109/*
110 * Global GPU functions
111 */
112void r420_errata(struct radeon_device *rdev)
113{
114 rdev->pll_errata = 0;
115}
116
117void r420_pipes_init(struct radeon_device *rdev)
118{
119 unsigned tmp;
120 unsigned gb_pipe_select;
121 unsigned num_pipes;
122
123 /* GA_ENHANCE workaround TCL deadlock issue */
124 WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
125 /* get max number of pipes */
126 gb_pipe_select = RREG32(0x402C);
127 num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
128 rdev->num_gb_pipes = num_pipes;
129 tmp = 0;
130 switch (num_pipes) {
131 default:
132 /* force to 1 pipe */
133 num_pipes = 1;
134 case 1:
135 tmp = (0 << 1);
136 break;
137 case 2:
138 tmp = (3 << 1);
139 break;
140 case 3:
141 tmp = (6 << 1);
142 break;
143 case 4:
144 tmp = (7 << 1);
145 break;
146 }
147 WREG32(0x42C8, (1 << num_pipes) - 1);
148 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
149 tmp |= (1 << 4) | (1 << 0);
150 WREG32(0x4018, tmp);
151 if (r100_gui_wait_for_idle(rdev)) {
152 printk(KERN_WARNING "Failed to wait GUI idle while "
153 "programming pipes. Bad things might happen.\n");
154 }
155
156 tmp = RREG32(0x170C);
157 WREG32(0x170C, tmp | (1 << 31));
158
159 WREG32(R300_RB2D_DSTCACHE_MODE,
160 RREG32(R300_RB2D_DSTCACHE_MODE) |
161 R300_DC_AUTOFLUSH_ENABLE |
162 R300_DC_DC_DISABLE_IGNORE_PE);
163
164 if (r100_gui_wait_for_idle(rdev)) {
165 printk(KERN_WARNING "Failed to wait GUI idle while "
166 "programming pipes. Bad things might happen.\n");
167 }
168 DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
169}
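r420_pipes_init() reads the pipe count from bits 13:12 of GB_PIPE_SELECT (0x402C), writes one bit per pipe into 0x42C8 (R500_SU_REG_DEST in r500_reg.h) and the matching pipe-count encoding into GB_TILE_CONFIG (0x4018). A decode sketch with a hypothetical register value:

	/* Sketch: decoding a hypothetical GB_PIPE_SELECT value. */
	static unsigned decode_num_pipes(uint32_t gb_pipe_select)
	{
		return ((gb_pipe_select >> 12) & 3) + 1;
	}
	/* decode_num_pipes(0x00003000) == 4, so SU_REG_DEST gets (1 << 4) - 1 = 0xF. */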
170
171void r420_gpu_init(struct radeon_device *rdev)
172{
173 r100_hdp_reset(rdev);
174 r420_pipes_init(rdev);
175 if (r300_mc_wait_for_idle(rdev)) {
176 printk(KERN_WARNING "Failed to wait MC idle while "
177 "programming pipes. Bad things might happen.\n");
178 }
179}
180
181
182/*
183 * r420,r423,rv410 VRAM info
184 */
185void r420_vram_info(struct radeon_device *rdev)
186{
187 r300_vram_info(rdev);
188}
189
190
191/*
192 * Debugfs info
193 */
194#if defined(CONFIG_DEBUG_FS)
195static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
196{
197 struct drm_info_node *node = (struct drm_info_node *) m->private;
198 struct drm_device *dev = node->minor->dev;
199 struct radeon_device *rdev = dev->dev_private;
200 uint32_t tmp;
201
202 tmp = RREG32(R400_GB_PIPE_SELECT);
203 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
204 tmp = RREG32(R300_GB_TILE_CONFIG);
205 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
206 tmp = RREG32(R300_DST_PIPE_CONFIG);
207 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
208 return 0;
209}
210
211static struct drm_info_list r420_pipes_info_list[] = {
212 {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
213};
214#endif
215
216int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
217{
218#if defined(CONFIG_DEBUG_FS)
219 return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
220#else
221 return 0;
222#endif
223}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644
index 000000000000..9070a1c2ce23
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -0,0 +1,749 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R500_REG_H__
29#define __R500_REG_H__
30
31/* pipe config regs */
32#define R300_GA_POLY_MODE 0x4288
33# define R300_FRONT_PTYPE_POINT (0 << 4)
34# define R300_FRONT_PTYPE_LINE (1 << 4)
35# define R300_FRONT_PTYPE_TRIANGE (2 << 4)
36# define R300_BACK_PTYPE_POINT (0 << 7)
37# define R300_BACK_PTYPE_LINE (1 << 7)
38# define R300_BACK_PTYPE_TRIANGE (2 << 7)
39#define R300_GA_ROUND_MODE 0x428c
40# define R300_GEOMETRY_ROUND_TRUNC (0 << 0)
41# define R300_GEOMETRY_ROUND_NEAREST (1 << 0)
42# define R300_COLOR_ROUND_TRUNC (0 << 2)
43# define R300_COLOR_ROUND_NEAREST (1 << 2)
44#define R300_GB_MSPOS0 0x4010
45# define R300_MS_X0_SHIFT 0
46# define R300_MS_Y0_SHIFT 4
47# define R300_MS_X1_SHIFT 8
48# define R300_MS_Y1_SHIFT 12
49# define R300_MS_X2_SHIFT 16
50# define R300_MS_Y2_SHIFT 20
51# define R300_MSBD0_Y_SHIFT 24
52# define R300_MSBD0_X_SHIFT 28
53#define R300_GB_MSPOS1 0x4014
54# define R300_MS_X3_SHIFT 0
55# define R300_MS_Y3_SHIFT 4
56# define R300_MS_X4_SHIFT 8
57# define R300_MS_Y4_SHIFT 12
58# define R300_MS_X5_SHIFT 16
59# define R300_MS_Y5_SHIFT 20
60# define R300_MSBD1_SHIFT 24
61
62#define R300_GA_ENHANCE 0x4274
63# define R300_GA_DEADLOCK_CNTL (1 << 0)
64# define R300_GA_FASTSYNC_CNTL (1 << 1)
65#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
66# define R300_RB3D_DC_FLUSH (2 << 0)
67# define R300_RB3D_DC_FREE (2 << 2)
68# define R300_RB3D_DC_FINISH (1 << 4)
69#define R300_RB3D_ZCACHE_CTLSTAT 0x4f18
70# define R300_ZC_FLUSH (1 << 0)
71# define R300_ZC_FREE (1 << 1)
72# define R300_ZC_FLUSH_ALL 0x3
73#define R400_GB_PIPE_SELECT 0x402c
74#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
75#define R500_SU_REG_DEST 0x42c8
76#define R300_GB_TILE_CONFIG 0x4018
77# define R300_ENABLE_TILING (1 << 0)
78# define R300_PIPE_COUNT_RV350 (0 << 1)
79# define R300_PIPE_COUNT_R300 (3 << 1)
80# define R300_PIPE_COUNT_R420_3P (6 << 1)
81# define R300_PIPE_COUNT_R420 (7 << 1)
82# define R300_TILE_SIZE_8 (0 << 4)
83# define R300_TILE_SIZE_16 (1 << 4)
84# define R300_TILE_SIZE_32 (2 << 4)
85# define R300_SUBPIXEL_1_12 (0 << 16)
86# define R300_SUBPIXEL_1_16 (1 << 16)
87#define R300_DST_PIPE_CONFIG 0x170c
88# define R300_PIPE_AUTO_CONFIG (1 << 31)
89#define R300_RB2D_DSTCACHE_MODE 0x3428
90# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
91# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
92
93#define RADEON_CP_STAT 0x7C0
94#define RADEON_RBBM_CMDFIFO_ADDR 0xE70
95#define RADEON_RBBM_CMDFIFO_DATA 0xE74
96#define RADEON_ISYNC_CNTL 0x1724
97# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
98# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
99# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
100# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
101# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
102# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
103
104#define RS480_NB_MC_INDEX 0x168
105# define RS480_NB_MC_IND_WR_EN (1 << 8)
106#define RS480_NB_MC_DATA 0x16c
107
108/*
109 * RS690
110 */
111#define RS690_MCCFG_FB_LOCATION 0x100
112#define RS690_MC_FB_START_MASK 0x0000FFFF
113#define RS690_MC_FB_START_SHIFT 0
114#define RS690_MC_FB_TOP_MASK 0xFFFF0000
115#define RS690_MC_FB_TOP_SHIFT 16
116#define RS690_MCCFG_AGP_LOCATION 0x101
117#define RS690_MC_AGP_START_MASK 0x0000FFFF
118#define RS690_MC_AGP_START_SHIFT 0
119#define RS690_MC_AGP_TOP_MASK 0xFFFF0000
120#define RS690_MC_AGP_TOP_SHIFT 16
121#define RS690_MCCFG_AGP_BASE 0x102
122#define RS690_MCCFG_AGP_BASE_2 0x103
123#define RS690_MC_INIT_MISC_LAT_TIMER 0x104
124#define RS690_HDP_FB_LOCATION 0x0134
125#define RS690_MC_INDEX 0x78
126# define RS690_MC_INDEX_MASK 0x1ff
127# define RS690_MC_INDEX_WR_EN (1 << 9)
128# define RS690_MC_INDEX_WR_ACK 0x7f
129#define RS690_MC_DATA 0x7c
130#define RS690_MC_STATUS 0x90
131#define RS690_MC_STATUS_IDLE (1 << 0)
132#define RS480_AGP_BASE_2 0x0164
133#define RS480_MC_MISC_CNTL 0x18
134# define RS480_DISABLE_GTW (1 << 1)
135# define RS480_GART_INDEX_REG_EN (1 << 12)
136# define RS690_BLOCK_GFX_D3_EN (1 << 14)
137#define RS480_GART_FEATURE_ID 0x2b
138# define RS480_HANG_EN (1 << 11)
139# define RS480_TLB_ENABLE (1 << 18)
140# define RS480_P2P_ENABLE (1 << 19)
141# define RS480_GTW_LAC_EN (1 << 25)
142# define RS480_2LEVEL_GART (0 << 30)
143# define RS480_1LEVEL_GART (1 << 30)
144# define RS480_PDC_EN (1 << 31)
145#define RS480_GART_BASE 0x2c
146#define RS480_GART_CACHE_CNTRL 0x2e
147# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
148#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
149# define RS480_GART_EN (1 << 0)
150# define RS480_VA_SIZE_32MB (0 << 1)
151# define RS480_VA_SIZE_64MB (1 << 1)
152# define RS480_VA_SIZE_128MB (2 << 1)
153# define RS480_VA_SIZE_256MB (3 << 1)
154# define RS480_VA_SIZE_512MB (4 << 1)
155# define RS480_VA_SIZE_1GB (5 << 1)
156# define RS480_VA_SIZE_2GB (6 << 1)
157#define RS480_AGP_MODE_CNTL 0x39
158# define RS480_POST_GART_Q_SIZE (1 << 18)
159# define RS480_NONGART_SNOOP (1 << 19)
160# define RS480_AGP_RD_BUF_SIZE (1 << 20)
161# define RS480_REQ_TYPE_SNOOP_SHIFT 22
162# define RS480_REQ_TYPE_SNOOP_MASK 0x3
163# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
164
165#define RS690_AIC_CTRL_SCRATCH 0x3A
166# define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
167
168/*
169 * RS600
170 */
171#define RS600_MC_STATUS 0x0
172#define RS600_MC_STATUS_IDLE (1 << 0)
173#define RS600_MC_INDEX 0x70
174# define RS600_MC_ADDR_MASK 0xffff
175# define RS600_MC_IND_SEQ_RBS_0 (1 << 16)
176# define RS600_MC_IND_SEQ_RBS_1 (1 << 17)
177# define RS600_MC_IND_SEQ_RBS_2 (1 << 18)
178# define RS600_MC_IND_SEQ_RBS_3 (1 << 19)
179# define RS600_MC_IND_AIC_RBS (1 << 20)
180# define RS600_MC_IND_CITF_ARB0 (1 << 21)
181# define RS600_MC_IND_CITF_ARB1 (1 << 22)
182# define RS600_MC_IND_WR_EN (1 << 23)
183#define RS600_MC_DATA 0x74
184#define RS600_MC_STATUS 0x0
185# define RS600_MC_IDLE (1 << 1)
186#define RS600_MC_FB_LOCATION 0x4
187#define RS600_MC_FB_START_MASK 0x0000FFFF
188#define RS600_MC_FB_START_SHIFT 0
189#define RS600_MC_FB_TOP_MASK 0xFFFF0000
190#define RS600_MC_FB_TOP_SHIFT 16
191#define RS600_MC_AGP_LOCATION 0x5
192#define RS600_MC_AGP_START_MASK 0x0000FFFF
193#define RS600_MC_AGP_START_SHIFT 0
194#define RS600_MC_AGP_TOP_MASK 0xFFFF0000
195#define RS600_MC_AGP_TOP_SHIFT 16
196#define RS600_MC_AGP_BASE 0x6
197#define RS600_MC_AGP_BASE_2 0x7
198#define RS600_MC_CNTL1 0x9
199# define RS600_ENABLE_PAGE_TABLES (1 << 26)
200#define RS600_MC_PT0_CNTL 0x100
201# define RS600_ENABLE_PT (1 << 0)
202# define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
203# define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
204# define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
205# define RS600_INVALIDATE_L2_CACHE (1 << 29)
206#define RS600_MC_PT0_CONTEXT0_CNTL 0x102
207# define RS600_ENABLE_PAGE_TABLE (1 << 0)
208# define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1)
209#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112
210#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114
211#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
212#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c
213#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c
214#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c
215#define RS600_MC_PT0_CLIENT0_CNTL 0x16c
216# define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0)
217# define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1)
218# define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8)
219# define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8)
220# define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8)
221# define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8)
222# define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8)
223# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10)
224# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
225# define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
226# define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
227# define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
228# define RS600_INVALIDATE_L1_TLB (1 << 20)
229/* rs600/rs690/rs740 */
230# define RS600_BUS_MASTER_DIS (1 << 14)
231# define RS600_MSI_REARM (1 << 20)
232/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
233
234
235
236#define RV515_MC_FB_LOCATION 0x01
237#define RV515_MC_FB_START_MASK 0x0000FFFF
238#define RV515_MC_FB_START_SHIFT 0
239#define RV515_MC_FB_TOP_MASK 0xFFFF0000
240#define RV515_MC_FB_TOP_SHIFT 16
241#define RV515_MC_AGP_LOCATION 0x02
242#define RV515_MC_AGP_START_MASK 0x0000FFFF
243#define RV515_MC_AGP_START_SHIFT 0
244#define RV515_MC_AGP_TOP_MASK 0xFFFF0000
245#define RV515_MC_AGP_TOP_SHIFT 16
246#define RV515_MC_AGP_BASE 0x03
247#define RV515_MC_AGP_BASE_2 0x04
248
249#define R520_MC_FB_LOCATION 0x04
250#define R520_MC_FB_START_MASK 0x0000FFFF
251#define R520_MC_FB_START_SHIFT 0
252#define R520_MC_FB_TOP_MASK 0xFFFF0000
253#define R520_MC_FB_TOP_SHIFT 16
254#define R520_MC_AGP_LOCATION 0x05
255#define R520_MC_AGP_START_MASK 0x0000FFFF
256#define R520_MC_AGP_START_SHIFT 0
257#define R520_MC_AGP_TOP_MASK 0xFFFF0000
258#define R520_MC_AGP_TOP_SHIFT 16
259#define R520_MC_AGP_BASE 0x06
260#define R520_MC_AGP_BASE_2 0x07
261
262
263#define AVIVO_MC_INDEX 0x0070
264#define R520_MC_STATUS 0x00
265#define R520_MC_STATUS_IDLE (1<<1)
266#define RV515_MC_STATUS 0x08
267#define RV515_MC_STATUS_IDLE (1<<4)
268#define RV515_MC_INIT_MISC_LAT_TIMER 0x09
269#define AVIVO_MC_DATA 0x0074
270
271#define R520_MC_IND_INDEX 0x70
272#define R520_MC_IND_WR_EN (1 << 24)
273#define R520_MC_IND_DATA 0x74
274
275#define RV515_MC_CNTL 0x5
276# define RV515_MEM_NUM_CHANNELS_MASK 0x3
277#define R520_MC_CNTL0 0x8
278# define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24)
279# define R520_MEM_NUM_CHANNELS_SHIFT 24
280# define R520_MC_CHANNEL_SIZE (1 << 23)
281
282#define AVIVO_CP_DYN_CNTL 0x000f /* PLL */
283# define AVIVO_CP_FORCEON (1 << 0)
284#define AVIVO_E2_DYN_CNTL 0x0011 /* PLL */
285# define AVIVO_E2_FORCEON (1 << 0)
286#define AVIVO_IDCT_DYN_CNTL 0x0013 /* PLL */
287# define AVIVO_IDCT_FORCEON (1 << 0)
288
289#define AVIVO_HDP_FB_LOCATION 0x134
290
291#define AVIVO_VGA_RENDER_CONTROL 0x0300
292# define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16)
293#define AVIVO_D1VGA_CONTROL 0x0330
294# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
295# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
296# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
297# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
298# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
299# define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
300#define AVIVO_D2VGA_CONTROL 0x0338
301
302#define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400
303#define AVIVO_EXT1_PPLL_REF_DIV 0x404
304#define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408
305#define AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c
306
307#define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410
308#define AVIVO_EXT2_PPLL_REF_DIV 0x414
309#define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418
310#define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c
311
312#define AVIVO_EXT1_PPLL_FB_DIV 0x430
313#define AVIVO_EXT2_PPLL_FB_DIV 0x434
314
315#define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438
316#define AVIVO_EXT1_PPLL_POST_DIV 0x43c
317
318#define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440
319#define AVIVO_EXT2_PPLL_POST_DIV 0x444
320
321#define AVIVO_EXT1_PPLL_CNTL 0x448
322#define AVIVO_EXT2_PPLL_CNTL 0x44c
323
324#define AVIVO_P1PLL_CNTL 0x450
325#define AVIVO_P2PLL_CNTL 0x454
326#define AVIVO_P1PLL_INT_SS_CNTL 0x458
327#define AVIVO_P2PLL_INT_SS_CNTL 0x45c
328#define AVIVO_P1PLL_TMDSA_CNTL 0x460
329#define AVIVO_P2PLL_LVTMA_CNTL 0x464
330
331#define AVIVO_PCLK_CRTC1_CNTL 0x480
332#define AVIVO_PCLK_CRTC2_CNTL 0x484
333
334#define AVIVO_D1CRTC_H_TOTAL 0x6000
335#define AVIVO_D1CRTC_H_BLANK_START_END 0x6004
336#define AVIVO_D1CRTC_H_SYNC_A 0x6008
337#define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c
338#define AVIVO_D1CRTC_H_SYNC_B 0x6010
339#define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014
340
341#define AVIVO_D1CRTC_V_TOTAL 0x6020
342#define AVIVO_D1CRTC_V_BLANK_START_END 0x6024
343#define AVIVO_D1CRTC_V_SYNC_A 0x6028
344#define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c
345#define AVIVO_D1CRTC_V_SYNC_B 0x6030
346#define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034
347
348#define AVIVO_D1CRTC_CONTROL 0x6080
349# define AVIVO_CRTC_EN (1 << 0)
350#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
351#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
352#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
353#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
354
355/* master controls */
356#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
357#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
358
359#define AVIVO_D1GRPH_ENABLE 0x6100
360#define AVIVO_D1GRPH_CONTROL 0x6104
361# define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0)
362# define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0)
363# define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0)
364# define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0)
365
366# define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8)
367
368# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8)
369# define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8)
370# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8)
371# define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8)
372# define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8)
373
374# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8)
375# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8)
376# define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8)
377# define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8)
378
379
380# define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8)
381
382# define AVIVO_D1GRPH_SWAP_RB (1 << 16)
383# define AVIVO_D1GRPH_TILED (1 << 20)
384# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
385
386#define AVIVO_D1GRPH_LUT_SEL 0x6108
387#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
388#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
389#define AVIVO_D1GRPH_PITCH 0x6120
390#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
391#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
392#define AVIVO_D1GRPH_X_START 0x612c
393#define AVIVO_D1GRPH_Y_START 0x6130
394#define AVIVO_D1GRPH_X_END 0x6134
395#define AVIVO_D1GRPH_Y_END 0x6138
396#define AVIVO_D1GRPH_UPDATE 0x6144
397# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
398#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
399
400#define AVIVO_D1CUR_CONTROL 0x6400
401# define AVIVO_D1CURSOR_EN (1 << 0)
402# define AVIVO_D1CURSOR_MODE_SHIFT 8
403# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
404# define AVIVO_D1CURSOR_MODE_24BPP 2
405#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
406#define AVIVO_D1CUR_SIZE 0x6410
407#define AVIVO_D1CUR_POSITION 0x6414
408#define AVIVO_D1CUR_HOT_SPOT 0x6418
409#define AVIVO_D1CUR_UPDATE 0x6424
410# define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16)
411
412#define AVIVO_DC_LUT_RW_SELECT 0x6480
413#define AVIVO_DC_LUT_RW_MODE 0x6484
414#define AVIVO_DC_LUT_RW_INDEX 0x6488
415#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
416#define AVIVO_DC_LUT_PWL_DATA 0x6490
417#define AVIVO_DC_LUT_30_COLOR 0x6494
418#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
419#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
420#define AVIVO_DC_LUT_AUTOFILL 0x64a0
421
422#define AVIVO_DC_LUTA_CONTROL 0x64c0
423#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
424#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
425#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
426#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
427#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
428#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
429
430#define AVIVO_DC_LB_MEMORY_SPLIT 0x6520
431# define AVIVO_DC_LB_MEMORY_SPLIT_MASK 0x3
432# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT 0
433# define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
434# define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
435# define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY 2
436# define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
437# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
438# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
439# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff
440
441#define R500_DxMODE_INT_MASK 0x6540
442#define R500_D1MODE_INT_MASK (1<<0)
443#define R500_D2MODE_INT_MASK (1<<8)
444
445#define AVIVO_D1MODE_DATA_FORMAT 0x6528
446# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
447#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
448#define AVIVO_D1MODE_VIEWPORT_START 0x6580
449#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
450#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
451#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c
452
453#define AVIVO_D1SCL_SCALER_ENABLE 0x6590
454#define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594
455#define AVIVO_D1SCL_UPDATE 0x65cc
456# define AVIVO_D1SCL_UPDATE_LOCK (1 << 16)
457
458/* second crtc */
459#define AVIVO_D2CRTC_H_TOTAL 0x6800
460#define AVIVO_D2CRTC_H_BLANK_START_END 0x6804
461#define AVIVO_D2CRTC_H_SYNC_A 0x6808
462#define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c
463#define AVIVO_D2CRTC_H_SYNC_B 0x6810
464#define AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814
465
466#define AVIVO_D2CRTC_V_TOTAL 0x6820
467#define AVIVO_D2CRTC_V_BLANK_START_END 0x6824
468#define AVIVO_D2CRTC_V_SYNC_A 0x6828
469#define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c
470#define AVIVO_D2CRTC_V_SYNC_B 0x6830
471#define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834
472
473#define AVIVO_D2CRTC_CONTROL 0x6880
474#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
475#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
476#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
477#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
478
479#define AVIVO_D2GRPH_ENABLE 0x6900
480#define AVIVO_D2GRPH_CONTROL 0x6904
481#define AVIVO_D2GRPH_LUT_SEL 0x6908
482#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
483#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
484#define AVIVO_D2GRPH_PITCH 0x6920
485#define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924
486#define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928
487#define AVIVO_D2GRPH_X_START 0x692c
488#define AVIVO_D2GRPH_Y_START 0x6930
489#define AVIVO_D2GRPH_X_END 0x6934
490#define AVIVO_D2GRPH_Y_END 0x6938
491#define AVIVO_D2GRPH_UPDATE 0x6944
492#define AVIVO_D2GRPH_FLIP_CONTROL 0x6948
493
494#define AVIVO_D2CUR_CONTROL 0x6c00
495#define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08
496#define AVIVO_D2CUR_SIZE 0x6c10
497#define AVIVO_D2CUR_POSITION 0x6c14
498
499#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
500#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
501#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
502#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c
503
504#define AVIVO_D2SCL_SCALER_ENABLE 0x6d90
505#define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94
506
507#define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214
508
509#define AVIVO_DACA_ENABLE 0x7800
510# define AVIVO_DAC_ENABLE (1 << 0)
511#define AVIVO_DACA_SOURCE_SELECT 0x7804
512# define AVIVO_DAC_SOURCE_CRTC1 (0 << 0)
513# define AVIVO_DAC_SOURCE_CRTC2 (1 << 0)
514# define AVIVO_DAC_SOURCE_TV (2 << 0)
515
516#define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c
517# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
518# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
519# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
520# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
521# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
522# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
523#define AVIVO_DACA_POWERDOWN 0x7850
524# define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0)
525# define AVIVO_DACA_POWERDOWN_BLUE (1 << 8)
526# define AVIVO_DACA_POWERDOWN_GREEN (1 << 16)
527# define AVIVO_DACA_POWERDOWN_RED (1 << 24)
528
529#define AVIVO_DACB_ENABLE 0x7a00
530#define AVIVO_DACB_SOURCE_SELECT 0x7a04
531#define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c
532# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
533# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
534# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
535# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
536# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
537# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
538#define AVIVO_DACB_POWERDOWN 0x7a50
539# define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0)
540# define AVIVO_DACB_POWERDOWN_BLUE (1 << 8)
541# define AVIVO_DACB_POWERDOWN_GREEN (1 << 16)
542# define AVIVO_DACB_POWERDOWN_RED
543
544#define AVIVO_TMDSA_CNTL 0x7880
545# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
546# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
547# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
548# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
549# define AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16)
550# define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24)
551# define AVIVO_TMDSA_CNTL_SWAP (1 << 28)
552#define AVIVO_TMDSA_SOURCE_SELECT 0x7884
553/* 78a8 appears to be some kind of (reasonably tolerant) clock?
554 * 78d0 definitely hits the transmitter, definitely clock. */
555/* MYSTERY1 This appears to control dithering? */
556#define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894
557# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
558# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
559# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
560# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
561# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
562# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
563# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
564# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
565#define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0
566# define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0)
567# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
568# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
569# define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24)
570#define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8
571# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
572# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
573#define AVIVO_TMDSA_CLOCK_ENABLE 0x7900
574#define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904
575# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0)
576# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
577# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
578# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
579# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
580# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8)
581# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
582# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
583# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
584# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16)
585# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
586# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
587
588#define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910
589# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
590# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
591# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
592# define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
593# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
594# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
595# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
596# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
597# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
598# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
599# define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
600# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
601# define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
602# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
603
604#define AVIVO_LVTMA_CNTL 0x7a80
605# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
606# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
607# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
608# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
609# define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16)
610# define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24)
611# define AVIVO_LVTMA_CNTL_SWAP (1 << 28)
612#define AVIVO_LVTMA_SOURCE_SELECT 0x7a84
613#define AVIVO_LVTMA_COLOR_FORMAT 0x7a88
614#define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94
615# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
616# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
617# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
618# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
619# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
620# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
621# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
622# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
623
624
625
626#define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0
627# define AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0)
628# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
629# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
630# define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24)
631
632#define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8
633# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
634# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
635#define R500_LVTMA_CLOCK_ENABLE 0x7b00
636#define R600_LVTMA_CLOCK_ENABLE 0x7b04
637
638#define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04
639#define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08
640# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
641# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
642# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
643# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
644# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5)
645# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9)
646# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
647# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
648# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
649# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
650# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
651
652#define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10
653#define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14
654# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
655# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
656# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
657# define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
658# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
659# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
660# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
661# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
662# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
663# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
664# define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
665# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
666# define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
667# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
668
669#define R500_LVTMA_PWRSEQ_CNTL 0x7af0
670#define R600_LVTMA_PWRSEQ_CNTL 0x7af4
671# define AVIVO_LVTMA_PWRSEQ_EN (1 << 0)
672# define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2)
673# define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3)
674# define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4)
675# define AVIVO_LVTMA_SYNCEN (1 << 8)
676# define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9)
677# define AVIVO_LVTMA_SYNCEN_POL (1 << 10)
678# define AVIVO_LVTMA_DIGON (1 << 16)
679# define AVIVO_LVTMA_DIGON_OVRD (1 << 17)
680# define AVIVO_LVTMA_DIGON_POL (1 << 18)
681# define AVIVO_LVTMA_BLON (1 << 24)
682# define AVIVO_LVTMA_BLON_OVRD (1 << 25)
683# define AVIVO_LVTMA_BLON_POL (1 << 26)
684
685#define R500_LVTMA_PWRSEQ_STATE 0x7af4
686#define R600_LVTMA_PWRSEQ_STATE 0x7af8
687# define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0)
688# define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1)
689# define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2)
690# define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3)
691# define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4)
692# define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8)
693
694#define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8
695# define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0)
696# define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00
697# define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8
698
699#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
700
701#define AVIVO_GPIO_0 0x7e30
702#define AVIVO_GPIO_1 0x7e40
703#define AVIVO_GPIO_2 0x7e50
704#define AVIVO_GPIO_3 0x7e60
705
706#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
707
708#define AVIVO_I2C_STATUS 0x7d30
709# define AVIVO_I2C_STATUS_DONE (1 << 0)
710# define AVIVO_I2C_STATUS_NACK (1 << 1)
711# define AVIVO_I2C_STATUS_HALT (1 << 2)
712# define AVIVO_I2C_STATUS_GO (1 << 3)
713# define AVIVO_I2C_STATUS_MASK 0x7
714/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
715 * DONE? */
716# define AVIVO_I2C_STATUS_CMD_RESET 0x7
717# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
718#define AVIVO_I2C_STOP 0x7d34
719#define AVIVO_I2C_START_CNTL 0x7d38
720# define AVIVO_I2C_START (1 << 8)
721# define AVIVO_I2C_CONNECTOR0 (0 << 16)
722# define AVIVO_I2C_CONNECTOR1 (1 << 16)
723#define R520_I2C_START (1<<0)
724#define R520_I2C_STOP (1<<1)
725#define R520_I2C_RX (1<<2)
726#define R520_I2C_EN (1<<8)
727#define R520_I2C_DDC1 (0<<16)
728#define R520_I2C_DDC2 (1<<16)
729#define R520_I2C_DDC3 (2<<16)
730#define R520_I2C_DDC_MASK (3<<16)
731#define AVIVO_I2C_CONTROL2 0x7d3c
732# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
733# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
734#define AVIVO_I2C_CONTROL3 0x7d40
735/* Reading is done 4 bytes at a time: read the bottom 8 bits from
736 * 7d44, four times in a row.
737 * Writing is a little more complex. First write DATA with
738 * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
739 * magic number, zz is, I think, the slave address, and yy is the byte
740 * you want to write. */
741#define AVIVO_I2C_DATA 0x7d44
742#define R520_I2C_ADDR_COUNT_MASK (0x7)
743#define R520_I2C_DATA_COUNT_SHIFT (8)
744#define R520_I2C_DATA_COUNT_MASK (0xF00)
745#define AVIVO_I2C_CNTL 0x7d50
746# define AVIVO_I2C_EN (1 << 0)
747# define AVIVO_I2C_RESET (1 << 8)
748
749#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
new file mode 100644
index 000000000000..570a244bd88b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -0,0 +1,234 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
 32/* r520,rv530,rv560,rv570,r580 depend on: */
33void r100_hdp_reset(struct radeon_device *rdev);
34int rv370_pcie_gart_enable(struct radeon_device *rdev);
35void rv370_pcie_gart_disable(struct radeon_device *rdev);
36void r420_pipes_init(struct radeon_device *rdev);
37void rs600_mc_disable_clients(struct radeon_device *rdev);
38void rs600_disable_vga(struct radeon_device *rdev);
39int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
40int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
41
 42/* This file gathers functions specific to:
43 * r520,rv530,rv560,rv570,r580
44 *
45 * Some of these functions might be used by newer ASICs.
46 */
47void r520_gpu_init(struct radeon_device *rdev);
48int r520_mc_wait_for_idle(struct radeon_device *rdev);
49
50
51/*
52 * MC
53 */
54int r520_mc_init(struct radeon_device *rdev)
55{
56 uint32_t tmp;
57 int r;
58
59 if (r100_debugfs_rbbm_init(rdev)) {
60 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
61 }
62 if (rv515_debugfs_pipes_info_init(rdev)) {
63 DRM_ERROR("Failed to register debugfs file for pipes !\n");
64 }
65 if (rv515_debugfs_ga_info_init(rdev)) {
66 DRM_ERROR("Failed to register debugfs file for pipes !\n");
67 }
68
69 r520_gpu_init(rdev);
70 rv370_pcie_gart_disable(rdev);
71
72 /* Setup GPU memory space */
73 rdev->mc.vram_location = 0xFFFFFFFFUL;
74 rdev->mc.gtt_location = 0xFFFFFFFFUL;
75 if (rdev->flags & RADEON_IS_AGP) {
76 r = radeon_agp_init(rdev);
77 if (r) {
78 printk(KERN_WARNING "[drm] Disabling AGP\n");
79 rdev->flags &= ~RADEON_IS_AGP;
80 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
81 } else {
82 rdev->mc.gtt_location = rdev->mc.agp_base;
83 }
84 }
85 r = radeon_mc_setup(rdev);
86 if (r) {
87 return r;
88 }
89
90 /* Program GPU memory space */
91 rs600_mc_disable_clients(rdev);
92 if (r520_mc_wait_for_idle(rdev)) {
93 printk(KERN_WARNING "Failed to wait MC idle while "
94 "programming pipes. Bad things might happen.\n");
95 }
96 /* Write VRAM size in case we are limiting it */
97 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
98 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
99 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
100 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
101 WREG32_MC(R520_MC_FB_LOCATION, tmp);
102 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
103 WREG32(0x310, rdev->mc.vram_location);
104 if (rdev->flags & RADEON_IS_AGP) {
105 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
106 tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
107 tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
108 WREG32_MC(R520_MC_AGP_LOCATION, tmp);
109 WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
110 WREG32_MC(R520_MC_AGP_BASE_2, 0);
111 } else {
112 WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
113 WREG32_MC(R520_MC_AGP_BASE, 0);
114 WREG32_MC(R520_MC_AGP_BASE_2, 0);
115 }
116 return 0;
117}
118
119void r520_mc_fini(struct radeon_device *rdev)
120{
121 rv370_pcie_gart_disable(rdev);
122 radeon_gart_table_vram_free(rdev);
123 radeon_gart_fini(rdev);
124}
125
126
127/*
128 * Global GPU functions
129 */
130void r520_errata(struct radeon_device *rdev)
131{
132 rdev->pll_errata = 0;
133}
134
135int r520_mc_wait_for_idle(struct radeon_device *rdev)
136{
137 unsigned i;
138 uint32_t tmp;
139
140 for (i = 0; i < rdev->usec_timeout; i++) {
141 /* read MC_STATUS */
142 tmp = RREG32_MC(R520_MC_STATUS);
143 if (tmp & R520_MC_STATUS_IDLE) {
144 return 0;
145 }
146 DRM_UDELAY(1);
147 }
148 return -1;
149}
150
151void r520_gpu_init(struct radeon_device *rdev)
152{
153 unsigned pipe_select_current, gb_pipe_select, tmp;
154
155 r100_hdp_reset(rdev);
156 rs600_disable_vga(rdev);
157 /*
158 * DST_PIPE_CONFIG 0x170C
159 * GB_TILE_CONFIG 0x4018
160 * GB_FIFO_SIZE 0x4024
161 * GB_PIPE_SELECT 0x402C
162 * GB_PIPE_SELECT2 0x4124
163 * Z_PIPE_SHIFT 0
164 * Z_PIPE_MASK 0x000000003
165 * GB_FIFO_SIZE2 0x4128
166 * SC_SFIFO_SIZE_SHIFT 0
167 * SC_SFIFO_SIZE_MASK 0x000000003
168 * SC_MFIFO_SIZE_SHIFT 2
169 * SC_MFIFO_SIZE_MASK 0x00000000C
170 * FG_SFIFO_SIZE_SHIFT 4
171 * FG_SFIFO_SIZE_MASK 0x000000030
172 * ZB_MFIFO_SIZE_SHIFT 6
173 * ZB_MFIFO_SIZE_MASK 0x0000000C0
174 * GA_ENHANCE 0x4274
175 * SU_REG_DEST 0x42C8
176 */
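 /* The raw offsets used below map to the registers listed above:
 * 0x4124 is GB_PIPE_SELECT2, 0x4128 is GB_FIFO_SIZE2, 0x402C is
 * GB_PIPE_SELECT and 0x170C is DST_PIPE_CONFIG.  The PLL index
 * 0x000D written with WREG32_PLL() is not covered by that list.
 */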
177 /* workaround for RV530 */
178 if (rdev->family == CHIP_RV530) {
179 WREG32(0x4124, 1);
180 WREG32(0x4128, 0xFF);
181 }
182 r420_pipes_init(rdev);
183 gb_pipe_select = RREG32(0x402C);
184 tmp = RREG32(0x170C);
185 pipe_select_current = (tmp >> 2) & 3;
186 tmp = (1 << pipe_select_current) |
187 (((gb_pipe_select >> 8) & 0xF) << 4);
188 WREG32_PLL(0x000D, tmp);
189 if (r520_mc_wait_for_idle(rdev)) {
190 printk(KERN_WARNING "Failed to wait for MC idle while "
191 "programming pipes. Bad things might happen.\n");
192 }
193}
194
195
196/*
197 * VRAM info
198 */
199static void r520_vram_get_type(struct radeon_device *rdev)
200{
201 uint32_t tmp;
202
203 rdev->mc.vram_width = 128;
204 rdev->mc.vram_is_ddr = true;
205 tmp = RREG32_MC(R520_MC_CNTL0);
206 switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
207 case 0:
208 rdev->mc.vram_width = 32;
209 break;
210 case 1:
211 rdev->mc.vram_width = 64;
212 break;
213 case 2:
214 rdev->mc.vram_width = 128;
215 break;
216 case 3:
217 rdev->mc.vram_width = 256;
218 break;
219 default:
220 rdev->mc.vram_width = 128;
221 break;
222 }
223 if (tmp & R520_MC_CHANNEL_SIZE)
224 rdev->mc.vram_width *= 2;
225}
226
227void r520_vram_info(struct radeon_device *rdev)
228{
229 r520_vram_get_type(rdev);
230 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
231
232 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
233 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
234}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
new file mode 100644
index 000000000000..c45559fc97fd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* r600,rv610,rv630,rv620,rv635,rv670 depend on: */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This file gathers functions specific to:
36 * r600,rv610,rv630,rv620,rv635,rv670
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int r600_mc_wait_for_idle(struct radeon_device *rdev);
41void r600_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
47int r600_mc_init(struct radeon_device *rdev)
48{
49 uint32_t tmp;
50
51 r600_gpu_init(rdev);
52
53 /* setup the gart before changing location so we can ask to
54 * discard unmapped mc requests
55 */
56 /* FIXME: disable out of gart access */
57 tmp = rdev->mc.gtt_location / 4096;
58 tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
59 WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
60 tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
61 tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
62 WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
63
64 rs600_mc_disable_clients(rdev);
65 if (r600_mc_wait_for_idle(rdev)) {
66 printk(KERN_WARNING "Failed to wait for MC idle while "
67 "programming pipes. Bad things might happen.\n");
68 }
69
70 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
71 tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R600_MC_VM_FB_LOCATION, tmp);
74 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
75 tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
76 WREG32(R600_MC_VM_AGP_TOP, tmp);
77 tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
78 WREG32(R600_MC_VM_AGP_BOT, tmp);
79 return 0;
80}
81
82void r600_mc_fini(struct radeon_device *rdev)
83{
84 /* FIXME: implement */
85}
86
87
88/*
89 * Global GPU functions
90 */
91void r600_errata(struct radeon_device *rdev)
92{
93 rdev->pll_errata = 0;
94}
95
96int r600_mc_wait_for_idle(struct radeon_device *rdev)
97{
98 /* FIXME: implement */
99 return 0;
100}
101
102void r600_gpu_init(struct radeon_device *rdev)
103{
104 /* FIXME: implement */
105}
106
107
108/*
109 * VRAM info
110 */
111void r600_vram_get_type(struct radeon_device *rdev)
112{
113 uint32_t tmp;
114 int chansize;
115
116 rdev->mc.vram_width = 128;
117 rdev->mc.vram_is_ddr = true;
118
119 tmp = RREG32(R600_RAMCFG);
120 if (tmp & R600_CHANSIZE_OVERRIDE) {
121 chansize = 16;
122 } else if (tmp & R600_CHANSIZE) {
123 chansize = 64;
124 } else {
125 chansize = 32;
126 }
127 if (rdev->family == CHIP_R600) {
128 rdev->mc.vram_width = 8 * chansize;
129 } else if (rdev->family == CHIP_RV670) {
130 rdev->mc.vram_width = 4 * chansize;
131 } else if ((rdev->family == CHIP_RV610) ||
132 (rdev->family == CHIP_RV620)) {
133 rdev->mc.vram_width = chansize;
134 } else if ((rdev->family == CHIP_RV630) ||
135 (rdev->family == CHIP_RV635)) {
136 rdev->mc.vram_width = 2 * chansize;
137 }
138}
139
140void r600_vram_info(struct radeon_device *rdev)
141{
142 r600_vram_get_type(rdev);
143 rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);
144
145 /* Could the aperture size report 0? */
146 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
147 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
148}
149
150/*
151 * Indirect registers accessor
152 */
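/* These PCIE "port" registers are reached through an index/data pair:
 * the register number is written to R600_PCIE_PORT_INDEX and the value
 * is then read from or written to R600_PCIE_PORT_DATA.  The extra
 * (void)RREG32() of the index (and of the data on write) looks like a
 * read-back to make sure the access has posted before continuing; that
 * is a reading of the code below, not something documented here.
 */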
153uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg)
154{
155 uint32_t r;
156
157 WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
158 (void)RREG32(R600_PCIE_PORT_INDEX);
159 r = RREG32(R600_PCIE_PORT_DATA);
160 return r;
161}
162
163void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
164{
165 WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
166 (void)RREG32(R600_PCIE_PORT_INDEX);
167 WREG32(R600_PCIE_PORT_DATA, (v));
168 (void)RREG32(R600_PCIE_PORT_DATA);
169}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644
index 000000000000..e2d1f5f33f7e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R600_REG_H__
29#define __R600_REG_H__
30
31#define R600_PCIE_PORT_INDEX 0x0038
32#define R600_PCIE_PORT_DATA 0x003c
33
34#define R600_MC_VM_FB_LOCATION 0x2180
35#define R600_MC_FB_BASE_MASK 0x0000FFFF
36#define R600_MC_FB_BASE_SHIFT 0
37#define R600_MC_FB_TOP_MASK 0xFFFF0000
38#define R600_MC_FB_TOP_SHIFT 16
39#define R600_MC_VM_AGP_TOP 0x2184
40#define R600_MC_AGP_TOP_MASK 0x0003FFFF
41#define R600_MC_AGP_TOP_SHIFT 0
42#define R600_MC_VM_AGP_BOT 0x2188
43#define R600_MC_AGP_BOT_MASK 0x0003FFFF
44#define R600_MC_AGP_BOT_SHIFT 0
45#define R600_MC_VM_AGP_BASE 0x218c
46#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
47#define R600_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
48#define R600_LOGICAL_PAGE_NUMBER_SHIFT 0
49#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
50#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
51
52#define R700_MC_VM_FB_LOCATION 0x2024
53#define R700_MC_FB_BASE_MASK 0x0000FFFF
54#define R700_MC_FB_BASE_SHIFT 0
55#define R700_MC_FB_TOP_MASK 0xFFFF0000
56#define R700_MC_FB_TOP_SHIFT 16
57#define R700_MC_VM_AGP_TOP 0x2028
58#define R700_MC_AGP_TOP_MASK 0x0003FFFF
59#define R700_MC_AGP_TOP_SHIFT 0
60#define R700_MC_VM_AGP_BOT 0x202c
61#define R700_MC_AGP_BOT_MASK 0x0003FFFF
62#define R700_MC_AGP_BOT_SHIFT 0
63#define R700_MC_VM_AGP_BASE 0x2030
64#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
65#define R700_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
66#define R700_LOGICAL_PAGE_NUMBER_SHIFT 0
67#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
68#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c
69
70#define R600_RAMCFG 0x2408
71# define R600_CHANSIZE (1 << 7)
72# define R600_CHANSIZE_OVERRIDE (1 << 10)
73
74
75#define R600_GENERAL_PWRMGT 0x618
76# define R600_OPEN_DRAIN_PADS (1 << 11)
77
78#define R600_LOWER_GPIO_ENABLE 0x710
79#define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718
80#define R600_HIGH_VID_LOWER_GPIO_CNTL 0x71c
81#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
82#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
83
84
85
86#define R600_HDP_NONSURFACE_BASE 0x2c04
87
88#define R600_BUS_CNTL 0x5420
89#define R600_CONFIG_CNTL 0x5424
90#define R600_CONFIG_MEMSIZE 0x5428
91#define R600_CONFIG_F0_BASE 0x542C
92#define R600_CONFIG_APER_SIZE 0x5430
93
94#define R600_ROM_CNTL 0x1600
95# define R600_SCK_OVERWRITE (1 << 1)
96# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
97# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
98
99#define R600_CG_SPLL_FUNC_CNTL 0x600
100# define R600_SPLL_BYPASS_EN (1 << 3)
101#define R600_CG_SPLL_STATUS 0x60c
102# define R600_SPLL_CHG_STATUS (1 << 1)
103
104#define R600_BIOS_0_SCRATCH 0x1724
105#define R600_BIOS_1_SCRATCH 0x1728
106#define R600_BIOS_2_SCRATCH 0x172c
107#define R600_BIOS_3_SCRATCH 0x1730
108#define R600_BIOS_4_SCRATCH 0x1734
109#define R600_BIOS_5_SCRATCH 0x1738
110#define R600_BIOS_6_SCRATCH 0x173c
111#define R600_BIOS_7_SCRATCH 0x1740
112
113
114#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
new file mode 100644
index 000000000000..c3f24cc56009
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -0,0 +1,793 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_H__
29#define __RADEON_H__
30
31#include "radeon_object.h"
32
33/* TODO: Here are things that need to be done:
34 * - surface allocator & initializer: (a bit like scratch reg) should
35 * initialize HDP_ stuff on RS600, R600, R700 hw, well anything
36 * related to surfaces
37 * - WB : write back stuff (do it bit like scratch reg things)
38 * - Vblank : look at Jesse's rework and what we should do
39 * - r600/r700: gart & cp
40 * - cs : clean cs ioctl use bitmap & things like that.
41 * - power management stuff
42 * - Barrier in gart code
43 * - Unmappable vram?
44 * - TESTING, TESTING, TESTING
45 */
46
47#include <asm/atomic.h>
48#include <linux/wait.h>
49#include <linux/list.h>
50#include <linux/kref.h>
51
52#include "radeon_mode.h"
53#include "radeon_reg.h"
54
55
56/*
57 * Modules parameters.
58 */
59extern int radeon_no_wb;
60extern int radeon_modeset;
61extern int radeon_dynclks;
62extern int radeon_r4xx_atom;
63extern int radeon_agpmode;
64extern int radeon_vram_limit;
65extern int radeon_gart_size;
66extern int radeon_benchmarking;
67extern int radeon_connector_table;
68
69/*
70 * Copied from radeon_drv.h so we don't have to include both and have conflicting
71 * symbols.
72 */
73#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
74#define RADEON_IB_POOL_SIZE 16
75#define RADEON_DEBUGFS_MAX_NUM_FILES 32
76#define RADEONFB_CONN_LIMIT 4
77
78enum radeon_family {
79 CHIP_R100,
80 CHIP_RV100,
81 CHIP_RS100,
82 CHIP_RV200,
83 CHIP_RS200,
84 CHIP_R200,
85 CHIP_RV250,
86 CHIP_RS300,
87 CHIP_RV280,
88 CHIP_R300,
89 CHIP_R350,
90 CHIP_RV350,
91 CHIP_RV380,
92 CHIP_R420,
93 CHIP_R423,
94 CHIP_RV410,
95 CHIP_RS400,
96 CHIP_RS480,
97 CHIP_RS600,
98 CHIP_RS690,
99 CHIP_RS740,
100 CHIP_RV515,
101 CHIP_R520,
102 CHIP_RV530,
103 CHIP_RV560,
104 CHIP_RV570,
105 CHIP_R580,
106 CHIP_R600,
107 CHIP_RV610,
108 CHIP_RV630,
109 CHIP_RV620,
110 CHIP_RV635,
111 CHIP_RV670,
112 CHIP_RS780,
113 CHIP_RV770,
114 CHIP_RV730,
115 CHIP_RV710,
116 CHIP_LAST,
117};
118
119enum radeon_chip_flags {
120 RADEON_FAMILY_MASK = 0x0000ffffUL,
121 RADEON_FLAGS_MASK = 0xffff0000UL,
122 RADEON_IS_MOBILITY = 0x00010000UL,
123 RADEON_IS_IGP = 0x00020000UL,
124 RADEON_SINGLE_CRTC = 0x00040000UL,
125 RADEON_IS_AGP = 0x00080000UL,
126 RADEON_HAS_HIERZ = 0x00100000UL,
127 RADEON_IS_PCIE = 0x00200000UL,
128 RADEON_NEW_MEMMAP = 0x00400000UL,
129 RADEON_IS_PCI = 0x00800000UL,
130 RADEON_IS_IGPGART = 0x01000000UL,
131};
132
133
134/*
135 * Errata workarounds.
136 */
137enum radeon_pll_errata {
138 CHIP_ERRATA_R300_CG = 0x00000001,
139 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
140 CHIP_ERRATA_PLL_DELAY = 0x00000004
141};
142
143
144struct radeon_device;
145
146
147/*
148 * BIOS.
149 */
150bool radeon_get_bios(struct radeon_device *rdev);
151
152/*
153 * Clocks
154 */
155
156struct radeon_clock {
157 struct radeon_pll p1pll;
158 struct radeon_pll p2pll;
159 struct radeon_pll spll;
160 struct radeon_pll mpll;
161 /* 10 kHz units */
162 uint32_t default_mclk;
163 uint32_t default_sclk;
164};
165
166/*
167 * Fences.
168 */
169struct radeon_fence_driver {
170 uint32_t scratch_reg;
171 atomic_t seq;
172 uint32_t last_seq;
173 unsigned long count_timeout;
174 wait_queue_head_t queue;
175 rwlock_t lock;
176 struct list_head created;
177 struct list_head emited;
178 struct list_head signaled;
179};
180
181struct radeon_fence {
182 struct radeon_device *rdev;
183 struct kref kref;
184 struct list_head list;
185 /* protected by radeon_fence.lock */
186 uint32_t seq;
187 unsigned long timeout;
188 bool emited;
189 bool signaled;
190};
191
192int radeon_fence_driver_init(struct radeon_device *rdev);
193void radeon_fence_driver_fini(struct radeon_device *rdev);
194int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
195int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
196void radeon_fence_process(struct radeon_device *rdev);
197bool radeon_fence_signaled(struct radeon_fence *fence);
198int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
199int radeon_fence_wait_next(struct radeon_device *rdev);
200int radeon_fence_wait_last(struct radeon_device *rdev);
201struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
202void radeon_fence_unref(struct radeon_fence **fence);
203
204
205/*
206 * Radeon buffer.
207 */
208struct radeon_object;
209
210struct radeon_object_list {
211 struct list_head list;
212 struct radeon_object *robj;
213 uint64_t gpu_offset;
214 unsigned rdomain;
215 unsigned wdomain;
216};
217
218int radeon_object_init(struct radeon_device *rdev);
219void radeon_object_fini(struct radeon_device *rdev);
220int radeon_object_create(struct radeon_device *rdev,
221 struct drm_gem_object *gobj,
222 unsigned long size,
223 bool kernel,
224 uint32_t domain,
225 bool interruptible,
226 struct radeon_object **robj_ptr);
227int radeon_object_kmap(struct radeon_object *robj, void **ptr);
228void radeon_object_kunmap(struct radeon_object *robj);
229void radeon_object_unref(struct radeon_object **robj);
230int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
231 uint64_t *gpu_addr);
232void radeon_object_unpin(struct radeon_object *robj);
233int radeon_object_wait(struct radeon_object *robj);
234int radeon_object_evict_vram(struct radeon_device *rdev);
235int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
236void radeon_object_force_delete(struct radeon_device *rdev);
237void radeon_object_list_add_object(struct radeon_object_list *lobj,
238 struct list_head *head);
239int radeon_object_list_validate(struct list_head *head, void *fence);
240void radeon_object_list_unvalidate(struct list_head *head);
241void radeon_object_list_clean(struct list_head *head);
242int radeon_object_fbdev_mmap(struct radeon_object *robj,
243 struct vm_area_struct *vma);
244unsigned long radeon_object_size(struct radeon_object *robj);
245
246
247/*
248 * GEM objects.
249 */
250struct radeon_gem {
251 struct list_head objects;
252};
253
254int radeon_gem_init(struct radeon_device *rdev);
255void radeon_gem_fini(struct radeon_device *rdev);
256int radeon_gem_object_create(struct radeon_device *rdev, int size,
257 int alignment, int initial_domain,
258 bool discardable, bool kernel,
259 bool interruptible,
260 struct drm_gem_object **obj);
261int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
262 uint64_t *gpu_addr);
263void radeon_gem_object_unpin(struct drm_gem_object *obj);
264
265
266/*
267 * GART structures, functions & helpers
268 */
269struct radeon_mc;
270
271struct radeon_gart_table_ram {
272 volatile uint32_t *ptr;
273};
274
275struct radeon_gart_table_vram {
276 struct radeon_object *robj;
277 volatile uint32_t *ptr;
278};
279
280union radeon_gart_table {
281 struct radeon_gart_table_ram ram;
282 struct radeon_gart_table_vram vram;
283};
284
285struct radeon_gart {
286 dma_addr_t table_addr;
287 unsigned num_gpu_pages;
288 unsigned num_cpu_pages;
289 unsigned table_size;
290 union radeon_gart_table table;
291 struct page **pages;
292 dma_addr_t *pages_addr;
293 bool ready;
294};
295
296int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
297void radeon_gart_table_ram_free(struct radeon_device *rdev);
298int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
299void radeon_gart_table_vram_free(struct radeon_device *rdev);
300int radeon_gart_init(struct radeon_device *rdev);
301void radeon_gart_fini(struct radeon_device *rdev);
302void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
303 int pages);
304int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
305 int pages, struct page **pagelist);
306
307
308/*
309 * GPU MC structures, functions & helpers
310 */
311struct radeon_mc {
312 resource_size_t aper_size;
313 resource_size_t aper_base;
314 resource_size_t agp_base;
315 unsigned gtt_location;
316 unsigned gtt_size;
317 unsigned vram_location;
318 unsigned vram_size;
319 unsigned vram_width;
320 int vram_mtrr;
321 bool vram_is_ddr;
322};
323
324int radeon_mc_setup(struct radeon_device *rdev);
325
326
327/*
328 * GPU scratch registers structures, functions & helpers
329 */
330struct radeon_scratch {
331 unsigned num_reg;
332 bool free[32];
333 uint32_t reg[32];
334};
335
336int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
337void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
338
339
340/*
341 * IRQS.
342 */
343struct radeon_irq {
344 bool installed;
345 bool sw_int;
346 /* FIXME: use a define for max crtc rather than hardcoding it */
347 bool crtc_vblank_int[2];
348};
349
350int radeon_irq_kms_init(struct radeon_device *rdev);
351void radeon_irq_kms_fini(struct radeon_device *rdev);
352
353
354/*
355 * CP & ring.
356 */
357struct radeon_ib {
358 struct list_head list;
359 unsigned long idx;
360 uint64_t gpu_addr;
361 struct radeon_fence *fence;
362 volatile uint32_t *ptr;
363 uint32_t length_dw;
364};
365
366struct radeon_ib_pool {
367 struct mutex mutex;
368 struct radeon_object *robj;
369 struct list_head scheduled_ibs;
370 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
371 bool ready;
372 DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
373};
374
375struct radeon_cp {
376 struct radeon_object *ring_obj;
377 volatile uint32_t *ring;
378 unsigned rptr;
379 unsigned wptr;
380 unsigned wptr_old;
381 unsigned ring_size;
382 unsigned ring_free_dw;
383 int count_dw;
384 uint64_t gpu_addr;
385 uint32_t align_mask;
386 uint32_t ptr_mask;
387 struct mutex mutex;
388 bool ready;
389};
390
391int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
392void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
393int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
394int radeon_ib_pool_init(struct radeon_device *rdev);
395void radeon_ib_pool_fini(struct radeon_device *rdev);
396int radeon_ib_test(struct radeon_device *rdev);
397/* Ring access between begin & end cannot sleep */
398void radeon_ring_free_size(struct radeon_device *rdev);
399int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
400void radeon_ring_unlock_commit(struct radeon_device *rdev);
401void radeon_ring_unlock_undo(struct radeon_device *rdev);
402int radeon_ring_test(struct radeon_device *rdev);
403int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
404void radeon_ring_fini(struct radeon_device *rdev);
405
406
407/*
408 * CS.
409 */
410struct radeon_cs_reloc {
411 struct drm_gem_object *gobj;
412 struct radeon_object *robj;
413 struct radeon_object_list lobj;
414 uint32_t handle;
415 uint32_t flags;
416};
417
418struct radeon_cs_chunk {
419 uint32_t chunk_id;
420 uint32_t length_dw;
421 uint32_t *kdata;
422};
423
424struct radeon_cs_parser {
425 struct radeon_device *rdev;
426 struct drm_file *filp;
427 /* chunks */
428 unsigned nchunks;
429 struct radeon_cs_chunk *chunks;
430 uint64_t *chunks_array;
431 /* IB */
432 unsigned idx;
433 /* relocations */
434 unsigned nrelocs;
435 struct radeon_cs_reloc *relocs;
436 struct radeon_cs_reloc **relocs_ptr;
437 struct list_head validated;
438 /* indices of various chunks */
439 int chunk_ib_idx;
440 int chunk_relocs_idx;
441 struct radeon_ib *ib;
442 void *track;
443};
444
445struct radeon_cs_packet {
446 unsigned idx;
447 unsigned type;
448 unsigned reg;
449 unsigned opcode;
450 int count;
451 unsigned one_reg_wr;
452};
453
454typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
455 struct radeon_cs_packet *pkt,
456 unsigned idx, unsigned reg);
457typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
458 struct radeon_cs_packet *pkt);
459
460
461/*
462 * AGP
463 */
464int radeon_agp_init(struct radeon_device *rdev);
465void radeon_agp_fini(struct radeon_device *rdev);
466
467
468/*
469 * Writeback
470 */
471struct radeon_wb {
472 struct radeon_object *wb_obj;
473 volatile uint32_t *wb;
474 uint64_t gpu_addr;
475};
476
477
478/*
479 * Benchmarking
480 */
481void radeon_benchmark(struct radeon_device *rdev);
482
483
484/*
485 * Debugfs
486 */
487int radeon_debugfs_add_files(struct radeon_device *rdev,
488 struct drm_info_list *files,
489 unsigned nfiles);
490int radeon_debugfs_fence_init(struct radeon_device *rdev);
491int r100_debugfs_rbbm_init(struct radeon_device *rdev);
492int r100_debugfs_cp_init(struct radeon_device *rdev);
493
494
495/*
496 * ASIC specific functions.
497 */
498struct radeon_asic {
499 void (*errata)(struct radeon_device *rdev);
500 void (*vram_info)(struct radeon_device *rdev);
501 int (*gpu_reset)(struct radeon_device *rdev);
502 int (*mc_init)(struct radeon_device *rdev);
503 void (*mc_fini)(struct radeon_device *rdev);
504 int (*wb_init)(struct radeon_device *rdev);
505 void (*wb_fini)(struct radeon_device *rdev);
506 int (*gart_enable)(struct radeon_device *rdev);
507 void (*gart_disable)(struct radeon_device *rdev);
508 void (*gart_tlb_flush)(struct radeon_device *rdev);
509 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
510 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
511 void (*cp_fini)(struct radeon_device *rdev);
512 void (*cp_disable)(struct radeon_device *rdev);
513 void (*ring_start)(struct radeon_device *rdev);
514 int (*irq_set)(struct radeon_device *rdev);
515 int (*irq_process)(struct radeon_device *rdev);
516 void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
517 int (*cs_parse)(struct radeon_cs_parser *p);
518 int (*copy_blit)(struct radeon_device *rdev,
519 uint64_t src_offset,
520 uint64_t dst_offset,
521 unsigned num_pages,
522 struct radeon_fence *fence);
523 int (*copy_dma)(struct radeon_device *rdev,
524 uint64_t src_offset,
525 uint64_t dst_offset,
526 unsigned num_pages,
527 struct radeon_fence *fence);
528 int (*copy)(struct radeon_device *rdev,
529 uint64_t src_offset,
530 uint64_t dst_offset,
531 unsigned num_pages,
532 struct radeon_fence *fence);
533 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
534 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
535 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
536 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
537};
538
539
540/*
541 * IOCTL.
542 */
543int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
544 struct drm_file *filp);
545int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
546 struct drm_file *filp);
547int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
548 struct drm_file *file_priv);
549int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
550 struct drm_file *file_priv);
551int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
552 struct drm_file *file_priv);
553int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
554 struct drm_file *file_priv);
555int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
556 struct drm_file *filp);
557int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
558 struct drm_file *filp);
559int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
560 struct drm_file *filp);
561int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
562 struct drm_file *filp);
563int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
564
565
566/*
567 * Core structure, functions and helpers.
568 */
569typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
570typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
571
572struct radeon_device {
573 struct drm_device *ddev;
574 struct pci_dev *pdev;
575 /* ASIC */
576 enum radeon_family family;
577 unsigned long flags;
578 int usec_timeout;
579 enum radeon_pll_errata pll_errata;
580 int num_gb_pipes;
581 int disp_priority;
582 /* BIOS */
583 uint8_t *bios;
584 bool is_atom_bios;
585 uint16_t bios_header_start;
586 struct radeon_object *stollen_vga_memory;
587 struct fb_info *fbdev_info;
588 struct radeon_object *fbdev_robj;
589 struct radeon_framebuffer *fbdev_rfb;
590 /* Register mmio */
591 unsigned long rmmio_base;
592 unsigned long rmmio_size;
593 void *rmmio;
594 radeon_rreg_t mm_rreg;
595 radeon_wreg_t mm_wreg;
596 radeon_rreg_t mc_rreg;
597 radeon_wreg_t mc_wreg;
598 radeon_rreg_t pll_rreg;
599 radeon_wreg_t pll_wreg;
600 radeon_rreg_t pcie_rreg;
601 radeon_wreg_t pcie_wreg;
602 radeon_rreg_t pciep_rreg;
603 radeon_wreg_t pciep_wreg;
604 struct radeon_clock clock;
605 struct radeon_mc mc;
606 struct radeon_gart gart;
607 struct radeon_mode_info mode_info;
608 struct radeon_scratch scratch;
609 struct radeon_mman mman;
610 struct radeon_fence_driver fence_drv;
611 struct radeon_cp cp;
612 struct radeon_ib_pool ib_pool;
613 struct radeon_irq irq;
614 struct radeon_asic *asic;
615 struct radeon_gem gem;
616 struct mutex cs_mutex;
617 struct radeon_wb wb;
618 bool gpu_lockup;
619 bool shutdown;
620 bool suspend;
621};
622
623int radeon_device_init(struct radeon_device *rdev,
624 struct drm_device *ddev,
625 struct pci_dev *pdev,
626 uint32_t flags);
627void radeon_device_fini(struct radeon_device *rdev);
628int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
629
630
631/*
632 * Registers read & write functions.
633 */
634#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
635#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
636#define RREG32(reg) rdev->mm_rreg(rdev, (reg))
637#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v))
638#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
639#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
640#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
641#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
642#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
643#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
644#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg))
645#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v))
646#define WREG32_P(reg, val, mask) \
647 do { \
648 uint32_t tmp_ = RREG32(reg); \
649 tmp_ &= (mask); \
650 tmp_ |= ((val) & ~(mask)); \
651 WREG32(reg, tmp_); \
652 } while (0)
653#define WREG32_PLL_P(reg, val, mask) \
654 do { \
655 uint32_t tmp_ = RREG32_PLL(reg); \
656 tmp_ &= (mask); \
657 tmp_ |= ((val) & ~(mask)); \
658 WREG32_PLL(reg, tmp_); \
659 } while (0)
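/* A small usage sketch for the helpers above, mirroring what
 * r520_mc_init() does when it programs R520_MC_FB_LOCATION (register
 * and field names are the ones already used in r520.c; "top" and
 * "start" stand in for the computed addresses):
 *
 *	tmp = REG_SET(R520_MC_FB_TOP, top >> 16) |
 *	      REG_SET(R520_MC_FB_START, start >> 16);
 *	WREG32_MC(R520_MC_FB_LOCATION, tmp);
 *
 * WREG32_P(reg, val, mask) is a read-modify-write: bits set in mask
 * keep their current register value, all other bits are taken from val.
 */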
660
661void r100_pll_errata_after_index(struct radeon_device *rdev);
662
663
664/*
665 * ASICs helpers.
666 */
667#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
668 (rdev->family == CHIP_RV200) || \
669 (rdev->family == CHIP_RS100) || \
670 (rdev->family == CHIP_RS200) || \
671 (rdev->family == CHIP_RV250) || \
672 (rdev->family == CHIP_RV280) || \
673 (rdev->family == CHIP_RS300))
674#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
675 (rdev->family == CHIP_RV350) || \
676 (rdev->family == CHIP_R350) || \
677 (rdev->family == CHIP_RV380) || \
678 (rdev->family == CHIP_R420) || \
679 (rdev->family == CHIP_R423) || \
680 (rdev->family == CHIP_RV410) || \
681 (rdev->family == CHIP_RS400) || \
682 (rdev->family == CHIP_RS480))
683#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
684#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
685#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
686
687
688/*
689 * BIOS helpers.
690 */
691#define RBIOS8(i) (rdev->bios[i])
692#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
693#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
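/* Sketch of how these are meant to be used (the offset is purely
 * illustrative, not taken from a real BIOS layout):
 *
 *	uint16_t tbl = RBIOS16(rdev->bios_header_start + 4);
 *
 * i.e. little-endian 8/16/32-bit loads straight out of the rdev->bios
 * byte array.
 */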
694
695int radeon_combios_init(struct radeon_device *rdev);
696void radeon_combios_fini(struct radeon_device *rdev);
697int radeon_atombios_init(struct radeon_device *rdev);
698void radeon_atombios_fini(struct radeon_device *rdev);
699
700
701/*
702 * RING helpers.
703 */
704#define CP_PACKET0 0x00000000
705#define PACKET0_BASE_INDEX_SHIFT 0
706#define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
707#define PACKET0_COUNT_SHIFT 16
708#define PACKET0_COUNT_MASK (0x3fff << 16)
709#define CP_PACKET1 0x40000000
710#define CP_PACKET2 0x80000000
711#define PACKET2_PAD_SHIFT 0
712#define PACKET2_PAD_MASK (0x3fffffff << 0)
713#define CP_PACKET3 0xC0000000
714#define PACKET3_IT_OPCODE_SHIFT 8
715#define PACKET3_IT_OPCODE_MASK (0xff << 8)
716#define PACKET3_COUNT_SHIFT 16
717#define PACKET3_COUNT_MASK (0x3fff << 16)
718/* PACKET3 op code */
719#define PACKET3_NOP 0x10
720#define PACKET3_3D_DRAW_VBUF 0x28
721#define PACKET3_3D_DRAW_IMMD 0x29
722#define PACKET3_3D_DRAW_INDX 0x2A
723#define PACKET3_3D_LOAD_VBPNTR 0x2F
724#define PACKET3_INDX_BUFFER 0x33
725#define PACKET3_3D_DRAW_VBUF_2 0x34
726#define PACKET3_3D_DRAW_IMMD_2 0x35
727#define PACKET3_3D_DRAW_INDX_2 0x36
728#define PACKET3_BITBLT_MULTI 0x9B
729
730#define PACKET0(reg, n) (CP_PACKET0 | \
731 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) | \
732 REG_SET(PACKET0_COUNT, (n)))
733#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
734#define PACKET3(op, n) (CP_PACKET3 | \
735 REG_SET(PACKET3_IT_OPCODE, (op)) | \
736 REG_SET(PACKET3_COUNT, (n)))
737
738#define PACKET_TYPE0 0
739#define PACKET_TYPE1 1
740#define PACKET_TYPE2 2
741#define PACKET_TYPE3 3
742
743#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
744#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
745#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
746#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
747#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
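/* Worked example using only the macros above (0x15e0 is an arbitrary
 * register offset picked for illustration):
 *
 *	PACKET0(0x15e0, 2)
 *	  == CP_PACKET0
 *	   | REG_SET(PACKET0_BASE_INDEX, 0x15e0 >> 2)
 *	   | REG_SET(PACKET0_COUNT, 2)
 *	  == 0x00020578
 *
 * Decoding that header gives CP_PACKET_GET_TYPE() == PACKET_TYPE0,
 * CP_PACKET_GET_COUNT() == 2 and CP_PACKET0_GET_REG() == 0x15e0.
 */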
748
749static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
750{
751#if DRM_DEBUG_CODE
752 if (rdev->cp.count_dw <= 0) {
753 DRM_ERROR("radeon: writing more dwords to ring than expected !\n");
754 }
755#endif
756 rdev->cp.ring[rdev->cp.wptr++] = v;
757 rdev->cp.wptr &= rdev->cp.ptr_mask;
758 rdev->cp.count_dw--;
759 rdev->cp.ring_free_dw--;
760}
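/* Typical use of the ring helpers declared earlier in this header; a
 * sketch only, with error handling trimmed and the packet contents
 * chosen purely for illustration:
 *
 *	if (radeon_ring_lock(rdev, 2) == 0) {
 *		radeon_ring_write(rdev, PACKET0(0x15e0, 0));
 *		radeon_ring_write(rdev, 0);
 *		radeon_ring_unlock_commit(rdev);
 *	}
 */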
761
762
763/*
764 * ASICs macro.
765 */
766#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
767#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
768#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
769#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
770#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
771#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
772#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
773#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
774#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
775#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
776#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
777#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
778#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
779#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
780#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
781#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
782#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
783#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
784#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
785#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
786#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
787#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
788#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
789#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
790#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
791#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
792
793#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644
index 000000000000..23ea9955ac59
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright 2008 Red Hat Inc.
3 * Copyright 2009 Jerome Glisse.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27#include "drmP.h"
28#include "drm.h"
29#include "radeon.h"
30#include "radeon_drm.h"
31
32#if __OS_HAS_AGP
33
34struct radeon_agpmode_quirk {
35 u32 hostbridge_vendor;
36 u32 hostbridge_device;
37 u32 chip_vendor;
38 u32 chip_device;
39 u32 subsys_vendor;
40 u32 subsys_device;
41 u32 default_mode;
42};
43
44static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
45 /* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
46 { PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
47 /* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
48 { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
49 /* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
50 { PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
51 0x148c, 0x2073, 4},
52 /* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
53 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
54 PCI_VENDOR_ID_IBM, 0x052f, 1},
55 /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
56 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
57 PCI_VENDOR_ID_IBM, 0x0550, 1},
58 /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
59 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
60 PCI_VENDOR_ID_IBM, 0x0530, 1},
61 /* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
62 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
63 PCI_VENDOR_ID_IBM, 0x054f, 2},
64 /* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
65 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
66 PCI_VENDOR_ID_SONY, 0x816b, 2},
67 /* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
68 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
69 PCI_VENDOR_ID_SONY, 0x8195, 8},
70 /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
71 { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
72 PCI_VENDOR_ID_DELL, 0x00e3, 2},
73 /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
74 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
75 PCI_VENDOR_ID_DELL, 0x0149, 1},
76 /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
77 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
78 0x1025, 0x0061, 1},
79 /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
80 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
81 0x1025, 0x0064, 1},
82 /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
83 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
84 PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
85 /* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
86 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
87 0x10cf, 0x127f, 1},
88 /* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
89 { 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
90 0x1787, 0x5960, 4},
91 /* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
92 { PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
93 0x17af, 0x2020, 4},
94 /* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
95 { PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
96 PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
97 /* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
98 { PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
99 PCI_VENDOR_ID_ATI, 0x013a, 2},
100 /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
101 { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
102 PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
103 /* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
104 { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
105 PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
106 /* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
107 { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
108 0x174b, 0x7149, 4},
109 /* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
110 { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
111 0x1462, 0x0380, 4},
112 /* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
113 { PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
114 0x148c, 0x2073, 4},
115 /* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
116 { PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
117 PCI_VENDOR_ID_SONY, 0x8175, 1},
118 /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
119 { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
120 PCI_VENDOR_ID_ATI, 0x0152, 2},
121 { 0, 0, 0, 0, 0, 0, 0 },
122};
123#endif
124
125int radeon_agp_init(struct radeon_device *rdev)
126{
127#if __OS_HAS_AGP
128 struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
129 struct drm_agp_mode mode;
130 struct drm_agp_info info;
131 uint32_t agp_status;
132 int default_mode;
133 bool is_v3;
134 int ret;
135
136 /* Acquire AGP. */
137 if (!rdev->ddev->agp->acquired) {
138 ret = drm_agp_acquire(rdev->ddev);
139 if (ret) {
140 DRM_ERROR("Unable to acquire AGP: %d\n", ret);
141 return ret;
142 }
143 }
144
145 ret = drm_agp_info(rdev->ddev, &info);
146 if (ret) {
147 DRM_ERROR("Unable to get AGP info: %d\n", ret);
148 return ret;
149 }
150 mode.mode = info.mode;
151 agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
152 is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
153
154 if (is_v3) {
155 default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
156 } else {
157 if (agp_status & RADEON_AGP_4X_MODE) {
158 default_mode = 4;
159 } else if (agp_status & RADEON_AGP_2X_MODE) {
160 default_mode = 2;
161 } else {
162 default_mode = 1;
163 }
164 }
165
166 /* Apply AGPMode Quirks */
167 while (p && p->chip_device != 0) {
168 if (info.id_vendor == p->hostbridge_vendor &&
169 info.id_device == p->hostbridge_device &&
170 rdev->pdev->vendor == p->chip_vendor &&
171 rdev->pdev->device == p->chip_device &&
172 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
173 rdev->pdev->subsystem_device == p->subsys_device) {
174 default_mode = p->default_mode;
175 }
176 ++p;
177 }
178
179 if (radeon_agpmode > 0) {
180 if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
181 (radeon_agpmode > (is_v3 ? 8 : 4)) ||
182 (radeon_agpmode & (radeon_agpmode - 1))) {
183 DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
184 radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
185 default_mode);
186 radeon_agpmode = default_mode;
187 } else {
188 DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
189 }
190 } else {
191 radeon_agpmode = default_mode;
192 }
193
194 mode.mode &= ~RADEON_AGP_MODE_MASK;
195 if (is_v3) {
196 switch (radeon_agpmode) {
197 case 8:
198 mode.mode |= RADEON_AGPv3_8X_MODE;
199 break;
200 case 4:
201 default:
202 mode.mode |= RADEON_AGPv3_4X_MODE;
203 break;
204 }
205 } else {
206 switch (radeon_agpmode) {
207 case 4:
208 mode.mode |= RADEON_AGP_4X_MODE;
209 break;
210 case 2:
211 mode.mode |= RADEON_AGP_2X_MODE;
212 break;
213 case 1:
214 default:
215 mode.mode |= RADEON_AGP_1X_MODE;
216 break;
217 }
218 }
219
220 mode.mode &= ~RADEON_AGP_FW_MODE; /* disable AGP fast writes */
221 ret = drm_agp_enable(rdev->ddev, mode);
222 if (ret) {
223 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
224 return ret;
225 }
226
227 rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
228 rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
229
230 /* workaround some hw issues */
231 if (rdev->family < CHIP_R200) {
232 WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
233 }
234 return 0;
235#else
236 return 0;
237#endif
238}
239
240void radeon_agp_fini(struct radeon_device *rdev)
241{
242#if __OS_HAS_AGP
243 if (rdev->flags & RADEON_IS_AGP) {
244 if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
245 drm_agp_release(rdev->ddev);
246 }
247 }
248#endif
249}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644
index 000000000000..e57d8a784e9f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -0,0 +1,405 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_ASIC_H__
29#define __RADEON_ASIC_H__
30
31/*
32 * common functions
33 */
34void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
35void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
36
37void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
38void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
39void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
40
41/*
42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
43 */
44uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
45void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
46void r100_errata(struct radeon_device *rdev);
47void r100_vram_info(struct radeon_device *rdev);
48int r100_gpu_reset(struct radeon_device *rdev);
49int r100_mc_init(struct radeon_device *rdev);
50void r100_mc_fini(struct radeon_device *rdev);
51int r100_wb_init(struct radeon_device *rdev);
52void r100_wb_fini(struct radeon_device *rdev);
53int r100_gart_enable(struct radeon_device *rdev);
54void r100_pci_gart_disable(struct radeon_device *rdev);
55void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
56int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
57int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
58void r100_cp_fini(struct radeon_device *rdev);
59void r100_cp_disable(struct radeon_device *rdev);
60void r100_ring_start(struct radeon_device *rdev);
61int r100_irq_set(struct radeon_device *rdev);
62int r100_irq_process(struct radeon_device *rdev);
63void r100_fence_ring_emit(struct radeon_device *rdev,
64 struct radeon_fence *fence);
65int r100_cs_parse(struct radeon_cs_parser *p);
66void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
67uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
68int r100_copy_blit(struct radeon_device *rdev,
69 uint64_t src_offset,
70 uint64_t dst_offset,
71 unsigned num_pages,
72 struct radeon_fence *fence);
73
74static struct radeon_asic r100_asic = {
75 .errata = &r100_errata,
76 .vram_info = &r100_vram_info,
77 .gpu_reset = &r100_gpu_reset,
78 .mc_init = &r100_mc_init,
79 .mc_fini = &r100_mc_fini,
80 .wb_init = &r100_wb_init,
81 .wb_fini = &r100_wb_fini,
82 .gart_enable = &r100_gart_enable,
83 .gart_disable = &r100_pci_gart_disable,
84 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
85 .gart_set_page = &r100_pci_gart_set_page,
86 .cp_init = &r100_cp_init,
87 .cp_fini = &r100_cp_fini,
88 .cp_disable = &r100_cp_disable,
89 .ring_start = &r100_ring_start,
90 .irq_set = &r100_irq_set,
91 .irq_process = &r100_irq_process,
92 .fence_ring_emit = &r100_fence_ring_emit,
93 .cs_parse = &r100_cs_parse,
94 .copy_blit = &r100_copy_blit,
95 .copy_dma = NULL,
96 .copy = &r100_copy_blit,
97 .set_engine_clock = &radeon_legacy_set_engine_clock,
98 .set_memory_clock = NULL,
99 .set_pcie_lanes = NULL,
100 .set_clock_gating = &radeon_legacy_set_clock_gating,
101};
102
103
104/*
105 * r300,r350,rv350,rv380
106 */
107void r300_errata(struct radeon_device *rdev);
108void r300_vram_info(struct radeon_device *rdev);
109int r300_gpu_reset(struct radeon_device *rdev);
110int r300_mc_init(struct radeon_device *rdev);
111void r300_mc_fini(struct radeon_device *rdev);
112void r300_ring_start(struct radeon_device *rdev);
113void r300_fence_ring_emit(struct radeon_device *rdev,
114 struct radeon_fence *fence);
115int r300_cs_parse(struct radeon_cs_parser *p);
116int r300_gart_enable(struct radeon_device *rdev);
117void rv370_pcie_gart_disable(struct radeon_device *rdev);
118void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
119int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
120uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
121void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
122void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
123int r300_copy_dma(struct radeon_device *rdev,
124 uint64_t src_offset,
125 uint64_t dst_offset,
126 unsigned num_pages,
127 struct radeon_fence *fence);
128static struct radeon_asic r300_asic = {
129 .errata = &r300_errata,
130 .vram_info = &r300_vram_info,
131 .gpu_reset = &r300_gpu_reset,
132 .mc_init = &r300_mc_init,
133 .mc_fini = &r300_mc_fini,
134 .wb_init = &r100_wb_init,
135 .wb_fini = &r100_wb_fini,
136 .gart_enable = &r300_gart_enable,
137 .gart_disable = &r100_pci_gart_disable,
138 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
139 .gart_set_page = &r100_pci_gart_set_page,
140 .cp_init = &r100_cp_init,
141 .cp_fini = &r100_cp_fini,
142 .cp_disable = &r100_cp_disable,
143 .ring_start = &r300_ring_start,
144 .irq_set = &r100_irq_set,
145 .irq_process = &r100_irq_process,
146 .fence_ring_emit = &r300_fence_ring_emit,
147 .cs_parse = &r300_cs_parse,
148 .copy_blit = &r100_copy_blit,
149 .copy_dma = &r300_copy_dma,
150 .copy = &r100_copy_blit,
151 .set_engine_clock = &radeon_legacy_set_engine_clock,
152 .set_memory_clock = NULL,
153 .set_pcie_lanes = &rv370_set_pcie_lanes,
154 .set_clock_gating = &radeon_legacy_set_clock_gating,
155};
156
157/*
158 * r420,r423,rv410
159 */
160void r420_errata(struct radeon_device *rdev);
161void r420_vram_info(struct radeon_device *rdev);
162int r420_mc_init(struct radeon_device *rdev);
163void r420_mc_fini(struct radeon_device *rdev);
164static struct radeon_asic r420_asic = {
165 .errata = &r420_errata,
166 .vram_info = &r420_vram_info,
167 .gpu_reset = &r300_gpu_reset,
168 .mc_init = &r420_mc_init,
169 .mc_fini = &r420_mc_fini,
170 .wb_init = &r100_wb_init,
171 .wb_fini = &r100_wb_fini,
172 .gart_enable = &r300_gart_enable,
173 .gart_disable = &rv370_pcie_gart_disable,
174 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
175 .gart_set_page = &rv370_pcie_gart_set_page,
176 .cp_init = &r100_cp_init,
177 .cp_fini = &r100_cp_fini,
178 .cp_disable = &r100_cp_disable,
179 .ring_start = &r300_ring_start,
180 .irq_set = &r100_irq_set,
181 .irq_process = &r100_irq_process,
182 .fence_ring_emit = &r300_fence_ring_emit,
183 .cs_parse = &r300_cs_parse,
184 .copy_blit = &r100_copy_blit,
185 .copy_dma = &r300_copy_dma,
186 .copy = &r100_copy_blit,
187 .set_engine_clock = &radeon_atom_set_engine_clock,
188 .set_memory_clock = &radeon_atom_set_memory_clock,
189 .set_pcie_lanes = &rv370_set_pcie_lanes,
190 .set_clock_gating = &radeon_atom_set_clock_gating,
191};
192
193
194/*
195 * rs400,rs480
196 */
197void rs400_errata(struct radeon_device *rdev);
198void rs400_vram_info(struct radeon_device *rdev);
199int rs400_mc_init(struct radeon_device *rdev);
200void rs400_mc_fini(struct radeon_device *rdev);
201int rs400_gart_enable(struct radeon_device *rdev);
202void rs400_gart_disable(struct radeon_device *rdev);
203void rs400_gart_tlb_flush(struct radeon_device *rdev);
204int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
205uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
206void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
207static struct radeon_asic rs400_asic = {
208 .errata = &rs400_errata,
209 .vram_info = &rs400_vram_info,
210 .gpu_reset = &r300_gpu_reset,
211 .mc_init = &rs400_mc_init,
212 .mc_fini = &rs400_mc_fini,
213 .wb_init = &r100_wb_init,
214 .wb_fini = &r100_wb_fini,
215 .gart_enable = &rs400_gart_enable,
216 .gart_disable = &rs400_gart_disable,
217 .gart_tlb_flush = &rs400_gart_tlb_flush,
218 .gart_set_page = &rs400_gart_set_page,
219 .cp_init = &r100_cp_init,
220 .cp_fini = &r100_cp_fini,
221 .cp_disable = &r100_cp_disable,
222 .ring_start = &r300_ring_start,
223 .irq_set = &r100_irq_set,
224 .irq_process = &r100_irq_process,
225 .fence_ring_emit = &r300_fence_ring_emit,
226 .cs_parse = &r300_cs_parse,
227 .copy_blit = &r100_copy_blit,
228 .copy_dma = &r300_copy_dma,
229 .copy = &r100_copy_blit,
230 .set_engine_clock = &radeon_legacy_set_engine_clock,
231 .set_memory_clock = NULL,
232 .set_pcie_lanes = NULL,
233 .set_clock_gating = &radeon_legacy_set_clock_gating,
234};
235
236
237/*
238 * rs600.
239 */
240void rs600_errata(struct radeon_device *rdev);
241void rs600_vram_info(struct radeon_device *rdev);
242int rs600_mc_init(struct radeon_device *rdev);
243void rs600_mc_fini(struct radeon_device *rdev);
244int rs600_irq_set(struct radeon_device *rdev);
245int rs600_gart_enable(struct radeon_device *rdev);
246void rs600_gart_disable(struct radeon_device *rdev);
247void rs600_gart_tlb_flush(struct radeon_device *rdev);
248int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
249uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
250void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
251static struct radeon_asic rs600_asic = {
252 .errata = &rs600_errata,
253 .vram_info = &rs600_vram_info,
254 .gpu_reset = &r300_gpu_reset,
255 .mc_init = &rs600_mc_init,
256 .mc_fini = &rs600_mc_fini,
257 .wb_init = &r100_wb_init,
258 .wb_fini = &r100_wb_fini,
259 .gart_enable = &rs600_gart_enable,
260 .gart_disable = &rs600_gart_disable,
261 .gart_tlb_flush = &rs600_gart_tlb_flush,
262 .gart_set_page = &rs600_gart_set_page,
263 .cp_init = &r100_cp_init,
264 .cp_fini = &r100_cp_fini,
265 .cp_disable = &r100_cp_disable,
266 .ring_start = &r300_ring_start,
267 .irq_set = &rs600_irq_set,
268 .irq_process = &r100_irq_process,
269 .fence_ring_emit = &r300_fence_ring_emit,
270 .cs_parse = &r300_cs_parse,
271 .copy_blit = &r100_copy_blit,
272 .copy_dma = &r300_copy_dma,
273 .copy = &r100_copy_blit,
274 .set_engine_clock = &radeon_atom_set_engine_clock,
275 .set_memory_clock = &radeon_atom_set_memory_clock,
276 .set_pcie_lanes = NULL,
277 .set_clock_gating = &radeon_atom_set_clock_gating,
278};
279
280
281/*
282 * rs690,rs740
283 */
284void rs690_errata(struct radeon_device *rdev);
285void rs690_vram_info(struct radeon_device *rdev);
286int rs690_mc_init(struct radeon_device *rdev);
287void rs690_mc_fini(struct radeon_device *rdev);
288uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
289void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
290static struct radeon_asic rs690_asic = {
291 .errata = &rs690_errata,
292 .vram_info = &rs690_vram_info,
293 .gpu_reset = &r300_gpu_reset,
294 .mc_init = &rs690_mc_init,
295 .mc_fini = &rs690_mc_fini,
296 .wb_init = &r100_wb_init,
297 .wb_fini = &r100_wb_fini,
298 .gart_enable = &rs400_gart_enable,
299 .gart_disable = &rs400_gart_disable,
300 .gart_tlb_flush = &rs400_gart_tlb_flush,
301 .gart_set_page = &rs400_gart_set_page,
302 .cp_init = &r100_cp_init,
303 .cp_fini = &r100_cp_fini,
304 .cp_disable = &r100_cp_disable,
305 .ring_start = &r300_ring_start,
306 .irq_set = &rs600_irq_set,
307 .irq_process = &r100_irq_process,
308 .fence_ring_emit = &r300_fence_ring_emit,
309 .cs_parse = &r300_cs_parse,
310 .copy_blit = &r100_copy_blit,
311 .copy_dma = &r300_copy_dma,
312 .copy = &r300_copy_dma,
313 .set_engine_clock = &radeon_atom_set_engine_clock,
314 .set_memory_clock = &radeon_atom_set_memory_clock,
315 .set_pcie_lanes = NULL,
316 .set_clock_gating = &radeon_atom_set_clock_gating,
317};
318
319
320/*
321 * rv515
322 */
323void rv515_errata(struct radeon_device *rdev);
324void rv515_vram_info(struct radeon_device *rdev);
325int rv515_gpu_reset(struct radeon_device *rdev);
326int rv515_mc_init(struct radeon_device *rdev);
327void rv515_mc_fini(struct radeon_device *rdev);
328uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
329void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
330void rv515_ring_start(struct radeon_device *rdev);
331uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
332void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
333static struct radeon_asic rv515_asic = {
334 .errata = &rv515_errata,
335 .vram_info = &rv515_vram_info,
336 .gpu_reset = &rv515_gpu_reset,
337 .mc_init = &rv515_mc_init,
338 .mc_fini = &rv515_mc_fini,
339 .wb_init = &r100_wb_init,
340 .wb_fini = &r100_wb_fini,
341 .gart_enable = &r300_gart_enable,
342 .gart_disable = &rv370_pcie_gart_disable,
343 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
344 .gart_set_page = &rv370_pcie_gart_set_page,
345 .cp_init = &r100_cp_init,
346 .cp_fini = &r100_cp_fini,
347 .cp_disable = &r100_cp_disable,
348 .ring_start = &rv515_ring_start,
349 .irq_set = &r100_irq_set,
350 .irq_process = &r100_irq_process,
351 .fence_ring_emit = &r300_fence_ring_emit,
352 .cs_parse = &r100_cs_parse,
353 .copy_blit = &r100_copy_blit,
354 .copy_dma = &r300_copy_dma,
355 .copy = &r100_copy_blit,
356 .set_engine_clock = &radeon_atom_set_engine_clock,
357 .set_memory_clock = &radeon_atom_set_memory_clock,
358 .set_pcie_lanes = &rv370_set_pcie_lanes,
359 .set_clock_gating = &radeon_atom_set_clock_gating,
360};
361
362
363/*
364 * r520,rv530,rv560,rv570,r580
365 */
366void r520_errata(struct radeon_device *rdev);
367void r520_vram_info(struct radeon_device *rdev);
368int r520_mc_init(struct radeon_device *rdev);
369void r520_mc_fini(struct radeon_device *rdev);
370static struct radeon_asic r520_asic = {
371 .errata = &r520_errata,
372 .vram_info = &r520_vram_info,
373 .gpu_reset = &rv515_gpu_reset,
374 .mc_init = &r520_mc_init,
375 .mc_fini = &r520_mc_fini,
376 .wb_init = &r100_wb_init,
377 .wb_fini = &r100_wb_fini,
378 .gart_enable = &r300_gart_enable,
379 .gart_disable = &rv370_pcie_gart_disable,
380 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
381 .gart_set_page = &rv370_pcie_gart_set_page,
382 .cp_init = &r100_cp_init,
383 .cp_fini = &r100_cp_fini,
384 .cp_disable = &r100_cp_disable,
385 .ring_start = &rv515_ring_start,
386 .irq_set = &r100_irq_set,
387 .irq_process = &r100_irq_process,
388 .fence_ring_emit = &r300_fence_ring_emit,
389 .cs_parse = &r100_cs_parse,
390 .copy_blit = &r100_copy_blit,
391 .copy_dma = &r300_copy_dma,
392 .copy = &r100_copy_blit,
393 .set_engine_clock = &radeon_atom_set_engine_clock,
394 .set_memory_clock = &radeon_atom_set_memory_clock,
395 .set_pcie_lanes = &rv370_set_pcie_lanes,
396 .set_clock_gating = &radeon_atom_set_clock_gating,
397};
398
399/*
400 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710
401 */
402uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
403void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
404
405#endif
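
The radeon_asic structures above are plain function-pointer tables: every chip family fills in the same set of hooks and reuses an older implementation wherever the hardware did not change (rs690, for example, keeps the rs400 GART callbacks, and nearly every family keeps the r100 CP and writeback hooks). The stand-alone sketch below only illustrates the dispatch pattern such a table enables; the names are hypothetical, not the driver's actual helpers or macros.

#include <stdio.h>

struct fake_device;

/* one hook table per chip family; unchanged generations reuse callbacks */
struct fake_asic {
	int  (*mc_init)(struct fake_device *dev);
	void (*gart_tlb_flush)(struct fake_device *dev);
};

struct fake_device {
	const char *name;
	const struct fake_asic *asic;	/* chosen once, at probe time */
};

static int common_mc_init(struct fake_device *dev)
{
	printf("%s: common MC init\n", dev->name);
	return 0;
}

static void pci_gart_tlb_flush(struct fake_device *dev)
{
	printf("%s: flush PCI GART TLB\n", dev->name);
}

static void pcie_gart_tlb_flush(struct fake_device *dev)
{
	printf("%s: flush PCIE GART TLB\n", dev->name);
}

static const struct fake_asic older_asic = {
	.mc_init	= common_mc_init,
	.gart_tlb_flush	= pci_gart_tlb_flush,
};

static const struct fake_asic newer_asic = {
	.mc_init	= common_mc_init,	/* reused hook */
	.gart_tlb_flush	= pcie_gart_tlb_flush,	/* family-specific hook */
};

int main(void)
{
	struct fake_device a = { "older", &older_asic };
	struct fake_device b = { "newer", &newer_asic };

	/* callers never test the family; they only dispatch */
	a.asic->mc_init(&a);
	a.asic->gart_tlb_flush(&a);
	b.asic->mc_init(&b);
	b.asic->gart_tlb_flush(&b);
	return 0;
}
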
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644
index 000000000000..786632d3e378
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -0,0 +1,1298 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#include "atom.h"
31#include "atom-bits.h"
32
33/* from radeon_encoder.c */
34extern uint32_t
35radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
36 uint8_t dac);
37extern void radeon_link_encoder_connector(struct drm_device *dev);
38extern void
39radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
40 uint32_t supported_device);
41
42/* from radeon_connector.c */
43extern void
44radeon_add_atom_connector(struct drm_device *dev,
45 uint32_t connector_id,
46 uint32_t supported_device,
47 int connector_type,
48 struct radeon_i2c_bus_rec *i2c_bus,
49 bool linkb, uint32_t igp_lane_info);
50
51/* from radeon_legacy_encoder.c */
52extern void
53radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
54 uint32_t supported_device);
55
56union atom_supported_devices {
57 struct _ATOM_SUPPORTED_DEVICES_INFO info;
58 struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
59 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
60};
61
62static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
63 *dev, uint8_t id)
64{
65 struct radeon_device *rdev = dev->dev_private;
66 struct atom_context *ctx = rdev->mode_info.atom_context;
67 ATOM_GPIO_I2C_ASSIGMENT gpio;
68 struct radeon_i2c_bus_rec i2c;
69 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
70 struct _ATOM_GPIO_I2C_INFO *i2c_info;
71 uint16_t data_offset;
72
73 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
74 i2c.valid = false;
75
76 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
77
78 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
79
80 gpio = i2c_info->asGPIO_Info[id];
81
82 i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
83 i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
84 i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
85 i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
86 i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
87 i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
88 i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
89 i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
90 i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
91 i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
92 i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
93 i2c.put_data_mask = (1 << gpio.ucDataEnShift);
94 i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
95 i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
96 i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
97 i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
98 i2c.valid = true;
99
100 return i2c;
101}
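
radeon_lookup_gpio() above mostly translates what the GPIO_I2C_Info table stores - DWORD register indices and bit shifts - into the byte offsets and single-bit masks the rest of the i2c code expects. A minimal stand-alone sketch of just that translation, with illustrative names and example values rather than driver data:

#include <stdint.h>
#include <stdio.h>

/* hypothetical reg/mask pair, in the same shape as radeon_i2c_bus_rec */
struct i2c_line {
	uint32_t reg;	/* MMIO byte offset */
	uint32_t mask;	/* bit controlling this pin */
};

static struct i2c_line make_line(uint16_t dword_index, uint8_t shift)
{
	struct i2c_line l;

	l.reg  = (uint32_t)dword_index * 4;	/* DWORD index -> byte offset */
	l.mask = 1u << shift;			/* shift -> single-bit mask */
	return l;
}

int main(void)
{
	/* example values only; the real ones come from the BIOS table */
	struct i2c_line clk = make_line(0x1a, 3);

	printf("clk reg 0x%04x mask 0x%08x\n",
	       (unsigned)clk.reg, (unsigned)clk.mask);
	return 0;
}
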
102
103static bool radeon_atom_apply_quirks(struct drm_device *dev,
104 uint32_t supported_device,
105 int *connector_type,
106 struct radeon_i2c_bus_rec *i2c_bus)
107{
108
109 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
110 if ((dev->pdev->device == 0x791e) &&
111 (dev->pdev->subsystem_vendor == 0x1043) &&
112 (dev->pdev->subsystem_device == 0x826d)) {
113 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
114 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
115 *connector_type = DRM_MODE_CONNECTOR_DVID;
116 }
117
118 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
119 if ((dev->pdev->device == 0x7941) &&
120 (dev->pdev->subsystem_vendor == 0x147b) &&
121 (dev->pdev->subsystem_device == 0x2412)) {
122 if (*connector_type == DRM_MODE_CONNECTOR_DVII)
123 return false;
124 }
125
126 /* Falcon NW laptop lists vga ddc line for LVDS */
127 if ((dev->pdev->device == 0x5653) &&
128 (dev->pdev->subsystem_vendor == 0x1462) &&
129 (dev->pdev->subsystem_device == 0x0291)) {
130 if (*connector_type == DRM_MODE_CONNECTOR_LVDS)
131 i2c_bus->valid = false;
132 }
133
134 /* Funky macbooks */
135 if ((dev->pdev->device == 0x71C5) &&
136 (dev->pdev->subsystem_vendor == 0x106b) &&
137 (dev->pdev->subsystem_device == 0x0080)) {
138 if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
139 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
140 return false;
141 }
142
143 /* some BIOSes seem to report DAC on HDMI - they hurt me with their lies */
144 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
145 (*connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
146 if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
147 return false;
148 }
149 }
150
151 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
152 if ((dev->pdev->device == 0x9598) &&
153 (dev->pdev->subsystem_vendor == 0x1043) &&
154 (dev->pdev->subsystem_device == 0x01da)) {
155 if (*connector_type == DRM_MODE_CONNECTOR_HDMIB) {
156 *connector_type = DRM_MODE_CONNECTOR_DVID;
157 }
158 }
159
160 return true;
161}
162
163const int supported_devices_connector_convert[] = {
164 DRM_MODE_CONNECTOR_Unknown,
165 DRM_MODE_CONNECTOR_VGA,
166 DRM_MODE_CONNECTOR_DVII,
167 DRM_MODE_CONNECTOR_DVID,
168 DRM_MODE_CONNECTOR_DVIA,
169 DRM_MODE_CONNECTOR_SVIDEO,
170 DRM_MODE_CONNECTOR_Composite,
171 DRM_MODE_CONNECTOR_LVDS,
172 DRM_MODE_CONNECTOR_Unknown,
173 DRM_MODE_CONNECTOR_Unknown,
174 DRM_MODE_CONNECTOR_HDMIA,
175 DRM_MODE_CONNECTOR_HDMIB,
176 DRM_MODE_CONNECTOR_Unknown,
177 DRM_MODE_CONNECTOR_Unknown,
178 DRM_MODE_CONNECTOR_9PinDIN,
179 DRM_MODE_CONNECTOR_DisplayPort
180};
181
182const int object_connector_convert[] = {
183 DRM_MODE_CONNECTOR_Unknown,
184 DRM_MODE_CONNECTOR_DVII,
185 DRM_MODE_CONNECTOR_DVII,
186 DRM_MODE_CONNECTOR_DVID,
187 DRM_MODE_CONNECTOR_DVID,
188 DRM_MODE_CONNECTOR_VGA,
189 DRM_MODE_CONNECTOR_Composite,
190 DRM_MODE_CONNECTOR_SVIDEO,
191 DRM_MODE_CONNECTOR_Unknown,
192 DRM_MODE_CONNECTOR_9PinDIN,
193 DRM_MODE_CONNECTOR_Unknown,
194 DRM_MODE_CONNECTOR_HDMIA,
195 DRM_MODE_CONNECTOR_HDMIB,
196 DRM_MODE_CONNECTOR_HDMIB,
197 DRM_MODE_CONNECTOR_LVDS,
198 DRM_MODE_CONNECTOR_9PinDIN,
199 DRM_MODE_CONNECTOR_Unknown,
200 DRM_MODE_CONNECTOR_Unknown,
201 DRM_MODE_CONNECTOR_Unknown,
202 DRM_MODE_CONNECTOR_DisplayPort
203};
204
205bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
206{
207 struct radeon_device *rdev = dev->dev_private;
208 struct radeon_mode_info *mode_info = &rdev->mode_info;
209 struct atom_context *ctx = mode_info->atom_context;
210 int index = GetIndexIntoMasterTable(DATA, Object_Header);
211 uint16_t size, data_offset;
212 uint8_t frev, crev, line_mux = 0;
213 ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
214 ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
215 ATOM_OBJECT_HEADER *obj_header;
216 int i, j, path_size, device_support;
217 int connector_type;
218 uint16_t igp_lane_info;
219 bool linkb;
220 struct radeon_i2c_bus_rec ddc_bus;
221
222 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
223
224 if (data_offset == 0)
225 return false;
226
227 if (crev < 2)
228 return false;
229
230 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
231 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
232 (ctx->bios + data_offset +
233 le16_to_cpu(obj_header->usDisplayPathTableOffset));
234 con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
235 (ctx->bios + data_offset +
236 le16_to_cpu(obj_header->usConnectorObjectTableOffset));
237 device_support = le16_to_cpu(obj_header->usDeviceSupport);
238
239 path_size = 0;
240 for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
241 uint8_t *addr = (uint8_t *) path_obj->asDispPath;
242 ATOM_DISPLAY_OBJECT_PATH *path;
243 addr += path_size;
244 path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
245 path_size += le16_to_cpu(path->usSize);
246 linkb = false;
247
248 if (device_support & le16_to_cpu(path->usDeviceTag)) {
249 uint8_t con_obj_id, con_obj_num, con_obj_type;
250
251 con_obj_id =
252 (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
253 >> OBJECT_ID_SHIFT;
254 con_obj_num =
255 (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
256 >> ENUM_ID_SHIFT;
257 con_obj_type =
258 (le16_to_cpu(path->usConnObjectId) &
259 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
260
261 if ((le16_to_cpu(path->usDeviceTag) ==
262 ATOM_DEVICE_TV1_SUPPORT)
263 || (le16_to_cpu(path->usDeviceTag) ==
264 ATOM_DEVICE_TV2_SUPPORT)
265 || (le16_to_cpu(path->usDeviceTag) ==
266 ATOM_DEVICE_CV_SUPPORT))
267 continue;
268
269 if ((rdev->family == CHIP_RS780) &&
270 (con_obj_id ==
271 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
272 uint16_t igp_offset = 0;
273 ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
274
275 index =
276 GetIndexIntoMasterTable(DATA,
277 IntegratedSystemInfo);
278
279 atom_parse_data_header(ctx, index, &size, &frev,
280 &crev, &igp_offset);
281
282 if (crev >= 2) {
283 igp_obj =
284 (ATOM_INTEGRATED_SYSTEM_INFO_V2
285 *) (ctx->bios + igp_offset);
286
287 if (igp_obj) {
288 uint32_t slot_config, ct;
289
290 if (con_obj_num == 1)
291 slot_config =
292 igp_obj->
293 ulDDISlot1Config;
294 else
295 slot_config =
296 igp_obj->
297 ulDDISlot2Config;
298
299 ct = (slot_config >> 16) & 0xff;
300 connector_type =
301 object_connector_convert
302 [ct];
303 igp_lane_info =
304 slot_config & 0xffff;
305 } else
306 continue;
307 } else
308 continue;
309 } else {
310 igp_lane_info = 0;
311 connector_type =
312 object_connector_convert[con_obj_id];
313 }
314
315 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
316 continue;
317
318 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2);
319 j++) {
320 uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
321
322 enc_obj_id =
323 (le16_to_cpu(path->usGraphicObjIds[j]) &
324 OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
325 enc_obj_num =
326 (le16_to_cpu(path->usGraphicObjIds[j]) &
327 ENUM_ID_MASK) >> ENUM_ID_SHIFT;
328 enc_obj_type =
329 (le16_to_cpu(path->usGraphicObjIds[j]) &
330 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
331
332 /* FIXME: add support for router objects */
333 if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
334 if (enc_obj_num == 2)
335 linkb = true;
336 else
337 linkb = false;
338
339 radeon_add_atom_encoder(dev,
340 enc_obj_id,
341 le16_to_cpu
342 (path->
343 usDeviceTag));
344
345 }
346 }
347
348 /* look up gpio for ddc */
349 if ((le16_to_cpu(path->usDeviceTag) &
350 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
351 == 0) {
352 for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
353 if (le16_to_cpu(path->usConnObjectId) ==
354 le16_to_cpu(con_obj->asObjects[j].
355 usObjectID)) {
356 ATOM_COMMON_RECORD_HEADER
357 *record =
358 (ATOM_COMMON_RECORD_HEADER
359 *)
360 (ctx->bios + data_offset +
361 le16_to_cpu(con_obj->
362 asObjects[j].
363 usRecordOffset));
364 ATOM_I2C_RECORD *i2c_record;
365
366 while (record->ucRecordType > 0
367 && record->
368 ucRecordType <=
369 ATOM_MAX_OBJECT_RECORD_NUMBER) {
370 DRM_ERROR
371 ("record type %d\n",
372 record->
373 ucRecordType);
374 switch (record->
375 ucRecordType) {
376 case ATOM_I2C_RECORD_TYPE:
377 i2c_record =
378 (ATOM_I2C_RECORD
379 *) record;
380 line_mux =
381 i2c_record->
382 sucI2cId.
383 bfI2C_LineMux;
384 break;
385 }
386 record =
387 (ATOM_COMMON_RECORD_HEADER
388 *) ((char *)record
389 +
390 record->
391 ucRecordSize);
392 }
393 break;
394 }
395 }
396 } else
397 line_mux = 0;
398
399 if ((le16_to_cpu(path->usDeviceTag) ==
400 ATOM_DEVICE_TV1_SUPPORT)
401 || (le16_to_cpu(path->usDeviceTag) ==
402 ATOM_DEVICE_TV2_SUPPORT)
403 || (le16_to_cpu(path->usDeviceTag) ==
404 ATOM_DEVICE_CV_SUPPORT))
405 ddc_bus.valid = false;
406 else
407 ddc_bus = radeon_lookup_gpio(dev, line_mux);
408
409 radeon_add_atom_connector(dev,
410 le16_to_cpu(path->
411 usConnObjectId),
412 le16_to_cpu(path->
413 usDeviceTag),
414 connector_type, &ddc_bus,
415 linkb, igp_lane_info);
416
417 }
418 }
419
420 radeon_link_encoder_connector(dev);
421
422 return true;
423}
424
425struct bios_connector {
426 bool valid;
427 uint8_t line_mux;
428 uint16_t devices;
429 int connector_type;
430 struct radeon_i2c_bus_rec ddc_bus;
431};
432
433bool radeon_get_atom_connector_info_from_supported_devices_table(struct
434 drm_device
435 *dev)
436{
437 struct radeon_device *rdev = dev->dev_private;
438 struct radeon_mode_info *mode_info = &rdev->mode_info;
439 struct atom_context *ctx = mode_info->atom_context;
440 int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
441 uint16_t size, data_offset;
442 uint8_t frev, crev;
443 uint16_t device_support;
444 uint8_t dac;
445 union atom_supported_devices *supported_devices;
446 int i, j;
447 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
448
449 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
450
451 supported_devices =
452 (union atom_supported_devices *)(ctx->bios + data_offset);
453
454 device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
455
456 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
457 ATOM_CONNECTOR_INFO_I2C ci =
458 supported_devices->info.asConnInfo[i];
459
460 bios_connectors[i].valid = false;
461
462 if (!(device_support & (1 << i))) {
463 continue;
464 }
465
466 if (i == ATOM_DEVICE_CV_INDEX) {
467 DRM_DEBUG("Skipping Component Video\n");
468 continue;
469 }
470
471 if (i == ATOM_DEVICE_TV1_INDEX) {
472 DRM_DEBUG("Skipping TV Out\n");
473 continue;
474 }
475
476 bios_connectors[i].connector_type =
477 supported_devices_connector_convert[ci.sucConnectorInfo.
478 sbfAccess.
479 bfConnectorType];
480
481 if (bios_connectors[i].connector_type ==
482 DRM_MODE_CONNECTOR_Unknown)
483 continue;
484
485 dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
486
487 if ((rdev->family == CHIP_RS690) ||
488 (rdev->family == CHIP_RS740)) {
489 if ((i == ATOM_DEVICE_DFP2_INDEX)
490 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
491 bios_connectors[i].line_mux =
492 ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
493 else if ((i == ATOM_DEVICE_DFP3_INDEX)
494 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
495 bios_connectors[i].line_mux =
496 ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
497 else
498 bios_connectors[i].line_mux =
499 ci.sucI2cId.sbfAccess.bfI2C_LineMux;
500 } else
501 bios_connectors[i].line_mux =
502 ci.sucI2cId.sbfAccess.bfI2C_LineMux;
503
504 /* give tv unique connector ids */
505 if (i == ATOM_DEVICE_TV1_INDEX) {
506 bios_connectors[i].ddc_bus.valid = false;
507 bios_connectors[i].line_mux = 50;
508 } else if (i == ATOM_DEVICE_TV2_INDEX) {
509 bios_connectors[i].ddc_bus.valid = false;
510 bios_connectors[i].line_mux = 51;
511 } else if (i == ATOM_DEVICE_CV_INDEX) {
512 bios_connectors[i].ddc_bus.valid = false;
513 bios_connectors[i].line_mux = 52;
514 } else
515 bios_connectors[i].ddc_bus =
516 radeon_lookup_gpio(dev,
517 bios_connectors[i].line_mux);
518
519 /* Always set the connector type to VGA for CRT1/CRT2. If they are
520  * shared with a DVI port, we'll pick up the DVI connector when we
521  * merge the outputs. Some BIOSes incorrectly list VGA ports as DVI.
522 */
523 if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
524 bios_connectors[i].connector_type =
525 DRM_MODE_CONNECTOR_VGA;
526
527 if (!radeon_atom_apply_quirks
528 (dev, (1 << i), &bios_connectors[i].connector_type,
529 &bios_connectors[i].ddc_bus))
530 continue;
531
532 bios_connectors[i].valid = true;
533 bios_connectors[i].devices = (1 << i);
534
535 if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
536 radeon_add_atom_encoder(dev,
537 radeon_get_encoder_id(dev,
538 (1 << i),
539 dac),
540 (1 << i));
541 else
542 radeon_add_legacy_encoder(dev,
543 radeon_get_encoder_id(dev,
544 (1 <<
545 i),
546 dac),
547 (1 << i));
548 }
549
550 /* combine shared connectors */
551 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
552 if (bios_connectors[i].valid) {
553 for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
554 if (bios_connectors[j].valid && (i != j)) {
555 if (bios_connectors[i].line_mux ==
556 bios_connectors[j].line_mux) {
557 if (((bios_connectors[i].
558 devices &
559 (ATOM_DEVICE_DFP_SUPPORT))
560 && (bios_connectors[j].
561 devices &
562 (ATOM_DEVICE_CRT_SUPPORT)))
563 ||
564 ((bios_connectors[j].
565 devices &
566 (ATOM_DEVICE_DFP_SUPPORT))
567 && (bios_connectors[i].
568 devices &
569 (ATOM_DEVICE_CRT_SUPPORT)))) {
570 bios_connectors[i].
571 devices |=
572 bios_connectors[j].
573 devices;
574 bios_connectors[i].
575 connector_type =
576 DRM_MODE_CONNECTOR_DVII;
577 bios_connectors[j].
578 valid = false;
579 }
580 }
581 }
582 }
583 }
584 }
585
586 /* add the connectors */
587 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
588 if (bios_connectors[i].valid)
589 radeon_add_atom_connector(dev,
590 bios_connectors[i].line_mux,
591 bios_connectors[i].devices,
592 bios_connectors[i].
593 connector_type,
594 &bios_connectors[i].ddc_bus,
595 false, 0);
596 }
597
598 radeon_link_encoder_connector(dev);
599
600 return true;
601}
602
603union firmware_info {
604 ATOM_FIRMWARE_INFO info;
605 ATOM_FIRMWARE_INFO_V1_2 info_12;
606 ATOM_FIRMWARE_INFO_V1_3 info_13;
607 ATOM_FIRMWARE_INFO_V1_4 info_14;
608};
609
610bool radeon_atom_get_clock_info(struct drm_device *dev)
611{
612 struct radeon_device *rdev = dev->dev_private;
613 struct radeon_mode_info *mode_info = &rdev->mode_info;
614 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
615 union firmware_info *firmware_info;
616 uint8_t frev, crev;
617 struct radeon_pll *p1pll = &rdev->clock.p1pll;
618 struct radeon_pll *p2pll = &rdev->clock.p2pll;
619 struct radeon_pll *spll = &rdev->clock.spll;
620 struct radeon_pll *mpll = &rdev->clock.mpll;
621 uint16_t data_offset;
622
623 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
624 &crev, &data_offset);
625
626 firmware_info =
627 (union firmware_info *)(mode_info->atom_context->bios +
628 data_offset);
629
630 if (firmware_info) {
631 /* pixel clocks */
632 p1pll->reference_freq =
633 le16_to_cpu(firmware_info->info.usReferenceClock);
634 p1pll->reference_div = 0;
635
636 p1pll->pll_out_min =
637 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
638 p1pll->pll_out_max =
639 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
640
641 if (p1pll->pll_out_min == 0) {
642 if (ASIC_IS_AVIVO(rdev))
643 p1pll->pll_out_min = 64800;
644 else
645 p1pll->pll_out_min = 20000;
646 }
647
648 p1pll->pll_in_min =
649 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
650 p1pll->pll_in_max =
651 le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
652
653 *p2pll = *p1pll;
654
655 /* system clock */
656 spll->reference_freq =
657 le16_to_cpu(firmware_info->info.usReferenceClock);
658 spll->reference_div = 0;
659
660 spll->pll_out_min =
661 le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
662 spll->pll_out_max =
663 le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
664
665 /* some BIOSes report 0 here; fall back to sane defaults */
666 if (spll->pll_out_min == 0) {
667 if (ASIC_IS_AVIVO(rdev))
668 spll->pll_out_min = 64800;
669 else
670 spll->pll_out_min = 20000;
671 }
672
673 spll->pll_in_min =
674 le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
675 spll->pll_in_max =
676 le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
677
678 /* memory clock */
679 mpll->reference_freq =
680 le16_to_cpu(firmware_info->info.usReferenceClock);
681 mpll->reference_div = 0;
682
683 mpll->pll_out_min =
684 le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
685 mpll->pll_out_max =
686 le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
687
688 /* some BIOSes report 0 here; fall back to sane defaults */
689 if (mpll->pll_out_min == 0) {
690 if (ASIC_IS_AVIVO(rdev))
691 mpll->pll_out_min = 64800;
692 else
693 mpll->pll_out_min = 20000;
694 }
695
696 mpll->pll_in_min =
697 le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
698 mpll->pll_in_max =
699 le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
700
701 rdev->clock.default_sclk =
702 le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
703 rdev->clock.default_mclk =
704 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
705
706 return true;
707 }
708 return false;
709}
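
The limits gathered here are consumed later when divider combinations are picked for a requested clock; the usual relation is fout = fref * fb_div / (ref_div * post_div), with the result expected to land inside pll_out_min/pll_out_max. The sketch below only illustrates that relation, assuming (as the "10 khz" comments elsewhere in this patch suggest) that the values are in 10 kHz units; it is not the driver's actual PLL computation.

#include <stdint.h>
#include <stdio.h>

/* limits as filled in above, assumed to be in 10 kHz units */
struct pll_limits {
	uint32_t reference_freq;
	uint32_t pll_out_min, pll_out_max;
};

/* classic divider relation, shown for illustration only */
static uint32_t pll_output(const struct pll_limits *p, uint32_t fb_div,
			   uint32_t ref_div, uint32_t post_div)
{
	uint64_t out = (uint64_t)p->reference_freq * fb_div;

	out /= (uint64_t)ref_div * post_div;
	if (out < p->pll_out_min || out > p->pll_out_max)
		printf("warning: %llu outside [%u, %u]\n",
		       (unsigned long long)out,
		       (unsigned)p->pll_out_min, (unsigned)p->pll_out_max);
	return (uint32_t)out;
}

int main(void)
{
	struct pll_limits p1 = {
		.reference_freq	= 2700,		/* 27 MHz */
		.pll_out_min	= 20000,	/* 200 MHz, the non-AVIVO default */
		.pll_out_max	= 40000,	/* example value only */
	};

	/* 27 MHz * 64 / (4 * 2) = 216 MHz, i.e. 21600 in 10 kHz units */
	printf("%u\n", (unsigned)pll_output(&p1, 64, 4, 2));
	return 0;
}
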
710
711struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct
712 radeon_encoder
713 *encoder)
714{
715 struct drm_device *dev = encoder->base.dev;
716 struct radeon_device *rdev = dev->dev_private;
717 struct radeon_mode_info *mode_info = &rdev->mode_info;
718 int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
719 uint16_t data_offset;
720 struct _ATOM_TMDS_INFO *tmds_info;
721 uint8_t frev, crev;
722 uint16_t maxfreq;
723 int i;
724 struct radeon_encoder_int_tmds *tmds = NULL;
725
726 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
727 &crev, &data_offset);
728
729 tmds_info =
730 (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
731 data_offset);
732
733 if (tmds_info) {
734 tmds =
735 kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
736
737 if (!tmds)
738 return NULL;
739
740 maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
741 for (i = 0; i < 4; i++) {
742 tmds->tmds_pll[i].freq =
743 le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
744 tmds->tmds_pll[i].value =
745 tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
746 tmds->tmds_pll[i].value |=
747 (tmds_info->asMiscInfo[i].
748 ucPLL_VCO_Gain & 0x3f) << 6;
749 tmds->tmds_pll[i].value |=
750 (tmds_info->asMiscInfo[i].
751 ucPLL_DutyCycle & 0xf) << 12;
752 tmds->tmds_pll[i].value |=
753 (tmds_info->asMiscInfo[i].
754 ucPLL_VoltageSwing & 0xf) << 16;
755
756 DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
757 tmds->tmds_pll[i].freq,
758 tmds->tmds_pll[i].value);
759
760 if (maxfreq == tmds->tmds_pll[i].freq) {
761 tmds->tmds_pll[i].freq = 0xffffffff;
762 break;
763 }
764 }
765 }
766 return tmds;
767}
768
769union lvds_info {
770 struct _ATOM_LVDS_INFO info;
771 struct _ATOM_LVDS_INFO_V12 info_12;
772};
773
774struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
775 radeon_encoder
776 *encoder)
777{
778 struct drm_device *dev = encoder->base.dev;
779 struct radeon_device *rdev = dev->dev_private;
780 struct radeon_mode_info *mode_info = &rdev->mode_info;
781 int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
782 uint16_t data_offset;
783 union lvds_info *lvds_info;
784 uint8_t frev, crev;
785 struct radeon_encoder_atom_dig *lvds = NULL;
786
787 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
788 &crev, &data_offset);
789
790 lvds_info =
791 (union lvds_info *)(mode_info->atom_context->bios + data_offset);
792
793 if (lvds_info) {
794 lvds =
795 kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
796
797 if (!lvds)
798 return NULL;
799
800 lvds->native_mode.dotclock =
801 le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
802 lvds->native_mode.panel_xres =
803 le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
804 lvds->native_mode.panel_yres =
805 le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
806 lvds->native_mode.hblank =
807 le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
808 lvds->native_mode.hoverplus =
809 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
810 lvds->native_mode.hsync_width =
811 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
812 lvds->native_mode.vblank =
813 le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
814 lvds->native_mode.voverplus =
815 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
816 lvds->native_mode.vsync_width =
817 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
818 lvds->panel_pwr_delay =
819 le16_to_cpu(lvds_info->info.usOffDelayInMs);
820 lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
821
822 encoder->native_mode = lvds->native_mode;
823 }
824 return lvds;
825}
826
827struct radeon_encoder_primary_dac *
828radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
829{
830 struct drm_device *dev = encoder->base.dev;
831 struct radeon_device *rdev = dev->dev_private;
832 struct radeon_mode_info *mode_info = &rdev->mode_info;
833 int index = GetIndexIntoMasterTable(DATA, CompassionateData);
834 uint16_t data_offset;
835 struct _COMPASSIONATE_DATA *dac_info;
836 uint8_t frev, crev;
837 uint8_t bg, dac;
838 int i;
839 struct radeon_encoder_primary_dac *p_dac = NULL;
840
841 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
842
843 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
844
845 if (dac_info) {
846 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
847
848 if (!p_dac)
849 return NULL;
850
851 bg = dac_info->ucDAC1_BG_Adjustment;
852 dac = dac_info->ucDAC1_DAC_Adjustment;
853 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
854
855 }
856 return p_dac;
857}
858
859struct radeon_encoder_tv_dac *
860radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
861{
862 struct drm_device *dev = encoder->base.dev;
863 struct radeon_device *rdev = dev->dev_private;
864 struct radeon_mode_info *mode_info = &rdev->mode_info;
865 int index = GetIndexIntoMasterTable(DATA, CompassionateData);
866 uint16_t data_offset;
867 struct _COMPASSIONATE_DATA *dac_info;
868 uint8_t frev, crev;
869 uint8_t bg, dac;
870 int i;
871 struct radeon_encoder_tv_dac *tv_dac = NULL;
872
873 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
874
875 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
876
877 if (dac_info) {
878 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
879
880 if (!tv_dac)
881 return NULL;
882
883 bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
884 dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
885 tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
886
887 bg = dac_info->ucDAC2_PAL_BG_Adjustment;
888 dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
889 tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
890
891 bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
892 dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
893 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
894
895 }
896 return tv_dac;
897}
898
899void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
900{
901 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
902 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
903
904 args.ucEnable = enable;
905
906 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
907}
908
909void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
910{
911 ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
912 int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
913
914 args.ucEnable = enable;
915
916 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
917}
918
919void radeon_atom_set_engine_clock(struct radeon_device *rdev,
920 uint32_t eng_clock)
921{
922 SET_ENGINE_CLOCK_PS_ALLOCATION args;
923 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
924
925 args.ulTargetEngineClock = eng_clock; /* 10 kHz */
926
927 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
928}
929
930void radeon_atom_set_memory_clock(struct radeon_device *rdev,
931 uint32_t mem_clock)
932{
933 SET_MEMORY_CLOCK_PS_ALLOCATION args;
934 int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
935
936 if (rdev->flags & RADEON_IS_IGP)
937 return;
938
939 args.ulTargetMemoryClock = mem_clock; /* 10 kHz */
940
941 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
942}
943
944void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
945{
946 struct radeon_device *rdev = dev->dev_private;
947 uint32_t bios_2_scratch, bios_6_scratch;
948
949 if (rdev->family >= CHIP_R600) {
950 bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
951 bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
952 } else {
953 bios_2_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
954 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
955 }
956
957 /* let the bios control the backlight */
958 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
959
960 /* tell the bios not to handle mode switching */
961 bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
962
963 if (rdev->family >= CHIP_R600) {
964 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
965 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
966 } else {
967 WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
968 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
969 }
970
971}
972
973void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
974{
975 struct drm_device *dev = encoder->dev;
976 struct radeon_device *rdev = dev->dev_private;
977 uint32_t bios_6_scratch;
978
979 if (rdev->family >= CHIP_R600)
980 bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
981 else
982 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
983
984 if (lock)
985 bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
986 else
987 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
988
989 if (rdev->family >= CHIP_R600)
990 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
991 else
992 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
993}
994
995/* at some point we may want to break this out into individual functions */
996void
997radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
998 struct drm_encoder *encoder,
999 bool connected)
1000{
1001 struct drm_device *dev = connector->dev;
1002 struct radeon_device *rdev = dev->dev_private;
1003 struct radeon_connector *radeon_connector =
1004 to_radeon_connector(connector);
1005 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1006 uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
1007
1008 if (rdev->family >= CHIP_R600) {
1009 bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
1010 bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
1011 bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
1012 } else {
1013 bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
1014 bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
1015 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
1016 }
1017
1018 if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
1019 (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
1020 if (connected) {
1021 DRM_DEBUG("TV1 connected\n");
1022 bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
1023 bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
1024 } else {
1025 DRM_DEBUG("TV1 disconnected\n");
1026 bios_0_scratch &= ~ATOM_S0_TV1_MASK;
1027 bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
1028 bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
1029 }
1030 }
1031 if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
1032 (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
1033 if (connected) {
1034 DRM_DEBUG("CV connected\n");
1035 bios_3_scratch |= ATOM_S3_CV_ACTIVE;
1036 bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
1037 } else {
1038 DRM_DEBUG("CV disconnected\n");
1039 bios_0_scratch &= ~ATOM_S0_CV_MASK;
1040 bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
1041 bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
1042 }
1043 }
1044 if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
1045 (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
1046 if (connected) {
1047 DRM_DEBUG("LCD1 connected\n");
1048 bios_0_scratch |= ATOM_S0_LCD1;
1049 bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
1050 bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
1051 } else {
1052 DRM_DEBUG("LCD1 disconnected\n");
1053 bios_0_scratch &= ~ATOM_S0_LCD1;
1054 bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
1055 bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
1056 }
1057 }
1058 if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
1059 (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
1060 if (connected) {
1061 DRM_DEBUG("CRT1 connected\n");
1062 bios_0_scratch |= ATOM_S0_CRT1_COLOR;
1063 bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
1064 bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
1065 } else {
1066 DRM_DEBUG("CRT1 disconnected\n");
1067 bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
1068 bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
1069 bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
1070 }
1071 }
1072 if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
1073 (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
1074 if (connected) {
1075 DRM_DEBUG("CRT2 connected\n");
1076 bios_0_scratch |= ATOM_S0_CRT2_COLOR;
1077 bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
1078 bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
1079 } else {
1080 DRM_DEBUG("CRT2 disconnected\n");
1081 bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
1082 bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
1083 bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
1084 }
1085 }
1086 if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
1087 (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
1088 if (connected) {
1089 DRM_DEBUG("DFP1 connected\n");
1090 bios_0_scratch |= ATOM_S0_DFP1;
1091 bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
1092 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
1093 } else {
1094 DRM_DEBUG("DFP1 disconnected\n");
1095 bios_0_scratch &= ~ATOM_S0_DFP1;
1096 bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
1097 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
1098 }
1099 }
1100 if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
1101 (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
1102 if (connected) {
1103 DRM_DEBUG("DFP2 connected\n");
1104 bios_0_scratch |= ATOM_S0_DFP2;
1105 bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
1106 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
1107 } else {
1108 DRM_DEBUG("DFP2 disconnected\n");
1109 bios_0_scratch &= ~ATOM_S0_DFP2;
1110 bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
1111 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
1112 }
1113 }
1114 if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
1115 (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
1116 if (connected) {
1117 DRM_DEBUG("DFP3 connected\n");
1118 bios_0_scratch |= ATOM_S0_DFP3;
1119 bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
1120 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
1121 } else {
1122 DRM_DEBUG("DFP3 disconnected\n");
1123 bios_0_scratch &= ~ATOM_S0_DFP3;
1124 bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
1125 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
1126 }
1127 }
1128 if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
1129 (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
1130 if (connected) {
1131 DRM_DEBUG("DFP4 connected\n");
1132 bios_0_scratch |= ATOM_S0_DFP4;
1133 bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
1134 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
1135 } else {
1136 DRM_DEBUG("DFP4 disconnected\n");
1137 bios_0_scratch &= ~ATOM_S0_DFP4;
1138 bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
1139 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
1140 }
1141 }
1142 if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
1143 (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
1144 if (connected) {
1145 DRM_DEBUG("DFP5 connected\n");
1146 bios_0_scratch |= ATOM_S0_DFP5;
1147 bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
1148 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
1149 } else {
1150 DRM_DEBUG("DFP5 disconnected\n");
1151 bios_0_scratch &= ~ATOM_S0_DFP5;
1152 bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
1153 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
1154 }
1155 }
1156
1157 if (rdev->family >= CHIP_R600) {
1158 WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
1159 WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
1160 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
1161 } else {
1162 WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
1163 WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
1164 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
1165 }
1166}
1167
1168void
1169radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
1170{
1171 struct drm_device *dev = encoder->dev;
1172 struct radeon_device *rdev = dev->dev_private;
1173 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1174 uint32_t bios_3_scratch;
1175
1176 if (rdev->family >= CHIP_R600)
1177 bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
1178 else
1179 bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
1180
1181 if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1182 bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
1183 bios_3_scratch |= (crtc << 18);
1184 }
1185 if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1186 bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
1187 bios_3_scratch |= (crtc << 24);
1188 }
1189 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
1190 bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
1191 bios_3_scratch |= (crtc << 16);
1192 }
1193 if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
1194 bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
1195 bios_3_scratch |= (crtc << 20);
1196 }
1197 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
1198 bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
1199 bios_3_scratch |= (crtc << 17);
1200 }
1201 if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
1202 bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
1203 bios_3_scratch |= (crtc << 19);
1204 }
1205 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
1206 bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
1207 bios_3_scratch |= (crtc << 23);
1208 }
1209 if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
1210 bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
1211 bios_3_scratch |= (crtc << 25);
1212 }
1213
1214 if (rdev->family >= CHIP_R600)
1215 WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
1216 else
1217 WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
1218}
1219
1220void
1221radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
1222{
1223 struct drm_device *dev = encoder->dev;
1224 struct radeon_device *rdev = dev->dev_private;
1225 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1226 uint32_t bios_2_scratch;
1227
1228 if (rdev->family >= CHIP_R600)
1229 bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
1230 else
1231 bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
1232
1233 if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1234 if (on)
1235 bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
1236 else
1237 bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
1238 }
1239 if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1240 if (on)
1241 bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
1242 else
1243 bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
1244 }
1245 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
1246 if (on)
1247 bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
1248 else
1249 bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
1250 }
1251 if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
1252 if (on)
1253 bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
1254 else
1255 bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
1256 }
1257 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
1258 if (on)
1259 bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
1260 else
1261 bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
1262 }
1263 if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
1264 if (on)
1265 bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
1266 else
1267 bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
1268 }
1269 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
1270 if (on)
1271 bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
1272 else
1273 bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
1274 }
1275 if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
1276 if (on)
1277 bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
1278 else
1279 bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
1280 }
1281 if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
1282 if (on)
1283 bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
1284 else
1285 bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
1286 }
1287 if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
1288 if (on)
1289 bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
1290 else
1291 bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
1292 }
1293
1294 if (rdev->family >= CHIP_R600)
1295 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
1296 else
1297 WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
1298}
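
All of the scratch-register helpers above share one shape: pick the pre-R600 or R600 register bank, read the BIOS scratch register, set or clear the relevant bits, and write the value back. A compact stand-alone model of that read-modify-write pattern, using placeholder register names and a placeholder bit:

#include <stdint.h>
#include <stdio.h>

enum { OLD_BIOS_6_SCRATCH, NEW_BIOS_6_SCRATCH, NUM_REGS };
#define CRITICAL_STATE (1u << 3)	/* placeholder bit, not the real one */

static uint32_t regs[NUM_REGS];	/* stand-in for the MMIO scratch registers */

static void set_critical_state(int newer_family, int lock)
{
	int reg = newer_family ? NEW_BIOS_6_SCRATCH : OLD_BIOS_6_SCRATCH;

	/* read-modify-write, following the shape of the helpers above */
	if (lock)
		regs[reg] |= CRITICAL_STATE;
	else
		regs[reg] &= ~CRITICAL_STATE;
}

int main(void)
{
	set_critical_state(1, 1);
	printf("scratch6 = 0x%08x\n", (unsigned)regs[NEW_BIOS_6_SCRATCH]);
	set_critical_state(1, 0);
	printf("scratch6 = 0x%08x\n", (unsigned)regs[NEW_BIOS_6_SCRATCH]);
	return 0;
}
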
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644
index 000000000000..c44403a2ca76
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -0,0 +1,133 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Jerome Glisse
23 */
24#include <drm/drmP.h>
25#include <drm/radeon_drm.h>
26#include "radeon_reg.h"
27#include "radeon.h"
28
29void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
30 unsigned sdomain, unsigned ddomain)
31{
32 struct radeon_object *dobj = NULL;
33 struct radeon_object *sobj = NULL;
34 struct radeon_fence *fence = NULL;
35 uint64_t saddr, daddr;
36 unsigned long start_jiffies;
37 unsigned long end_jiffies;
38 unsigned long time;
39 unsigned i, n, size;
40 int r;
41
42 size = bsize;
43 n = 1024;
44 r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
45 if (r) {
46 goto out_cleanup;
47 }
48 r = radeon_object_pin(sobj, sdomain, &saddr);
49 if (r) {
50 goto out_cleanup;
51 }
52 r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
53 if (r) {
54 goto out_cleanup;
55 }
56 r = radeon_object_pin(dobj, ddomain, &daddr);
57 if (r) {
58 goto out_cleanup;
59 }
60 start_jiffies = jiffies;
61 for (i = 0; i < n; i++) {
62 r = radeon_fence_create(rdev, &fence);
63 if (r) {
64 goto out_cleanup;
65 }
66 r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
67 if (r) {
68 goto out_cleanup;
69 }
70 r = radeon_fence_wait(fence, false);
71 if (r) {
72 goto out_cleanup;
73 }
74 radeon_fence_unref(&fence);
75 }
76 end_jiffies = jiffies;
77 time = end_jiffies - start_jiffies;
78 time = jiffies_to_msecs(time);
79 if (time > 0) {
80 i = ((n * size) >> 10) / time;
81 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
82 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
83 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
84 }
85 start_jiffies = jiffies;
86 for (i = 0; i < n; i++) {
87 r = radeon_fence_create(rdev, &fence);
88 if (r) {
89 goto out_cleanup;
90 }
91 r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
92 if (r) {
93 goto out_cleanup;
94 }
95 r = radeon_fence_wait(fence, false);
96 if (r) {
97 goto out_cleanup;
98 }
99 radeon_fence_unref(&fence);
100 }
101 end_jiffies = jiffies;
102 time = end_jiffies - start_jiffies;
103 time = jiffies_to_msecs(time);
104 if (time > 0) {
105 i = ((n * size) >> 10) / time;
106 printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
107 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
108 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
109 }
110out_cleanup:
111 if (sobj) {
112 radeon_object_unpin(sobj);
113 radeon_object_unref(&sobj);
114 }
115 if (dobj) {
116 radeon_object_unpin(dobj);
117 radeon_object_unref(&dobj);
118 }
119 if (fence) {
120 radeon_fence_unref(&fence);
121 }
122 if (r) {
123 printk(KERN_WARNING "Error while benchmarking BO move.\n");
124 }
125}
126
127void radeon_benchmark(struct radeon_device *rdev)
128{
129 radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
130 RADEON_GEM_DOMAIN_VRAM);
131 radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
132 RADEON_GEM_DOMAIN_GTT);
133}
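
The throughput figures printed above follow directly from the loop: each pass moves n * size bytes, so ((n * size) >> 10) / time gives KB per millisecond, multiplying by 1000 gives KB/s, and dividing that by 1024 approximates MB/s. A tiny worked example using the values from this file and an assumed (not measured) elapsed time:

#include <stdio.h>

int main(void)
{
	unsigned n = 1024, size = 1024 * 1024;	/* loop count and BO size used above */
	unsigned long ms = 2000;		/* assumed elapsed time */
	unsigned long kb_per_ms = ((unsigned long)n * size >> 10) / ms;

	/* 1 GiB moved in 2 s: 524 KB/ms, 524000 KB/s, ~511 MB/s */
	printf("%lu KB/ms, %lu KB/s, %lu MB/s\n",
	       kb_per_ms, kb_per_ms * 1000, kb_per_ms * 1000 / 1024);
	return 0;
}
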
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644
index 000000000000..96e37a6e7ce4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -0,0 +1,390 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31#include "atom.h"
32
33/*
34 * BIOS.
35 */
36static bool radeon_read_bios(struct radeon_device *rdev)
37{
38 uint8_t __iomem *bios;
39 size_t size;
40
41 rdev->bios = NULL;
42 bios = pci_map_rom(rdev->pdev, &size);
43 if (!bios) {
44 return false;
45 }
46
47 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
48 pci_unmap_rom(rdev->pdev, bios);
49 return false;
50 }
51 rdev->bios = kmalloc(size, GFP_KERNEL);
52 if (rdev->bios == NULL) {
53 pci_unmap_rom(rdev->pdev, bios);
54 return false;
55 }
56 memcpy(rdev->bios, bios, size);
57 pci_unmap_rom(rdev->pdev, bios);
58 return true;
59}
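
The 0x55/0xaa test above is the standard PCI expansion ROM signature check: a valid video BIOS image must begin with those two bytes. A trivial stand-alone version of the same check:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int rom_looks_valid(const uint8_t *rom, size_t size)
{
	/* PCI expansion ROMs begin with the 0x55 0xaa signature */
	return size >= 2 && rom[0] == 0x55 && rom[1] == 0xaa;
}

int main(void)
{
	uint8_t good[] = { 0x55, 0xaa, 0x00 };
	uint8_t bad[]  = { 0xff, 0xff, 0xff };

	printf("good: %d, bad: %d\n",
	       rom_looks_valid(good, sizeof(good)),
	       rom_looks_valid(bad, sizeof(bad)));
	return 0;
}
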
60
61static bool r700_read_disabled_bios(struct radeon_device *rdev)
62{
63 uint32_t viph_control;
64 uint32_t bus_cntl;
65 uint32_t d1vga_control;
66 uint32_t d2vga_control;
67 uint32_t vga_render_control;
68 uint32_t rom_cntl;
69 uint32_t cg_spll_func_cntl = 0;
70 uint32_t cg_spll_status;
71 bool r;
72
73 viph_control = RREG32(RADEON_VIPH_CONTROL);
74 bus_cntl = RREG32(RADEON_BUS_CNTL);
75 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
76 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
77 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
78 rom_cntl = RREG32(R600_ROM_CNTL);
79
80 /* disable VIP */
81 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
82 /* enable the rom */
83 WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
84 /* Disable VGA mode */
85 WREG32(AVIVO_D1VGA_CONTROL,
86 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
87 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
88 WREG32(AVIVO_D2VGA_CONTROL,
89 (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
90 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
91 WREG32(AVIVO_VGA_RENDER_CONTROL,
92 (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
93
94 if (rdev->family == CHIP_RV730) {
95 cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
96
97 /* enable bypass mode */
98 WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
99 R600_SPLL_BYPASS_EN));
100
101 /* wait for SPLL_CHG_STATUS to change to 1 */
102 cg_spll_status = 0;
103 while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
104 cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
105
106 WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
107 } else
108 WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
109
110 r = radeon_read_bios(rdev);
111
112 /* restore regs */
113 if (rdev->family == CHIP_RV730) {
114 WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
115
116 /* wait for SPLL_CHG_STATUS to change to 1 */
117 cg_spll_status = 0;
118 while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
119 cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
120 }
121 WREG32(RADEON_VIPH_CONTROL, viph_control);
122 WREG32(RADEON_BUS_CNTL, bus_cntl);
123 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
124 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
125 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
126 WREG32(R600_ROM_CNTL, rom_cntl);
127 return r;
128}
129
130static bool r600_read_disabled_bios(struct radeon_device *rdev)
131{
132 uint32_t viph_control;
133 uint32_t bus_cntl;
134 uint32_t d1vga_control;
135 uint32_t d2vga_control;
136 uint32_t vga_render_control;
137 uint32_t rom_cntl;
138 uint32_t general_pwrmgt;
139 uint32_t low_vid_lower_gpio_cntl;
140 uint32_t medium_vid_lower_gpio_cntl;
141 uint32_t high_vid_lower_gpio_cntl;
142 uint32_t ctxsw_vid_lower_gpio_cntl;
143 uint32_t lower_gpio_enable;
144 bool r;
145
146 viph_control = RREG32(RADEON_VIPH_CONTROL);
147 bus_cntl = RREG32(RADEON_BUS_CNTL);
148 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
149 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
150 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
151 rom_cntl = RREG32(R600_ROM_CNTL);
152 general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
153 low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
154 medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
155 high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
156 ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
157 lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
158
159 /* disable VIP */
160 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
161 /* enable the rom */
162 WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
163 /* Disable VGA mode */
164 WREG32(AVIVO_D1VGA_CONTROL,
165 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
166 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
167 WREG32(AVIVO_D2VGA_CONTROL,
168 (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
169 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
170 WREG32(AVIVO_VGA_RENDER_CONTROL,
171 (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
172
173 WREG32(R600_ROM_CNTL,
174 ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
175 (1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
176 R600_SCK_OVERWRITE));
177
178 WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
179 WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
180 (low_vid_lower_gpio_cntl & ~0x400));
181 WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
182 (medium_vid_lower_gpio_cntl & ~0x400));
183 WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
184 (high_vid_lower_gpio_cntl & ~0x400));
185 WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
186 (ctxsw_vid_lower_gpio_cntl & ~0x400));
187 WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
188
189 r = radeon_read_bios(rdev);
190
191 /* restore regs */
192 WREG32(RADEON_VIPH_CONTROL, viph_control);
193 WREG32(RADEON_BUS_CNTL, bus_cntl);
194 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
195 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
196 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
197 WREG32(R600_ROM_CNTL, rom_cntl);
198 WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
199 WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
200 WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
201 WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
202 WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
203 WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
204 return r;
205}
206
207static bool avivo_read_disabled_bios(struct radeon_device *rdev)
208{
209 uint32_t seprom_cntl1;
210 uint32_t viph_control;
211 uint32_t bus_cntl;
212 uint32_t d1vga_control;
213 uint32_t d2vga_control;
214 uint32_t vga_render_control;
215 uint32_t gpiopad_a;
216 uint32_t gpiopad_en;
217 uint32_t gpiopad_mask;
218 bool r;
219
220 seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
221 viph_control = RREG32(RADEON_VIPH_CONTROL);
222 bus_cntl = RREG32(RADEON_BUS_CNTL);
223 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
224 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
225 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
226 gpiopad_a = RREG32(RADEON_GPIOPAD_A);
227 gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
228 gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
229
230 WREG32(RADEON_SEPROM_CNTL1,
231 ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
232 (0xc << RADEON_SCK_PRESCALE_SHIFT)));
233 WREG32(RADEON_GPIOPAD_A, 0);
234 WREG32(RADEON_GPIOPAD_EN, 0);
235 WREG32(RADEON_GPIOPAD_MASK, 0);
236
237 /* disable VIP */
238 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
239
240 /* enable the rom */
241 WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
242
243 /* Disable VGA mode */
244 WREG32(AVIVO_D1VGA_CONTROL,
245 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
246 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
247 WREG32(AVIVO_D2VGA_CONTROL,
248 (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
249 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
250 WREG32(AVIVO_VGA_RENDER_CONTROL,
251 (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
252
253 r = radeon_read_bios(rdev);
254
255 /* restore regs */
256 WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
257 WREG32(RADEON_VIPH_CONTROL, viph_control);
258 WREG32(RADEON_BUS_CNTL, bus_cntl);
259 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
260 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
261 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
262 WREG32(RADEON_GPIOPAD_A, gpiopad_a);
263 WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
264 WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
265 return r;
266}
267
268static bool legacy_read_disabled_bios(struct radeon_device *rdev)
269{
270 uint32_t seprom_cntl1;
271 uint32_t viph_control;
272 uint32_t bus_cntl;
273 uint32_t crtc_gen_cntl;
274 uint32_t crtc2_gen_cntl;
275 uint32_t crtc_ext_cntl;
276 uint32_t fp2_gen_cntl;
277 bool r;
278
279 seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
280 viph_control = RREG32(RADEON_VIPH_CONTROL);
281 bus_cntl = RREG32(RADEON_BUS_CNTL);
282 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
283 crtc2_gen_cntl = 0;
284 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
285 fp2_gen_cntl = 0;
286
287 if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
288 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
289 }
290
291 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
292 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
293 }
294
295 WREG32(RADEON_SEPROM_CNTL1,
296 ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
297 (0xc << RADEON_SCK_PRESCALE_SHIFT)));
298
299 /* disable VIP */
300 WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
301
302 /* enable the rom */
303 WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
304
305 /* Turn off mem requests and CRTC for both controllers */
306 WREG32(RADEON_CRTC_GEN_CNTL,
307 ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
308 (RADEON_CRTC_DISP_REQ_EN_B |
309 RADEON_CRTC_EXT_DISP_EN)));
310 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
311 WREG32(RADEON_CRTC2_GEN_CNTL,
312 ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
313 RADEON_CRTC2_DISP_REQ_EN_B));
314 }
315 /* Turn off CRTC */
316 WREG32(RADEON_CRTC_EXT_CNTL,
317 ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
318 (RADEON_CRTC_SYNC_TRISTAT |
319 RADEON_CRTC_DISPLAY_DIS)));
320
321 if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
322 WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
323 }
324
325 r = radeon_read_bios(rdev);
326
327 /* restore regs */
328 WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
329 WREG32(RADEON_VIPH_CONTROL, viph_control);
330 WREG32(RADEON_BUS_CNTL, bus_cntl);
331 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
332 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
333 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
334 }
335 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
336 if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
337 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
338 }
339 return r;
340}
341
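/* Pick the family specific fallback path for reading a disabled ROM. */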
342static bool radeon_read_disabled_bios(struct radeon_device *rdev)
343{
344 if (rdev->family >= CHIP_RV770)
345 return r700_read_disabled_bios(rdev);
346 else if (rdev->family >= CHIP_R600)
347 return r600_read_disabled_bios(rdev);
348 else if (rdev->family >= CHIP_RS600)
349 return avivo_read_disabled_bios(rdev);
350 else
351 return legacy_read_disabled_bios(rdev);
352}
353
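/*
 * Fetch and validate the video BIOS image: try the PCI ROM first, fall back
 * to the disabled-ROM paths, check the 0x55 0xaa ROM signature, then use the
 * header pointer at offset 0x48 to distinguish ATOM from COMBIOS images.
 */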
354bool radeon_get_bios(struct radeon_device *rdev)
355{
356 bool r;
357 uint16_t tmp;
358
359 r = radeon_read_bios(rdev);
360 if (r == false) {
361 r = radeon_read_disabled_bios(rdev);
362 }
363 if (r == false || rdev->bios == NULL) {
364 DRM_ERROR("Unable to locate a BIOS ROM\n");
365 rdev->bios = NULL;
366 return false;
367 }
368 if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
369 goto free_bios;
370 }
371
372 rdev->bios_header_start = RBIOS16(0x48);
373 if (!rdev->bios_header_start) {
374 goto free_bios;
375 }
376 tmp = rdev->bios_header_start + 4;
377 if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
378 !memcmp(rdev->bios + tmp, "MOTA", 4)) {
379 rdev->is_atom_bios = true;
380 } else {
381 rdev->is_atom_bios = false;
382 }
383
384 DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
385 return true;
386free_bios:
387 kfree(rdev->bios);
388 rdev->bios = NULL;
389 return false;
390}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644
index 000000000000..a37cbce53181
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -0,0 +1,833 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_drm.h"
30#include "radeon_reg.h"
31#include "radeon.h"
32#include "atom.h"
33
34/* current engine (SPLL) clock, in 10 kHz units */
35static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
36{
37 struct radeon_pll *spll = &rdev->clock.spll;
38 uint32_t fb_div, ref_div, post_div, sclk;
39
40 fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
41 fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
42 fb_div <<= 1;
43 fb_div *= spll->reference_freq;
44
45 ref_div =
46 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
47 sclk = fb_div / ref_div;
48
49 post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
50 if (post_div == 2)
51 sclk >>= 1;
52 else if (post_div == 3)
53 sclk >>= 2;
54 else if (post_div == 4)
55 sclk >>= 4;
56
57 return sclk;
58}
59
60/* current memory (MPLL) clock, in 10 kHz units */
61static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
62{
63 struct radeon_pll *mpll = &rdev->clock.mpll;
64 uint32_t fb_div, ref_div, post_div, mclk;
65
66 fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
67 fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
68 fb_div <<= 1;
69 fb_div *= mpll->reference_freq;
70
71 ref_div =
72 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
73 mclk = fb_div / ref_div;
74
75 post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
76 if (post_div == 2)
77 mclk >>= 1;
78 else if (post_div == 3)
79 mclk >>= 2;
80 else if (post_div == 4)
81 mclk >>= 4;
82
83 return mclk;
84}
85
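/*
 * Fill in the PLL reference clocks, dividers and limits, preferring the ATOM
 * or COMBIOS tables and falling back to conservative generic values when the
 * BIOS provides none.
 */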
86void radeon_get_clock_info(struct drm_device *dev)
87{
88 struct radeon_device *rdev = dev->dev_private;
89 struct radeon_pll *p1pll = &rdev->clock.p1pll;
90 struct radeon_pll *p2pll = &rdev->clock.p2pll;
91 struct radeon_pll *spll = &rdev->clock.spll;
92 struct radeon_pll *mpll = &rdev->clock.mpll;
93 int ret;
94
95 if (rdev->is_atom_bios)
96 ret = radeon_atom_get_clock_info(dev);
97 else
98 ret = radeon_combios_get_clock_info(dev);
99
100 if (ret) {
101 if (p1pll->reference_div < 2)
102 p1pll->reference_div = 12;
103 if (p2pll->reference_div < 2)
104 p2pll->reference_div = 12;
105 if (spll->reference_div < 2)
106 spll->reference_div =
107 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
108 RADEON_M_SPLL_REF_DIV_MASK;
109 if (mpll->reference_div < 2)
110 mpll->reference_div = spll->reference_div;
111 } else {
112 if (ASIC_IS_AVIVO(rdev)) {
113 /* TODO FALLBACK */
114 } else {
115 DRM_INFO("Using generic clock info\n");
116
117 if (rdev->flags & RADEON_IS_IGP) {
118 p1pll->reference_freq = 1432;
119 p2pll->reference_freq = 1432;
120 spll->reference_freq = 1432;
121 mpll->reference_freq = 1432;
122 } else {
123 p1pll->reference_freq = 2700;
124 p2pll->reference_freq = 2700;
125 spll->reference_freq = 2700;
126 mpll->reference_freq = 2700;
127 }
128 p1pll->reference_div =
129 RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
130 if (p1pll->reference_div < 2)
131 p1pll->reference_div = 12;
132 p2pll->reference_div = p1pll->reference_div;
133
134 if (rdev->family >= CHIP_R420) {
135 p1pll->pll_in_min = 100;
136 p1pll->pll_in_max = 1350;
137 p1pll->pll_out_min = 20000;
138 p1pll->pll_out_max = 50000;
139 p2pll->pll_in_min = 100;
140 p2pll->pll_in_max = 1350;
141 p2pll->pll_out_min = 20000;
142 p2pll->pll_out_max = 50000;
143 } else {
144 p1pll->pll_in_min = 40;
145 p1pll->pll_in_max = 500;
146 p1pll->pll_out_min = 12500;
147 p1pll->pll_out_max = 35000;
148 p2pll->pll_in_min = 40;
149 p2pll->pll_in_max = 500;
150 p2pll->pll_out_min = 12500;
151 p2pll->pll_out_max = 35000;
152 }
153
154 spll->reference_div =
155 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
156 RADEON_M_SPLL_REF_DIV_MASK;
157 mpll->reference_div = spll->reference_div;
158 rdev->clock.default_sclk =
159 radeon_legacy_get_engine_clock(rdev);
160 rdev->clock.default_mclk =
161 radeon_legacy_get_memory_clock(rdev);
162 }
163 }
164
165 /* pixel clocks */
166 if (ASIC_IS_AVIVO(rdev)) {
167 p1pll->min_post_div = 2;
168 p1pll->max_post_div = 0x7f;
169 p1pll->min_frac_feedback_div = 0;
170 p1pll->max_frac_feedback_div = 9;
171 p2pll->min_post_div = 2;
172 p2pll->max_post_div = 0x7f;
173 p2pll->min_frac_feedback_div = 0;
174 p2pll->max_frac_feedback_div = 9;
175 } else {
176 p1pll->min_post_div = 1;
177 p1pll->max_post_div = 16;
178 p1pll->min_frac_feedback_div = 0;
179 p1pll->max_frac_feedback_div = 0;
180 p2pll->min_post_div = 1;
181 p2pll->max_post_div = 12;
182 p2pll->min_frac_feedback_div = 0;
183 p2pll->max_frac_feedback_div = 0;
184 }
185
186 p1pll->min_ref_div = 2;
187 p1pll->max_ref_div = 0x3ff;
188 p1pll->min_feedback_div = 4;
189 p1pll->max_feedback_div = 0x7ff;
190 p1pll->best_vco = 0;
191
192 p2pll->min_ref_div = 2;
193 p2pll->max_ref_div = 0x3ff;
194 p2pll->min_feedback_div = 4;
195 p2pll->max_feedback_div = 0x7ff;
196 p2pll->best_vco = 0;
197
198 /* system clock */
199 spll->min_post_div = 1;
200 spll->max_post_div = 1;
201 spll->min_ref_div = 2;
202 spll->max_ref_div = 0xff;
203 spll->min_feedback_div = 4;
204 spll->max_feedback_div = 0xff;
205 spll->best_vco = 0;
206
207 /* memory clock */
208 mpll->min_post_div = 1;
209 mpll->max_post_div = 1;
210 mpll->min_ref_div = 2;
211 mpll->max_ref_div = 0xff;
212 mpll->min_feedback_div = 4;
213 mpll->max_feedback_div = 0xff;
214 mpll->best_vco = 0;
215
216}
217
218/* compute SPLL feedback/post dividers for the requested clock; returns the achieved clock in 10 kHz units */
219static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
220 uint32_t req_clock,
221 int *fb_div, int *post_div)
222{
223 struct radeon_pll *spll = &rdev->clock.spll;
224 int ref_div = spll->reference_div;
225
226 if (!ref_div)
227 ref_div =
228 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
229 RADEON_M_SPLL_REF_DIV_MASK;
230
231 if (req_clock < 15000) {
232 *post_div = 8;
233 req_clock *= 8;
234 } else if (req_clock < 30000) {
235 *post_div = 4;
236 req_clock *= 4;
237 } else if (req_clock < 60000) {
238 *post_div = 2;
239 req_clock *= 2;
240 } else
241 *post_div = 1;
242
243 req_clock *= ref_div;
244 req_clock += spll->reference_freq;
245 req_clock /= (2 * spll->reference_freq);
246
247 *fb_div = req_clock & 0xff;
248
249 req_clock = (req_clock & 0xffff) << 1;
250 req_clock *= spll->reference_freq;
251 req_clock /= ref_div;
252 req_clock /= *post_div;
253
254 return req_clock;
255}
256
257/* set the engine (SPLL) clock; eng_clock is in 10 kHz units */
258void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
259 uint32_t eng_clock)
260{
261 uint32_t tmp;
262 int fb_div, post_div;
263
264 /* XXX: wait for idle */
265
266 eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);
267
268 tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
269 tmp &= ~RADEON_DONT_USE_XTALIN;
270 WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
271
272 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
273 tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
274 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
275
276 udelay(10);
277
278 tmp = RREG32_PLL(RADEON_SPLL_CNTL);
279 tmp |= RADEON_SPLL_SLEEP;
280 WREG32_PLL(RADEON_SPLL_CNTL, tmp);
281
282 udelay(2);
283
284 tmp = RREG32_PLL(RADEON_SPLL_CNTL);
285 tmp |= RADEON_SPLL_RESET;
286 WREG32_PLL(RADEON_SPLL_CNTL, tmp);
287
288 udelay(200);
289
290 tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
291 tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
292 tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
293 WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);
294
295 /* XXX: verify on different asics */
296 tmp = RREG32_PLL(RADEON_SPLL_CNTL);
297 tmp &= ~RADEON_SPLL_PVG_MASK;
298 if ((eng_clock * post_div) >= 90000)
299 tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
300 else
301 tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
302 WREG32_PLL(RADEON_SPLL_CNTL, tmp);
303
304 tmp = RREG32_PLL(RADEON_SPLL_CNTL);
305 tmp &= ~RADEON_SPLL_SLEEP;
306 WREG32_PLL(RADEON_SPLL_CNTL, tmp);
307
308 udelay(2);
309
310 tmp = RREG32_PLL(RADEON_SPLL_CNTL);
311 tmp &= ~RADEON_SPLL_RESET;
312 WREG32_PLL(RADEON_SPLL_CNTL, tmp);
313
314 udelay(200);
315
316 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
317 tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
318 switch (post_div) {
319 case 1:
320 default:
321 tmp |= 1;
322 break;
323 case 2:
324 tmp |= 2;
325 break;
326 case 4:
327 tmp |= 3;
328 break;
329 case 8:
330 tmp |= 4;
331 break;
332 }
333 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
334
335 udelay(20);
336
337 tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
338 tmp |= RADEON_DONT_USE_XTALIN;
339 WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
340
341 udelay(10);
342}
343
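/*
 * Enable or disable dynamic clock gating on legacy parts; the per-family
 * branches either let the SCLK/MCLK/pixel clock domains gate dynamically or
 * force them on, with workarounds for early chip revisions.
 */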
344void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
345{
346 uint32_t tmp;
347
348 if (enable) {
349 if (rdev->flags & RADEON_SINGLE_CRTC) {
350 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
351 if ((RREG32(RADEON_CONFIG_CNTL) &
352 RADEON_CFG_ATI_REV_ID_MASK) >
353 RADEON_CFG_ATI_REV_A13) {
354 tmp &=
355 ~(RADEON_SCLK_FORCE_CP |
356 RADEON_SCLK_FORCE_RB);
357 }
358 tmp &=
359 ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
360 RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
361 RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
362 RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
363 RADEON_SCLK_FORCE_TDM);
364 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
365 } else if (ASIC_IS_R300(rdev)) {
366 if ((rdev->family == CHIP_RS400) ||
367 (rdev->family == CHIP_RS480)) {
368 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
369 tmp &=
370 ~(RADEON_SCLK_FORCE_DISP2 |
371 RADEON_SCLK_FORCE_CP |
372 RADEON_SCLK_FORCE_HDP |
373 RADEON_SCLK_FORCE_DISP1 |
374 RADEON_SCLK_FORCE_TOP |
375 RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
376 | RADEON_SCLK_FORCE_IDCT |
377 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
378 | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
379 | R300_SCLK_FORCE_US |
380 RADEON_SCLK_FORCE_TV_SCLK |
381 R300_SCLK_FORCE_SU |
382 RADEON_SCLK_FORCE_OV0);
383 tmp |= RADEON_DYN_STOP_LAT_MASK;
384 tmp |=
385 RADEON_SCLK_FORCE_TOP |
386 RADEON_SCLK_FORCE_VIP;
387 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
388
389 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
390 tmp &= ~RADEON_SCLK_MORE_FORCEON;
391 tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
392 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
393
394 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
395 tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
396 RADEON_PIXCLK_DAC_ALWAYS_ONb);
397 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
398
399 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
400 tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
401 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
402 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
403 R300_DVOCLK_ALWAYS_ONb |
404 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
405 RADEON_PIXCLK_GV_ALWAYS_ONb |
406 R300_PIXCLK_DVO_ALWAYS_ONb |
407 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
408 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
409 R300_PIXCLK_TRANS_ALWAYS_ONb |
410 R300_PIXCLK_TVO_ALWAYS_ONb |
411 R300_P2G2CLK_ALWAYS_ONb |
412 R300_P2G2CLK_ALWAYS_ONb);
413 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
414 } else if (rdev->family >= CHIP_RV350) {
415 tmp = RREG32_PLL(R300_SCLK_CNTL2);
416 tmp &= ~(R300_SCLK_FORCE_TCL |
417 R300_SCLK_FORCE_GA |
418 R300_SCLK_FORCE_CBA);
419 tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
420 R300_SCLK_GA_MAX_DYN_STOP_LAT |
421 R300_SCLK_CBA_MAX_DYN_STOP_LAT);
422 WREG32_PLL(R300_SCLK_CNTL2, tmp);
423
424 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
425 tmp &=
426 ~(RADEON_SCLK_FORCE_DISP2 |
427 RADEON_SCLK_FORCE_CP |
428 RADEON_SCLK_FORCE_HDP |
429 RADEON_SCLK_FORCE_DISP1 |
430 RADEON_SCLK_FORCE_TOP |
431 RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
432 | RADEON_SCLK_FORCE_IDCT |
433 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
434 | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
435 | R300_SCLK_FORCE_US |
436 RADEON_SCLK_FORCE_TV_SCLK |
437 R300_SCLK_FORCE_SU |
438 RADEON_SCLK_FORCE_OV0);
439 tmp |= RADEON_DYN_STOP_LAT_MASK;
440 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
441
442 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
443 tmp &= ~RADEON_SCLK_MORE_FORCEON;
444 tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
445 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
446
447 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
448 tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
449 RADEON_PIXCLK_DAC_ALWAYS_ONb);
450 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
451
452 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
453 tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
454 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
455 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
456 R300_DVOCLK_ALWAYS_ONb |
457 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
458 RADEON_PIXCLK_GV_ALWAYS_ONb |
459 R300_PIXCLK_DVO_ALWAYS_ONb |
460 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
461 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
462 R300_PIXCLK_TRANS_ALWAYS_ONb |
463 R300_PIXCLK_TVO_ALWAYS_ONb |
464 R300_P2G2CLK_ALWAYS_ONb |
465 R300_P2G2CLK_ALWAYS_ONb);
466 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
467
468 tmp = RREG32_PLL(RADEON_MCLK_MISC);
469 tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
470 RADEON_IO_MCLK_DYN_ENABLE);
471 WREG32_PLL(RADEON_MCLK_MISC, tmp);
472
473 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
474 tmp |= (RADEON_FORCEON_MCLKA |
475 RADEON_FORCEON_MCLKB);
476
477 tmp &= ~(RADEON_FORCEON_YCLKA |
478 RADEON_FORCEON_YCLKB |
479 RADEON_FORCEON_MC);
480
481 /* Some vbios releases set the DISABLE_MC_MCLKA and
482 DISABLE_MC_MCLKB bits in the vbios table. Leaving these
483 bits set causes a hardware hang when reading video memory
484 with dynamic clocking enabled. */
485 if ((tmp & R300_DISABLE_MC_MCLKA) &&
486 (tmp & R300_DISABLE_MC_MCLKB)) {
487 /* If both bits are set, then check the active channels */
488 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
489 if (rdev->mc.vram_width == 64) {
490 if (RREG32(RADEON_MEM_CNTL) &
491 R300_MEM_USE_CD_CH_ONLY)
492 tmp &=
493 ~R300_DISABLE_MC_MCLKB;
494 else
495 tmp &=
496 ~R300_DISABLE_MC_MCLKA;
497 } else {
498 tmp &= ~(R300_DISABLE_MC_MCLKA |
499 R300_DISABLE_MC_MCLKB);
500 }
501 }
502
503 WREG32_PLL(RADEON_MCLK_CNTL, tmp);
504 } else {
505 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
506 tmp &= ~(R300_SCLK_FORCE_VAP);
507 tmp |= RADEON_SCLK_FORCE_CP;
508 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
509 udelay(15000);
510
511 tmp = RREG32_PLL(R300_SCLK_CNTL2);
512 tmp &= ~(R300_SCLK_FORCE_TCL |
513 R300_SCLK_FORCE_GA |
514 R300_SCLK_FORCE_CBA);
515 WREG32_PLL(R300_SCLK_CNTL2, tmp);
516 }
517 } else {
518 tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
519
520 tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
521 RADEON_DISP_DYN_STOP_LAT_MASK |
522 RADEON_DYN_STOP_MODE_MASK);
523
524 tmp |= (RADEON_ENGIN_DYNCLK_MODE |
525 (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
526 WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
527 udelay(15000);
528
529 tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
530 tmp |= RADEON_SCLK_DYN_START_CNTL;
531 WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
532 udelay(15000);
533
534 /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some
535 R200 chips to lock up randomly; leave it as set by the BIOS.
536 */
537 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
538 /*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
539 tmp &= ~RADEON_SCLK_FORCEON_MASK;
540
541 /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
542 if (((rdev->family == CHIP_RV250) &&
543 ((RREG32(RADEON_CONFIG_CNTL) &
544 RADEON_CFG_ATI_REV_ID_MASK) <
545 RADEON_CFG_ATI_REV_A13))
546 || ((rdev->family == CHIP_RV100)
547 &&
548 ((RREG32(RADEON_CONFIG_CNTL) &
549 RADEON_CFG_ATI_REV_ID_MASK) <=
550 RADEON_CFG_ATI_REV_A13))) {
551 tmp |= RADEON_SCLK_FORCE_CP;
552 tmp |= RADEON_SCLK_FORCE_VIP;
553 }
554
555 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
556
557 if ((rdev->family == CHIP_RV200) ||
558 (rdev->family == CHIP_RV250) ||
559 (rdev->family == CHIP_RV280)) {
560 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
561 tmp &= ~RADEON_SCLK_MORE_FORCEON;
562
563 /* RV200::A11 A12 RV250::A11 A12 */
564 if (((rdev->family == CHIP_RV200) ||
565 (rdev->family == CHIP_RV250)) &&
566 ((RREG32(RADEON_CONFIG_CNTL) &
567 RADEON_CFG_ATI_REV_ID_MASK) <
568 RADEON_CFG_ATI_REV_A13)) {
569 tmp |= RADEON_SCLK_MORE_FORCEON;
570 }
571 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
572 udelay(15000);
573 }
574
575 /* RV200::A11 A12, RV250::A11 A12 */
576 if (((rdev->family == CHIP_RV200) ||
577 (rdev->family == CHIP_RV250)) &&
578 ((RREG32(RADEON_CONFIG_CNTL) &
579 RADEON_CFG_ATI_REV_ID_MASK) <
580 RADEON_CFG_ATI_REV_A13)) {
581 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
582 tmp |= RADEON_TCL_BYPASS_DISABLE;
583 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
584 }
585 udelay(15000);
586
587 /*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
588 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
589 tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
590 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
591 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
592 RADEON_PIXCLK_GV_ALWAYS_ONb |
593 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
594 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
595 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
596
597 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
598 udelay(15000);
599
600 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
601 tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
602 RADEON_PIXCLK_DAC_ALWAYS_ONb);
603
604 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
605 udelay(15000);
606 }
607 } else {
608 /* Turn everything OFF (ForceON to everything) */
609 if (rdev->flags & RADEON_SINGLE_CRTC) {
610 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
611 tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
612 RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
613 | RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
614 RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
615 RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
616 RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
617 RADEON_SCLK_FORCE_RB);
618 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
619 } else if ((rdev->family == CHIP_RS400) ||
620 (rdev->family == CHIP_RS480)) {
621 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
622 tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
623 RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
624 | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
625 R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
626 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
627 R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
628 R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
629 R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
630 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
631
632 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
633 tmp |= RADEON_SCLK_MORE_FORCEON;
634 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
635
636 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
637 tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
638 RADEON_PIXCLK_DAC_ALWAYS_ONb |
639 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
640 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
641
642 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
643 tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
644 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
645 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
646 R300_DVOCLK_ALWAYS_ONb |
647 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
648 RADEON_PIXCLK_GV_ALWAYS_ONb |
649 R300_PIXCLK_DVO_ALWAYS_ONb |
650 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
651 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
652 R300_PIXCLK_TRANS_ALWAYS_ONb |
653 R300_PIXCLK_TVO_ALWAYS_ONb |
654 R300_P2G2CLK_ALWAYS_ONb |
655 R300_P2G2CLK_ALWAYS_ONb |
656 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
657 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
658 } else if (rdev->family >= CHIP_RV350) {
659 /* for RV350/M10, no delays are required. */
660 tmp = RREG32_PLL(R300_SCLK_CNTL2);
661 tmp |= (R300_SCLK_FORCE_TCL |
662 R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
663 WREG32_PLL(R300_SCLK_CNTL2, tmp);
664
665 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
666 tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
667 RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
668 | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
669 R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
670 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
671 R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
672 R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
673 R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
674 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
675
676 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
677 tmp |= RADEON_SCLK_MORE_FORCEON;
678 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
679
680 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
681 tmp |= (RADEON_FORCEON_MCLKA |
682 RADEON_FORCEON_MCLKB |
683 RADEON_FORCEON_YCLKA |
684 RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
685 WREG32_PLL(RADEON_MCLK_CNTL, tmp);
686
687 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
688 tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
689 RADEON_PIXCLK_DAC_ALWAYS_ONb |
690 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
691 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
692
693 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
694 tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
695 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
696 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
697 R300_DVOCLK_ALWAYS_ONb |
698 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
699 RADEON_PIXCLK_GV_ALWAYS_ONb |
700 R300_PIXCLK_DVO_ALWAYS_ONb |
701 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
702 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
703 R300_PIXCLK_TRANS_ALWAYS_ONb |
704 R300_PIXCLK_TVO_ALWAYS_ONb |
705 R300_P2G2CLK_ALWAYS_ONb |
706 R300_P2G2CLK_ALWAYS_ONb |
707 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
708 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
709 } else {
710 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
711 tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
712 tmp |= RADEON_SCLK_FORCE_SE;
713
714 if (rdev->flags & RADEON_SINGLE_CRTC) {
715 tmp |= (RADEON_SCLK_FORCE_RB |
716 RADEON_SCLK_FORCE_TDM |
717 RADEON_SCLK_FORCE_TAM |
718 RADEON_SCLK_FORCE_PB |
719 RADEON_SCLK_FORCE_RE |
720 RADEON_SCLK_FORCE_VIP |
721 RADEON_SCLK_FORCE_IDCT |
722 RADEON_SCLK_FORCE_TOP |
723 RADEON_SCLK_FORCE_DISP1 |
724 RADEON_SCLK_FORCE_DISP2 |
725 RADEON_SCLK_FORCE_HDP);
726 } else if ((rdev->family == CHIP_R300) ||
727 (rdev->family == CHIP_R350)) {
728 tmp |= (RADEON_SCLK_FORCE_HDP |
729 RADEON_SCLK_FORCE_DISP1 |
730 RADEON_SCLK_FORCE_DISP2 |
731 RADEON_SCLK_FORCE_TOP |
732 RADEON_SCLK_FORCE_IDCT |
733 RADEON_SCLK_FORCE_VIP);
734 }
735 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
736
737 udelay(16000);
738
739 if ((rdev->family == CHIP_R300) ||
740 (rdev->family == CHIP_R350)) {
741 tmp = RREG32_PLL(R300_SCLK_CNTL2);
742 tmp |= (R300_SCLK_FORCE_TCL |
743 R300_SCLK_FORCE_GA |
744 R300_SCLK_FORCE_CBA);
745 WREG32_PLL(R300_SCLK_CNTL2, tmp);
746 udelay(16000);
747 }
748
749 if (rdev->flags & RADEON_IS_IGP) {
750 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
751 tmp &= ~(RADEON_FORCEON_MCLKA |
752 RADEON_FORCEON_YCLKA);
753 WREG32_PLL(RADEON_MCLK_CNTL, tmp);
754 udelay(16000);
755 }
756
757 if ((rdev->family == CHIP_RV200) ||
758 (rdev->family == CHIP_RV250) ||
759 (rdev->family == CHIP_RV280)) {
760 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
761 tmp |= RADEON_SCLK_MORE_FORCEON;
762 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
763 udelay(16000);
764 }
765
766 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
767 tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
768 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
769 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
770 RADEON_PIXCLK_GV_ALWAYS_ONb |
771 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
772 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
773 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
774
775 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
776 udelay(16000);
777
778 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
779 tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
780 RADEON_PIXCLK_DAC_ALWAYS_ONb);
781 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
782 }
783 }
784}
785
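/*
 * Per-family clock quirks: keep a few clock domains forced on (CP/VIP and
 * display clocks on older parts, CP/E2/IDCT on AVIVO parts).
 */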
786static void radeon_apply_clock_quirks(struct radeon_device *rdev)
787{
788 uint32_t tmp;
789
790 /* XXX make sure engine is idle */
791
792 if (rdev->family < CHIP_RS600) {
793 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
794 if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
795 tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
796 if ((rdev->family == CHIP_RV250)
797 || (rdev->family == CHIP_RV280))
798 tmp |=
799 RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
800 if ((rdev->family == CHIP_RV350)
801 || (rdev->family == CHIP_RV380))
802 tmp |= R300_SCLK_FORCE_VAP;
803 if (rdev->family == CHIP_R420)
804 tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
805 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
806 } else if (rdev->family < CHIP_R600) {
807 tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
808 tmp |= AVIVO_CP_FORCEON;
809 WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
810
811 tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
812 tmp |= AVIVO_E2_FORCEON;
813 WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
814
815 tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
816 tmp |= AVIVO_IDCT_FORCEON;
817 WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
818 }
819}
820
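/* Honour the radeon_dynclks option and apply the per-family clock quirks. */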
821int radeon_static_clocks_init(struct drm_device *dev)
822{
823 struct radeon_device *rdev = dev->dev_private;
824
825 /* XXX make sure engine is idle */
826
827 if (radeon_dynclks != -1) {
828 if (radeon_dynclks)
829 radeon_set_clock_gating(rdev, 1);
830 }
831 radeon_apply_clock_quirks(rdev);
832 return 0;
833}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644
index 000000000000..06e8038bc4ac
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -0,0 +1,2481 @@
1/*
2 * Copyright 2004 ATI Technologies Inc., Markham, Ontario
3 * Copyright 2007-8 Advanced Micro Devices, Inc.
4 * Copyright 2008 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 */
27#include "drmP.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32#ifdef CONFIG_PPC_PMAC
33/* not sure which of these are needed */
34#include <asm/machdep.h>
35#include <asm/pmac_feature.h>
36#include <asm/prom.h>
37#include <asm/pci-bridge.h>
38#endif /* CONFIG_PPC_PMAC */
39
40/* from radeon_encoder.c */
41extern uint32_t
42radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
43 uint8_t dac);
44extern void radeon_link_encoder_connector(struct drm_device *dev);
45
46/* from radeon_connector.c */
47extern void
48radeon_add_legacy_connector(struct drm_device *dev,
49 uint32_t connector_id,
50 uint32_t supported_device,
51 int connector_type,
52 struct radeon_i2c_bus_rec *i2c_bus);
53
54/* from radeon_legacy_encoder.c */
55extern void
56radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
57 uint32_t supported_device);
58
59/* old legacy ATI BIOS routines */
60
61/* COMBIOS table offsets */
62enum radeon_combios_table_offset {
63 /* absolute offset tables */
64 COMBIOS_ASIC_INIT_1_TABLE,
65 COMBIOS_BIOS_SUPPORT_TABLE,
66 COMBIOS_DAC_PROGRAMMING_TABLE,
67 COMBIOS_MAX_COLOR_DEPTH_TABLE,
68 COMBIOS_CRTC_INFO_TABLE,
69 COMBIOS_PLL_INFO_TABLE,
70 COMBIOS_TV_INFO_TABLE,
71 COMBIOS_DFP_INFO_TABLE,
72 COMBIOS_HW_CONFIG_INFO_TABLE,
73 COMBIOS_MULTIMEDIA_INFO_TABLE,
74 COMBIOS_TV_STD_PATCH_TABLE,
75 COMBIOS_LCD_INFO_TABLE,
76 COMBIOS_MOBILE_INFO_TABLE,
77 COMBIOS_PLL_INIT_TABLE,
78 COMBIOS_MEM_CONFIG_TABLE,
79 COMBIOS_SAVE_MASK_TABLE,
80 COMBIOS_HARDCODED_EDID_TABLE,
81 COMBIOS_ASIC_INIT_2_TABLE,
82 COMBIOS_CONNECTOR_INFO_TABLE,
83 COMBIOS_DYN_CLK_1_TABLE,
84 COMBIOS_RESERVED_MEM_TABLE,
85 COMBIOS_EXT_TMDS_INFO_TABLE,
86 COMBIOS_MEM_CLK_INFO_TABLE,
87 COMBIOS_EXT_DAC_INFO_TABLE,
88 COMBIOS_MISC_INFO_TABLE,
89 COMBIOS_CRT_INFO_TABLE,
90 COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
91 COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
92 COMBIOS_FAN_SPEED_INFO_TABLE,
93 COMBIOS_OVERDRIVE_INFO_TABLE,
94 COMBIOS_OEM_INFO_TABLE,
95 COMBIOS_DYN_CLK_2_TABLE,
96 COMBIOS_POWER_CONNECTOR_INFO_TABLE,
97 COMBIOS_I2C_INFO_TABLE,
98 /* relative offset tables */
99 COMBIOS_ASIC_INIT_3_TABLE, /* offset from misc info */
100 COMBIOS_ASIC_INIT_4_TABLE, /* offset from misc info */
101 COMBIOS_DETECTED_MEM_TABLE, /* offset from misc info */
102 COMBIOS_ASIC_INIT_5_TABLE, /* offset from misc info */
103 COMBIOS_RAM_RESET_TABLE, /* offset from mem config */
104 COMBIOS_POWERPLAY_INFO_TABLE, /* offset from mobile info */
105 COMBIOS_GPIO_INFO_TABLE, /* offset from mobile info */
106 COMBIOS_LCD_DDC_INFO_TABLE, /* offset from mobile info */
107 COMBIOS_TMDS_POWER_TABLE, /* offset from mobile info */
108 COMBIOS_TMDS_POWER_ON_TABLE, /* offset from tmds power */
109 COMBIOS_TMDS_POWER_OFF_TABLE, /* offset from tmds power */
110};
111
112enum radeon_combios_ddc {
113 DDC_NONE_DETECTED,
114 DDC_MONID,
115 DDC_DVI,
116 DDC_VGA,
117 DDC_CRT2,
118 DDC_LCD,
119 DDC_GPIO,
120};
121
122enum radeon_combios_connector {
123 CONNECTOR_NONE_LEGACY,
124 CONNECTOR_PROPRIETARY_LEGACY,
125 CONNECTOR_CRT_LEGACY,
126 CONNECTOR_DVI_I_LEGACY,
127 CONNECTOR_DVI_D_LEGACY,
128 CONNECTOR_CTV_LEGACY,
129 CONNECTOR_STV_LEGACY,
130 CONNECTOR_UNSUPPORTED_LEGACY
131};
132
133const int legacy_connector_convert[] = {
134 DRM_MODE_CONNECTOR_Unknown,
135 DRM_MODE_CONNECTOR_DVID,
136 DRM_MODE_CONNECTOR_VGA,
137 DRM_MODE_CONNECTOR_DVII,
138 DRM_MODE_CONNECTOR_DVID,
139 DRM_MODE_CONNECTOR_Composite,
140 DRM_MODE_CONNECTOR_SVIDEO,
141 DRM_MODE_CONNECTOR_Unknown,
142};
143
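/*
 * Look up a COMBIOS table: absolute tables are 16-bit pointers at fixed
 * offsets from the BIOS header, relative tables are chased through their
 * parent table; returns 0 when the table is not present.
 */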
144static uint16_t combios_get_table_offset(struct drm_device *dev,
145 enum radeon_combios_table_offset table)
146{
147 struct radeon_device *rdev = dev->dev_private;
148 int rev;
149 uint16_t offset = 0, check_offset;
150
151 switch (table) {
152 /* absolute offset tables */
153 case COMBIOS_ASIC_INIT_1_TABLE:
154 check_offset = RBIOS16(rdev->bios_header_start + 0xc);
155 if (check_offset)
156 offset = check_offset;
157 break;
158 case COMBIOS_BIOS_SUPPORT_TABLE:
159 check_offset = RBIOS16(rdev->bios_header_start + 0x14);
160 if (check_offset)
161 offset = check_offset;
162 break;
163 case COMBIOS_DAC_PROGRAMMING_TABLE:
164 check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
165 if (check_offset)
166 offset = check_offset;
167 break;
168 case COMBIOS_MAX_COLOR_DEPTH_TABLE:
169 check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
170 if (check_offset)
171 offset = check_offset;
172 break;
173 case COMBIOS_CRTC_INFO_TABLE:
174 check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
175 if (check_offset)
176 offset = check_offset;
177 break;
178 case COMBIOS_PLL_INFO_TABLE:
179 check_offset = RBIOS16(rdev->bios_header_start + 0x30);
180 if (check_offset)
181 offset = check_offset;
182 break;
183 case COMBIOS_TV_INFO_TABLE:
184 check_offset = RBIOS16(rdev->bios_header_start + 0x32);
185 if (check_offset)
186 offset = check_offset;
187 break;
188 case COMBIOS_DFP_INFO_TABLE:
189 check_offset = RBIOS16(rdev->bios_header_start + 0x34);
190 if (check_offset)
191 offset = check_offset;
192 break;
193 case COMBIOS_HW_CONFIG_INFO_TABLE:
194 check_offset = RBIOS16(rdev->bios_header_start + 0x36);
195 if (check_offset)
196 offset = check_offset;
197 break;
198 case COMBIOS_MULTIMEDIA_INFO_TABLE:
199 check_offset = RBIOS16(rdev->bios_header_start + 0x38);
200 if (check_offset)
201 offset = check_offset;
202 break;
203 case COMBIOS_TV_STD_PATCH_TABLE:
204 check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
205 if (check_offset)
206 offset = check_offset;
207 break;
208 case COMBIOS_LCD_INFO_TABLE:
209 check_offset = RBIOS16(rdev->bios_header_start + 0x40);
210 if (check_offset)
211 offset = check_offset;
212 break;
213 case COMBIOS_MOBILE_INFO_TABLE:
214 check_offset = RBIOS16(rdev->bios_header_start + 0x42);
215 if (check_offset)
216 offset = check_offset;
217 break;
218 case COMBIOS_PLL_INIT_TABLE:
219 check_offset = RBIOS16(rdev->bios_header_start + 0x46);
220 if (check_offset)
221 offset = check_offset;
222 break;
223 case COMBIOS_MEM_CONFIG_TABLE:
224 check_offset = RBIOS16(rdev->bios_header_start + 0x48);
225 if (check_offset)
226 offset = check_offset;
227 break;
228 case COMBIOS_SAVE_MASK_TABLE:
229 check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
230 if (check_offset)
231 offset = check_offset;
232 break;
233 case COMBIOS_HARDCODED_EDID_TABLE:
234 check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
235 if (check_offset)
236 offset = check_offset;
237 break;
238 case COMBIOS_ASIC_INIT_2_TABLE:
239 check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
240 if (check_offset)
241 offset = check_offset;
242 break;
243 case COMBIOS_CONNECTOR_INFO_TABLE:
244 check_offset = RBIOS16(rdev->bios_header_start + 0x50);
245 if (check_offset)
246 offset = check_offset;
247 break;
248 case COMBIOS_DYN_CLK_1_TABLE:
249 check_offset = RBIOS16(rdev->bios_header_start + 0x52);
250 if (check_offset)
251 offset = check_offset;
252 break;
253 case COMBIOS_RESERVED_MEM_TABLE:
254 check_offset = RBIOS16(rdev->bios_header_start + 0x54);
255 if (check_offset)
256 offset = check_offset;
257 break;
258 case COMBIOS_EXT_TMDS_INFO_TABLE:
259 check_offset = RBIOS16(rdev->bios_header_start + 0x58);
260 if (check_offset)
261 offset = check_offset;
262 break;
263 case COMBIOS_MEM_CLK_INFO_TABLE:
264 check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
265 if (check_offset)
266 offset = check_offset;
267 break;
268 case COMBIOS_EXT_DAC_INFO_TABLE:
269 check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
270 if (check_offset)
271 offset = check_offset;
272 break;
273 case COMBIOS_MISC_INFO_TABLE:
274 check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
275 if (check_offset)
276 offset = check_offset;
277 break;
278 case COMBIOS_CRT_INFO_TABLE:
279 check_offset = RBIOS16(rdev->bios_header_start + 0x60);
280 if (check_offset)
281 offset = check_offset;
282 break;
283 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
284 check_offset = RBIOS16(rdev->bios_header_start + 0x62);
285 if (check_offset)
286 offset = check_offset;
287 break;
288 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
289 check_offset = RBIOS16(rdev->bios_header_start + 0x64);
290 if (check_offset)
291 offset = check_offset;
292 break;
293 case COMBIOS_FAN_SPEED_INFO_TABLE:
294 check_offset = RBIOS16(rdev->bios_header_start + 0x66);
295 if (check_offset)
296 offset = check_offset;
297 break;
298 case COMBIOS_OVERDRIVE_INFO_TABLE:
299 check_offset = RBIOS16(rdev->bios_header_start + 0x68);
300 if (check_offset)
301 offset = check_offset;
302 break;
303 case COMBIOS_OEM_INFO_TABLE:
304 check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
305 if (check_offset)
306 offset = check_offset;
307 break;
308 case COMBIOS_DYN_CLK_2_TABLE:
309 check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
310 if (check_offset)
311 offset = check_offset;
312 break;
313 case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
314 check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
315 if (check_offset)
316 offset = check_offset;
317 break;
318 case COMBIOS_I2C_INFO_TABLE:
319 check_offset = RBIOS16(rdev->bios_header_start + 0x70);
320 if (check_offset)
321 offset = check_offset;
322 break;
323 /* relative offset tables */
324 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
325 check_offset =
326 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
327 if (check_offset) {
328 rev = RBIOS8(check_offset);
329 if (rev > 0) {
330 check_offset = RBIOS16(check_offset + 0x3);
331 if (check_offset)
332 offset = check_offset;
333 }
334 }
335 break;
336 case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */
337 check_offset =
338 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
339 if (check_offset) {
340 rev = RBIOS8(check_offset);
341 if (rev > 0) {
342 check_offset = RBIOS16(check_offset + 0x5);
343 if (check_offset)
344 offset = check_offset;
345 }
346 }
347 break;
348 case COMBIOS_DETECTED_MEM_TABLE: /* offset from misc info */
349 check_offset =
350 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
351 if (check_offset) {
352 rev = RBIOS8(check_offset);
353 if (rev > 0) {
354 check_offset = RBIOS16(check_offset + 0x7);
355 if (check_offset)
356 offset = check_offset;
357 }
358 }
359 break;
360 case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */
361 check_offset =
362 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
363 if (check_offset) {
364 rev = RBIOS8(check_offset);
365 if (rev == 2) {
366 check_offset = RBIOS16(check_offset + 0x9);
367 if (check_offset)
368 offset = check_offset;
369 }
370 }
371 break;
372 case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */
373 check_offset =
374 combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
375 if (check_offset) {
376 while (RBIOS8(check_offset++));
377 check_offset += 2;
378 if (check_offset)
379 offset = check_offset;
380 }
381 break;
382 case COMBIOS_POWERPLAY_INFO_TABLE: /* offset from mobile info */
383 check_offset =
384 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
385 if (check_offset) {
386 check_offset = RBIOS16(check_offset + 0x11);
387 if (check_offset)
388 offset = check_offset;
389 }
390 break;
391 case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */
392 check_offset =
393 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
394 if (check_offset) {
395 check_offset = RBIOS16(check_offset + 0x13);
396 if (check_offset)
397 offset = check_offset;
398 }
399 break;
400 case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */
401 check_offset =
402 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
403 if (check_offset) {
404 check_offset = RBIOS16(check_offset + 0x15);
405 if (check_offset)
406 offset = check_offset;
407 }
408 break;
409 case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */
410 check_offset =
411 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
412 if (check_offset) {
413 check_offset = RBIOS16(check_offset + 0x17);
414 if (check_offset)
415 offset = check_offset;
416 }
417 break;
418 case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */
419 check_offset =
420 combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
421 if (check_offset) {
422 check_offset = RBIOS16(check_offset + 0x2);
423 if (check_offset)
424 offset = check_offset;
425 }
426 break;
427 case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */
428 check_offset =
429 combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
430 if (check_offset) {
431 check_offset = RBIOS16(check_offset + 0x4);
432 if (check_offset)
433 offset = check_offset;
434 }
435 break;
436 default:
437 break;
438 }
439
440 return offset;
441
442}
443
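/*
 * Build an i2c bus record for a GPIO based DDC line; the LCD GPIO and
 * MDGPIO lines read their input state from a separate register at +4.
 */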
444struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
445{
446 struct radeon_i2c_bus_rec i2c;
447
448 i2c.mask_clk_mask = RADEON_GPIO_EN_1;
449 i2c.mask_data_mask = RADEON_GPIO_EN_0;
450 i2c.a_clk_mask = RADEON_GPIO_A_1;
451 i2c.a_data_mask = RADEON_GPIO_A_0;
452 i2c.put_clk_mask = RADEON_GPIO_EN_1;
453 i2c.put_data_mask = RADEON_GPIO_EN_0;
454 i2c.get_clk_mask = RADEON_GPIO_Y_1;
455 i2c.get_data_mask = RADEON_GPIO_Y_0;
456 if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
457 (ddc_line == RADEON_MDGPIO_EN_REG)) {
458 i2c.mask_clk_reg = ddc_line;
459 i2c.mask_data_reg = ddc_line;
460 i2c.a_clk_reg = ddc_line;
461 i2c.a_data_reg = ddc_line;
462 i2c.put_clk_reg = ddc_line;
463 i2c.put_data_reg = ddc_line;
464 i2c.get_clk_reg = ddc_line + 4;
465 i2c.get_data_reg = ddc_line + 4;
466 } else {
467 i2c.mask_clk_reg = ddc_line;
468 i2c.mask_data_reg = ddc_line;
469 i2c.a_clk_reg = ddc_line;
470 i2c.a_data_reg = ddc_line;
471 i2c.put_clk_reg = ddc_line;
472 i2c.put_data_reg = ddc_line;
473 i2c.get_clk_reg = ddc_line;
474 i2c.get_data_reg = ddc_line;
475 }
476
477 if (ddc_line)
478 i2c.valid = true;
479 else
480 i2c.valid = false;
481
482 return i2c;
483}
484
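/*
 * Parse the COMBIOS PLL info table: reference clocks, dividers and
 * input/output limits for the pixel, system and memory PLLs, plus the
 * default sclk/mclk (in 10 kHz units).
 */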
485bool radeon_combios_get_clock_info(struct drm_device *dev)
486{
487 struct radeon_device *rdev = dev->dev_private;
488 uint16_t pll_info;
489 struct radeon_pll *p1pll = &rdev->clock.p1pll;
490 struct radeon_pll *p2pll = &rdev->clock.p2pll;
491 struct radeon_pll *spll = &rdev->clock.spll;
492 struct radeon_pll *mpll = &rdev->clock.mpll;
493 int8_t rev;
494 uint16_t sclk, mclk;
495
496 if (rdev->bios == NULL)
497 return false;
498
499 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
500 if (pll_info) {
501 rev = RBIOS8(pll_info);
502
503 /* pixel clocks */
504 p1pll->reference_freq = RBIOS16(pll_info + 0xe);
505 p1pll->reference_div = RBIOS16(pll_info + 0x10);
506 p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
507 p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
508
509 if (rev > 9) {
510 p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
511 p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
512 } else {
513 p1pll->pll_in_min = 40;
514 p1pll->pll_in_max = 500;
515 }
516 *p2pll = *p1pll;
517
518 /* system clock */
519 spll->reference_freq = RBIOS16(pll_info + 0x1a);
520 spll->reference_div = RBIOS16(pll_info + 0x1c);
521 spll->pll_out_min = RBIOS32(pll_info + 0x1e);
522 spll->pll_out_max = RBIOS32(pll_info + 0x22);
523
524 if (rev > 10) {
525 spll->pll_in_min = RBIOS32(pll_info + 0x48);
526 spll->pll_in_max = RBIOS32(pll_info + 0x4c);
527 } else {
528 /* ??? */
529 spll->pll_in_min = 40;
530 spll->pll_in_max = 500;
531 }
532
533 /* memory clock */
534 mpll->reference_freq = RBIOS16(pll_info + 0x26);
535 mpll->reference_div = RBIOS16(pll_info + 0x28);
536 mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
537 mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
538
539 if (rev > 10) {
540 mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
541 mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
542 } else {
543 /* ??? */
544 mpll->pll_in_min = 40;
545 mpll->pll_in_max = 500;
546 }
547
548 /* default sclk/mclk */
549 sclk = RBIOS16(pll_info + 0xa);
550 mclk = RBIOS16(pll_info + 0x8);
551 if (sclk == 0)
552 sclk = 200 * 100;
553 if (mclk == 0)
554 mclk = 200 * 100;
555
556 rdev->clock.default_sclk = sclk;
557 rdev->clock.default_mclk = mclk;
558
559 return true;
560 }
561 return false;
562}
563
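/* Primary DAC bg/adj values from the COMBIOS CRT info table. */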
564struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
565 radeon_encoder
566 *encoder)
567{
568 struct drm_device *dev = encoder->base.dev;
569 struct radeon_device *rdev = dev->dev_private;
570 uint16_t dac_info;
571 uint8_t rev, bg, dac;
572 struct radeon_encoder_primary_dac *p_dac = NULL;
573
574 if (rdev->bios == NULL)
575 return NULL;
576
577 /* check CRT table */
578 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
579 if (dac_info) {
580 p_dac =
581 kzalloc(sizeof(struct radeon_encoder_primary_dac),
582 GFP_KERNEL);
583
584 if (!p_dac)
585 return NULL;
586
587 rev = RBIOS8(dac_info) & 0x3;
588 if (rev < 2) {
589 bg = RBIOS8(dac_info + 0x2) & 0xf;
590 dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
591 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
592 } else {
593 bg = RBIOS8(dac_info + 0x2) & 0xf;
594 dac = RBIOS8(dac_info + 0x3) & 0xf;
595 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
596 }
597
598 }
599
600 return p_dac;
601}
602
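/* Default TV standard from the COMBIOS TV info table (NTSC if absent). */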
603static enum radeon_tv_std
604radeon_combios_get_tv_info(struct radeon_encoder *encoder)
605{
606 struct drm_device *dev = encoder->base.dev;
607 struct radeon_device *rdev = dev->dev_private;
608 uint16_t tv_info;
609 enum radeon_tv_std tv_std = TV_STD_NTSC;
610
611 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
612 if (tv_info) {
613 if (RBIOS8(tv_info + 6) == 'T') {
614 switch (RBIOS8(tv_info + 7) & 0xf) {
615 case 1:
616 tv_std = TV_STD_NTSC;
617 DRM_INFO("Default TV standard: NTSC\n");
618 break;
619 case 2:
620 tv_std = TV_STD_PAL;
621 DRM_INFO("Default TV standard: PAL\n");
622 break;
623 case 3:
624 tv_std = TV_STD_PAL_M;
625 DRM_INFO("Default TV standard: PAL-M\n");
626 break;
627 case 4:
628 tv_std = TV_STD_PAL_60;
629 DRM_INFO("Default TV standard: PAL-60\n");
630 break;
631 case 5:
632 tv_std = TV_STD_NTSC_J;
633 DRM_INFO("Default TV standard: NTSC-J\n");
634 break;
635 case 6:
636 tv_std = TV_STD_SCART_PAL;
637 DRM_INFO("Default TV standard: SCART-PAL\n");
638 break;
639 default:
640 tv_std = TV_STD_NTSC;
641 DRM_INFO
642 ("Unknown TV standard; defaulting to NTSC\n");
643 break;
644 }
645
646 switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
647 case 0:
648 DRM_INFO("29.498928713 MHz TV ref clk\n");
649 break;
650 case 1:
651 DRM_INFO("28.636360000 MHz TV ref clk\n");
652 break;
653 case 2:
654 DRM_INFO("14.318180000 MHz TV ref clk\n");
655 break;
656 case 3:
657 DRM_INFO("27.000000000 MHz TV ref clk\n");
658 break;
659 default:
660 break;
661 }
662 }
663 }
664 return tv_std;
665}
666
667static const uint32_t default_tvdac_adj[CHIP_LAST] = {
668 0x00000000, /* r100 */
669 0x00280000, /* rv100 */
670 0x00000000, /* rs100 */
671 0x00880000, /* rv200 */
672 0x00000000, /* rs200 */
673 0x00000000, /* r200 */
674 0x00770000, /* rv250 */
675 0x00290000, /* rs300 */
676 0x00560000, /* rv280 */
677 0x00780000, /* r300 */
678 0x00770000, /* r350 */
679 0x00780000, /* rv350 */
680 0x00780000, /* rv380 */
681 0x01080000, /* r420 */
682 0x01080000, /* r423 */
683 0x01080000, /* rv410 */
684 0x00780000, /* rs400 */
685 0x00780000, /* rs480 */
686};
687
688static struct radeon_encoder_tv_dac
689 *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev)
690{
691 struct radeon_encoder_tv_dac *tv_dac = NULL;
692
693 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
694
695 if (!tv_dac)
696 return NULL;
697
698 tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
699 if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
700 tv_dac->ps2_tvdac_adj = 0x00880000;
701 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
702 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
703
704 return tv_dac;
705}
706
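/*
 * TV DAC adjustment values: prefer the TV info table, fall back to the
 * CRT info table, and finally to the per-family defaults above.
 */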
707struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
708 radeon_encoder
709 *encoder)
710{
711 struct drm_device *dev = encoder->base.dev;
712 struct radeon_device *rdev = dev->dev_private;
713 uint16_t dac_info;
714 uint8_t rev, bg, dac;
715 struct radeon_encoder_tv_dac *tv_dac = NULL;
716
717 if (rdev->bios == NULL)
718 return radeon_legacy_get_tv_dac_info_from_table(rdev);
719
720 /* first check TV table */
721 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
722 if (dac_info) {
723 tv_dac =
724 kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
725
726 if (!tv_dac)
727 return NULL;
728
729 rev = RBIOS8(dac_info + 0x3);
730 if (rev > 4) {
731 bg = RBIOS8(dac_info + 0xc) & 0xf;
732 dac = RBIOS8(dac_info + 0xd) & 0xf;
733 tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
734
735 bg = RBIOS8(dac_info + 0xe) & 0xf;
736 dac = RBIOS8(dac_info + 0xf) & 0xf;
737 tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
738
739 bg = RBIOS8(dac_info + 0x10) & 0xf;
740 dac = RBIOS8(dac_info + 0x11) & 0xf;
741 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
742 } else if (rev > 1) {
743 bg = RBIOS8(dac_info + 0xc) & 0xf;
744 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
745 tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
746
747 bg = RBIOS8(dac_info + 0xd) & 0xf;
748 dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
749 tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
750
751 bg = RBIOS8(dac_info + 0xe) & 0xf;
752 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
753 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
754 }
755
756 tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
757
758 } else {
759 /* then check CRT table */
760 dac_info =
761 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
762 if (dac_info) {
763 tv_dac =
764 kzalloc(sizeof(struct radeon_encoder_tv_dac),
765 GFP_KERNEL);
766
767 if (!tv_dac)
768 return NULL;
769
770 rev = RBIOS8(dac_info) & 0x3;
771 if (rev < 2) {
772 bg = RBIOS8(dac_info + 0x3) & 0xf;
773 dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
774 tv_dac->ps2_tvdac_adj =
775 (bg << 16) | (dac << 20);
776 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
777 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
778 } else {
779 bg = RBIOS8(dac_info + 0x4) & 0xf;
780 dac = RBIOS8(dac_info + 0x5) & 0xf;
781 tv_dac->ps2_tvdac_adj =
782 (bg << 16) | (dac << 20);
783 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
784 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
785 }
786 } else {
787 DRM_INFO("No TV DAC info found in BIOS\n");
788 return radeon_legacy_get_tv_dac_info_from_table(rdev);
789 }
790 }
791
792 return tv_dac;
793}
794
795static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
796 radeon_device
797 *rdev)
798{
799 struct radeon_encoder_lvds *lvds = NULL;
800 uint32_t fp_vert_stretch, fp_horz_stretch;
801 uint32_t ppll_div_sel, ppll_val;
802
803 lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
804
805 if (!lvds)
806 return NULL;
807
808 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
809 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);
810
811 if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
812 lvds->native_mode.panel_yres =
813 ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
814 RADEON_VERT_PANEL_SHIFT) + 1;
815 else
816 lvds->native_mode.panel_yres =
817 (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;
818
819 if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
820 lvds->native_mode.panel_xres =
821 (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
822 RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
823 else
824 lvds->native_mode.panel_xres =
825 ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;
826
827 if ((lvds->native_mode.panel_xres < 640) ||
828 (lvds->native_mode.panel_yres < 480)) {
829 lvds->native_mode.panel_xres = 640;
830 lvds->native_mode.panel_yres = 480;
831 }
832
833 ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
834 ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
835 if ((ppll_val & 0x000707ff) == 0x1bb)
836 lvds->use_bios_dividers = false;
837 else {
838 lvds->panel_ref_divider =
839 RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
840 lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
841 lvds->panel_fb_divider = ppll_val & 0x7ff;
842
843 if ((lvds->panel_ref_divider != 0) &&
844 (lvds->panel_fb_divider > 3))
845 lvds->use_bios_dividers = true;
846 }
847 lvds->panel_vcc_delay = 200;
848
849 DRM_INFO("Panel info derived from registers\n");
850 DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
851 lvds->native_mode.panel_yres);
852
853 return lvds;
854}
855
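/*
 * COMBIOS LCD info table layout as consumed below (offsets relative to the
 * table start): panel ID string at +0x01, panel xres/yres at +0x19/+0x1b,
 * power sequencing delays at +0x24/+0x2c/+0x38, PLL dividers at +0x2e-+0x32,
 * panel setup flags at +0x39, and up to 32 16-bit pointers to detailed
 * timing records starting at +64.
 */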
856struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
857 *encoder)
858{
859 struct drm_device *dev = encoder->base.dev;
860 struct radeon_device *rdev = dev->dev_private;
861 uint16_t lcd_info;
862 uint32_t panel_setup;
863 char stmp[30];
864 int tmp, i;
865 struct radeon_encoder_lvds *lvds = NULL;
866
867 if (rdev->bios == NULL)
868 return radeon_legacy_get_lvds_info_from_regs(rdev);
869
870 lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
871
872 if (lcd_info) {
873 lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);
874
875 if (!lvds)
876 return NULL;
877
878 for (i = 0; i < 24; i++)
879 stmp[i] = RBIOS8(lcd_info + i + 1);
880 stmp[24] = 0;
881
882 DRM_INFO("Panel ID String: %s\n", stmp);
883
884 lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19);
885 lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b);
886
887 DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
888 lvds->native_mode.panel_yres);
889
890 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
891		if (lvds->panel_vcc_delay > 2000)
892 lvds->panel_vcc_delay = 2000;
893
894 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
895 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
896 lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;
897
898 lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
899 lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
900 lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
901 if ((lvds->panel_ref_divider != 0) &&
902 (lvds->panel_fb_divider > 3))
903 lvds->use_bios_dividers = true;
904
905 panel_setup = RBIOS32(lcd_info + 0x39);
906 lvds->lvds_gen_cntl = 0xff00;
907 if (panel_setup & 0x1)
908 lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;
909
910 if ((panel_setup >> 4) & 0x1)
911 lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;
912
913 switch ((panel_setup >> 8) & 0x7) {
914 case 0:
915 lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
916 break;
917 case 1:
918 lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
919 break;
920 case 2:
921 lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
922 break;
923 default:
924 break;
925 }
926
927 if ((panel_setup >> 16) & 0x1)
928 lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;
929
930 if ((panel_setup >> 17) & 0x1)
931 lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;
932
933 if ((panel_setup >> 18) & 0x1)
934 lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;
935
936 if ((panel_setup >> 23) & 0x1)
937 lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;
938
939 lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);
940
941 for (i = 0; i < 32; i++) {
942 tmp = RBIOS16(lcd_info + 64 + i * 2);
943 if (tmp == 0)
944 break;
945
946 if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) &&
947 (RBIOS16(tmp + 2) ==
948 lvds->native_mode.panel_yres)) {
949 lvds->native_mode.hblank =
950 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
951 lvds->native_mode.hoverplus =
952 (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) -
953 1) * 8;
954 lvds->native_mode.hsync_width =
955 RBIOS8(tmp + 23) * 8;
956
957 lvds->native_mode.vblank = (RBIOS16(tmp + 24) -
958 RBIOS16(tmp + 26));
959 lvds->native_mode.voverplus =
960 ((RBIOS16(tmp + 28) & 0x7ff) -
961 RBIOS16(tmp + 26));
962 lvds->native_mode.vsync_width =
963 ((RBIOS16(tmp + 28) & 0xf800) >> 11);
964 lvds->native_mode.dotclock =
965 RBIOS16(tmp + 9) * 10;
966 lvds->native_mode.flags = 0;
967 }
968 }
969 encoder->native_mode = lvds->native_mode;
970 } else {
971 DRM_INFO("No panel info found in BIOS\n");
972 return radeon_legacy_get_lvds_info_from_regs(rdev);
973 }
974 return lvds;
975}
976
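/*
 * Fallback internal TMDS PLL settings per ASIC family, used when no DFP
 * table is available in the BIOS.  Each entry appears to be a
 * { upper frequency bound, PLL value } pair, with 0xffffffff acting as a
 * catch-all for the remaining range and {0, 0} marking unused slots.
 */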
977static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
978 {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R100 */
979 {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV100 */
980 {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS100 */
981 {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RV200 */
982 {{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_RS200 */
983 {{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}}, /* CHIP_R200 */
984 {{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}}, /* CHIP_RV250 */
985 {{0, 0}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RS300 */
986 {{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}}, /* CHIP_RV280 */
987 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R300 */
988 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R350 */
989 {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV350 */
990 {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RV380 */
991 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
992 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
993 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
994 {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */
995 {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */
996};
997
998static struct radeon_encoder_int_tmds
999 *radeon_legacy_get_tmds_info_from_table(struct radeon_device *rdev)
1000{
1001 int i;
1002 struct radeon_encoder_int_tmds *tmds = NULL;
1003
1004 tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
1005
1006 if (!tmds)
1007 return NULL;
1008
1009 for (i = 0; i < 4; i++) {
1010 tmds->tmds_pll[i].value =
1011 default_tmds_pll[rdev->family][i].value;
1012 tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
1013 }
1014
1015 return tmds;
1016}
1017
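/*
 * The DFP info table parsing below handles two record layouts: revision 3
 * uses a fixed 10-byte stride per { value, freq } entry starting at +0x08,
 * while revision 4 uses 10 bytes for the first entry and 6 bytes for each
 * subsequent one (hence the variable stride).
 */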
1018struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct
1019 radeon_encoder
1020 *encoder)
1021{
1022 struct drm_device *dev = encoder->base.dev;
1023 struct radeon_device *rdev = dev->dev_private;
1024 uint16_t tmds_info;
1025 int i, n;
1026 uint8_t ver;
1027 struct radeon_encoder_int_tmds *tmds = NULL;
1028
1029 if (rdev->bios == NULL)
1030 return radeon_legacy_get_tmds_info_from_table(rdev);
1031
1032 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1033
1034 if (tmds_info) {
1035 tmds =
1036 kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
1037
1038 if (!tmds)
1039 return NULL;
1040
1041 ver = RBIOS8(tmds_info);
1042 DRM_INFO("DFP table revision: %d\n", ver);
1043 if (ver == 3) {
1044 n = RBIOS8(tmds_info + 5) + 1;
1045 if (n > 4)
1046 n = 4;
1047 for (i = 0; i < n; i++) {
1048 tmds->tmds_pll[i].value =
1049 RBIOS32(tmds_info + i * 10 + 0x08);
1050 tmds->tmds_pll[i].freq =
1051 RBIOS16(tmds_info + i * 10 + 0x10);
1052 DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
1053 tmds->tmds_pll[i].freq,
1054 tmds->tmds_pll[i].value);
1055 }
1056 } else if (ver == 4) {
1057 int stride = 0;
1058 n = RBIOS8(tmds_info + 5) + 1;
1059 if (n > 4)
1060 n = 4;
1061 for (i = 0; i < n; i++) {
1062 tmds->tmds_pll[i].value =
1063 RBIOS32(tmds_info + stride + 0x08);
1064 tmds->tmds_pll[i].freq =
1065 RBIOS16(tmds_info + stride + 0x10);
1066 if (i == 0)
1067 stride += 10;
1068 else
1069 stride += 6;
1070 DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
1071 tmds->tmds_pll[i].freq,
1072 tmds->tmds_pll[i].value);
1073 }
1074 }
1075 } else
1076 DRM_INFO("No TMDS info found in BIOS\n");
1077 return tmds;
1078}
1079
1080void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
1081{
1082 struct drm_device *dev = encoder->base.dev;
1083 struct radeon_device *rdev = dev->dev_private;
1084 uint16_t ext_tmds_info;
1085 uint8_t ver;
1086
1087 if (rdev->bios == NULL)
1088 return;
1089
1090 ext_tmds_info =
1091 combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
1092 if (ext_tmds_info) {
1093 ver = RBIOS8(ext_tmds_info);
1094 DRM_INFO("External TMDS Table revision: %d\n", ver);
1095		/* TODO */
1096 }
1097}
1098
1099bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1100{
1101 struct radeon_device *rdev = dev->dev_private;
1102 struct radeon_i2c_bus_rec ddc_i2c;
1103
1104 rdev->mode_info.connector_table = radeon_connector_table;
1105 if (rdev->mode_info.connector_table == CT_NONE) {
1106#ifdef CONFIG_PPC_PMAC
1107 if (machine_is_compatible("PowerBook3,3")) {
1108 /* powerbook with VGA */
1109 rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
1110 } else if (machine_is_compatible("PowerBook3,4") ||
1111 machine_is_compatible("PowerBook3,5")) {
1112 /* powerbook with internal tmds */
1113 rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
1114 } else if (machine_is_compatible("PowerBook5,1") ||
1115 machine_is_compatible("PowerBook5,2") ||
1116 machine_is_compatible("PowerBook5,3") ||
1117 machine_is_compatible("PowerBook5,4") ||
1118 machine_is_compatible("PowerBook5,5")) {
1119 /* powerbook with external single link tmds (sil164) */
1120 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
1121 } else if (machine_is_compatible("PowerBook5,6")) {
1122 /* powerbook with external dual or single link tmds */
1123 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
1124 } else if (machine_is_compatible("PowerBook5,7") ||
1125 machine_is_compatible("PowerBook5,8") ||
1126 machine_is_compatible("PowerBook5,9")) {
1127 /* PowerBook6,2 ? */
1128 /* powerbook with external dual link tmds (sil1178?) */
1129 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
1130 } else if (machine_is_compatible("PowerBook4,1") ||
1131 machine_is_compatible("PowerBook4,2") ||
1132 machine_is_compatible("PowerBook4,3") ||
1133 machine_is_compatible("PowerBook6,3") ||
1134 machine_is_compatible("PowerBook6,5") ||
1135 machine_is_compatible("PowerBook6,7")) {
1136 /* ibook */
1137 rdev->mode_info.connector_table = CT_IBOOK;
1138 } else if (machine_is_compatible("PowerMac4,4")) {
1139 /* emac */
1140 rdev->mode_info.connector_table = CT_EMAC;
1141 } else if (machine_is_compatible("PowerMac10,1")) {
1142 /* mini with internal tmds */
1143 rdev->mode_info.connector_table = CT_MINI_INTERNAL;
1144 } else if (machine_is_compatible("PowerMac10,2")) {
1145 /* mini with external tmds */
1146 rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
1147 } else if (machine_is_compatible("PowerMac12,1")) {
1148 /* PowerMac8,1 ? */
1149 /* imac g5 isight */
1150 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
1151 } else
1152#endif /* CONFIG_PPC_PMAC */
1153 rdev->mode_info.connector_table = CT_GENERIC;
1154 }
1155
1156 switch (rdev->mode_info.connector_table) {
1157 case CT_GENERIC:
1158 DRM_INFO("Connector Table: %d (generic)\n",
1159 rdev->mode_info.connector_table);
1160 /* these are the most common settings */
1161 if (rdev->flags & RADEON_SINGLE_CRTC) {
1162 /* VGA - primary dac */
1163 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1164 radeon_add_legacy_encoder(dev,
1165 radeon_get_encoder_id(dev,
1166 ATOM_DEVICE_CRT1_SUPPORT,
1167 1),
1168 ATOM_DEVICE_CRT1_SUPPORT);
1169 radeon_add_legacy_connector(dev, 0,
1170 ATOM_DEVICE_CRT1_SUPPORT,
1171 DRM_MODE_CONNECTOR_VGA,
1172 &ddc_i2c);
1173 } else if (rdev->flags & RADEON_IS_MOBILITY) {
1174 /* LVDS */
1175 ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
1176 radeon_add_legacy_encoder(dev,
1177 radeon_get_encoder_id(dev,
1178 ATOM_DEVICE_LCD1_SUPPORT,
1179 0),
1180 ATOM_DEVICE_LCD1_SUPPORT);
1181 radeon_add_legacy_connector(dev, 0,
1182 ATOM_DEVICE_LCD1_SUPPORT,
1183 DRM_MODE_CONNECTOR_LVDS,
1184 &ddc_i2c);
1185
1186 /* VGA - primary dac */
1187 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1188 radeon_add_legacy_encoder(dev,
1189 radeon_get_encoder_id(dev,
1190 ATOM_DEVICE_CRT1_SUPPORT,
1191 1),
1192 ATOM_DEVICE_CRT1_SUPPORT);
1193 radeon_add_legacy_connector(dev, 1,
1194 ATOM_DEVICE_CRT1_SUPPORT,
1195 DRM_MODE_CONNECTOR_VGA,
1196 &ddc_i2c);
1197 } else {
1198 /* DVI-I - tv dac, int tmds */
1199 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1200 radeon_add_legacy_encoder(dev,
1201 radeon_get_encoder_id(dev,
1202 ATOM_DEVICE_DFP1_SUPPORT,
1203 0),
1204 ATOM_DEVICE_DFP1_SUPPORT);
1205 radeon_add_legacy_encoder(dev,
1206 radeon_get_encoder_id(dev,
1207 ATOM_DEVICE_CRT2_SUPPORT,
1208 2),
1209 ATOM_DEVICE_CRT2_SUPPORT);
1210 radeon_add_legacy_connector(dev, 0,
1211 ATOM_DEVICE_DFP1_SUPPORT |
1212 ATOM_DEVICE_CRT2_SUPPORT,
1213 DRM_MODE_CONNECTOR_DVII,
1214 &ddc_i2c);
1215
1216 /* VGA - primary dac */
1217 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1218 radeon_add_legacy_encoder(dev,
1219 radeon_get_encoder_id(dev,
1220 ATOM_DEVICE_CRT1_SUPPORT,
1221 1),
1222 ATOM_DEVICE_CRT1_SUPPORT);
1223 radeon_add_legacy_connector(dev, 1,
1224 ATOM_DEVICE_CRT1_SUPPORT,
1225 DRM_MODE_CONNECTOR_VGA,
1226 &ddc_i2c);
1227 }
1228
1229 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
1230 /* TV - tv dac */
1231 radeon_add_legacy_encoder(dev,
1232 radeon_get_encoder_id(dev,
1233 ATOM_DEVICE_TV1_SUPPORT,
1234 2),
1235 ATOM_DEVICE_TV1_SUPPORT);
1236 radeon_add_legacy_connector(dev, 2,
1237 ATOM_DEVICE_TV1_SUPPORT,
1238 DRM_MODE_CONNECTOR_SVIDEO,
1239 &ddc_i2c);
1240 }
1241 break;
1242 case CT_IBOOK:
1243 DRM_INFO("Connector Table: %d (ibook)\n",
1244 rdev->mode_info.connector_table);
1245 /* LVDS */
1246 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1247 radeon_add_legacy_encoder(dev,
1248 radeon_get_encoder_id(dev,
1249 ATOM_DEVICE_LCD1_SUPPORT,
1250 0),
1251 ATOM_DEVICE_LCD1_SUPPORT);
1252 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1253 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1254 /* VGA - TV DAC */
1255 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1256 radeon_add_legacy_encoder(dev,
1257 radeon_get_encoder_id(dev,
1258 ATOM_DEVICE_CRT2_SUPPORT,
1259 2),
1260 ATOM_DEVICE_CRT2_SUPPORT);
1261 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1262 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1263 /* TV - TV DAC */
1264 radeon_add_legacy_encoder(dev,
1265 radeon_get_encoder_id(dev,
1266 ATOM_DEVICE_TV1_SUPPORT,
1267 2),
1268 ATOM_DEVICE_TV1_SUPPORT);
1269 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1270 DRM_MODE_CONNECTOR_SVIDEO,
1271 &ddc_i2c);
1272 break;
1273 case CT_POWERBOOK_EXTERNAL:
1274 DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
1275 rdev->mode_info.connector_table);
1276 /* LVDS */
1277 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1278 radeon_add_legacy_encoder(dev,
1279 radeon_get_encoder_id(dev,
1280 ATOM_DEVICE_LCD1_SUPPORT,
1281 0),
1282 ATOM_DEVICE_LCD1_SUPPORT);
1283 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1284 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1285 /* DVI-I - primary dac, ext tmds */
1286 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1287 radeon_add_legacy_encoder(dev,
1288 radeon_get_encoder_id(dev,
1289 ATOM_DEVICE_DFP2_SUPPORT,
1290 0),
1291 ATOM_DEVICE_DFP2_SUPPORT);
1292 radeon_add_legacy_encoder(dev,
1293 radeon_get_encoder_id(dev,
1294 ATOM_DEVICE_CRT1_SUPPORT,
1295 1),
1296 ATOM_DEVICE_CRT1_SUPPORT);
1297 radeon_add_legacy_connector(dev, 1,
1298 ATOM_DEVICE_DFP2_SUPPORT |
1299 ATOM_DEVICE_CRT1_SUPPORT,
1300 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1301 /* TV - TV DAC */
1302 radeon_add_legacy_encoder(dev,
1303 radeon_get_encoder_id(dev,
1304 ATOM_DEVICE_TV1_SUPPORT,
1305 2),
1306 ATOM_DEVICE_TV1_SUPPORT);
1307 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1308 DRM_MODE_CONNECTOR_SVIDEO,
1309 &ddc_i2c);
1310 break;
1311 case CT_POWERBOOK_INTERNAL:
1312 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
1313 rdev->mode_info.connector_table);
1314 /* LVDS */
1315 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1316 radeon_add_legacy_encoder(dev,
1317 radeon_get_encoder_id(dev,
1318 ATOM_DEVICE_LCD1_SUPPORT,
1319 0),
1320 ATOM_DEVICE_LCD1_SUPPORT);
1321 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1322 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1323 /* DVI-I - primary dac, int tmds */
1324 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1325 radeon_add_legacy_encoder(dev,
1326 radeon_get_encoder_id(dev,
1327 ATOM_DEVICE_DFP1_SUPPORT,
1328 0),
1329 ATOM_DEVICE_DFP1_SUPPORT);
1330 radeon_add_legacy_encoder(dev,
1331 radeon_get_encoder_id(dev,
1332 ATOM_DEVICE_CRT1_SUPPORT,
1333 1),
1334 ATOM_DEVICE_CRT1_SUPPORT);
1335 radeon_add_legacy_connector(dev, 1,
1336 ATOM_DEVICE_DFP1_SUPPORT |
1337 ATOM_DEVICE_CRT1_SUPPORT,
1338 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1339 /* TV - TV DAC */
1340 radeon_add_legacy_encoder(dev,
1341 radeon_get_encoder_id(dev,
1342 ATOM_DEVICE_TV1_SUPPORT,
1343 2),
1344 ATOM_DEVICE_TV1_SUPPORT);
1345 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1346 DRM_MODE_CONNECTOR_SVIDEO,
1347 &ddc_i2c);
1348 break;
1349 case CT_POWERBOOK_VGA:
1350 DRM_INFO("Connector Table: %d (powerbook vga)\n",
1351 rdev->mode_info.connector_table);
1352 /* LVDS */
1353 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1354 radeon_add_legacy_encoder(dev,
1355 radeon_get_encoder_id(dev,
1356 ATOM_DEVICE_LCD1_SUPPORT,
1357 0),
1358 ATOM_DEVICE_LCD1_SUPPORT);
1359 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1360 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1361 /* VGA - primary dac */
1362 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1363 radeon_add_legacy_encoder(dev,
1364 radeon_get_encoder_id(dev,
1365 ATOM_DEVICE_CRT1_SUPPORT,
1366 1),
1367 ATOM_DEVICE_CRT1_SUPPORT);
1368 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
1369 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1370 /* TV - TV DAC */
1371 radeon_add_legacy_encoder(dev,
1372 radeon_get_encoder_id(dev,
1373 ATOM_DEVICE_TV1_SUPPORT,
1374 2),
1375 ATOM_DEVICE_TV1_SUPPORT);
1376 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1377 DRM_MODE_CONNECTOR_SVIDEO,
1378 &ddc_i2c);
1379 break;
1380 case CT_MINI_EXTERNAL:
1381 DRM_INFO("Connector Table: %d (mini external tmds)\n",
1382 rdev->mode_info.connector_table);
1383 /* DVI-I - tv dac, ext tmds */
1384 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1385 radeon_add_legacy_encoder(dev,
1386 radeon_get_encoder_id(dev,
1387 ATOM_DEVICE_DFP2_SUPPORT,
1388 0),
1389 ATOM_DEVICE_DFP2_SUPPORT);
1390 radeon_add_legacy_encoder(dev,
1391 radeon_get_encoder_id(dev,
1392 ATOM_DEVICE_CRT2_SUPPORT,
1393 2),
1394 ATOM_DEVICE_CRT2_SUPPORT);
1395 radeon_add_legacy_connector(dev, 0,
1396 ATOM_DEVICE_DFP2_SUPPORT |
1397 ATOM_DEVICE_CRT2_SUPPORT,
1398 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1399 /* TV - TV DAC */
1400 radeon_add_legacy_encoder(dev,
1401 radeon_get_encoder_id(dev,
1402 ATOM_DEVICE_TV1_SUPPORT,
1403 2),
1404 ATOM_DEVICE_TV1_SUPPORT);
1405 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1406 DRM_MODE_CONNECTOR_SVIDEO,
1407 &ddc_i2c);
1408 break;
1409 case CT_MINI_INTERNAL:
1410 DRM_INFO("Connector Table: %d (mini internal tmds)\n",
1411 rdev->mode_info.connector_table);
1412 /* DVI-I - tv dac, int tmds */
1413 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1414 radeon_add_legacy_encoder(dev,
1415 radeon_get_encoder_id(dev,
1416 ATOM_DEVICE_DFP1_SUPPORT,
1417 0),
1418 ATOM_DEVICE_DFP1_SUPPORT);
1419 radeon_add_legacy_encoder(dev,
1420 radeon_get_encoder_id(dev,
1421 ATOM_DEVICE_CRT2_SUPPORT,
1422 2),
1423 ATOM_DEVICE_CRT2_SUPPORT);
1424 radeon_add_legacy_connector(dev, 0,
1425 ATOM_DEVICE_DFP1_SUPPORT |
1426 ATOM_DEVICE_CRT2_SUPPORT,
1427 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1428 /* TV - TV DAC */
1429 radeon_add_legacy_encoder(dev,
1430 radeon_get_encoder_id(dev,
1431 ATOM_DEVICE_TV1_SUPPORT,
1432 2),
1433 ATOM_DEVICE_TV1_SUPPORT);
1434 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1435 DRM_MODE_CONNECTOR_SVIDEO,
1436 &ddc_i2c);
1437 break;
1438 case CT_IMAC_G5_ISIGHT:
1439 DRM_INFO("Connector Table: %d (imac g5 isight)\n",
1440 rdev->mode_info.connector_table);
1441 /* DVI-D - int tmds */
1442 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
1443 radeon_add_legacy_encoder(dev,
1444 radeon_get_encoder_id(dev,
1445 ATOM_DEVICE_DFP1_SUPPORT,
1446 0),
1447 ATOM_DEVICE_DFP1_SUPPORT);
1448 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
1449 DRM_MODE_CONNECTOR_DVID, &ddc_i2c);
1450 /* VGA - tv dac */
1451 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1452 radeon_add_legacy_encoder(dev,
1453 radeon_get_encoder_id(dev,
1454 ATOM_DEVICE_CRT2_SUPPORT,
1455 2),
1456 ATOM_DEVICE_CRT2_SUPPORT);
1457 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1458 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1459 /* TV - TV DAC */
1460 radeon_add_legacy_encoder(dev,
1461 radeon_get_encoder_id(dev,
1462 ATOM_DEVICE_TV1_SUPPORT,
1463 2),
1464 ATOM_DEVICE_TV1_SUPPORT);
1465 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1466 DRM_MODE_CONNECTOR_SVIDEO,
1467 &ddc_i2c);
1468 break;
1469 case CT_EMAC:
1470 DRM_INFO("Connector Table: %d (emac)\n",
1471 rdev->mode_info.connector_table);
1472 /* VGA - primary dac */
1473 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1474 radeon_add_legacy_encoder(dev,
1475 radeon_get_encoder_id(dev,
1476 ATOM_DEVICE_CRT1_SUPPORT,
1477 1),
1478 ATOM_DEVICE_CRT1_SUPPORT);
1479 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
1480 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1481 /* VGA - tv dac */
1482 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1483 radeon_add_legacy_encoder(dev,
1484 radeon_get_encoder_id(dev,
1485 ATOM_DEVICE_CRT2_SUPPORT,
1486 2),
1487 ATOM_DEVICE_CRT2_SUPPORT);
1488 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1489 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1490 /* TV - TV DAC */
1491 radeon_add_legacy_encoder(dev,
1492 radeon_get_encoder_id(dev,
1493 ATOM_DEVICE_TV1_SUPPORT,
1494 2),
1495 ATOM_DEVICE_TV1_SUPPORT);
1496 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1497 DRM_MODE_CONNECTOR_SVIDEO,
1498 &ddc_i2c);
1499 break;
1500 default:
1501 DRM_INFO("Connector table: %d (invalid)\n",
1502 rdev->mode_info.connector_table);
1503 return false;
1504 }
1505
1506 radeon_link_encoder_connector(dev);
1507
1508 return true;
1509}
1510
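/*
 * Per-board fixups for the legacy connector table: reroute DDC on RS400/
 * RS480 (Xpress) boards, drop the phantom CRT2-DDC VGA entry on certain
 * IBM RN50 cards, and fix up or drop bogus DVI entries on a few OEM boards.
 * Returns false when the connector entry should be ignored entirely.
 */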
1511static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1512 int bios_index,
1513 enum radeon_combios_connector
1514 *legacy_connector,
1515 struct radeon_i2c_bus_rec *ddc_i2c)
1516{
1517 struct radeon_device *rdev = dev->dev_private;
1518
1519 /* XPRESS DDC quirks */
1520 if ((rdev->family == CHIP_RS400 ||
1521 rdev->family == CHIP_RS480) &&
1522 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1523 *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
1524 else if ((rdev->family == CHIP_RS400 ||
1525 rdev->family == CHIP_RS480) &&
1526 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
1527 ddc_i2c->valid = true;
1528 ddc_i2c->mask_clk_mask = (0x20 << 8);
1529 ddc_i2c->mask_data_mask = 0x80;
1530 ddc_i2c->a_clk_mask = (0x20 << 8);
1531 ddc_i2c->a_data_mask = 0x80;
1532 ddc_i2c->put_clk_mask = (0x20 << 8);
1533 ddc_i2c->put_data_mask = 0x80;
1534 ddc_i2c->get_clk_mask = (0x20 << 8);
1535 ddc_i2c->get_data_mask = 0x80;
1536 ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
1537 ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
1538 ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
1539 ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
1540 ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
1541 ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
1542 ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
1543 ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
1544 }
1545
1546 /* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
1547 one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
1548 if (dev->pdev->device == 0x515e &&
1549 dev->pdev->subsystem_vendor == 0x1014) {
1550 if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
1551 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1552 return false;
1553 }
1554
1555 /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
1556 if (dev->pdev->device == 0x5159 &&
1557 dev->pdev->subsystem_vendor == 0x1002 &&
1558 dev->pdev->subsystem_device == 0x013a) {
1559 if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
1560 *legacy_connector = CONNECTOR_CRT_LEGACY;
1561
1562 }
1563
1564 /* X300 card with extra non-existent DVI port */
1565 if (dev->pdev->device == 0x5B60 &&
1566 dev->pdev->subsystem_vendor == 0x17af &&
1567 dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
1568 if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
1569 return false;
1570 }
1571
1572 return true;
1573}
1574
1575bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1576{
1577 struct radeon_device *rdev = dev->dev_private;
1578 uint32_t conn_info, entry, devices;
1579 uint16_t tmp;
1580 enum radeon_combios_ddc ddc_type;
1581 enum radeon_combios_connector connector;
1582 int i = 0;
1583 struct radeon_i2c_bus_rec ddc_i2c;
1584
1585 if (rdev->bios == NULL)
1586 return false;
1587
1588 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
1589 if (conn_info) {
1590 for (i = 0; i < 4; i++) {
1591 entry = conn_info + 2 + i * 2;
1592
1593 if (!RBIOS16(entry))
1594 break;
1595
1596 tmp = RBIOS16(entry);
1597
1598 connector = (tmp >> 12) & 0xf;
1599
1600 ddc_type = (tmp >> 8) & 0xf;
1601 switch (ddc_type) {
1602 case DDC_MONID:
1603 ddc_i2c =
1604 combios_setup_i2c_bus(RADEON_GPIO_MONID);
1605 break;
1606 case DDC_DVI:
1607 ddc_i2c =
1608 combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1609 break;
1610 case DDC_VGA:
1611 ddc_i2c =
1612 combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1613 break;
1614 case DDC_CRT2:
1615 ddc_i2c =
1616 combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1617 break;
1618 default:
1619 break;
1620 }
1621
1622 radeon_apply_legacy_quirks(dev, i, &connector,
1623 &ddc_i2c);
1624
1625 switch (connector) {
1626 case CONNECTOR_PROPRIETARY_LEGACY:
1627 if ((tmp >> 4) & 0x1)
1628 devices = ATOM_DEVICE_DFP2_SUPPORT;
1629 else
1630 devices = ATOM_DEVICE_DFP1_SUPPORT;
1631 radeon_add_legacy_encoder(dev,
1632 radeon_get_encoder_id
1633 (dev, devices, 0),
1634 devices);
1635 radeon_add_legacy_connector(dev, i, devices,
1636 legacy_connector_convert
1637 [connector],
1638 &ddc_i2c);
1639 break;
1640 case CONNECTOR_CRT_LEGACY:
1641 if (tmp & 0x1) {
1642 devices = ATOM_DEVICE_CRT2_SUPPORT;
1643 radeon_add_legacy_encoder(dev,
1644 radeon_get_encoder_id
1645 (dev,
1646 ATOM_DEVICE_CRT2_SUPPORT,
1647 2),
1648 ATOM_DEVICE_CRT2_SUPPORT);
1649 } else {
1650 devices = ATOM_DEVICE_CRT1_SUPPORT;
1651 radeon_add_legacy_encoder(dev,
1652 radeon_get_encoder_id
1653 (dev,
1654 ATOM_DEVICE_CRT1_SUPPORT,
1655 1),
1656 ATOM_DEVICE_CRT1_SUPPORT);
1657 }
1658 radeon_add_legacy_connector(dev,
1659 i,
1660 devices,
1661 legacy_connector_convert
1662 [connector],
1663 &ddc_i2c);
1664 break;
1665 case CONNECTOR_DVI_I_LEGACY:
1666 devices = 0;
1667 if (tmp & 0x1) {
1668 devices |= ATOM_DEVICE_CRT2_SUPPORT;
1669 radeon_add_legacy_encoder(dev,
1670 radeon_get_encoder_id
1671 (dev,
1672 ATOM_DEVICE_CRT2_SUPPORT,
1673 2),
1674 ATOM_DEVICE_CRT2_SUPPORT);
1675 } else {
1676 devices |= ATOM_DEVICE_CRT1_SUPPORT;
1677 radeon_add_legacy_encoder(dev,
1678 radeon_get_encoder_id
1679 (dev,
1680 ATOM_DEVICE_CRT1_SUPPORT,
1681 1),
1682 ATOM_DEVICE_CRT1_SUPPORT);
1683 }
1684 if ((tmp >> 4) & 0x1) {
1685 devices |= ATOM_DEVICE_DFP2_SUPPORT;
1686 radeon_add_legacy_encoder(dev,
1687 radeon_get_encoder_id
1688 (dev,
1689 ATOM_DEVICE_DFP2_SUPPORT,
1690 0),
1691 ATOM_DEVICE_DFP2_SUPPORT);
1692 } else {
1693 devices |= ATOM_DEVICE_DFP1_SUPPORT;
1694 radeon_add_legacy_encoder(dev,
1695 radeon_get_encoder_id
1696 (dev,
1697 ATOM_DEVICE_DFP1_SUPPORT,
1698 0),
1699 ATOM_DEVICE_DFP1_SUPPORT);
1700 }
1701 radeon_add_legacy_connector(dev,
1702 i,
1703 devices,
1704 legacy_connector_convert
1705 [connector],
1706 &ddc_i2c);
1707 break;
1708 case CONNECTOR_DVI_D_LEGACY:
1709 if ((tmp >> 4) & 0x1)
1710 devices = ATOM_DEVICE_DFP2_SUPPORT;
1711 else
1712 devices = ATOM_DEVICE_DFP1_SUPPORT;
1713 radeon_add_legacy_encoder(dev,
1714 radeon_get_encoder_id
1715 (dev, devices, 0),
1716 devices);
1717 radeon_add_legacy_connector(dev, i, devices,
1718 legacy_connector_convert
1719 [connector],
1720 &ddc_i2c);
1721 break;
1722 case CONNECTOR_CTV_LEGACY:
1723 case CONNECTOR_STV_LEGACY:
1724 radeon_add_legacy_encoder(dev,
1725 radeon_get_encoder_id
1726 (dev,
1727 ATOM_DEVICE_TV1_SUPPORT,
1728 2),
1729 ATOM_DEVICE_TV1_SUPPORT);
1730 radeon_add_legacy_connector(dev, i,
1731 ATOM_DEVICE_TV1_SUPPORT,
1732 legacy_connector_convert
1733 [connector],
1734 &ddc_i2c);
1735 break;
1736 default:
1737 DRM_ERROR("Unknown connector type: %d\n",
1738 connector);
1739 continue;
1740 }
1741
1742 }
1743 } else {
1744 uint16_t tmds_info =
1745 combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1746 if (tmds_info) {
1747 DRM_DEBUG("Found DFP table, assuming DVI connector\n");
1748
1749 radeon_add_legacy_encoder(dev,
1750 radeon_get_encoder_id(dev,
1751 ATOM_DEVICE_CRT1_SUPPORT,
1752 1),
1753 ATOM_DEVICE_CRT1_SUPPORT);
1754 radeon_add_legacy_encoder(dev,
1755 radeon_get_encoder_id(dev,
1756 ATOM_DEVICE_DFP1_SUPPORT,
1757 0),
1758 ATOM_DEVICE_DFP1_SUPPORT);
1759
1760 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1761 radeon_add_legacy_connector(dev,
1762 0,
1763 ATOM_DEVICE_CRT1_SUPPORT |
1764 ATOM_DEVICE_DFP1_SUPPORT,
1765 DRM_MODE_CONNECTOR_DVII,
1766 &ddc_i2c);
1767 } else {
1768 DRM_DEBUG("No connector info found\n");
1769 return false;
1770 }
1771 }
1772
1773 if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
1774 uint16_t lcd_info =
1775 combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
1776 if (lcd_info) {
1777 uint16_t lcd_ddc_info =
1778 combios_get_table_offset(dev,
1779 COMBIOS_LCD_DDC_INFO_TABLE);
1780
1781 radeon_add_legacy_encoder(dev,
1782 radeon_get_encoder_id(dev,
1783 ATOM_DEVICE_LCD1_SUPPORT,
1784 0),
1785 ATOM_DEVICE_LCD1_SUPPORT);
1786
1787 if (lcd_ddc_info) {
1788 ddc_type = RBIOS8(lcd_ddc_info + 2);
1789 switch (ddc_type) {
1790 case DDC_MONID:
1791 ddc_i2c =
1792 combios_setup_i2c_bus
1793 (RADEON_GPIO_MONID);
1794 break;
1795 case DDC_DVI:
1796 ddc_i2c =
1797 combios_setup_i2c_bus
1798 (RADEON_GPIO_DVI_DDC);
1799 break;
1800 case DDC_VGA:
1801 ddc_i2c =
1802 combios_setup_i2c_bus
1803 (RADEON_GPIO_VGA_DDC);
1804 break;
1805 case DDC_CRT2:
1806 ddc_i2c =
1807 combios_setup_i2c_bus
1808 (RADEON_GPIO_CRT2_DDC);
1809 break;
1810 case DDC_LCD:
1811 ddc_i2c =
1812 combios_setup_i2c_bus
1813 (RADEON_LCD_GPIO_MASK);
1814 ddc_i2c.mask_clk_mask =
1815 RBIOS32(lcd_ddc_info + 3);
1816 ddc_i2c.mask_data_mask =
1817 RBIOS32(lcd_ddc_info + 7);
1818 ddc_i2c.a_clk_mask =
1819 RBIOS32(lcd_ddc_info + 3);
1820 ddc_i2c.a_data_mask =
1821 RBIOS32(lcd_ddc_info + 7);
1822 ddc_i2c.put_clk_mask =
1823 RBIOS32(lcd_ddc_info + 3);
1824 ddc_i2c.put_data_mask =
1825 RBIOS32(lcd_ddc_info + 7);
1826 ddc_i2c.get_clk_mask =
1827 RBIOS32(lcd_ddc_info + 3);
1828 ddc_i2c.get_data_mask =
1829 RBIOS32(lcd_ddc_info + 7);
1830 break;
1831 case DDC_GPIO:
1832 ddc_i2c =
1833 combios_setup_i2c_bus
1834 (RADEON_MDGPIO_EN_REG);
1835 ddc_i2c.mask_clk_mask =
1836 RBIOS32(lcd_ddc_info + 3);
1837 ddc_i2c.mask_data_mask =
1838 RBIOS32(lcd_ddc_info + 7);
1839 ddc_i2c.a_clk_mask =
1840 RBIOS32(lcd_ddc_info + 3);
1841 ddc_i2c.a_data_mask =
1842 RBIOS32(lcd_ddc_info + 7);
1843 ddc_i2c.put_clk_mask =
1844 RBIOS32(lcd_ddc_info + 3);
1845 ddc_i2c.put_data_mask =
1846 RBIOS32(lcd_ddc_info + 7);
1847 ddc_i2c.get_clk_mask =
1848 RBIOS32(lcd_ddc_info + 3);
1849 ddc_i2c.get_data_mask =
1850 RBIOS32(lcd_ddc_info + 7);
1851 break;
1852 default:
1853 ddc_i2c.valid = false;
1854 break;
1855 }
1856 DRM_DEBUG("LCD DDC Info Table found!\n");
1857 } else
1858 ddc_i2c.valid = false;
1859
1860 radeon_add_legacy_connector(dev,
1861 5,
1862 ATOM_DEVICE_LCD1_SUPPORT,
1863 DRM_MODE_CONNECTOR_LVDS,
1864 &ddc_i2c);
1865 }
1866 }
1867
1868 /* check TV table */
1869 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
1870 uint32_t tv_info =
1871 combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
1872 if (tv_info) {
1873 if (RBIOS8(tv_info + 6) == 'T') {
1874 radeon_add_legacy_encoder(dev,
1875 radeon_get_encoder_id
1876 (dev,
1877 ATOM_DEVICE_TV1_SUPPORT,
1878 2),
1879 ATOM_DEVICE_TV1_SUPPORT);
1880 radeon_add_legacy_connector(dev, 6,
1881 ATOM_DEVICE_TV1_SUPPORT,
1882 DRM_MODE_CONNECTOR_SVIDEO,
1883 &ddc_i2c);
1884 }
1885 }
1886 }
1887
1888 radeon_link_encoder_connector(dev);
1889
1890 return true;
1891}
1892
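/*
 * ASIC init (MMIO) table format, judging by the parser below: a sequence of
 * 16-bit words terminated by 0, each with a command in bits 15-13 and a
 * register offset in bits 12-0.  Commands 0/1 write a 32-bit immediate,
 * 2/3 do an AND/OR read-modify-write, 4 delays for the given number of
 * microseconds, and 5 polls for memory-controller idle/busy.
 */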
1893static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
1894{
1895 struct radeon_device *rdev = dev->dev_private;
1896
1897 if (offset) {
1898 while (RBIOS16(offset)) {
1899 uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
1900 uint32_t addr = (RBIOS16(offset) & 0x1fff);
1901 uint32_t val, and_mask, or_mask;
1902 uint32_t tmp;
1903
1904 offset += 2;
1905 switch (cmd) {
1906 case 0:
1907 val = RBIOS32(offset);
1908 offset += 4;
1909 WREG32(addr, val);
1910 break;
1911 case 1:
1912 val = RBIOS32(offset);
1913 offset += 4;
1914 WREG32(addr, val);
1915 break;
1916 case 2:
1917 and_mask = RBIOS32(offset);
1918 offset += 4;
1919 or_mask = RBIOS32(offset);
1920 offset += 4;
1921 tmp = RREG32(addr);
1922 tmp &= and_mask;
1923 tmp |= or_mask;
1924 WREG32(addr, tmp);
1925 break;
1926 case 3:
1927 and_mask = RBIOS32(offset);
1928 offset += 4;
1929 or_mask = RBIOS32(offset);
1930 offset += 4;
1931 tmp = RREG32(addr);
1932 tmp &= and_mask;
1933 tmp |= or_mask;
1934 WREG32(addr, tmp);
1935 break;
1936 case 4:
1937 val = RBIOS16(offset);
1938 offset += 2;
1939 udelay(val);
1940 break;
1941 case 5:
1942 val = RBIOS16(offset);
1943 offset += 2;
1944 switch (addr) {
1945 case 8:
1946 while (val--) {
1947 if (!
1948 (RREG32_PLL
1949 (RADEON_CLK_PWRMGT_CNTL) &
1950 RADEON_MC_BUSY))
1951 break;
1952 }
1953 break;
1954 case 9:
1955 while (val--) {
1956 if ((RREG32(RADEON_MC_STATUS) &
1957 RADEON_MC_IDLE))
1958 break;
1959 }
1960 break;
1961 default:
1962 break;
1963 }
1964 break;
1965 default:
1966 break;
1967 }
1968 }
1969 }
1970}
1971
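/*
 * PLL init table format, as interpreted below: a zero-terminated sequence
 * of bytes with a command in bits 7-6 and a PLL register index in bits 5-0.
 * Command 0 writes a 32-bit immediate, 1 does a byte-lane masked
 * read-modify-write (shift, AND mask, OR mask), and 2/3 perform delays or
 * wait for MC-not-busy / DLL-ready / clock-gating conditions keyed on the
 * index field.
 */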
1972static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
1973{
1974 struct radeon_device *rdev = dev->dev_private;
1975
1976 if (offset) {
1977 while (RBIOS8(offset)) {
1978 uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
1979 uint8_t addr = (RBIOS8(offset) & 0x3f);
1980 uint32_t val, shift, tmp;
1981 uint32_t and_mask, or_mask;
1982
1983 offset++;
1984 switch (cmd) {
1985 case 0:
1986 val = RBIOS32(offset);
1987 offset += 4;
1988 WREG32_PLL(addr, val);
1989 break;
1990 case 1:
1991 shift = RBIOS8(offset) * 8;
1992 offset++;
1993 and_mask = RBIOS8(offset) << shift;
1994 and_mask |= ~(0xff << shift);
1995 offset++;
1996 or_mask = RBIOS8(offset) << shift;
1997 offset++;
1998 tmp = RREG32_PLL(addr);
1999 tmp &= and_mask;
2000 tmp |= or_mask;
2001 WREG32_PLL(addr, tmp);
2002 break;
2003 case 2:
2004 case 3:
2005 tmp = 1000;
2006 switch (addr) {
2007 case 1:
2008 udelay(150);
2009 break;
2010 case 2:
2011 udelay(1000);
2012 break;
2013 case 3:
2014 while (tmp--) {
2015 if (!
2016 (RREG32_PLL
2017 (RADEON_CLK_PWRMGT_CNTL) &
2018 RADEON_MC_BUSY))
2019 break;
2020 }
2021 break;
2022 case 4:
2023 while (tmp--) {
2024 if (RREG32_PLL
2025 (RADEON_CLK_PWRMGT_CNTL) &
2026 RADEON_DLL_READY)
2027 break;
2028 }
2029 break;
2030 case 5:
2031 tmp =
2032 RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
2033 if (tmp & RADEON_CG_NO1_DEBUG_0) {
2034#if 0
2035 uint32_t mclk_cntl =
2036 RREG32_PLL
2037 (RADEON_MCLK_CNTL);
2038 mclk_cntl &= 0xffff0000;
2039 /*mclk_cntl |= 0x00001111;*//* ??? */
2040 WREG32_PLL(RADEON_MCLK_CNTL,
2041 mclk_cntl);
2042 udelay(10000);
2043#endif
2044 WREG32_PLL
2045 (RADEON_CLK_PWRMGT_CNTL,
2046 tmp &
2047 ~RADEON_CG_NO1_DEBUG_0);
2048 udelay(10000);
2049 }
2050 break;
2051 default:
2052 break;
2053 }
2054 break;
2055 default:
2056 break;
2057 }
2058 }
2059 }
2060}
2061
2062static void combios_parse_ram_reset_table(struct drm_device *dev,
2063 uint16_t offset)
2064{
2065 struct radeon_device *rdev = dev->dev_private;
2066 uint32_t tmp;
2067
2068 if (offset) {
2069 uint8_t val = RBIOS8(offset);
2070 while (val != 0xff) {
2071 offset++;
2072
2073 if (val == 0x0f) {
2074 uint32_t channel_complete_mask;
2075
2076 if (ASIC_IS_R300(rdev))
2077 channel_complete_mask =
2078 R300_MEM_PWRUP_COMPLETE;
2079 else
2080 channel_complete_mask =
2081 RADEON_MEM_PWRUP_COMPLETE;
2082 tmp = 20000;
2083 while (tmp--) {
2084 if ((RREG32(RADEON_MEM_STR_CNTL) &
2085 channel_complete_mask) ==
2086 channel_complete_mask)
2087 break;
2088 }
2089 } else {
2090 uint32_t or_mask = RBIOS16(offset);
2091 offset += 2;
2092
2093 tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2094 tmp &= RADEON_SDRAM_MODE_MASK;
2095 tmp |= or_mask;
2096 WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
2097
2098 or_mask = val << 24;
2099 tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
2100 tmp &= RADEON_B3MEM_RESET_MASK;
2101 tmp |= or_mask;
2102 WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
2103 }
2104 val = RBIOS8(offset);
2105 }
2106 }
2107}
2108
2109static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
2110 int mem_addr_mapping)
2111{
2112 struct radeon_device *rdev = dev->dev_private;
2113 uint32_t mem_cntl;
2114 uint32_t mem_size;
2115 uint32_t addr = 0;
2116
2117 mem_cntl = RREG32(RADEON_MEM_CNTL);
2118 if (mem_cntl & RV100_HALF_MODE)
2119 ram /= 2;
2120 mem_size = ram;
2121 mem_cntl &= ~(0xff << 8);
2122 mem_cntl |= (mem_addr_mapping & 0xff) << 8;
2123 WREG32(RADEON_MEM_CNTL, mem_cntl);
2124 RREG32(RADEON_MEM_CNTL);
2125
2126 /* sdram reset ? */
2127
2128 /* something like this???? */
2129 while (ram--) {
2130 addr = ram * 1024 * 1024;
2131 /* write to each page */
2132 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
2133 WREG32(RADEON_MM_DATA, 0xdeadbeef);
2134 /* read back and verify */
2135 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
2136 if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
2137 return 0;
2138 }
2139
2140 return mem_size;
2141}
2142
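/*
 * RAM size programming strategy used below: prefer the size recorded in the
 * detected-memory table; failing that, walk the memory-config table and
 * probe each candidate size with combios_detect_ram() (write/read-back of
 * 0xdeadbeef per megabyte).  The result is converted to bytes and written
 * to RADEON_CONFIG_MEMSIZE.  IGP parts are skipped since their memory is
 * carved out of system RAM.
 */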
2143static void combios_write_ram_size(struct drm_device *dev)
2144{
2145 struct radeon_device *rdev = dev->dev_private;
2146 uint8_t rev;
2147 uint16_t offset;
2148 uint32_t mem_size = 0;
2149 uint32_t mem_cntl = 0;
2150
2151 /* should do something smarter here I guess... */
2152 if (rdev->flags & RADEON_IS_IGP)
2153 return;
2154
2155 /* first check detected mem table */
2156 offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
2157 if (offset) {
2158 rev = RBIOS8(offset);
2159 if (rev < 3) {
2160 mem_cntl = RBIOS32(offset + 1);
2161 mem_size = RBIOS16(offset + 5);
2162 if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
2163 ((dev->pdev->device != 0x515e)
2164 && (dev->pdev->device != 0x5969)))
2165 WREG32(RADEON_MEM_CNTL, mem_cntl);
2166 }
2167 }
2168
2169 if (!mem_size) {
2170 offset =
2171 combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
2172 if (offset) {
2173 rev = RBIOS8(offset - 1);
2174 if (rev < 1) {
2175 if (((rdev->flags & RADEON_FAMILY_MASK) <
2176 CHIP_R200)
2177 && ((dev->pdev->device != 0x515e)
2178 && (dev->pdev->device != 0x5969))) {
2179 int ram = 0;
2180 int mem_addr_mapping = 0;
2181
2182 while (RBIOS8(offset)) {
2183 ram = RBIOS8(offset);
2184 mem_addr_mapping =
2185 RBIOS8(offset + 1);
2186 if (mem_addr_mapping != 0x25)
2187 ram *= 2;
2188 mem_size =
2189 combios_detect_ram(dev, ram,
2190 mem_addr_mapping);
2191 if (mem_size)
2192 break;
2193 offset += 2;
2194 }
2195 } else
2196 mem_size = RBIOS8(offset);
2197 } else {
2198 mem_size = RBIOS8(offset);
2199 mem_size *= 2; /* convert to MB */
2200 }
2201 }
2202 }
2203
2204 mem_size *= (1024 * 1024); /* convert to bytes */
2205 WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
2206}
2207
2208void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
2209{
2210 uint16_t dyn_clk_info =
2211 combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
2212
2213 if (dyn_clk_info)
2214 combios_parse_pll_table(dev, dyn_clk_info);
2215}
2216
2217void radeon_combios_asic_init(struct drm_device *dev)
2218{
2219 struct radeon_device *rdev = dev->dev_private;
2220 uint16_t table;
2221
2222 /* port hardcoded mac stuff from radeonfb */
2223 if (rdev->bios == NULL)
2224 return;
2225
2226 /* ASIC INIT 1 */
2227 table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
2228 if (table)
2229 combios_parse_mmio_table(dev, table);
2230
2231 /* PLL INIT */
2232 table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
2233 if (table)
2234 combios_parse_pll_table(dev, table);
2235
2236 /* ASIC INIT 2 */
2237 table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
2238 if (table)
2239 combios_parse_mmio_table(dev, table);
2240
2241 if (!(rdev->flags & RADEON_IS_IGP)) {
2242 /* ASIC INIT 4 */
2243 table =
2244 combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
2245 if (table)
2246 combios_parse_mmio_table(dev, table);
2247
2248 /* RAM RESET */
2249 table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
2250 if (table)
2251 combios_parse_ram_reset_table(dev, table);
2252
2253 /* ASIC INIT 3 */
2254 table =
2255 combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
2256 if (table)
2257 combios_parse_mmio_table(dev, table);
2258
2259 /* write CONFIG_MEMSIZE */
2260 combios_write_ram_size(dev);
2261 }
2262
2263 /* DYN CLK 1 */
2264 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
2265 if (table)
2266 combios_parse_pll_table(dev, table);
2267
2268}
2269
2270void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
2271{
2272 struct radeon_device *rdev = dev->dev_private;
2273 uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
2274
2275 bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
2276 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2277 bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
2278
2279 /* let the bios control the backlight */
2280 bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
2281
2282 /* tell the bios not to handle mode switching */
2283 bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
2284 RADEON_ACC_MODE_CHANGE);
2285
2286 /* tell the bios a driver is loaded */
2287 bios_7_scratch |= RADEON_DRV_LOADED;
2288
2289 WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
2290 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
2291 WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
2292}
2293
2294void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
2295{
2296 struct drm_device *dev = encoder->dev;
2297 struct radeon_device *rdev = dev->dev_private;
2298 uint32_t bios_6_scratch;
2299
2300 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2301
2302 if (lock)
2303 bios_6_scratch |= RADEON_DRIVER_CRITICAL;
2304 else
2305 bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
2306
2307 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
2308}
2309
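/*
 * BIOS scratch register conventions, as used by the helpers below:
 * BIOS_4_SCRATCH tracks which outputs are physically attached,
 * BIOS_5_SCRATCH tracks which outputs are active (and which CRTC drives
 * them), and BIOS_6_SCRATCH carries DPMS state plus driver/BIOS handshake
 * flags.
 */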
2310void
2311radeon_combios_connected_scratch_regs(struct drm_connector *connector,
2312 struct drm_encoder *encoder,
2313 bool connected)
2314{
2315 struct drm_device *dev = connector->dev;
2316 struct radeon_device *rdev = dev->dev_private;
2317 struct radeon_connector *radeon_connector =
2318 to_radeon_connector(connector);
2319 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2320 uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
2321 uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
2322
2323 if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
2324 (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
2325 if (connected) {
2326 DRM_DEBUG("TV1 connected\n");
2327 /* fix me */
2328 bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
2329 /*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
2330 bios_5_scratch |= RADEON_TV1_ON;
2331 bios_5_scratch |= RADEON_ACC_REQ_TV1;
2332 } else {
2333 DRM_DEBUG("TV1 disconnected\n");
2334 bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
2335 bios_5_scratch &= ~RADEON_TV1_ON;
2336 bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
2337 }
2338 }
2339 if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
2340 (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
2341 if (connected) {
2342 DRM_DEBUG("LCD1 connected\n");
2343 bios_4_scratch |= RADEON_LCD1_ATTACHED;
2344 bios_5_scratch |= RADEON_LCD1_ON;
2345 bios_5_scratch |= RADEON_ACC_REQ_LCD1;
2346 } else {
2347 DRM_DEBUG("LCD1 disconnected\n");
2348 bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
2349 bios_5_scratch &= ~RADEON_LCD1_ON;
2350 bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
2351 }
2352 }
2353 if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
2354 (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
2355 if (connected) {
2356 DRM_DEBUG("CRT1 connected\n");
2357 bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
2358 bios_5_scratch |= RADEON_CRT1_ON;
2359 bios_5_scratch |= RADEON_ACC_REQ_CRT1;
2360 } else {
2361 DRM_DEBUG("CRT1 disconnected\n");
2362 bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
2363 bios_5_scratch &= ~RADEON_CRT1_ON;
2364 bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
2365 }
2366 }
2367 if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
2368 (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
2369 if (connected) {
2370 DRM_DEBUG("CRT2 connected\n");
2371 bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
2372 bios_5_scratch |= RADEON_CRT2_ON;
2373 bios_5_scratch |= RADEON_ACC_REQ_CRT2;
2374 } else {
2375 DRM_DEBUG("CRT2 disconnected\n");
2376 bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
2377 bios_5_scratch &= ~RADEON_CRT2_ON;
2378 bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
2379 }
2380 }
2381 if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
2382 (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
2383 if (connected) {
2384 DRM_DEBUG("DFP1 connected\n");
2385 bios_4_scratch |= RADEON_DFP1_ATTACHED;
2386 bios_5_scratch |= RADEON_DFP1_ON;
2387 bios_5_scratch |= RADEON_ACC_REQ_DFP1;
2388 } else {
2389 DRM_DEBUG("DFP1 disconnected\n");
2390 bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
2391 bios_5_scratch &= ~RADEON_DFP1_ON;
2392 bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
2393 }
2394 }
2395 if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
2396 (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
2397 if (connected) {
2398 DRM_DEBUG("DFP2 connected\n");
2399 bios_4_scratch |= RADEON_DFP2_ATTACHED;
2400 bios_5_scratch |= RADEON_DFP2_ON;
2401 bios_5_scratch |= RADEON_ACC_REQ_DFP2;
2402 } else {
2403 DRM_DEBUG("DFP2 disconnected\n");
2404 bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
2405 bios_5_scratch &= ~RADEON_DFP2_ON;
2406 bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
2407 }
2408 }
2409 WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
2410 WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
2411}
2412
2413void
2414radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
2415{
2416 struct drm_device *dev = encoder->dev;
2417 struct radeon_device *rdev = dev->dev_private;
2418 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2419 uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
2420
2421 if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
2422 bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
2423 bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
2424 }
2425 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
2426 bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
2427 bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
2428 }
2429 if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
2430 bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
2431 bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
2432 }
2433 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
2434 bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
2435 bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
2436 }
2437 if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
2438 bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
2439 bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
2440 }
2441 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
2442 bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
2443 bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
2444 }
2445 WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
2446}
2447
2448void
2449radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
2450{
2451 struct drm_device *dev = encoder->dev;
2452 struct radeon_device *rdev = dev->dev_private;
2453 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2454 uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2455
2456 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
2457 if (on)
2458 bios_6_scratch |= RADEON_TV_DPMS_ON;
2459 else
2460 bios_6_scratch &= ~RADEON_TV_DPMS_ON;
2461 }
2462 if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2463 if (on)
2464 bios_6_scratch |= RADEON_CRT_DPMS_ON;
2465 else
2466 bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
2467 }
2468 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2469 if (on)
2470 bios_6_scratch |= RADEON_LCD_DPMS_ON;
2471 else
2472 bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
2473 }
2474 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
2475 if (on)
2476 bios_6_scratch |= RADEON_DFP_DPMS_ON;
2477 else
2478 bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
2479 }
2480 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
2481}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644
index 000000000000..70ede6a52d4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -0,0 +1,603 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_edid.h"
28#include "drm_crtc_helper.h"
29#include "radeon_drm.h"
30#include "radeon.h"
31
32extern void
33radeon_combios_connected_scratch_regs(struct drm_connector *connector,
34 struct drm_encoder *encoder,
35 bool connected);
36extern void
37radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
38 struct drm_encoder *encoder,
39 bool connected);
40
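/*
 * Update the BIOS scratch registers for every encoder that can drive this
 * connector: only the "best" encoder is reported as connected, everything
 * else is flagged disconnected, routed through the ATOM or COMBIOS helper
 * depending on the BIOS type.
 */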
41static void
42radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
43{
44 struct drm_device *dev = connector->dev;
45 struct radeon_device *rdev = dev->dev_private;
46 struct drm_encoder *best_encoder = NULL;
47 struct drm_encoder *encoder = NULL;
48 struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
49 struct drm_mode_object *obj;
50 bool connected;
51 int i;
52
53 best_encoder = connector_funcs->best_encoder(connector);
54
55 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
56 if (connector->encoder_ids[i] == 0)
57 break;
58
59 obj = drm_mode_object_find(connector->dev,
60 connector->encoder_ids[i],
61 DRM_MODE_OBJECT_ENCODER);
62 if (!obj)
63 continue;
64
65 encoder = obj_to_encoder(obj);
66
67 if ((encoder == best_encoder) && (status == connector_status_connected))
68 connected = true;
69 else
70 connected = false;
71
72 if (rdev->is_atom_bios)
73 radeon_atombios_connected_scratch_regs(connector, encoder, connected);
74 else
75 radeon_combios_connected_scratch_regs(connector, encoder, connected);
76
77 }
78}
79
80struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
81{
82 int enc_id = connector->encoder_ids[0];
83 struct drm_mode_object *obj;
84 struct drm_encoder *encoder;
85
86 /* pick the encoder ids */
87 if (enc_id) {
88 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
89 if (!obj)
90 return NULL;
91 encoder = obj_to_encoder(obj);
92 return encoder;
93 }
94 return NULL;
95}
96
97static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
98{
99 struct drm_device *dev = encoder->dev;
100 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
101 struct drm_display_mode *mode = NULL;
102 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
103
104 if (native_mode->panel_xres != 0 &&
105 native_mode->panel_yres != 0 &&
106 native_mode->dotclock != 0) {
107 mode = drm_mode_create(dev);
108
109 mode->hdisplay = native_mode->panel_xres;
110 mode->vdisplay = native_mode->panel_yres;
111
112 mode->htotal = mode->hdisplay + native_mode->hblank;
113 mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
114 mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
115 mode->vtotal = mode->vdisplay + native_mode->vblank;
116 mode->vsync_start = mode->vdisplay + native_mode->voverplus;
117 mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
118 mode->clock = native_mode->dotclock;
119 mode->flags = 0;
120
121 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
122 drm_mode_set_name(mode);
123
124 DRM_DEBUG("Adding native panel mode %s\n", mode->name);
125 }
126 return mode;
127}
128
129int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
130 uint64_t val)
131{
132 return 0;
133}
134
135
136static int radeon_lvds_get_modes(struct drm_connector *connector)
137{
138 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
139 struct drm_encoder *encoder;
140 int ret = 0;
141 struct drm_display_mode *mode;
142
143 if (radeon_connector->ddc_bus) {
144 ret = radeon_ddc_get_modes(radeon_connector);
145 if (ret > 0) {
146 return ret;
147 }
148 }
149
150 encoder = radeon_best_single_encoder(connector);
151 if (!encoder)
152 return 0;
153
154 /* we have no EDID modes */
155 mode = radeon_fp_native_mode(encoder);
156 if (mode) {
157 ret = 1;
158 drm_mode_probed_add(connector, mode);
159 }
160 return ret;
161}
162
163static int radeon_lvds_mode_valid(struct drm_connector *connector,
164 struct drm_display_mode *mode)
165{
166 return MODE_OK;
167}
168
169static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
170{
171 enum drm_connector_status ret = connector_status_connected;
172 /* check acpi lid status ??? */
173 radeon_connector_update_scratch_regs(connector, ret);
174 return ret;
175}
176
177static void radeon_connector_destroy(struct drm_connector *connector)
178{
179 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
180
181 if (radeon_connector->ddc_bus)
182 radeon_i2c_destroy(radeon_connector->ddc_bus);
183 kfree(radeon_connector->con_priv);
184 drm_sysfs_connector_remove(connector);
185 drm_connector_cleanup(connector);
186 kfree(connector);
187}
188
189struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
190 .get_modes = radeon_lvds_get_modes,
191 .mode_valid = radeon_lvds_mode_valid,
192 .best_encoder = radeon_best_single_encoder,
193};
194
195struct drm_connector_funcs radeon_lvds_connector_funcs = {
196 .dpms = drm_helper_connector_dpms,
197 .detect = radeon_lvds_detect,
198 .fill_modes = drm_helper_probe_single_connector_modes,
199 .destroy = radeon_connector_destroy,
200 .set_property = radeon_connector_set_property,
201};
202
203static int radeon_vga_get_modes(struct drm_connector *connector)
204{
205 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
206 int ret;
207
208 ret = radeon_ddc_get_modes(radeon_connector);
209
210 return ret;
211}
212
213static int radeon_vga_mode_valid(struct drm_connector *connector,
214 struct drm_display_mode *mode)
215{
216
217 return MODE_OK;
218}
219
220static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
221{
222 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
223 struct drm_encoder *encoder;
224 struct drm_encoder_helper_funcs *encoder_funcs;
225 bool dret;
226 enum drm_connector_status ret = connector_status_disconnected;
227
228 radeon_i2c_do_lock(radeon_connector, 1);
229 dret = radeon_ddc_probe(radeon_connector);
230 radeon_i2c_do_lock(radeon_connector, 0);
231 if (dret)
232 ret = connector_status_connected;
233 else {
234		/* if EDID fails, try load detection */
235 encoder = radeon_best_single_encoder(connector);
236 if (!encoder)
237 ret = connector_status_disconnected;
238 else {
239 encoder_funcs = encoder->helper_private;
240 ret = encoder_funcs->detect(encoder, connector);
241 }
242 }
243
244 radeon_connector_update_scratch_regs(connector, ret);
245 return ret;
246}
247
248struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
249 .get_modes = radeon_vga_get_modes,
250 .mode_valid = radeon_vga_mode_valid,
251 .best_encoder = radeon_best_single_encoder,
252};
253
254struct drm_connector_funcs radeon_vga_connector_funcs = {
255 .dpms = drm_helper_connector_dpms,
256 .detect = radeon_vga_detect,
257 .fill_modes = drm_helper_probe_single_connector_modes,
258 .destroy = radeon_connector_destroy,
259 .set_property = radeon_connector_set_property,
260};
261
262static int radeon_dvi_get_modes(struct drm_connector *connector)
263{
264 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
265 int ret;
266
267 ret = radeon_ddc_get_modes(radeon_connector);
268 /* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */
269 radeon_connector_update_scratch_regs(connector, connector_status_connected);
270 return ret;
271}
272
273static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
274{
275 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
276 struct drm_encoder *encoder;
277 struct drm_encoder_helper_funcs *encoder_funcs;
278 struct drm_mode_object *obj;
279 int i;
280 enum drm_connector_status ret = connector_status_disconnected;
281 bool dret;
282
283 radeon_i2c_do_lock(radeon_connector, 1);
284 dret = radeon_ddc_probe(radeon_connector);
285 radeon_i2c_do_lock(radeon_connector, 0);
286 if (dret)
287 ret = connector_status_connected;
288 else {
289 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
290 if (connector->encoder_ids[i] == 0)
291 break;
292
293 obj = drm_mode_object_find(connector->dev,
294 connector->encoder_ids[i],
295 DRM_MODE_OBJECT_ENCODER);
296 if (!obj)
297 continue;
298
299 encoder = obj_to_encoder(obj);
300
301 encoder_funcs = encoder->helper_private;
302 if (encoder_funcs->detect) {
303 ret = encoder_funcs->detect(encoder, connector);
304 if (ret == connector_status_connected) {
305 radeon_connector->use_digital = 0;
306 break;
307 }
308 }
309 }
310 }
311
312 /* updated in get modes as well since we need to know if it's analog or digital */
313 radeon_connector_update_scratch_regs(connector, ret);
314 return ret;
315}
316
317/* okay need to be smart in here about which encoder to pick */
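/* prefer a TMDS encoder when the connector was detected as digital,
 * otherwise a (TV)DAC encoder, and finally fall back to the first
 * encoder listed for the connector */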
318struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
319{
320 int enc_id = connector->encoder_ids[0];
321 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
322 struct drm_mode_object *obj;
323 struct drm_encoder *encoder;
324 int i;
325 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
326 if (connector->encoder_ids[i] == 0)
327 break;
328
329 obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
330 if (!obj)
331 continue;
332
333 encoder = obj_to_encoder(obj);
334
335 if (radeon_connector->use_digital) {
336 if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
337 return encoder;
338 } else {
339 if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
340 encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
341 return encoder;
342 }
343 }
344
345 /* see if we have a default encoder TODO */
346
347	/* then check use digital */
348 /* pick the first one */
349 if (enc_id) {
350 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
351 if (!obj)
352 return NULL;
353 encoder = obj_to_encoder(obj);
354 return encoder;
355 }
356 return NULL;
357}
358
359struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
360 .get_modes = radeon_dvi_get_modes,
361 .mode_valid = radeon_vga_mode_valid,
362 .best_encoder = radeon_dvi_encoder,
363};
364
365struct drm_connector_funcs radeon_dvi_connector_funcs = {
366 .dpms = drm_helper_connector_dpms,
367 .detect = radeon_dvi_detect,
368 .fill_modes = drm_helper_probe_single_connector_modes,
369 .set_property = radeon_connector_set_property,
370 .destroy = radeon_connector_destroy,
371};
372
373void
374radeon_add_atom_connector(struct drm_device *dev,
375 uint32_t connector_id,
376 uint32_t supported_device,
377 int connector_type,
378 struct radeon_i2c_bus_rec *i2c_bus,
379 bool linkb,
380 uint32_t igp_lane_info)
381{
382 struct drm_connector *connector;
383 struct radeon_connector *radeon_connector;
384 struct radeon_connector_atom_dig *radeon_dig_connector;
385 uint32_t subpixel_order = SubPixelNone;
386
387 /* fixme - tv/cv/din */
388 if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
389 (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
390 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
391 (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
392 return;
393
394 /* see if we already added it */
395 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
396 radeon_connector = to_radeon_connector(connector);
397 if (radeon_connector->connector_id == connector_id) {
398 radeon_connector->devices |= supported_device;
399 return;
400 }
401 }
402
403 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
404 if (!radeon_connector)
405 return;
406
407 connector = &radeon_connector->base;
408
409 radeon_connector->connector_id = connector_id;
410 radeon_connector->devices = supported_device;
411 switch (connector_type) {
412 case DRM_MODE_CONNECTOR_VGA:
413 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
414 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
415 if (i2c_bus->valid) {
416 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
417 if (!radeon_connector->ddc_bus)
418 goto failed;
419 }
420 break;
421 case DRM_MODE_CONNECTOR_DVIA:
422 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
423 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
424 if (i2c_bus->valid) {
425 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
426 if (!radeon_connector->ddc_bus)
427 goto failed;
428 }
429 break;
430 case DRM_MODE_CONNECTOR_DVII:
431 case DRM_MODE_CONNECTOR_DVID:
432 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
433 if (!radeon_dig_connector)
434 goto failed;
435 radeon_dig_connector->linkb = linkb;
436 radeon_dig_connector->igp_lane_info = igp_lane_info;
437 radeon_connector->con_priv = radeon_dig_connector;
438 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
439 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
440 if (i2c_bus->valid) {
441 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
442 if (!radeon_connector->ddc_bus)
443 goto failed;
444 }
445 subpixel_order = SubPixelHorizontalRGB;
446 break;
447 case DRM_MODE_CONNECTOR_HDMIA:
448 case DRM_MODE_CONNECTOR_HDMIB:
449 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
450 if (!radeon_dig_connector)
451 goto failed;
452 radeon_dig_connector->linkb = linkb;
453 radeon_dig_connector->igp_lane_info = igp_lane_info;
454 radeon_connector->con_priv = radeon_dig_connector;
455 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
456 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
457 if (i2c_bus->valid) {
458 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
459 if (!radeon_connector->ddc_bus)
460 goto failed;
461 }
462 subpixel_order = SubPixelHorizontalRGB;
463 break;
464 case DRM_MODE_CONNECTOR_DisplayPort:
465 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
466 if (!radeon_dig_connector)
467 goto failed;
468 radeon_dig_connector->linkb = linkb;
469 radeon_dig_connector->igp_lane_info = igp_lane_info;
470 radeon_connector->con_priv = radeon_dig_connector;
471 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
472 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
473 if (i2c_bus->valid) {
474 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
475 if (!radeon_connector->ddc_bus)
476 goto failed;
477 }
478 subpixel_order = SubPixelHorizontalRGB;
479 break;
480 case DRM_MODE_CONNECTOR_SVIDEO:
481 case DRM_MODE_CONNECTOR_Composite:
482 case DRM_MODE_CONNECTOR_9PinDIN:
483 break;
484 case DRM_MODE_CONNECTOR_LVDS:
485 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
486 if (!radeon_dig_connector)
487 goto failed;
488 radeon_dig_connector->linkb = linkb;
489 radeon_dig_connector->igp_lane_info = igp_lane_info;
490 radeon_connector->con_priv = radeon_dig_connector;
491 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
492 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
493 if (i2c_bus->valid) {
494 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
495 if (!radeon_connector->ddc_bus)
496 goto failed;
497 }
498 subpixel_order = SubPixelHorizontalRGB;
499 break;
500 }
501
502 connector->display_info.subpixel_order = subpixel_order;
503 drm_sysfs_connector_add(connector);
504 return;
505
506failed:
507 if (radeon_connector->ddc_bus)
508 radeon_i2c_destroy(radeon_connector->ddc_bus);
509 drm_connector_cleanup(connector);
510 kfree(connector);
511}
512
513void
514radeon_add_legacy_connector(struct drm_device *dev,
515 uint32_t connector_id,
516 uint32_t supported_device,
517 int connector_type,
518 struct radeon_i2c_bus_rec *i2c_bus)
519{
520 struct drm_connector *connector;
521 struct radeon_connector *radeon_connector;
522 uint32_t subpixel_order = SubPixelNone;
523
524 /* fixme - tv/cv/din */
525 if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
526 (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
527 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
528 (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
529 return;
530
531 /* see if we already added it */
532 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
533 radeon_connector = to_radeon_connector(connector);
534 if (radeon_connector->connector_id == connector_id) {
535 radeon_connector->devices |= supported_device;
536 return;
537 }
538 }
539
540 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
541 if (!radeon_connector)
542 return;
543
544 connector = &radeon_connector->base;
545
546 radeon_connector->connector_id = connector_id;
547 radeon_connector->devices = supported_device;
548 switch (connector_type) {
549 case DRM_MODE_CONNECTOR_VGA:
550 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
551 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
552 if (i2c_bus->valid) {
553 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
554 if (!radeon_connector->ddc_bus)
555 goto failed;
556 }
557 break;
558 case DRM_MODE_CONNECTOR_DVIA:
559 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
560 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
561 if (i2c_bus->valid) {
562 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
563 if (!radeon_connector->ddc_bus)
564 goto failed;
565 }
566 break;
567 case DRM_MODE_CONNECTOR_DVII:
568 case DRM_MODE_CONNECTOR_DVID:
569 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
570 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
571 if (i2c_bus->valid) {
572 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
573 if (!radeon_connector->ddc_bus)
574 goto failed;
575 }
576 subpixel_order = SubPixelHorizontalRGB;
577 break;
578 case DRM_MODE_CONNECTOR_SVIDEO:
579 case DRM_MODE_CONNECTOR_Composite:
580 case DRM_MODE_CONNECTOR_9PinDIN:
581 break;
582 case DRM_MODE_CONNECTOR_LVDS:
583 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
584 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
585 if (i2c_bus->valid) {
586 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
587 if (!radeon_connector->ddc_bus)
588 goto failed;
589 }
590 subpixel_order = SubPixelHorizontalRGB;
591 break;
592 }
593
594 connector->display_info.subpixel_order = subpixel_order;
595 drm_sysfs_connector_add(connector);
596 return;
597
598failed:
599 if (radeon_connector->ddc_bus)
600 radeon_i2c_destroy(radeon_connector->ddc_bus);
601 drm_connector_cleanup(connector);
602 kfree(connector);
603}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644
index 000000000000..b843f9bdfb14
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright 2008 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27#include "drmP.h"
28#include "radeon_drm.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32void r100_cs_dump_packet(struct radeon_cs_parser *p,
33 struct radeon_cs_packet *pkt);
34
35int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
36{
37 struct drm_device *ddev = p->rdev->ddev;
38 struct radeon_cs_chunk *chunk;
39 unsigned i, j;
40 bool duplicate;
41
42 if (p->chunk_relocs_idx == -1) {
43 return 0;
44 }
45 chunk = &p->chunks[p->chunk_relocs_idx];
46	/* FIXME: we assume that each reloc uses 4 dwords */
47 p->nrelocs = chunk->length_dw / 4;
48 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
49 if (p->relocs_ptr == NULL) {
50 return -ENOMEM;
51 }
52 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
53 if (p->relocs == NULL) {
54 return -ENOMEM;
55 }
56 for (i = 0; i < p->nrelocs; i++) {
57 struct drm_radeon_cs_reloc *r;
58
59 duplicate = false;
60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
61 for (j = 0; j < p->nrelocs; j++) {
62 if (r->handle == p->relocs[j].handle) {
63 p->relocs_ptr[i] = &p->relocs[j];
64 duplicate = true;
65 break;
66 }
67 }
68 if (!duplicate) {
69 p->relocs[i].gobj = drm_gem_object_lookup(ddev,
70 p->filp,
71 r->handle);
72 if (p->relocs[i].gobj == NULL) {
73 DRM_ERROR("gem object lookup failed 0x%x\n",
74 r->handle);
75 return -EINVAL;
76 }
77 p->relocs_ptr[i] = &p->relocs[i];
78 p->relocs[i].robj = p->relocs[i].gobj->driver_private;
79 p->relocs[i].lobj.robj = p->relocs[i].robj;
80 p->relocs[i].lobj.rdomain = r->read_domains;
81 p->relocs[i].lobj.wdomain = r->write_domain;
82 p->relocs[i].handle = r->handle;
83 p->relocs[i].flags = r->flags;
84 INIT_LIST_HEAD(&p->relocs[i].lobj.list);
85 radeon_object_list_add_object(&p->relocs[i].lobj,
86 &p->validated);
87 }
88 }
89 return radeon_object_list_validate(&p->validated, p->ib->fence);
90}
91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{
94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr;
96 unsigned size, i;
97
98 if (!cs->num_chunks) {
99 return 0;
100 }
101 /* get chunks */
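	/* userspace hands us an array of pointers to chunk descriptors;
	 * copy that array first, then each descriptor and its data */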
102 INIT_LIST_HEAD(&p->validated);
103 p->idx = 0;
104 p->chunk_ib_idx = -1;
105 p->chunk_relocs_idx = -1;
106 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
107 if (p->chunks_array == NULL) {
108 return -ENOMEM;
109 }
110 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
111 if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
112 sizeof(uint64_t)*cs->num_chunks)) {
113 return -EFAULT;
114 }
115 p->nchunks = cs->num_chunks;
116 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
117 if (p->chunks == NULL) {
118 return -ENOMEM;
119 }
120 for (i = 0; i < p->nchunks; i++) {
121 struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
122 struct drm_radeon_cs_chunk user_chunk;
123 uint32_t __user *cdata;
124
125 chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
126 if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
127 sizeof(struct drm_radeon_cs_chunk))) {
128 return -EFAULT;
129 }
130 p->chunks[i].chunk_id = user_chunk.chunk_id;
131 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
132 p->chunk_relocs_idx = i;
133 }
134 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
135 p->chunk_ib_idx = i;
136 }
137 p->chunks[i].length_dw = user_chunk.length_dw;
138 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
139
140 p->chunks[i].kdata = NULL;
141 size = p->chunks[i].length_dw * sizeof(uint32_t);
142 p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
143 if (p->chunks[i].kdata == NULL) {
144 return -ENOMEM;
145 }
146 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
147 return -EFAULT;
148 }
149 }
150 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
151 DRM_ERROR("cs IB too big: %d\n",
152 p->chunks[p->chunk_ib_idx].length_dw);
153 return -EINVAL;
154 }
155 return 0;
156}
157
158/**
159 * radeon_cs_parser_fini() - clean parser states
160 * @parser: parser structure holding parsing context.
161 * @error: error number
162 *
163 * If error is set, unvalidate the buffers; otherwise just free the memory
164 * used by the parsing context.
165 **/
166static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
167{
168 unsigned i;
169
170 if (error) {
171 radeon_object_list_unvalidate(&parser->validated);
172 } else {
173 radeon_object_list_clean(&parser->validated);
174 }
175 for (i = 0; i < parser->nrelocs; i++) {
176 if (parser->relocs[i].gobj) {
177 mutex_lock(&parser->rdev->ddev->struct_mutex);
178 drm_gem_object_unreference(parser->relocs[i].gobj);
179 mutex_unlock(&parser->rdev->ddev->struct_mutex);
180 }
181 }
182 kfree(parser->relocs);
183 kfree(parser->relocs_ptr);
184 for (i = 0; i < parser->nchunks; i++) {
185 kfree(parser->chunks[i].kdata);
186 }
187 kfree(parser->chunks);
188 kfree(parser->chunks_array);
189 radeon_ib_free(parser->rdev, &parser->ib);
190}
191
192int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
193{
194 struct radeon_device *rdev = dev->dev_private;
195 struct radeon_cs_parser parser;
196 struct radeon_cs_chunk *ib_chunk;
197 int r;
198
199 mutex_lock(&rdev->cs_mutex);
200 if (rdev->gpu_lockup) {
201 mutex_unlock(&rdev->cs_mutex);
202 return -EINVAL;
203 }
204 /* initialize parser */
205 memset(&parser, 0, sizeof(struct radeon_cs_parser));
206 parser.filp = filp;
207 parser.rdev = rdev;
208 r = radeon_cs_parser_init(&parser, data);
209 if (r) {
210 DRM_ERROR("Failed to initialize parser !\n");
211 radeon_cs_parser_fini(&parser, r);
212 mutex_unlock(&rdev->cs_mutex);
213 return r;
214 }
215 r = radeon_ib_get(rdev, &parser.ib);
216 if (r) {
217 DRM_ERROR("Failed to get ib !\n");
218 radeon_cs_parser_fini(&parser, r);
219 mutex_unlock(&rdev->cs_mutex);
220 return r;
221 }
222 r = radeon_cs_parser_relocs(&parser);
223 if (r) {
224 DRM_ERROR("Failed to parse relocation !\n");
225 radeon_cs_parser_fini(&parser, r);
226 mutex_unlock(&rdev->cs_mutex);
227 return r;
228 }
229 /* Copy the packet into the IB, the parser will read from the
230 * input memory (cached) and write to the IB (which can be
231 * uncached). */
232 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
233 parser.ib->length_dw = ib_chunk->length_dw;
234 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
235 r = radeon_cs_parse(&parser);
236 if (r) {
237 DRM_ERROR("Invalid command stream !\n");
238 radeon_cs_parser_fini(&parser, r);
239 mutex_unlock(&rdev->cs_mutex);
240 return r;
241 }
242 r = radeon_ib_schedule(rdev, parser.ib);
243 if (r) {
244		DRM_ERROR("Failed to schedule IB !\n");
245 }
246 radeon_cs_parser_fini(&parser, r);
247 mutex_unlock(&rdev->cs_mutex);
248 return r;
249}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644
index 000000000000..5232441f119b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -0,0 +1,252 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#define CURSOR_WIDTH 64
31#define CURSOR_HEIGHT 64
32
33static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
34{
35 struct radeon_device *rdev = crtc->dev->dev_private;
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 uint32_t cur_lock;
38
39 if (ASIC_IS_AVIVO(rdev)) {
40 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock)
42 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
43 else
44 cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
45 WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
46 } else {
47 cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
48 if (lock)
49 cur_lock |= RADEON_CUR_LOCK;
50 else
51 cur_lock &= ~RADEON_CUR_LOCK;
52 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
53 }
54}
55
56static void radeon_hide_cursor(struct drm_crtc *crtc)
57{
58 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
59 struct radeon_device *rdev = crtc->dev->dev_private;
60
61 if (ASIC_IS_AVIVO(rdev)) {
62 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
63 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
64 } else {
65 switch (radeon_crtc->crtc_id) {
66 case 0:
67 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
68 break;
69 case 1:
70 WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
71 break;
72 default:
73 return;
74 }
75 WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
76 }
77}
78
79static void radeon_show_cursor(struct drm_crtc *crtc)
80{
81 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
82 struct radeon_device *rdev = crtc->dev->dev_private;
83
84 if (ASIC_IS_AVIVO(rdev)) {
85 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
86 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
87 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
88 } else {
89 switch (radeon_crtc->crtc_id) {
90 case 0:
91 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
92 break;
93 case 1:
94 WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
95 break;
96 default:
97 return;
98 }
99
100 WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
101 (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
102 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
103 }
104}
105
106static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
107 uint32_t gpu_addr)
108{
109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
110 struct radeon_device *rdev = crtc->dev->dev_private;
111
112 if (ASIC_IS_AVIVO(rdev))
113 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
114 else
115 /* offset is from DISP(2)_BASE_ADDRESS */
116 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
117}
118
119int radeon_crtc_cursor_set(struct drm_crtc *crtc,
120 struct drm_file *file_priv,
121 uint32_t handle,
122 uint32_t width,
123 uint32_t height)
124{
125 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
126 struct drm_gem_object *obj;
127 uint64_t gpu_addr;
128 int ret;
129
130 if (!handle) {
131 /* turn off cursor */
132 radeon_hide_cursor(crtc);
133 obj = NULL;
134 goto unpin;
135 }
136
137 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
138 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
139 return -EINVAL;
140 }
141
142 radeon_crtc->cursor_width = width;
143 radeon_crtc->cursor_height = height;
144
145 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
146 if (!obj) {
147 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
148 return -EINVAL;
149 }
150
151 ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
152 if (ret)
153 goto fail;
154
155 radeon_lock_cursor(crtc, true);
156 /* XXX only 27 bit offset for legacy cursor */
157 radeon_set_cursor(crtc, obj, gpu_addr);
158 radeon_show_cursor(crtc);
159 radeon_lock_cursor(crtc, false);
160
161unpin:
162 if (radeon_crtc->cursor_bo) {
163 radeon_gem_object_unpin(radeon_crtc->cursor_bo);
164 mutex_lock(&crtc->dev->struct_mutex);
165 drm_gem_object_unreference(radeon_crtc->cursor_bo);
166 mutex_unlock(&crtc->dev->struct_mutex);
167 }
168
169 radeon_crtc->cursor_bo = obj;
170 return 0;
171fail:
172 mutex_lock(&crtc->dev->struct_mutex);
173 drm_gem_object_unreference(obj);
174 mutex_unlock(&crtc->dev->struct_mutex);
175
176 return 0;
177}
178
179int radeon_crtc_cursor_move(struct drm_crtc *crtc,
180 int x, int y)
181{
182 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
183 struct radeon_device *rdev = crtc->dev->dev_private;
184 int xorigin = 0, yorigin = 0;
185
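	/* a negative position means the cursor hangs off the top/left edge;
	 * shift the hot spot origin instead and clamp the position to 0 */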
186 if (x < 0)
187 xorigin = -x + 1;
188 if (y < 0)
189 yorigin = -y + 1;
190 if (xorigin >= CURSOR_WIDTH)
191 xorigin = CURSOR_WIDTH - 1;
192 if (yorigin >= CURSOR_HEIGHT)
193 yorigin = CURSOR_HEIGHT - 1;
194
195 radeon_lock_cursor(crtc, true);
196 if (ASIC_IS_AVIVO(rdev)) {
197 int w = radeon_crtc->cursor_width;
198 int i = 0;
199 struct drm_crtc *crtc_p;
200
201		/* avivo cursors are offset into the total surface */
202 x += crtc->x;
203 y += crtc->y;
204 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
205
206		/* avivo cursor image can't end on a 128 pixel boundary or
207 * go past the end of the frame if both crtcs are enabled
208 */
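		/* Illustrative example (hypothetical numbers): with w = 64, a
		 * cursor_end of 1064 is left untouched, while a cursor_end of
		 * 1024 (a 128 pixel multiple) gets the width trimmed to 63 by
		 * the adjustment below. */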
209 list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
210 if (crtc_p->enabled)
211 i++;
212 }
213 if (i > 1) {
214 int cursor_end, frame_end;
215
216 cursor_end = x - xorigin + w;
217 frame_end = crtc->x + crtc->mode.crtc_hdisplay;
218 if (cursor_end >= frame_end) {
219 w = w - (cursor_end - frame_end);
220 if (!(frame_end & 0x7f))
221 w--;
222 } else {
223 if (!(cursor_end & 0x7f))
224 w--;
225 }
226 if (w <= 0)
227 w = 1;
228 }
229
230 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
231 ((xorigin ? 0 : x) << 16) |
232 (yorigin ? 0 : y));
233 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
234 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
235 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
236 } else {
237 if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
238 y *= 2;
239
240 WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
241 (RADEON_CUR_LOCK
242 | (xorigin << 16)
243 | yorigin));
244 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
245 (RADEON_CUR_LOCK
246 | ((xorigin ? 0 : x) << 16)
247 | (yorigin ? 0 : y)));
248 }
249 radeon_lock_cursor(crtc, false);
250
251 return 0;
252}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644
index 000000000000..5fd2b639bf66
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -0,0 +1,813 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
29#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h>
32#include "radeon_reg.h"
33#include "radeon.h"
34#include "radeon_asic.h"
35#include "atom.h"
36
37/*
38 * GPU scratch registers helpers function.
39 */
40static void radeon_scratch_init(struct radeon_device *rdev)
41{
42 int i;
43
44 /* FIXME: check this out */
45 if (rdev->family < CHIP_R300) {
46 rdev->scratch.num_reg = 5;
47 } else {
48 rdev->scratch.num_reg = 7;
49 }
50 for (i = 0; i < rdev->scratch.num_reg; i++) {
51 rdev->scratch.free[i] = true;
52 rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
53 }
54}
55
56int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
57{
58 int i;
59
60 for (i = 0; i < rdev->scratch.num_reg; i++) {
61 if (rdev->scratch.free[i]) {
62 rdev->scratch.free[i] = false;
63 *reg = rdev->scratch.reg[i];
64 return 0;
65 }
66 }
67 return -EINVAL;
68}
69
70void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
71{
72 int i;
73
74 for (i = 0; i < rdev->scratch.num_reg; i++) {
75 if (rdev->scratch.reg[i] == reg) {
76 rdev->scratch.free[i] = true;
77 return;
78 }
79 }
80}
81
82/*
83 * MC common functions
84 */
85int radeon_mc_setup(struct radeon_device *rdev)
86{
87 uint32_t tmp;
88
89	/* Some chips have an "issue" with the memory controller: the
90	 * location must be aligned to the size. We just align it down;
91	 * too bad if we walk over the top of system memory, we don't
92	 * use DMA without a remap anyway.
93	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP.
94	 */
95	/* FGLRX seems to set it up like this: VRAM at 0, then GART.
96	 */
97 /*
98 * Note: from R6xx the address space is 40bits but here we only
99 * use 32bits (still have to see a card which would exhaust 4G
100 * address space).
101 */
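	/* Illustrative example (hypothetical sizes): vram_location = 0 with a
	 * 256MB VRAM and a 512MB GTT gives tmp = 256MB, which the align-up
	 * below rounds to the next 512MB boundary, so the GTT ends up at
	 * 512MB and still fits under the 32bit limit. */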
102 if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
103		/* vram location was already set up, try to put gtt after it
104		 * if it fits */
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size;
106 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
107 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
108 rdev->mc.gtt_location = tmp;
109 } else {
110 if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
111 printk(KERN_ERR "[drm] GTT too big to fit "
112 "before or after vram location.\n");
113 return -EINVAL;
114 }
115 rdev->mc.gtt_location = 0;
116 }
117 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
118		/* gtt location was already set up, try to put vram before it
119		 * if it fits */
120 if (rdev->mc.vram_size < rdev->mc.gtt_location) {
121 rdev->mc.vram_location = 0;
122 } else {
123 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
124 tmp += (rdev->mc.vram_size - 1);
125 tmp &= ~(rdev->mc.vram_size - 1);
126 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
127 rdev->mc.vram_location = tmp;
128 } else {
129 printk(KERN_ERR "[drm] vram too big to fit "
130 "before or after GTT location.\n");
131 return -EINVAL;
132 }
133 }
134 } else {
135 rdev->mc.vram_location = 0;
136 rdev->mc.gtt_location = rdev->mc.vram_size;
137 }
138 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
139 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
140 rdev->mc.vram_location,
141 rdev->mc.vram_location + rdev->mc.vram_size - 1);
142 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
143 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
144 rdev->mc.gtt_location,
145 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
146 return 0;
147}
148
149
150/*
151 * GPU helpers function.
152 */
153static bool radeon_card_posted(struct radeon_device *rdev)
154{
155 uint32_t reg;
156
157 /* first check CRTCs */
158 if (ASIC_IS_AVIVO(rdev)) {
159 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
160 RREG32(AVIVO_D2CRTC_CONTROL);
161 if (reg & AVIVO_CRTC_EN) {
162 return true;
163 }
164 } else {
165 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
166 RREG32(RADEON_CRTC2_GEN_CNTL);
167 if (reg & RADEON_CRTC_EN) {
168 return true;
169 }
170 }
171
172 /* then check MEM_SIZE, in case the crtcs are off */
173 if (rdev->family >= CHIP_R600)
174 reg = RREG32(R600_CONFIG_MEMSIZE);
175 else
176 reg = RREG32(RADEON_CONFIG_MEMSIZE);
177
178 if (reg)
179 return true;
180
181 return false;
182
183}
184
185
186/*
187 * Registers accessors functions.
188 */
189uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
190{
191 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
192 BUG_ON(1);
193 return 0;
194}
195
196void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
197{
198 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
199 reg, v);
200 BUG_ON(1);
201}
202
203void radeon_register_accessor_init(struct radeon_device *rdev)
204{
205 rdev->mm_rreg = &r100_mm_rreg;
206 rdev->mm_wreg = &r100_mm_wreg;
207 rdev->mc_rreg = &radeon_invalid_rreg;
208 rdev->mc_wreg = &radeon_invalid_wreg;
209 rdev->pll_rreg = &radeon_invalid_rreg;
210 rdev->pll_wreg = &radeon_invalid_wreg;
211 rdev->pcie_rreg = &radeon_invalid_rreg;
212 rdev->pcie_wreg = &radeon_invalid_wreg;
213 rdev->pciep_rreg = &radeon_invalid_rreg;
214 rdev->pciep_wreg = &radeon_invalid_wreg;
215
216	/* Don't change the order as we are overriding accessors. */
217 if (rdev->family < CHIP_RV515) {
218 rdev->pcie_rreg = &rv370_pcie_rreg;
219 rdev->pcie_wreg = &rv370_pcie_wreg;
220 }
221 if (rdev->family >= CHIP_RV515) {
222 rdev->pcie_rreg = &rv515_pcie_rreg;
223 rdev->pcie_wreg = &rv515_pcie_wreg;
224 }
225 /* FIXME: not sure here */
226 if (rdev->family <= CHIP_R580) {
227 rdev->pll_rreg = &r100_pll_rreg;
228 rdev->pll_wreg = &r100_pll_wreg;
229 }
230 if (rdev->family >= CHIP_RV515) {
231 rdev->mc_rreg = &rv515_mc_rreg;
232 rdev->mc_wreg = &rv515_mc_wreg;
233 }
234 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
235 rdev->mc_rreg = &rs400_mc_rreg;
236 rdev->mc_wreg = &rs400_mc_wreg;
237 }
238 if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
239 rdev->mc_rreg = &rs690_mc_rreg;
240 rdev->mc_wreg = &rs690_mc_wreg;
241 }
242 if (rdev->family == CHIP_RS600) {
243 rdev->mc_rreg = &rs600_mc_rreg;
244 rdev->mc_wreg = &rs600_mc_wreg;
245 }
246 if (rdev->family >= CHIP_R600) {
247 rdev->pciep_rreg = &r600_pciep_rreg;
248 rdev->pciep_wreg = &r600_pciep_wreg;
249 }
250}
251
252
253/*
254 * ASIC
255 */
256int radeon_asic_init(struct radeon_device *rdev)
257{
258 radeon_register_accessor_init(rdev);
259 switch (rdev->family) {
260 case CHIP_R100:
261 case CHIP_RV100:
262 case CHIP_RS100:
263 case CHIP_RV200:
264 case CHIP_RS200:
265 case CHIP_R200:
266 case CHIP_RV250:
267 case CHIP_RS300:
268 case CHIP_RV280:
269 rdev->asic = &r100_asic;
270 break;
271 case CHIP_R300:
272 case CHIP_R350:
273 case CHIP_RV350:
274 case CHIP_RV380:
275 rdev->asic = &r300_asic;
276 break;
277 case CHIP_R420:
278 case CHIP_R423:
279 case CHIP_RV410:
280 rdev->asic = &r420_asic;
281 break;
282 case CHIP_RS400:
283 case CHIP_RS480:
284 rdev->asic = &rs400_asic;
285 break;
286 case CHIP_RS600:
287 rdev->asic = &rs600_asic;
288 break;
289 case CHIP_RS690:
290 case CHIP_RS740:
291 rdev->asic = &rs690_asic;
292 break;
293 case CHIP_RV515:
294 rdev->asic = &rv515_asic;
295 break;
296 case CHIP_R520:
297 case CHIP_RV530:
298 case CHIP_RV560:
299 case CHIP_RV570:
300 case CHIP_R580:
301 rdev->asic = &r520_asic;
302 break;
303 case CHIP_R600:
304 case CHIP_RV610:
305 case CHIP_RV630:
306 case CHIP_RV620:
307 case CHIP_RV635:
308 case CHIP_RV670:
309 case CHIP_RS780:
310 case CHIP_RV770:
311 case CHIP_RV730:
312 case CHIP_RV710:
313 default:
314 /* FIXME: not supported yet */
315 return -EINVAL;
316 }
317 return 0;
318}
319
320
321/*
322 * Wrapper around modesetting bits.
323 */
324int radeon_clocks_init(struct radeon_device *rdev)
325{
326 int r;
327
328 radeon_get_clock_info(rdev->ddev);
329 r = radeon_static_clocks_init(rdev->ddev);
330 if (r) {
331 return r;
332 }
333 DRM_INFO("Clocks initialized !\n");
334 return 0;
335}
336
337void radeon_clocks_fini(struct radeon_device *rdev)
338{
339}
340
341/* ATOM accessor methods */
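/* These callbacks are handed to the ATOM interpreter via the card_info
 * struct below so that BIOS table execution goes through the driver's
 * register accessors. */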
342static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
343{
344 struct radeon_device *rdev = info->dev->dev_private;
345 uint32_t r;
346
347 r = rdev->pll_rreg(rdev, reg);
348 return r;
349}
350
351static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
352{
353 struct radeon_device *rdev = info->dev->dev_private;
354
355 rdev->pll_wreg(rdev, reg, val);
356}
357
358static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
359{
360 struct radeon_device *rdev = info->dev->dev_private;
361 uint32_t r;
362
363 r = rdev->mc_rreg(rdev, reg);
364 return r;
365}
366
367static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
368{
369 struct radeon_device *rdev = info->dev->dev_private;
370
371 rdev->mc_wreg(rdev, reg, val);
372}
373
374static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
375{
376 struct radeon_device *rdev = info->dev->dev_private;
377
378 WREG32(reg*4, val);
379}
380
381static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
382{
383 struct radeon_device *rdev = info->dev->dev_private;
384 uint32_t r;
385
386 r = RREG32(reg*4);
387 return r;
388}
389
390static struct card_info atom_card_info = {
391 .dev = NULL,
392 .reg_read = cail_reg_read,
393 .reg_write = cail_reg_write,
394 .mc_read = cail_mc_read,
395 .mc_write = cail_mc_write,
396 .pll_read = cail_pll_read,
397 .pll_write = cail_pll_write,
398};
399
400int radeon_atombios_init(struct radeon_device *rdev)
401{
402 atom_card_info.dev = rdev->ddev;
403 rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
404 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
405 return 0;
406}
407
408void radeon_atombios_fini(struct radeon_device *rdev)
409{
410 kfree(rdev->mode_info.atom_context);
411}
412
413int radeon_combios_init(struct radeon_device *rdev)
414{
415 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
416 return 0;
417}
418
419void radeon_combios_fini(struct radeon_device *rdev)
420{
421}
422
423int radeon_modeset_init(struct radeon_device *rdev);
424void radeon_modeset_fini(struct radeon_device *rdev);
425
426
427/*
428 * Radeon device.
429 */
430int radeon_device_init(struct radeon_device *rdev,
431 struct drm_device *ddev,
432 struct pci_dev *pdev,
433 uint32_t flags)
434{
435 int r, ret;
436
437 DRM_INFO("radeon: Initializing kernel modesetting.\n");
438 rdev->shutdown = false;
439 rdev->ddev = ddev;
440 rdev->pdev = pdev;
441 rdev->flags = flags;
442 rdev->family = flags & RADEON_FAMILY_MASK;
443 rdev->is_atom_bios = false;
444 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
445 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
446 rdev->gpu_lockup = false;
447	/* mutex initialization is all done here so we
448	 * can recall the functions without locking issues */
449 mutex_init(&rdev->cs_mutex);
450 mutex_init(&rdev->ib_pool.mutex);
451 mutex_init(&rdev->cp.mutex);
452 rwlock_init(&rdev->fence_drv.lock);
453
454 if (radeon_agpmode == -1) {
455 rdev->flags &= ~RADEON_IS_AGP;
456 if (rdev->family > CHIP_RV515 ||
457 rdev->family == CHIP_RV380 ||
458 rdev->family == CHIP_RV410 ||
459 rdev->family == CHIP_R423) {
460 DRM_INFO("Forcing AGP to PCIE mode\n");
461 rdev->flags |= RADEON_IS_PCIE;
462 } else {
463 DRM_INFO("Forcing AGP to PCI mode\n");
464 rdev->flags |= RADEON_IS_PCI;
465 }
466 }
467
468 /* Set asic functions */
469 r = radeon_asic_init(rdev);
470 if (r) {
471 return r;
472 }
473
474 /* Report DMA addressing limitation */
475 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
476 if (r) {
477 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
478 }
479
480 /* Registers mapping */
481 /* TODO: block userspace mapping of io register */
482 rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
483 rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
484 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
485 if (rdev->rmmio == NULL) {
486 return -ENOMEM;
487 }
488 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
489 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
490
491 /* Setup errata flags */
492 radeon_errata(rdev);
493 /* Initialize scratch registers */
494 radeon_scratch_init(rdev);
495
496	/* TODO: disable VGA, need to use VGA request */
497 /* BIOS*/
498 if (!radeon_get_bios(rdev)) {
499 if (ASIC_IS_AVIVO(rdev))
500 return -EINVAL;
501 }
502 if (rdev->is_atom_bios) {
503 r = radeon_atombios_init(rdev);
504 if (r) {
505 return r;
506 }
507 } else {
508 r = radeon_combios_init(rdev);
509 if (r) {
510 return r;
511 }
512 }
513 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
514 if (radeon_gpu_reset(rdev)) {
515 /* FIXME: what do we want to do here ? */
516 }
517 /* check if cards are posted or not */
518 if (!radeon_card_posted(rdev) && rdev->bios) {
519 DRM_INFO("GPU not posted. posting now...\n");
520 if (rdev->is_atom_bios) {
521 atom_asic_init(rdev->mode_info.atom_context);
522 } else {
523 radeon_combios_asic_init(rdev->ddev);
524 }
525 }
526	/* Get vram information */
527 radeon_vram_info(rdev);
528	/* Device is severely broken if aper size > vram size.
529 * for RN50/M6/M7 - Novell bug 204882 ?
530 */
531 if (rdev->mc.vram_size < rdev->mc.aper_size) {
532 rdev->mc.aper_size = rdev->mc.vram_size;
533 }
534 /* Add an MTRR for the VRAM */
535 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
536 MTRR_TYPE_WRCOMB, 1);
537 DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
538 rdev->mc.vram_size >> 20,
539 (unsigned)rdev->mc.aper_size >> 20);
540 DRM_INFO("RAM width %dbits %cDR\n",
541 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
542 /* Initialize clocks */
543 r = radeon_clocks_init(rdev);
544 if (r) {
545 return r;
546 }
547 /* Initialize memory controller (also test AGP) */
548 r = radeon_mc_init(rdev);
549 if (r) {
550 return r;
551 }
552 /* Fence driver */
553 r = radeon_fence_driver_init(rdev);
554 if (r) {
555 return r;
556 }
557 r = radeon_irq_kms_init(rdev);
558 if (r) {
559 return r;
560 }
561 /* Memory manager */
562 r = radeon_object_init(rdev);
563 if (r) {
564 return r;
565 }
566 /* Initialize GART (initialize after TTM so we can allocate
567 * memory through TTM but finalize after TTM) */
568 r = radeon_gart_enable(rdev);
569 if (!r) {
570 r = radeon_gem_init(rdev);
571 }
572
573 /* 1M ring buffer */
574 if (!r) {
575 r = radeon_cp_init(rdev, 1024 * 1024);
576 }
577 if (!r) {
578 r = radeon_wb_init(rdev);
579 if (r) {
580			DRM_ERROR("radeon: failed initializing WB (%d).\n", r);
581 return r;
582 }
583 }
584 if (!r) {
585 r = radeon_ib_pool_init(rdev);
586 if (r) {
587			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
588 return r;
589 }
590 }
591 if (!r) {
592 r = radeon_ib_test(rdev);
593 if (r) {
594			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
595 return r;
596 }
597 }
598 ret = r;
599 r = radeon_modeset_init(rdev);
600 if (r) {
601 return r;
602 }
603 if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
604 rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
605 }
606 if (!ret) {
607 DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
608 }
609 if (radeon_benchmarking) {
610 radeon_benchmark(rdev);
611 }
612 return ret;
613}
614
615void radeon_device_fini(struct radeon_device *rdev)
616{
617 if (rdev == NULL || rdev->rmmio == NULL) {
618 return;
619 }
620 DRM_INFO("radeon: finishing device.\n");
621 rdev->shutdown = true;
622	/* Order matters so be careful if you rearrange anything */
623 radeon_modeset_fini(rdev);
624 radeon_ib_pool_fini(rdev);
625 radeon_cp_fini(rdev);
626 radeon_wb_fini(rdev);
627 radeon_gem_fini(rdev);
628 radeon_object_fini(rdev);
629 /* mc_fini must be after object_fini */
630 radeon_mc_fini(rdev);
631#if __OS_HAS_AGP
632 radeon_agp_fini(rdev);
633#endif
634 radeon_irq_kms_fini(rdev);
635 radeon_fence_driver_fini(rdev);
636 radeon_clocks_fini(rdev);
637 if (rdev->is_atom_bios) {
638 radeon_atombios_fini(rdev);
639 } else {
640 radeon_combios_fini(rdev);
641 }
642 kfree(rdev->bios);
643 rdev->bios = NULL;
644 iounmap(rdev->rmmio);
645 rdev->rmmio = NULL;
646}
647
648
649/*
650 * Suspend & resume.
651 */
652int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
653{
654 struct radeon_device *rdev = dev->dev_private;
655 struct drm_crtc *crtc;
656
657 if (dev == NULL || rdev == NULL) {
658 return -ENODEV;
659 }
660 if (state.event == PM_EVENT_PRETHAW) {
661 return 0;
662 }
663 /* unpin the front buffers */
664 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
665 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
666 struct radeon_object *robj;
667
668 if (rfb == NULL || rfb->obj == NULL) {
669 continue;
670 }
671 robj = rfb->obj->driver_private;
672 if (robj != rdev->fbdev_robj) {
673 radeon_object_unpin(robj);
674 }
675 }
676 /* evict vram memory */
677 radeon_object_evict_vram(rdev);
678 /* wait for gpu to finish processing current batch */
679 radeon_fence_wait_last(rdev);
680
681 radeon_cp_disable(rdev);
682 radeon_gart_disable(rdev);
683
684 /* evict remaining vram memory */
685 radeon_object_evict_vram(rdev);
686
687 rdev->irq.sw_int = false;
688 radeon_irq_set(rdev);
689
690 pci_save_state(dev->pdev);
691 if (state.event == PM_EVENT_SUSPEND) {
692 /* Shut down the device */
693 pci_disable_device(dev->pdev);
694 pci_set_power_state(dev->pdev, PCI_D3hot);
695 }
696 acquire_console_sem();
697 fb_set_suspend(rdev->fbdev_info, 1);
698 release_console_sem();
699 return 0;
700}
701
702int radeon_resume_kms(struct drm_device *dev)
703{
704 struct radeon_device *rdev = dev->dev_private;
705 int r;
706
707 acquire_console_sem();
708 pci_set_power_state(dev->pdev, PCI_D0);
709 pci_restore_state(dev->pdev);
710 if (pci_enable_device(dev->pdev)) {
711 release_console_sem();
712 return -1;
713 }
714 pci_set_master(dev->pdev);
715 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
716 if (radeon_gpu_reset(rdev)) {
717 /* FIXME: what do we want to do here ? */
718 }
719 /* post card */
720 if (rdev->is_atom_bios) {
721 atom_asic_init(rdev->mode_info.atom_context);
722 } else {
723 radeon_combios_asic_init(rdev->ddev);
724 }
725 /* Initialize clocks */
726 r = radeon_clocks_init(rdev);
727 if (r) {
728 release_console_sem();
729 return r;
730 }
731 /* Enable IRQ */
732 rdev->irq.sw_int = true;
733 radeon_irq_set(rdev);
734 /* Initialize GPU Memory Controller */
735 r = radeon_mc_init(rdev);
736 if (r) {
737 goto out;
738 }
739 r = radeon_gart_enable(rdev);
740 if (r) {
741 goto out;
742 }
743 r = radeon_cp_init(rdev, rdev->cp.ring_size);
744 if (r) {
745 goto out;
746 }
747out:
748 fb_set_suspend(rdev->fbdev_info, 0);
749 release_console_sem();
750
751 /* blat the mode back in */
752 drm_helper_resume_force_mode(dev);
753 return 0;
754}
755
756
757/*
758 * Debugfs
759 */
760struct radeon_debugfs {
761 struct drm_info_list *files;
762 unsigned num_files;
763};
764static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
765static unsigned _radeon_debugfs_count = 0;
766
767int radeon_debugfs_add_files(struct radeon_device *rdev,
768 struct drm_info_list *files,
769 unsigned nfiles)
770{
771 unsigned i;
772
773 for (i = 0; i < _radeon_debugfs_count; i++) {
774 if (_radeon_debugfs[i].files == files) {
775 /* Already registered */
776 return 0;
777 }
778 }
779 if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
780 DRM_ERROR("Reached maximum number of debugfs files.\n");
781 DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
782 return -EINVAL;
783 }
784 _radeon_debugfs[_radeon_debugfs_count].files = files;
785 _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
786 _radeon_debugfs_count++;
787#if defined(CONFIG_DEBUG_FS)
788 drm_debugfs_create_files(files, nfiles,
789 rdev->ddev->control->debugfs_root,
790 rdev->ddev->control);
791 drm_debugfs_create_files(files, nfiles,
792 rdev->ddev->primary->debugfs_root,
793 rdev->ddev->primary);
794#endif
795 return 0;
796}
797
798#if defined(CONFIG_DEBUG_FS)
799int radeon_debugfs_init(struct drm_minor *minor)
800{
801 return 0;
802}
803
804void radeon_debugfs_cleanup(struct drm_minor *minor)
805{
806 unsigned i;
807
808 for (i = 0; i < _radeon_debugfs_count; i++) {
809 drm_debugfs_remove_files(_radeon_debugfs[i].files,
810 _radeon_debugfs[i].num_files, minor);
811 }
812}
813#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644
index 000000000000..5452bb9d925e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -0,0 +1,692 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#include "atom.h"
31#include <asm/div64.h>
32
33#include "drm_crtc_helper.h"
34#include "drm_edid.h"
35
36static int radeon_ddc_dump(struct drm_connector *connector);
37
38static void avivo_crtc_load_lut(struct drm_crtc *crtc)
39{
40 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
41 struct drm_device *dev = crtc->dev;
42 struct radeon_device *rdev = dev->dev_private;
43 int i;
44
45 DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
46 WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
47
48 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
49 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
50 WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
51
52 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
53 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
54 WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
55
56 WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
57 WREG32(AVIVO_DC_LUT_RW_MODE, 0);
58 WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
59
60 WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
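	/* each LUT entry packs the 10 bit red, green and blue values into a
	 * single 30 bit word (red in bits 29:20, green 19:10, blue 9:0) */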
61 for (i = 0; i < 256; i++) {
62 WREG32(AVIVO_DC_LUT_30_COLOR,
63 (radeon_crtc->lut_r[i] << 20) |
64 (radeon_crtc->lut_g[i] << 10) |
65 (radeon_crtc->lut_b[i] << 0));
66 }
67
68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
69}
70
71static void legacy_crtc_load_lut(struct drm_crtc *crtc)
72{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
74 struct drm_device *dev = crtc->dev;
75 struct radeon_device *rdev = dev->dev_private;
76 int i;
77 uint32_t dac2_cntl;
78
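	/* select which crtc's palette the writes below will update */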
79 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
80 if (radeon_crtc->crtc_id == 0)
81 dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
82 else
83 dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
84 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
85
86 WREG8(RADEON_PALETTE_INDEX, 0);
87 for (i = 0; i < 256; i++) {
88 WREG32(RADEON_PALETTE_30_DATA,
89 (radeon_crtc->lut_r[i] << 20) |
90 (radeon_crtc->lut_g[i] << 10) |
91 (radeon_crtc->lut_b[i] << 0));
92 }
93}
94
95void radeon_crtc_load_lut(struct drm_crtc *crtc)
96{
97 struct drm_device *dev = crtc->dev;
98 struct radeon_device *rdev = dev->dev_private;
99
100 if (!crtc->enabled)
101 return;
102
103 if (ASIC_IS_AVIVO(rdev))
104 avivo_crtc_load_lut(crtc);
105 else
106 legacy_crtc_load_lut(crtc);
107}
108
109/** Sets the color ramps on behalf of RandR */
110void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno)
112{
113 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
114
115 if (regno == 0)
116 DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
117 radeon_crtc->lut_r[regno] = red >> 6;
118 radeon_crtc->lut_g[regno] = green >> 6;
119 radeon_crtc->lut_b[regno] = blue >> 6;
120}
121
122static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
123 u16 *blue, uint32_t size)
124{
125 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
126 int i, j;
127
128 if (size != 256) {
129 return;
130 }
131 if (crtc->fb == NULL) {
132 return;
133 }
134
135 if (crtc->fb->depth == 16) {
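		/* RGB565 only has 32 red/blue and 64 green levels; replicate
		 * them across the 256 hardware LUT entries */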
136 for (i = 0; i < 64; i++) {
137 if (i <= 31) {
138 for (j = 0; j < 8; j++) {
139 radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
140 radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
141 }
142 }
143 for (j = 0; j < 4; j++)
144 radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
145 }
146 } else {
147 for (i = 0; i < 256; i++) {
148 radeon_crtc->lut_r[i] = red[i] >> 6;
149 radeon_crtc->lut_g[i] = green[i] >> 6;
150 radeon_crtc->lut_b[i] = blue[i] >> 6;
151 }
152 }
153
154 radeon_crtc_load_lut(crtc);
155}
156
157static void radeon_crtc_destroy(struct drm_crtc *crtc)
158{
159 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
160
161 if (radeon_crtc->mode_set.mode) {
162 drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
163 }
164 drm_crtc_cleanup(crtc);
165 kfree(radeon_crtc);
166}
167
168static const struct drm_crtc_funcs radeon_crtc_funcs = {
169 .cursor_set = radeon_crtc_cursor_set,
170 .cursor_move = radeon_crtc_cursor_move,
171 .gamma_set = radeon_crtc_gamma_set,
172 .set_config = drm_crtc_helper_set_config,
173 .destroy = radeon_crtc_destroy,
174};
175
176static void radeon_crtc_init(struct drm_device *dev, int index)
177{
178 struct radeon_device *rdev = dev->dev_private;
179 struct radeon_crtc *radeon_crtc;
180 int i;
181
182 radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
183 if (radeon_crtc == NULL)
184 return;
185
186 drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);
187
188 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
189 radeon_crtc->crtc_id = index;
190
191 radeon_crtc->mode_set.crtc = &radeon_crtc->base;
192 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
193 radeon_crtc->mode_set.num_connectors = 0;
194
195 for (i = 0; i < 256; i++) {
196 radeon_crtc->lut_r[i] = i << 2;
197 radeon_crtc->lut_g[i] = i << 2;
198 radeon_crtc->lut_b[i] = i << 2;
199 }
200
201 if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
202 radeon_atombios_init_crtc(dev, radeon_crtc);
203 else
204 radeon_legacy_init_crtc(dev, radeon_crtc);
205}
206
207static const char *encoder_names[34] = {
208 "NONE",
209 "INTERNAL_LVDS",
210 "INTERNAL_TMDS1",
211 "INTERNAL_TMDS2",
212 "INTERNAL_DAC1",
213 "INTERNAL_DAC2",
214 "INTERNAL_SDVOA",
215 "INTERNAL_SDVOB",
216 "SI170B",
217 "CH7303",
218 "CH7301",
219 "INTERNAL_DVO1",
220 "EXTERNAL_SDVOA",
221 "EXTERNAL_SDVOB",
222 "TITFP513",
223 "INTERNAL_LVTM1",
224 "VT1623",
225 "HDMI_SI1930",
226 "HDMI_INTERNAL",
227 "INTERNAL_KLDSCP_TMDS1",
228 "INTERNAL_KLDSCP_DVO1",
229 "INTERNAL_KLDSCP_DAC1",
230 "INTERNAL_KLDSCP_DAC2",
231 "SI178",
232 "MVPU_FPGA",
233 "INTERNAL_DDI",
234 "VT1625",
235 "HDMI_SI1932",
236 "DP_AN9801",
237 "DP_DP501",
238 "INTERNAL_UNIPHY",
239 "INTERNAL_KLDSCP_LVTMA",
240 "INTERNAL_UNIPHY1",
241 "INTERNAL_UNIPHY2",
242};
243
244static const char *connector_names[13] = {
245 "Unknown",
246 "VGA",
247 "DVI-I",
248 "DVI-D",
249 "DVI-A",
250 "Composite",
251 "S-video",
252 "LVDS",
253 "Component",
254 "DIN",
255 "DisplayPort",
256 "HDMI-A",
257 "HDMI-B",
258};
259
260static void radeon_print_display_setup(struct drm_device *dev)
261{
262 struct drm_connector *connector;
263 struct radeon_connector *radeon_connector;
264 struct drm_encoder *encoder;
265 struct radeon_encoder *radeon_encoder;
266 uint32_t devices;
267 int i = 0;
268
269 DRM_INFO("Radeon Display Connectors\n");
270 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
271 radeon_connector = to_radeon_connector(connector);
272 DRM_INFO("Connector %d:\n", i);
273 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
274 if (radeon_connector->ddc_bus)
275 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
276 radeon_connector->ddc_bus->rec.mask_clk_reg,
277 radeon_connector->ddc_bus->rec.mask_data_reg,
278 radeon_connector->ddc_bus->rec.a_clk_reg,
279 radeon_connector->ddc_bus->rec.a_data_reg,
280 radeon_connector->ddc_bus->rec.put_clk_reg,
281 radeon_connector->ddc_bus->rec.put_data_reg,
282 radeon_connector->ddc_bus->rec.get_clk_reg,
283 radeon_connector->ddc_bus->rec.get_data_reg);
284 DRM_INFO(" Encoders:\n");
285 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
286 radeon_encoder = to_radeon_encoder(encoder);
287 devices = radeon_encoder->devices & radeon_connector->devices;
288 if (devices) {
289 if (devices & ATOM_DEVICE_CRT1_SUPPORT)
290 DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
291 if (devices & ATOM_DEVICE_CRT2_SUPPORT)
292 DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
293 if (devices & ATOM_DEVICE_LCD1_SUPPORT)
294 DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
295 if (devices & ATOM_DEVICE_DFP1_SUPPORT)
296 DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
297 if (devices & ATOM_DEVICE_DFP2_SUPPORT)
298 DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
299 if (devices & ATOM_DEVICE_DFP3_SUPPORT)
300 DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
301 if (devices & ATOM_DEVICE_DFP4_SUPPORT)
302 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
303 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
304 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
305 if (devices & ATOM_DEVICE_TV1_SUPPORT)
306 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
307 if (devices & ATOM_DEVICE_CV_SUPPORT)
308 DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
309 }
310 }
311 i++;
312 }
313}
314
315bool radeon_setup_enc_conn(struct drm_device *dev)
316{
317 struct radeon_device *rdev = dev->dev_private;
318 struct drm_connector *drm_connector;
319 bool ret = false;
320
321 if (rdev->bios) {
322 if (rdev->is_atom_bios) {
323 if (rdev->family >= CHIP_R600)
324 ret = radeon_get_atom_connector_info_from_object_table(dev);
325 else
326 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
327 } else
328 ret = radeon_get_legacy_connector_info_from_bios(dev);
329 } else {
330 if (!ASIC_IS_AVIVO(rdev))
331 ret = radeon_get_legacy_connector_info_from_table(dev);
332 }
333 if (ret) {
334 radeon_print_display_setup(dev);
335 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
336 radeon_ddc_dump(drm_connector);
337 }
338
339 return ret;
340}
341
342int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
343{
344 struct edid *edid;
345 int ret = 0;
346
347 if (!radeon_connector->ddc_bus)
348 return -1;
349 radeon_i2c_do_lock(radeon_connector, 1);
350 edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
351 radeon_i2c_do_lock(radeon_connector, 0);
352 if (edid) {
353 /* update digital bits here */
354 if (edid->digital)
355 radeon_connector->use_digital = 1;
356 else
357 radeon_connector->use_digital = 0;
358 drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
359 ret = drm_add_edid_modes(&radeon_connector->base, edid);
360 kfree(edid);
361 return ret;
362 }
363 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
364 return -1;
365}
366
367static int radeon_ddc_dump(struct drm_connector *connector)
368{
369 struct edid *edid;
370 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
371 int ret = 0;
372
373 if (!radeon_connector->ddc_bus)
374 return -1;
375 radeon_i2c_do_lock(radeon_connector, 1);
376 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
377 radeon_i2c_do_lock(radeon_connector, 0);
378 if (edid) {
379 kfree(edid);
380 }
381 return ret;
382}
383
384static inline uint32_t radeon_div(uint64_t n, uint32_t d)
385{
386 uint64_t mod;
387
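	/* add half the divisor so do_div() rounds to nearest instead of truncating */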
388 n += d / 2;
389
390 mod = do_div(n, d);
391 return n;
392}
393
394void radeon_compute_pll(struct radeon_pll *pll,
395 uint64_t freq,
396 uint32_t *dot_clock_p,
397 uint32_t *fb_div_p,
398 uint32_t *frac_fb_div_p,
399 uint32_t *ref_div_p,
400 uint32_t *post_div_p,
401 int flags)
402{
403 uint32_t min_ref_div = pll->min_ref_div;
404 uint32_t max_ref_div = pll->max_ref_div;
405 uint32_t min_fractional_feed_div = 0;
406 uint32_t max_fractional_feed_div = 0;
407 uint32_t best_vco = pll->best_vco;
408 uint32_t best_post_div = 1;
409 uint32_t best_ref_div = 1;
410 uint32_t best_feedback_div = 1;
411 uint32_t best_frac_feedback_div = 0;
412 uint32_t best_freq = -1;
413 uint32_t best_error = 0xffffffff;
414 uint32_t best_vco_diff = 1;
415 uint32_t post_div;
416
417 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
418 freq = freq * 1000;
419
420 if (flags & RADEON_PLL_USE_REF_DIV)
421 min_ref_div = max_ref_div = pll->reference_div;
422 else {
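		/* narrow the reference divider range so the PLL input clock
		 * (reference_freq / ref_div) lands within [pll_in_min, pll_in_max] */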
423 while (min_ref_div < max_ref_div-1) {
424 uint32_t mid = (min_ref_div + max_ref_div) / 2;
425 uint32_t pll_in = pll->reference_freq / mid;
426 if (pll_in < pll->pll_in_min)
427 max_ref_div = mid;
428 else if (pll_in > pll->pll_in_max)
429 min_ref_div = mid;
430 else
431 break;
432 }
433 }
434
435 if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
436 min_fractional_feed_div = pll->min_frac_feedback_div;
437 max_fractional_feed_div = pll->max_frac_feedback_div;
438 }
439
440 for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
441 uint32_t ref_div;
442
443 if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
444 continue;
445
446 /* legacy radeons only have a few post_divs */
447 if (flags & RADEON_PLL_LEGACY) {
448 if ((post_div == 5) ||
449 (post_div == 7) ||
450 (post_div == 9) ||
451 (post_div == 10) ||
452 (post_div == 11) ||
453 (post_div == 13) ||
454 (post_div == 14) ||
455 (post_div == 15))
456 continue;
457 }
458
459 for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
460 uint32_t feedback_div, current_freq = 0, error, vco_diff;
461 uint32_t pll_in = pll->reference_freq / ref_div;
462 uint32_t min_feed_div = pll->min_feedback_div;
463 uint32_t max_feed_div = pll->max_feedback_div + 1;
464
465 if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
466 continue;
467
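		/* binary-search the feedback divider for the VCO/output frequency
		 * closest to the requested one */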
468 while (min_feed_div < max_feed_div) {
469 uint32_t vco;
470 uint32_t min_frac_feed_div = min_fractional_feed_div;
471 uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
472 uint32_t frac_feedback_div;
473 uint64_t tmp;
474
475 feedback_div = (min_feed_div + max_feed_div) / 2;
476
477 tmp = (uint64_t)pll->reference_freq * feedback_div;
478 vco = radeon_div(tmp, ref_div);
479
480 if (vco < pll->pll_out_min) {
481 min_feed_div = feedback_div + 1;
482 continue;
483 } else if (vco > pll->pll_out_max) {
484 max_feed_div = feedback_div;
485 continue;
486 }
487
488 while (min_frac_feed_div < max_frac_feed_div) {
489 frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
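				/* the fractional term is weighted 1/10th of the integer
				 * feedback divider (x1000 vs x10000 scaling) */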
490 tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
491 tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
492 current_freq = radeon_div(tmp, ref_div * post_div);
493
494 error = abs(current_freq - freq);
495 vco_diff = abs(vco - best_vco);
496
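				/* keep the candidate with the smallest frequency error; when a
				 * target VCO is set, ties within 100 units are broken by the
				 * smallest VCO deviation */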
497 if ((best_vco == 0 && error < best_error) ||
498 (best_vco != 0 &&
499 (error < best_error - 100 ||
500 (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
501 best_post_div = post_div;
502 best_ref_div = ref_div;
503 best_feedback_div = feedback_div;
504 best_frac_feedback_div = frac_feedback_div;
505 best_freq = current_freq;
506 best_error = error;
507 best_vco_diff = vco_diff;
508 } else if (current_freq == freq) {
509 if (best_freq == -1) {
510 best_post_div = post_div;
511 best_ref_div = ref_div;
512 best_feedback_div = feedback_div;
513 best_frac_feedback_div = frac_feedback_div;
514 best_freq = current_freq;
515 best_error = error;
516 best_vco_diff = vco_diff;
517 } else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
518 ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
519 ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
520 ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
521 ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
522 ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
523 best_post_div = post_div;
524 best_ref_div = ref_div;
525 best_feedback_div = feedback_div;
526 best_frac_feedback_div = frac_feedback_div;
527 best_freq = current_freq;
528 best_error = error;
529 best_vco_diff = vco_diff;
530 }
531 }
532 if (current_freq < freq)
533 min_frac_feed_div = frac_feedback_div + 1;
534 else
535 max_frac_feed_div = frac_feedback_div;
536 }
537 if (current_freq < freq)
538 min_feed_div = feedback_div + 1;
539 else
540 max_feed_div = feedback_div;
541 }
542 }
543 }
544
545 *dot_clock_p = best_freq / 10000;
546 *fb_div_p = best_feedback_div;
547 *frac_fb_div_p = best_frac_feedback_div;
548 *ref_div_p = best_ref_div;
549 *post_div_p = best_post_div;
550}
551
552static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
553{
554 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
555 struct drm_device *dev = fb->dev;
556
557 if (fb->fbdev)
558 radeonfb_remove(dev, fb);
559
560 if (radeon_fb->obj) {
561 radeon_gem_object_unpin(radeon_fb->obj);
562 mutex_lock(&dev->struct_mutex);
563 drm_gem_object_unreference(radeon_fb->obj);
564 mutex_unlock(&dev->struct_mutex);
565 }
566 drm_framebuffer_cleanup(fb);
567 kfree(radeon_fb);
568}
569
570static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
571 struct drm_file *file_priv,
572 unsigned int *handle)
573{
574 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
575
576 return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
577}
578
579static const struct drm_framebuffer_funcs radeon_fb_funcs = {
580 .destroy = radeon_user_framebuffer_destroy,
581 .create_handle = radeon_user_framebuffer_create_handle,
582};
583
584struct drm_framebuffer *
585radeon_framebuffer_create(struct drm_device *dev,
586 struct drm_mode_fb_cmd *mode_cmd,
587 struct drm_gem_object *obj)
588{
589 struct radeon_framebuffer *radeon_fb;
590
591 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
592 if (radeon_fb == NULL) {
593 return NULL;
594 }
595 drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
596 drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
597 radeon_fb->obj = obj;
598 return &radeon_fb->base;
599}
600
601static struct drm_framebuffer *
602radeon_user_framebuffer_create(struct drm_device *dev,
603 struct drm_file *file_priv,
604 struct drm_mode_fb_cmd *mode_cmd)
605{
606 struct drm_gem_object *obj;
607
608 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
609
610 return radeon_framebuffer_create(dev, mode_cmd, obj);
611}
612
613static const struct drm_mode_config_funcs radeon_mode_funcs = {
614 .fb_create = radeon_user_framebuffer_create,
615 .fb_changed = radeonfb_probe,
616};
617
618int radeon_modeset_init(struct radeon_device *rdev)
619{
620 int num_crtc = 2, i;
621 int ret;
622
623 drm_mode_config_init(rdev->ddev);
624 rdev->mode_info.mode_config_initialized = true;
625
626 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
627
628 if (ASIC_IS_AVIVO(rdev)) {
629 rdev->ddev->mode_config.max_width = 8192;
630 rdev->ddev->mode_config.max_height = 8192;
631 } else {
632 rdev->ddev->mode_config.max_width = 4096;
633 rdev->ddev->mode_config.max_height = 4096;
634 }
635
636 rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
637
638 /* allocate crtcs - TODO single crtc */
639 for (i = 0; i < num_crtc; i++) {
640 radeon_crtc_init(rdev->ddev, i);
641 }
642
643 /* okay we should have all the bios connectors */
644 ret = radeon_setup_enc_conn(rdev->ddev);
645 if (!ret) {
646 return ret;
647 }
648 drm_helper_initial_config(rdev->ddev);
649 return 0;
650}
651
652void radeon_modeset_fini(struct radeon_device *rdev)
653{
654 if (rdev->mode_info.mode_config_initialized) {
655 drm_mode_config_cleanup(rdev->ddev);
656 rdev->mode_info.mode_config_initialized = false;
657 }
658}
659
660void radeon_init_disp_bandwidth(struct drm_device *dev)
661{
662 struct radeon_device *rdev = dev->dev_private;
663 struct drm_display_mode *modes[2];
664 int pixel_bytes[2];
665 struct drm_crtc *crtc;
666
667 pixel_bytes[0] = pixel_bytes[1] = 0;
668 modes[0] = modes[1] = NULL;
669
670 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
671 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
672
673 if (crtc->enabled && crtc->fb) {
674 modes[radeon_crtc->crtc_id] = &crtc->mode;
675 pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
676 }
677 }
678
679 if (ASIC_IS_AVIVO(rdev)) {
680 radeon_init_disp_bw_avivo(dev,
681 modes[0],
682 pixel_bytes[0],
683 modes[1],
684 pixel_bytes[1]);
685 } else {
686 radeon_init_disp_bw_legacy(dev,
687 modes[0],
688 pixel_bytes[0],
689 modes[1],
690 pixel_bytes[1]);
691 }
692}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 13a60f4d4227..c815a2cbf7b3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -35,12 +35,92 @@
35 35 #include "radeon_drv.h"
36 36
37 37 #include "drm_pciids.h"
38#include <linux/console.h>
39
40
41#if defined(CONFIG_DRM_RADEON_KMS)
42/*
43 * KMS wrapper.
44 */
45#define KMS_DRIVER_MAJOR 2
46#define KMS_DRIVER_MINOR 0
47#define KMS_DRIVER_PATCHLEVEL 0
48int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
49int radeon_driver_unload_kms(struct drm_device *dev);
50int radeon_driver_firstopen_kms(struct drm_device *dev);
51void radeon_driver_lastclose_kms(struct drm_device *dev);
52int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
53void radeon_driver_postclose_kms(struct drm_device *dev,
54 struct drm_file *file_priv);
55void radeon_driver_preclose_kms(struct drm_device *dev,
56 struct drm_file *file_priv);
57int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
58int radeon_resume_kms(struct drm_device *dev);
59u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
60int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
61void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
62void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
63int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
64void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
65irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
66int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
67void radeon_master_destroy_kms(struct drm_device *dev,
68 struct drm_master *master);
69int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
70 struct drm_file *file_priv);
71int radeon_gem_object_init(struct drm_gem_object *obj);
72void radeon_gem_object_free(struct drm_gem_object *obj);
73extern struct drm_ioctl_desc radeon_ioctls_kms[];
74extern int radeon_max_kms_ioctl;
75int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
76#if defined(CONFIG_DEBUG_FS)
77int radeon_debugfs_init(struct drm_minor *minor);
78void radeon_debugfs_cleanup(struct drm_minor *minor);
79#endif
80#endif
81
38 82
39 83 int radeon_no_wb;
84#if defined(CONFIG_DRM_RADEON_KMS)
85int radeon_modeset = -1;
86int radeon_dynclks = -1;
87int radeon_r4xx_atom = 0;
88int radeon_agpmode = 0;
89int radeon_vram_limit = 0;
90int radeon_gart_size = 512; /* default gart size */
91int radeon_benchmarking = 0;
92int radeon_connector_table = 0;
93#endif
40 94
41 95 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
42 96 module_param_named(no_wb, radeon_no_wb, int, 0444);
43 97
98#if defined(CONFIG_DRM_RADEON_KMS)
99MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
100module_param_named(modeset, radeon_modeset, int, 0400);
101
102MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
103module_param_named(dynclks, radeon_dynclks, int, 0444);
104
105MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
106module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
107
108MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
109module_param_named(vramlimit, radeon_vram_limit, int, 0600);
110
111MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
112module_param_named(agpmode, radeon_agpmode, int, 0444);
113
114MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n");
115module_param_named(gartsize, radeon_gart_size, int, 0600);
116
117MODULE_PARM_DESC(benchmark, "Run benchmark");
118module_param_named(benchmark, radeon_benchmarking, int, 0444);
119
120MODULE_PARM_DESC(connector_table, "Force connector table");
121module_param_named(connector_table, radeon_connector_table, int, 0444);
122#endif
123
44 124 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
45 125 {
46 126 	drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -73,7 +153,11 @@ static struct pci_device_id pciidlist[] = {
73 153 	radeon_PCI_IDS
74 154 };
75 155
-76	static struct drm_driver driver = {
156#if defined(CONFIG_DRM_RADEON_KMS)
157MODULE_DEVICE_TABLE(pci, pciidlist);
158#endif
159
160static struct drm_driver driver_old = {
77 161 	.driver_features =
78 162 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
79 163 	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
@@ -127,18 +211,141 @@ static struct drm_driver driver = {
127 211 	.patchlevel = DRIVER_PATCHLEVEL,
128 212 };
129 213
214#if defined(CONFIG_DRM_RADEON_KMS)
215static struct drm_driver kms_driver;
216
217static int __devinit
218radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
219{
220 return drm_get_dev(pdev, ent, &kms_driver);
221}
222
223static void
224radeon_pci_remove(struct pci_dev *pdev)
225{
226 struct drm_device *dev = pci_get_drvdata(pdev);
227
228 drm_put_dev(dev);
229}
230
231static int
232radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
233{
234 struct drm_device *dev = pci_get_drvdata(pdev);
235 return radeon_suspend_kms(dev, state);
236}
237
238static int
239radeon_pci_resume(struct pci_dev *pdev)
240{
241 struct drm_device *dev = pci_get_drvdata(pdev);
242 return radeon_resume_kms(dev);
243}
244
245static struct drm_driver kms_driver = {
246 .driver_features =
247 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
248 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
249 .dev_priv_size = 0,
250 .load = radeon_driver_load_kms,
251 .firstopen = radeon_driver_firstopen_kms,
252 .open = radeon_driver_open_kms,
253 .preclose = radeon_driver_preclose_kms,
254 .postclose = radeon_driver_postclose_kms,
255 .lastclose = radeon_driver_lastclose_kms,
256 .unload = radeon_driver_unload_kms,
257 .suspend = radeon_suspend_kms,
258 .resume = radeon_resume_kms,
259 .get_vblank_counter = radeon_get_vblank_counter_kms,
260 .enable_vblank = radeon_enable_vblank_kms,
261 .disable_vblank = radeon_disable_vblank_kms,
262 .master_create = radeon_master_create_kms,
263 .master_destroy = radeon_master_destroy_kms,
264#if defined(CONFIG_DEBUG_FS)
265 .debugfs_init = radeon_debugfs_init,
266 .debugfs_cleanup = radeon_debugfs_cleanup,
267#endif
268 .irq_preinstall = radeon_driver_irq_preinstall_kms,
269 .irq_postinstall = radeon_driver_irq_postinstall_kms,
270 .irq_uninstall = radeon_driver_irq_uninstall_kms,
271 .irq_handler = radeon_driver_irq_handler_kms,
272 .reclaim_buffers = drm_core_reclaim_buffers,
273 .get_map_ofs = drm_core_get_map_ofs,
274 .get_reg_ofs = drm_core_get_reg_ofs,
275 .ioctls = radeon_ioctls_kms,
276 .gem_init_object = radeon_gem_object_init,
277 .gem_free_object = radeon_gem_object_free,
278 .dma_ioctl = radeon_dma_ioctl_kms,
279 .fops = {
280 .owner = THIS_MODULE,
281 .open = drm_open,
282 .release = drm_release,
283 .ioctl = drm_ioctl,
284 .mmap = radeon_mmap,
285 .poll = drm_poll,
286 .fasync = drm_fasync,
287#ifdef CONFIG_COMPAT
288 .compat_ioctl = NULL,
289#endif
290 },
291
292 .pci_driver = {
293 .name = DRIVER_NAME,
294 .id_table = pciidlist,
295 .probe = radeon_pci_probe,
296 .remove = radeon_pci_remove,
297 .suspend = radeon_pci_suspend,
298 .resume = radeon_pci_resume,
299 },
300
301 .name = DRIVER_NAME,
302 .desc = DRIVER_DESC,
303 .date = DRIVER_DATE,
304 .major = KMS_DRIVER_MAJOR,
305 .minor = KMS_DRIVER_MINOR,
306 .patchlevel = KMS_DRIVER_PATCHLEVEL,
307};
308#endif
309
310static struct drm_driver *driver;
311
130 312 static int __init radeon_init(void)
131 313 {
-132	driver.num_ioctls = radeon_max_ioctl;
-133	return drm_init(&driver);
314	driver = &driver_old;
315	driver->num_ioctls = radeon_max_ioctl;
316#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86)
317 /* if enabled by default */
318 if (radeon_modeset == -1) {
319 DRM_INFO("radeon default to kernel modesetting.\n");
320 radeon_modeset = 1;
321 }
322 if (radeon_modeset == 1) {
323 DRM_INFO("radeon kernel modesetting enabled.\n");
324 driver = &kms_driver;
325 driver->driver_features |= DRIVER_MODESET;
326 driver->num_ioctls = radeon_max_kms_ioctl;
327 }
328
329 /* if the vga console setting is enabled still
330 * let modprobe override it */
331#ifdef CONFIG_VGA_CONSOLE
332 if (vgacon_text_force() && radeon_modeset == -1) {
333 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
334 driver = &driver_old;
335 driver->driver_features &= ~DRIVER_MODESET;
336 radeon_modeset = 0;
337 }
338#endif
339#endif
340 return drm_init(driver);
134 341 }
135 342
136 343 static void __exit radeon_exit(void)
137 344 {
-138	drm_exit(&driver);
345	drm_exit(driver);
139 346 }
140 347
-141 module_init(radeon_init);
348late_initcall(radeon_init);
142 349 module_exit(radeon_exit);
143 350
144 351 MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644
index 000000000000..c8ef0d14ffab
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -0,0 +1,1708 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32extern int atom_debug;
33
34uint32_t
35radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
36{
37 struct radeon_device *rdev = dev->dev_private;
38 uint32_t ret = 0;
39
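	/* map a legacy "supported device" bit plus DAC index onto the ATOM
	 * encoder object ID used by this ASIC family */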
40 switch (supported_device) {
41 case ATOM_DEVICE_CRT1_SUPPORT:
42 case ATOM_DEVICE_TV1_SUPPORT:
43 case ATOM_DEVICE_TV2_SUPPORT:
44 case ATOM_DEVICE_CRT2_SUPPORT:
45 case ATOM_DEVICE_CV_SUPPORT:
46 switch (dac) {
47 case 1: /* dac a */
48 if ((rdev->family == CHIP_RS300) ||
49 (rdev->family == CHIP_RS400) ||
50 (rdev->family == CHIP_RS480))
51 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
52 else if (ASIC_IS_AVIVO(rdev))
53 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
54 else
55 ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
56 break;
57 case 2: /* dac b */
58 if (ASIC_IS_AVIVO(rdev))
59 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
60 else {
61 /*if (rdev->family == CHIP_R200)
62 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
63 else*/
64 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
65 }
66 break;
67 case 3: /* external dac */
68 if (ASIC_IS_AVIVO(rdev))
69 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
70 else
71 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
72 break;
73 }
74 break;
75 case ATOM_DEVICE_LCD1_SUPPORT:
76 if (ASIC_IS_AVIVO(rdev))
77 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
78 else
79 ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
80 break;
81 case ATOM_DEVICE_DFP1_SUPPORT:
82 if ((rdev->family == CHIP_RS300) ||
83 (rdev->family == CHIP_RS400) ||
84 (rdev->family == CHIP_RS480))
85 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
86 else if (ASIC_IS_AVIVO(rdev))
87 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
88 else
89 ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
90 break;
91 case ATOM_DEVICE_LCD2_SUPPORT:
92 case ATOM_DEVICE_DFP2_SUPPORT:
93 if ((rdev->family == CHIP_RS600) ||
94 (rdev->family == CHIP_RS690) ||
95 (rdev->family == CHIP_RS740))
96 ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
97 else if (ASIC_IS_AVIVO(rdev))
98 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
99 else
100 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
101 break;
102 case ATOM_DEVICE_DFP3_SUPPORT:
103 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
104 break;
105 }
106
107 return ret;
108}
109
110void
111radeon_link_encoder_connector(struct drm_device *dev)
112{
113 struct drm_connector *connector;
114 struct radeon_connector *radeon_connector;
115 struct drm_encoder *encoder;
116 struct radeon_encoder *radeon_encoder;
117
118 /* walk the list and link encoders to connectors */
119 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
120 radeon_connector = to_radeon_connector(connector);
121 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
122 radeon_encoder = to_radeon_encoder(encoder);
123 if (radeon_encoder->devices & radeon_connector->devices)
124 drm_mode_connector_attach_encoder(connector, encoder);
125 }
126 }
127}
128
129static struct drm_connector *
130radeon_get_connector_for_encoder(struct drm_encoder *encoder)
131{
132 struct drm_device *dev = encoder->dev;
133 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
134 struct drm_connector *connector;
135 struct radeon_connector *radeon_connector;
136
137 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
138 radeon_connector = to_radeon_connector(connector);
139 if (radeon_encoder->devices & radeon_connector->devices)
140 return connector;
141 }
142 return NULL;
143}
144
145/* used for both atom and legacy */
146void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
147 struct drm_display_mode *mode,
148 struct drm_display_mode *adjusted_mode)
149{
150 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
151 struct drm_device *dev = encoder->dev;
152 struct radeon_device *rdev = dev->dev_private;
153 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
154
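	/* if the requested mode is smaller than the panel's native mode, enable
	 * the RMX scaler and program the CRTC with the panel's native timings */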
155 if (mode->hdisplay < native_mode->panel_xres ||
156 mode->vdisplay < native_mode->panel_yres) {
157 radeon_encoder->flags |= RADEON_USE_RMX;
158 if (ASIC_IS_AVIVO(rdev)) {
159 adjusted_mode->hdisplay = native_mode->panel_xres;
160 adjusted_mode->vdisplay = native_mode->panel_yres;
161 adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
162 adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
163 adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
164 adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
165 adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
166 adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
167 /* update crtc values */
168 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
169 /* adjust crtc values */
170 adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
171 adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
172 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
173 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
174 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
175 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
176 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
177 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
178 } else {
179 adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
180 adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
181 adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
182 adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
183 adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
184 adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
185 /* update crtc values */
186 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
187 /* adjust crtc values */
188 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
189 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
190 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
191 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
192 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
193 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
194 }
195 adjusted_mode->flags = native_mode->flags;
196 adjusted_mode->clock = native_mode->dotclock;
197 }
198}
199
200static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
201 struct drm_display_mode *mode,
202 struct drm_display_mode *adjusted_mode)
203{
204
205 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
206
207 radeon_encoder->flags &= ~RADEON_USE_RMX;
208
209 drm_mode_set_crtcinfo(adjusted_mode, 0);
210
211 if (radeon_encoder->rmx_type != RMX_OFF)
212 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
213
214 /* hw bug */
215 if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
216 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
217 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
218
219 return true;
220}
221
222static void
223atombios_dac_setup(struct drm_encoder *encoder, int action)
224{
225 struct drm_device *dev = encoder->dev;
226 struct radeon_device *rdev = dev->dev_private;
227 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
228 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
229 int index = 0, num = 0;
230 /* fixme - fill in enc_priv for atom dac */
231 enum radeon_tv_std tv_std = TV_STD_NTSC;
232
233 memset(&args, 0, sizeof(args));
234
235 switch (radeon_encoder->encoder_id) {
236 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
237 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
238 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
239 num = 1;
240 break;
241 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
242 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
243 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
244 num = 2;
245 break;
246 }
247
248 args.ucAction = action;
249
250 if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
251 args.ucDacStandard = ATOM_DAC1_PS2;
252 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
253 args.ucDacStandard = ATOM_DAC1_CV;
254 else {
255 switch (tv_std) {
256 case TV_STD_PAL:
257 case TV_STD_PAL_M:
258 case TV_STD_SCART_PAL:
259 case TV_STD_SECAM:
260 case TV_STD_PAL_CN:
261 args.ucDacStandard = ATOM_DAC1_PAL;
262 break;
263 case TV_STD_NTSC:
264 case TV_STD_NTSC_J:
265 case TV_STD_PAL_60:
266 default:
267 args.ucDacStandard = ATOM_DAC1_NTSC;
268 break;
269 }
270 }
271 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
272
273 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
274
275}
276
277static void
278atombios_tv_setup(struct drm_encoder *encoder, int action)
279{
280 struct drm_device *dev = encoder->dev;
281 struct radeon_device *rdev = dev->dev_private;
282 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
283 TV_ENCODER_CONTROL_PS_ALLOCATION args;
284 int index = 0;
285 /* fixme - fill in enc_priv for atom dac */
286 enum radeon_tv_std tv_std = TV_STD_NTSC;
287
288 memset(&args, 0, sizeof(args));
289
290 index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
291
292 args.sTVEncoder.ucAction = action;
293
294 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
295 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
296 else {
297 switch (tv_std) {
298 case TV_STD_NTSC:
299 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
300 break;
301 case TV_STD_PAL:
302 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
303 break;
304 case TV_STD_PAL_M:
305 args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
306 break;
307 case TV_STD_PAL_60:
308 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
309 break;
310 case TV_STD_NTSC_J:
311 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
312 break;
313 case TV_STD_SCART_PAL:
314 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
315 break;
316 case TV_STD_SECAM:
317 args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
318 break;
319 case TV_STD_PAL_CN:
320 args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
321 break;
322 default:
323 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
324 break;
325 }
326 }
327
328 args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
329
330 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
331
332}
333
334void
335atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
336{
337 struct drm_device *dev = encoder->dev;
338 struct radeon_device *rdev = dev->dev_private;
339 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
340 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
341 int index = 0;
342
343 memset(&args, 0, sizeof(args));
344
345 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
346
347 args.sXTmdsEncoder.ucEnable = action;
348
349 if (radeon_encoder->pixel_clock > 165000)
350 args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
351
352 /*if (pScrn->rgbBits == 8)*/
353 args.sXTmdsEncoder.ucMisc |= (1 << 1);
354
355 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
356
357}
358
359static void
360atombios_ddia_setup(struct drm_encoder *encoder, int action)
361{
362 struct drm_device *dev = encoder->dev;
363 struct radeon_device *rdev = dev->dev_private;
364 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
365 DVO_ENCODER_CONTROL_PS_ALLOCATION args;
366 int index = 0;
367
368 memset(&args, 0, sizeof(args));
369
370 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
371
372 args.sDVOEncoder.ucAction = action;
373 args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
374
375 if (radeon_encoder->pixel_clock > 165000)
376 args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
377
378 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
379
380}
381
382union lvds_encoder_control {
383 LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
384 LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
385};
386
387static void
388atombios_digital_setup(struct drm_encoder *encoder, int action)
389{
390 struct drm_device *dev = encoder->dev;
391 struct radeon_device *rdev = dev->dev_private;
392 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
393 union lvds_encoder_control args;
394 int index = 0;
395 uint8_t frev, crev;
396 struct radeon_encoder_atom_dig *dig;
397 struct drm_connector *connector;
398 struct radeon_connector *radeon_connector;
399 struct radeon_connector_atom_dig *dig_connector;
400
401 connector = radeon_get_connector_for_encoder(encoder);
402 if (!connector)
403 return;
404
405 radeon_connector = to_radeon_connector(connector);
406
407 if (!radeon_encoder->enc_priv)
408 return;
409
410 dig = radeon_encoder->enc_priv;
411
412 if (!radeon_connector->con_priv)
413 return;
414
415 dig_connector = radeon_connector->con_priv;
416
417 memset(&args, 0, sizeof(args));
418
419 switch (radeon_encoder->encoder_id) {
420 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
421 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
422 break;
423 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
424 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
425 index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
426 break;
427 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
428 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
429 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
430 else
431 index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
432 break;
433 }
434
435 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
436
437 switch (frev) {
438 case 1:
439 case 2:
440 switch (crev) {
441 case 1:
442 args.v1.ucMisc = 0;
443 args.v1.ucAction = action;
444 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
445 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
446 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
447 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
448 if (dig->lvds_misc & (1 << 0))
449 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
450 if (dig->lvds_misc & (1 << 1))
451 args.v1.ucMisc |= (1 << 1);
452 } else {
453 if (dig_connector->linkb)
454 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
455 if (radeon_encoder->pixel_clock > 165000)
456 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
457 /*if (pScrn->rgbBits == 8) */
458 args.v1.ucMisc |= (1 << 1);
459 }
460 break;
461 case 2:
462 case 3:
463 args.v2.ucMisc = 0;
464 args.v2.ucAction = action;
465 if (crev == 3) {
466 if (dig->coherent_mode)
467 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
468 }
469 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
470 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
471 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
472 args.v2.ucTruncate = 0;
473 args.v2.ucSpatial = 0;
474 args.v2.ucTemporal = 0;
475 args.v2.ucFRC = 0;
476 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
477 if (dig->lvds_misc & (1 << 0))
478 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
479 if (dig->lvds_misc & (1 << 5)) {
480 args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
481 if (dig->lvds_misc & (1 << 1))
482 args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
483 }
484 if (dig->lvds_misc & (1 << 6)) {
485 args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
486 if (dig->lvds_misc & (1 << 1))
487 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
488 if (((dig->lvds_misc >> 2) & 0x3) == 2)
489 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
490 }
491 } else {
492 if (dig_connector->linkb)
493 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
494 if (radeon_encoder->pixel_clock > 165000)
495 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
496 }
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
500 break;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
505 break;
506 }
507
508 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
509
510}
511
512int
513atombios_get_encoder_mode(struct drm_encoder *encoder)
514{
515 struct drm_connector *connector;
516 struct radeon_connector *radeon_connector;
517
518 connector = radeon_get_connector_for_encoder(encoder);
519 if (!connector)
520 return 0;
521
522 radeon_connector = to_radeon_connector(connector);
523
524 switch (connector->connector_type) {
525 case DRM_MODE_CONNECTOR_DVII:
526 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
527 return ATOM_ENCODER_MODE_HDMI;
528 else if (radeon_connector->use_digital)
529 return ATOM_ENCODER_MODE_DVI;
530 else
531 return ATOM_ENCODER_MODE_CRT;
532 break;
533 case DRM_MODE_CONNECTOR_DVID:
534 case DRM_MODE_CONNECTOR_HDMIA:
535 case DRM_MODE_CONNECTOR_HDMIB:
536 default:
537 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
538 return ATOM_ENCODER_MODE_HDMI;
539 else
540 return ATOM_ENCODER_MODE_DVI;
541 break;
542 case DRM_MODE_CONNECTOR_LVDS:
543 return ATOM_ENCODER_MODE_LVDS;
544 break;
545 case DRM_MODE_CONNECTOR_DisplayPort:
546 /*if (radeon_output->MonType == MT_DP)
547 return ATOM_ENCODER_MODE_DP;
548 else*/
549 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
550 return ATOM_ENCODER_MODE_HDMI;
551 else
552 return ATOM_ENCODER_MODE_DVI;
553 break;
554 case CONNECTOR_DVI_A:
555 case CONNECTOR_VGA:
556 return ATOM_ENCODER_MODE_CRT;
557 break;
558 case CONNECTOR_STV:
559 case CONNECTOR_CTV:
560 case CONNECTOR_DIN:
561 /* fix me */
562 return ATOM_ENCODER_MODE_TV;
563 /*return ATOM_ENCODER_MODE_CV;*/
564 break;
565 }
566}
567
568static void
569atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
570{
571 struct drm_device *dev = encoder->dev;
572 struct radeon_device *rdev = dev->dev_private;
573 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
574 DIG_ENCODER_CONTROL_PS_ALLOCATION args;
575 int index = 0, num = 0;
576 uint8_t frev, crev;
577 struct radeon_encoder_atom_dig *dig;
578 struct drm_connector *connector;
579 struct radeon_connector *radeon_connector;
580 struct radeon_connector_atom_dig *dig_connector;
581
582 connector = radeon_get_connector_for_encoder(encoder);
583 if (!connector)
584 return;
585
586 radeon_connector = to_radeon_connector(connector);
587
588 if (!radeon_connector->con_priv)
589 return;
590
591 dig_connector = radeon_connector->con_priv;
592
593 if (!radeon_encoder->enc_priv)
594 return;
595
596 dig = radeon_encoder->enc_priv;
597
598 memset(&args, 0, sizeof(args));
599
600 if (ASIC_IS_DCE32(rdev)) {
601 if (dig->dig_block)
602 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
603 else
604 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
605 num = dig->dig_block + 1;
606 } else {
607 switch (radeon_encoder->encoder_id) {
608 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
609 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
610 num = 1;
611 break;
612 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
613 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
614 num = 2;
615 break;
616 }
617 }
618
619 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
620
621 args.ucAction = action;
622 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
623
624 if (ASIC_IS_DCE32(rdev)) {
625 switch (radeon_encoder->encoder_id) {
626 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
627 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
628 break;
629 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
630 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
631 break;
632 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
633 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
634 break;
635 }
636 } else {
637 switch (radeon_encoder->encoder_id) {
638 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
639 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
640 break;
641 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
642 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
643 break;
644 }
645 }
646
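	/* above 165 MHz (the single-link TMDS limit) drive both links, 8 lanes */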
647 if (radeon_encoder->pixel_clock > 165000) {
648 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
649 args.ucLaneNum = 8;
650 } else {
651 if (dig_connector->linkb)
652 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
653 else
654 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
655 args.ucLaneNum = 4;
656 }
657
658 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
659
660 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
661
662}
663
664union dig_transmitter_control {
665 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
666 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
667};
668
669static void
670atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
671{
672 struct drm_device *dev = encoder->dev;
673 struct radeon_device *rdev = dev->dev_private;
674 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
675 union dig_transmitter_control args;
676 int index = 0, num = 0;
677 uint8_t frev, crev;
678 struct radeon_encoder_atom_dig *dig;
679 struct drm_connector *connector;
680 struct radeon_connector *radeon_connector;
681 struct radeon_connector_atom_dig *dig_connector;
682
683 connector = radeon_get_connector_for_encoder(encoder);
684 if (!connector)
685 return;
686
687 radeon_connector = to_radeon_connector(connector);
688
689 if (!radeon_encoder->enc_priv)
690 return;
691
692 dig = radeon_encoder->enc_priv;
693
694 if (!radeon_connector->con_priv)
695 return;
696
697 dig_connector = radeon_connector->con_priv;
698
699 memset(&args, 0, sizeof(args));
700
701 if (ASIC_IS_DCE32(rdev))
702 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
703 else {
704 switch (radeon_encoder->encoder_id) {
705 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
706 index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
707 break;
708 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
709 index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
710 break;
711 }
712 }
713
714 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
715
716 args.v1.ucAction = action;
717
718 if (ASIC_IS_DCE32(rdev)) {
719 if (radeon_encoder->pixel_clock > 165000) {
720 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
721 args.v2.acConfig.fDualLinkConnector = 1;
722 } else {
723 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
724 }
725 if (dig->dig_block)
726 args.v2.acConfig.ucEncoderSel = 1;
727
728 switch (radeon_encoder->encoder_id) {
729 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
730 args.v2.acConfig.ucTransmitterSel = 0;
731 num = 0;
732 break;
733 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
734 args.v2.acConfig.ucTransmitterSel = 1;
735 num = 1;
736 break;
737 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
738 args.v2.acConfig.ucTransmitterSel = 2;
739 num = 2;
740 break;
741 }
742
743 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
744 if (dig->coherent_mode)
745 args.v2.acConfig.fCoherentMode = 1;
746 }
747 } else {
748 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
749 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
750
751 switch (radeon_encoder->encoder_id) {
752 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
753 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
754 if (rdev->flags & RADEON_IS_IGP) {
755 if (radeon_encoder->pixel_clock > 165000) {
756 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
757 ATOM_TRANSMITTER_CONFIG_LINKA_B);
758 if (dig_connector->igp_lane_info & 0x3)
759 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
760 else if (dig_connector->igp_lane_info & 0xc)
761 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
762 } else {
763 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
764 if (dig_connector->igp_lane_info & 0x1)
765 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
766 else if (dig_connector->igp_lane_info & 0x2)
767 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
768 else if (dig_connector->igp_lane_info & 0x4)
769 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
770 else if (dig_connector->igp_lane_info & 0x8)
771 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
772 }
773 } else {
774 if (radeon_encoder->pixel_clock > 165000)
775 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
776 ATOM_TRANSMITTER_CONFIG_LINKA_B |
777 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
778 else {
779 if (dig_connector->linkb)
780 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
781 else
782 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
783 }
784 }
785 break;
786 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
787 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
788 if (radeon_encoder->pixel_clock > 165000)
789 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
790 ATOM_TRANSMITTER_CONFIG_LINKA_B |
791 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
792 else {
793 if (dig_connector->linkb)
794 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
795 else
796 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
797 }
798 break;
799 }
800
801 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
802 if (dig->coherent_mode)
803 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
804 }
805 }
806
807 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
808
809}
810
811static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
812{
813
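	/* hard-coded coefficient table written through what appears to be an
	 * index (0x6578) / data (0x657C) register pair; the exact register
	 * layout is undocumented here */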
814 WREG32(0x659C, 0x0);
815 WREG32(0x6594, 0x705);
816 WREG32(0x65A4, 0x10001);
817 WREG32(0x65D8, 0x0);
818 WREG32(0x65B0, 0x0);
819 WREG32(0x65C0, 0x0);
820 WREG32(0x65D4, 0x0);
821 WREG32(0x6578, 0x0);
822 WREG32(0x657C, 0x841880A8);
823 WREG32(0x6578, 0x1);
824 WREG32(0x657C, 0x84208680);
825 WREG32(0x6578, 0x2);
826 WREG32(0x657C, 0xBFF880B0);
827 WREG32(0x6578, 0x100);
828 WREG32(0x657C, 0x83D88088);
829 WREG32(0x6578, 0x101);
830 WREG32(0x657C, 0x84608680);
831 WREG32(0x6578, 0x102);
832 WREG32(0x657C, 0xBFF080D0);
833 WREG32(0x6578, 0x200);
834 WREG32(0x657C, 0x83988068);
835 WREG32(0x6578, 0x201);
836 WREG32(0x657C, 0x84A08680);
837 WREG32(0x6578, 0x202);
838 WREG32(0x657C, 0xBFF080F8);
839 WREG32(0x6578, 0x300);
840 WREG32(0x657C, 0x83588058);
841 WREG32(0x6578, 0x301);
842 WREG32(0x657C, 0x84E08660);
843 WREG32(0x6578, 0x302);
844 WREG32(0x657C, 0xBFF88120);
845 WREG32(0x6578, 0x400);
846 WREG32(0x657C, 0x83188040);
847 WREG32(0x6578, 0x401);
848 WREG32(0x657C, 0x85008660);
849 WREG32(0x6578, 0x402);
850 WREG32(0x657C, 0xBFF88150);
851 WREG32(0x6578, 0x500);
852 WREG32(0x657C, 0x82D88030);
853 WREG32(0x6578, 0x501);
854 WREG32(0x657C, 0x85408640);
855 WREG32(0x6578, 0x502);
856 WREG32(0x657C, 0xBFF88180);
857 WREG32(0x6578, 0x600);
858 WREG32(0x657C, 0x82A08018);
859 WREG32(0x6578, 0x601);
860 WREG32(0x657C, 0x85808620);
861 WREG32(0x6578, 0x602);
862 WREG32(0x657C, 0xBFF081B8);
863 WREG32(0x6578, 0x700);
864 WREG32(0x657C, 0x82608010);
865 WREG32(0x6578, 0x701);
866 WREG32(0x657C, 0x85A08600);
867 WREG32(0x6578, 0x702);
868 WREG32(0x657C, 0x800081F0);
869 WREG32(0x6578, 0x800);
870 WREG32(0x657C, 0x8228BFF8);
871 WREG32(0x6578, 0x801);
872 WREG32(0x657C, 0x85E085E0);
873 WREG32(0x6578, 0x802);
874 WREG32(0x657C, 0xBFF88228);
875 WREG32(0x6578, 0x10000);
876 WREG32(0x657C, 0x82A8BF00);
877 WREG32(0x6578, 0x10001);
878 WREG32(0x657C, 0x82A08CC0);
879 WREG32(0x6578, 0x10002);
880 WREG32(0x657C, 0x8008BEF8);
881 WREG32(0x6578, 0x10100);
882 WREG32(0x657C, 0x81F0BF28);
883 WREG32(0x6578, 0x10101);
884 WREG32(0x657C, 0x83608CA0);
885 WREG32(0x6578, 0x10102);
886 WREG32(0x657C, 0x8018BED0);
887 WREG32(0x6578, 0x10200);
888 WREG32(0x657C, 0x8148BF38);
889 WREG32(0x6578, 0x10201);
890 WREG32(0x657C, 0x84408C80);
891 WREG32(0x6578, 0x10202);
892 WREG32(0x657C, 0x8008BEB8);
893 WREG32(0x6578, 0x10300);
894 WREG32(0x657C, 0x80B0BF78);
895 WREG32(0x6578, 0x10301);
896 WREG32(0x657C, 0x85008C20);
897 WREG32(0x6578, 0x10302);
898 WREG32(0x657C, 0x8020BEA0);
899 WREG32(0x6578, 0x10400);
900 WREG32(0x657C, 0x8028BF90);
901 WREG32(0x6578, 0x10401);
902 WREG32(0x657C, 0x85E08BC0);
903 WREG32(0x6578, 0x10402);
904 WREG32(0x657C, 0x8018BE90);
905 WREG32(0x6578, 0x10500);
906 WREG32(0x657C, 0xBFB8BFB0);
907 WREG32(0x6578, 0x10501);
908 WREG32(0x657C, 0x86C08B40);
909 WREG32(0x6578, 0x10502);
910 WREG32(0x657C, 0x8010BE90);
911 WREG32(0x6578, 0x10600);
912 WREG32(0x657C, 0xBF58BFC8);
913 WREG32(0x6578, 0x10601);
914 WREG32(0x657C, 0x87A08AA0);
915 WREG32(0x6578, 0x10602);
916 WREG32(0x657C, 0x8010BE98);
917 WREG32(0x6578, 0x10700);
918 WREG32(0x657C, 0xBF10BFF0);
919 WREG32(0x6578, 0x10701);
920 WREG32(0x657C, 0x886089E0);
921 WREG32(0x6578, 0x10702);
922 WREG32(0x657C, 0x8018BEB0);
923 WREG32(0x6578, 0x10800);
924 WREG32(0x657C, 0xBED8BFE8);
925 WREG32(0x6578, 0x10801);
926 WREG32(0x657C, 0x89408940);
927 WREG32(0x6578, 0x10802);
928 WREG32(0x657C, 0xBFE8BED8);
929 WREG32(0x6578, 0x20000);
930 WREG32(0x657C, 0x80008000);
931 WREG32(0x6578, 0x20001);
932 WREG32(0x657C, 0x90008000);
933 WREG32(0x6578, 0x20002);
934 WREG32(0x657C, 0x80008000);
935 WREG32(0x6578, 0x20003);
936 WREG32(0x657C, 0x80008000);
937 WREG32(0x6578, 0x20100);
938 WREG32(0x657C, 0x80108000);
939 WREG32(0x6578, 0x20101);
940 WREG32(0x657C, 0x8FE0BF70);
941 WREG32(0x6578, 0x20102);
942 WREG32(0x657C, 0xBFE880C0);
943 WREG32(0x6578, 0x20103);
944 WREG32(0x657C, 0x80008000);
945 WREG32(0x6578, 0x20200);
946 WREG32(0x657C, 0x8018BFF8);
947 WREG32(0x6578, 0x20201);
948 WREG32(0x657C, 0x8F80BF08);
949 WREG32(0x6578, 0x20202);
950 WREG32(0x657C, 0xBFD081A0);
951 WREG32(0x6578, 0x20203);
952 WREG32(0x657C, 0xBFF88000);
953 WREG32(0x6578, 0x20300);
954 WREG32(0x657C, 0x80188000);
955 WREG32(0x6578, 0x20301);
956 WREG32(0x657C, 0x8EE0BEC0);
957 WREG32(0x6578, 0x20302);
958 WREG32(0x657C, 0xBFB082A0);
959 WREG32(0x6578, 0x20303);
960 WREG32(0x657C, 0x80008000);
961 WREG32(0x6578, 0x20400);
962 WREG32(0x657C, 0x80188000);
963 WREG32(0x6578, 0x20401);
964 WREG32(0x657C, 0x8E00BEA0);
965 WREG32(0x6578, 0x20402);
966 WREG32(0x657C, 0xBF8883C0);
967 WREG32(0x6578, 0x20403);
968 WREG32(0x657C, 0x80008000);
969 WREG32(0x6578, 0x20500);
970 WREG32(0x657C, 0x80188000);
971 WREG32(0x6578, 0x20501);
972 WREG32(0x657C, 0x8D00BE90);
973 WREG32(0x6578, 0x20502);
974 WREG32(0x657C, 0xBF588500);
975 WREG32(0x6578, 0x20503);
976 WREG32(0x657C, 0x80008008);
977 WREG32(0x6578, 0x20600);
978 WREG32(0x657C, 0x80188000);
979 WREG32(0x6578, 0x20601);
980 WREG32(0x657C, 0x8BC0BE98);
981 WREG32(0x6578, 0x20602);
982 WREG32(0x657C, 0xBF308660);
983 WREG32(0x6578, 0x20603);
984 WREG32(0x657C, 0x80008008);
985 WREG32(0x6578, 0x20700);
986 WREG32(0x657C, 0x80108000);
987 WREG32(0x6578, 0x20701);
988 WREG32(0x657C, 0x8A80BEB0);
989 WREG32(0x6578, 0x20702);
990 WREG32(0x657C, 0xBF0087C0);
991 WREG32(0x6578, 0x20703);
992 WREG32(0x657C, 0x80008008);
993 WREG32(0x6578, 0x20800);
994 WREG32(0x657C, 0x80108000);
995 WREG32(0x6578, 0x20801);
996 WREG32(0x657C, 0x8920BED0);
997 WREG32(0x6578, 0x20802);
998 WREG32(0x657C, 0xBED08920);
999 WREG32(0x6578, 0x20803);
1000 WREG32(0x657C, 0x80008010);
1001 WREG32(0x6578, 0x30000);
1002 WREG32(0x657C, 0x90008000);
1003 WREG32(0x6578, 0x30001);
1004 WREG32(0x657C, 0x80008000);
1005 WREG32(0x6578, 0x30100);
1006 WREG32(0x657C, 0x8FE0BF90);
1007 WREG32(0x6578, 0x30101);
1008 WREG32(0x657C, 0xBFF880A0);
1009 WREG32(0x6578, 0x30200);
1010 WREG32(0x657C, 0x8F60BF40);
1011 WREG32(0x6578, 0x30201);
1012 WREG32(0x657C, 0xBFE88180);
1013 WREG32(0x6578, 0x30300);
1014 WREG32(0x657C, 0x8EC0BF00);
1015 WREG32(0x6578, 0x30301);
1016 WREG32(0x657C, 0xBFC88280);
1017 WREG32(0x6578, 0x30400);
1018 WREG32(0x657C, 0x8DE0BEE0);
1019 WREG32(0x6578, 0x30401);
1020 WREG32(0x657C, 0xBFA083A0);
1021 WREG32(0x6578, 0x30500);
1022 WREG32(0x657C, 0x8CE0BED0);
1023 WREG32(0x6578, 0x30501);
1024 WREG32(0x657C, 0xBF7884E0);
1025 WREG32(0x6578, 0x30600);
1026 WREG32(0x657C, 0x8BA0BED8);
1027 WREG32(0x6578, 0x30601);
1028 WREG32(0x657C, 0xBF508640);
1029 WREG32(0x6578, 0x30700);
1030 WREG32(0x657C, 0x8A60BEE8);
1031 WREG32(0x6578, 0x30701);
1032 WREG32(0x657C, 0xBF2087A0);
1033 WREG32(0x6578, 0x30800);
1034 WREG32(0x657C, 0x8900BF00);
1035 WREG32(0x6578, 0x30801);
1036 WREG32(0x657C, 0xBF008900);
1037}
1038
1039static void
1040atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
1041{
1042 struct drm_device *dev = encoder->dev;
1043 struct radeon_device *rdev = dev->dev_private;
1044 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1045 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1046 ENABLE_YUV_PS_ALLOCATION args;
1047 int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
1048 uint32_t temp, reg;
1049
1050 memset(&args, 0, sizeof(args));
1051
1052 if (rdev->family >= CHIP_R600)
1053 reg = R600_BIOS_3_SCRATCH;
1054 else
1055 reg = RADEON_BIOS_3_SCRATCH;
1056
1057 /* XXX: fix up scratch reg handling */
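	/* flag the active TV/CV device and target crtc in the scratch reg
	 * for the EnableYUV table executed below; the previous value is
	 * restored once the table has run */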
1058 temp = RREG32(reg);
1059 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1060 WREG32(reg, (ATOM_S3_TV1_ACTIVE |
1061 (radeon_crtc->crtc_id << 18)));
1062 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1063 WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
1064 else
1065 WREG32(reg, 0);
1066
1067 if (enable)
1068 args.ucEnable = ATOM_ENABLE;
1069 args.ucCRTC = radeon_crtc->crtc_id;
1070
1071 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1072
1073 WREG32(reg, temp);
1074}
1075
1076static void
1077atombios_overscan_setup(struct drm_encoder *encoder,
1078 struct drm_display_mode *mode,
1079 struct drm_display_mode *adjusted_mode)
1080{
1081 struct drm_device *dev = encoder->dev;
1082 struct radeon_device *rdev = dev->dev_private;
1083 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1084 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1085 SET_CRTC_OVERSCAN_PS_ALLOCATION args;
1086 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
1087
1088 memset(&args, 0, sizeof(args));
1089
1090 args.usOverscanRight = 0;
1091 args.usOverscanLeft = 0;
1092 args.usOverscanBottom = 0;
1093 args.usOverscanTop = 0;
1094 args.ucCRTC = radeon_crtc->crtc_id;
1095
1096 if (radeon_encoder->flags & RADEON_USE_RMX) {
1097 if (radeon_encoder->rmx_type == RMX_FULL) {
1098 args.usOverscanRight = 0;
1099 args.usOverscanLeft = 0;
1100 args.usOverscanBottom = 0;
1101 args.usOverscanTop = 0;
1102 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
1103 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1104 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1105 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1106 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1107 } else if (radeon_encoder->rmx_type == RMX_ASPECT) {
1108 int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
1109 int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
1110
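				/* a1 > a2: the adjusted mode is relatively wider, so
				 * pillarbox with left/right borders; a2 > a1: it is
				 * relatively taller, so letterbox with top/bottom borders */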
1111 if (a1 > a2) {
1112 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1113 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1114 } else if (a2 > a1) {
1115				args.usOverscanTop = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1116				args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1117 }
1118 }
1119 }
1120
1121 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1122
1123}
1124
1125static void
1126atombios_scaler_setup(struct drm_encoder *encoder)
1127{
1128 struct drm_device *dev = encoder->dev;
1129 struct radeon_device *rdev = dev->dev_private;
1130 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1131 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1132 ENABLE_SCALER_PS_ALLOCATION args;
1133 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
1134 /* fixme - fill in enc_priv for atom dac */
1135 enum radeon_tv_std tv_std = TV_STD_NTSC;
1136
1137 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
1138 return;
1139
1140 memset(&args, 0, sizeof(args));
1141
1142 args.ucScaler = radeon_crtc->crtc_id;
1143
1144 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
1145 switch (tv_std) {
1146 case TV_STD_NTSC:
1147 default:
1148 args.ucTVStandard = ATOM_TV_NTSC;
1149 break;
1150 case TV_STD_PAL:
1151 args.ucTVStandard = ATOM_TV_PAL;
1152 break;
1153 case TV_STD_PAL_M:
1154 args.ucTVStandard = ATOM_TV_PALM;
1155 break;
1156 case TV_STD_PAL_60:
1157 args.ucTVStandard = ATOM_TV_PAL60;
1158 break;
1159 case TV_STD_NTSC_J:
1160 args.ucTVStandard = ATOM_TV_NTSCJ;
1161 break;
1162 case TV_STD_SCART_PAL:
1163 args.ucTVStandard = ATOM_TV_PAL; /* ??? */
1164 break;
1165 case TV_STD_SECAM:
1166 args.ucTVStandard = ATOM_TV_SECAM;
1167 break;
1168 case TV_STD_PAL_CN:
1169 args.ucTVStandard = ATOM_TV_PALCN;
1170 break;
1171 }
1172 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1173 } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) {
1174 args.ucTVStandard = ATOM_TV_CV;
1175 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1176 } else if (radeon_encoder->flags & RADEON_USE_RMX) {
1177 if (radeon_encoder->rmx_type == RMX_FULL)
1178 args.ucEnable = ATOM_SCALER_EXPANSION;
1179 else if (radeon_encoder->rmx_type == RMX_CENTER)
1180 args.ucEnable = ATOM_SCALER_CENTER;
1181 else if (radeon_encoder->rmx_type == RMX_ASPECT)
1182 args.ucEnable = ATOM_SCALER_EXPANSION;
1183 } else {
1184 if (ASIC_IS_AVIVO(rdev))
1185 args.ucEnable = ATOM_SCALER_DISABLE;
1186 else
1187 args.ucEnable = ATOM_SCALER_CENTER;
1188 }
1189
1190 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1191
1192 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
1193 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
1194 atom_rv515_force_tv_scaler(rdev);
1195 }
1196
1197}
1198
1199static void
1200radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1201{
1202 struct drm_device *dev = encoder->dev;
1203 struct radeon_device *rdev = dev->dev_private;
1204 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1205 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
1206 int index = 0;
1207 bool is_dig = false;
1208
1209 memset(&args, 0, sizeof(args));
1210
1211 switch (radeon_encoder->encoder_id) {
1212 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1213 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1214 index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
1215 break;
1216 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1217 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1218 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1219 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1220 is_dig = true;
1221 break;
1222 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1223 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1224 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1225 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
1226 break;
1227 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1228 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
1229 break;
1230 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
1231 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1232 index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
1233 else
1234 index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
1235 break;
1236 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1237 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1238 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1239 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
1240 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1241 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
1242 else
1243 index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
1244 break;
1245 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1246 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1247 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1248 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
1249 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1250 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
1251 else
1252 index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
1253 break;
1254 }
1255
1256 if (is_dig) {
1257 switch (mode) {
1258 case DRM_MODE_DPMS_ON:
1259 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
1260 break;
1261 case DRM_MODE_DPMS_STANDBY:
1262 case DRM_MODE_DPMS_SUSPEND:
1263 case DRM_MODE_DPMS_OFF:
1264 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
1265 break;
1266 }
1267 } else {
1268 switch (mode) {
1269 case DRM_MODE_DPMS_ON:
1270 args.ucAction = ATOM_ENABLE;
1271 break;
1272 case DRM_MODE_DPMS_STANDBY:
1273 case DRM_MODE_DPMS_SUSPEND:
1274 case DRM_MODE_DPMS_OFF:
1275 args.ucAction = ATOM_DISABLE;
1276 break;
1277 }
1278 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1279 }
1280 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
1281}
1282
1283union crtc_sourc_param {
1284 SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
1285 SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
1286};
1287
1288static void
1289atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1290{
1291 struct drm_device *dev = encoder->dev;
1292 struct radeon_device *rdev = dev->dev_private;
1293 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1294 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1295 union crtc_sourc_param args;
1296 int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
1297 uint8_t frev, crev;
1298
1299 memset(&args, 0, sizeof(args));
1300
1301 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
1302
1303 switch (frev) {
1304 case 1:
1305 switch (crev) {
1306 case 1:
1307 default:
1308 if (ASIC_IS_AVIVO(rdev))
1309 args.v1.ucCRTC = radeon_crtc->crtc_id;
1310 else {
1311 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
1312 args.v1.ucCRTC = radeon_crtc->crtc_id;
1313 } else {
1314 args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
1315 }
1316 }
1317 switch (radeon_encoder->encoder_id) {
1318 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1319 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1320 args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
1321 break;
1322 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1323 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
1324 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
1325 args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
1326 else
1327 args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
1328 break;
1329 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1330 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1331 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1332 args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
1333 break;
1334 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1335 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1336 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1337 args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
1338 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1339 args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
1340 else
1341 args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
1342 break;
1343 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1344 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1345 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1346 args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
1347 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1348 args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
1349 else
1350 args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
1351 break;
1352 }
1353 break;
1354 case 2:
1355 args.v2.ucCRTC = radeon_crtc->crtc_id;
1356 args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
1357 switch (radeon_encoder->encoder_id) {
1358 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1359 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1360 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1361 if (ASIC_IS_DCE32(rdev)) {
1362 if (radeon_crtc->crtc_id)
1363 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1364 else
1365 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1366 } else
1367 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1368 break;
1369 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1370 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1371 break;
1372 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1373 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1374 break;
1375 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1376 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1377 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1378 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1379 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1380 else
1381 args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
1382 break;
1383 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1384 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
1385 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1386 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
1387 args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
1388 else
1389 args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
1390 break;
1391 }
1392 break;
1393 }
1394 break;
1395 default:
1396 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1397 break;
1398 }
1399
1400 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1401
1402}
1403
1404static void
1405atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1406 struct drm_display_mode *mode)
1407{
1408 struct drm_device *dev = encoder->dev;
1409 struct radeon_device *rdev = dev->dev_private;
1410 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1411 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1412
1413 /* Funky macbooks */
1414 if ((dev->pdev->device == 0x71C5) &&
1415 (dev->pdev->subsystem_vendor == 0x106b) &&
1416 (dev->pdev->subsystem_device == 0x0080)) {
1417 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
1418 uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
1419
1420 lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
1421 lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
1422
1423 WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
1424 }
1425 }
1426
1427 /* set scaler clears this on some chips */
1428 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
1429 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
1430}
1431
1432static void
1433radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1434 struct drm_display_mode *mode,
1435 struct drm_display_mode *adjusted_mode)
1436{
1437 struct drm_device *dev = encoder->dev;
1438 struct radeon_device *rdev = dev->dev_private;
1439 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1440 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1441
1442 if (radeon_encoder->enc_priv) {
1443 struct radeon_encoder_atom_dig *dig;
1444
1445 dig = radeon_encoder->enc_priv;
1446 dig->dig_block = radeon_crtc->crtc_id;
1447 }
1448 radeon_encoder->pixel_clock = adjusted_mode->clock;
1449
1450 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1451 atombios_overscan_setup(encoder, mode, adjusted_mode);
1452 atombios_scaler_setup(encoder);
1453 atombios_set_encoder_crtc_source(encoder);
1454
1455 if (ASIC_IS_AVIVO(rdev)) {
1456 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1457 atombios_yuv_setup(encoder, true);
1458 else
1459 atombios_yuv_setup(encoder, false);
1460 }
1461
1462 switch (radeon_encoder->encoder_id) {
1463 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1464 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1465 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1466 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
1467 atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
1468 break;
1469 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1470 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1471 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1472 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1473 /* disable the encoder and transmitter */
1474 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
1475 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1476
1477 /* setup and enable the encoder and transmitter */
1478 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1479 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
1480 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
1481 break;
1482 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1483 atombios_ddia_setup(encoder, ATOM_ENABLE);
1484 break;
1485 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1486 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1487 atombios_external_tmds_setup(encoder, ATOM_ENABLE);
1488 break;
1489 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1490 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1491 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1492 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1493 atombios_dac_setup(encoder, ATOM_ENABLE);
1494 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1495 atombios_tv_setup(encoder, ATOM_ENABLE);
1496 break;
1497 }
1498 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1499}
1500
1501static bool
1502atombios_dac_load_detect(struct drm_encoder *encoder)
1503{
1504 struct drm_device *dev = encoder->dev;
1505 struct radeon_device *rdev = dev->dev_private;
1506 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1507
1508 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
1509 ATOM_DEVICE_CV_SUPPORT |
1510 ATOM_DEVICE_CRT_SUPPORT)) {
1511 DAC_LOAD_DETECTION_PS_ALLOCATION args;
1512 int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
1513 uint8_t frev, crev;
1514
1515 memset(&args, 0, sizeof(args));
1516
1517 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
1518
1519 args.sDacload.ucMisc = 0;
1520
1521 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
1522 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
1523 args.sDacload.ucDacType = ATOM_DAC_A;
1524 else
1525 args.sDacload.ucDacType = ATOM_DAC_B;
1526
1527 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT)
1528 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
1529 else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT)
1530 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
1531 else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1532 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
1533 if (crev >= 3)
1534 args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
1535 } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1536 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
1537 if (crev >= 3)
1538 args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
1539 }
1540
1541 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1542
1543 return true;
1544 } else
1545 return false;
1546}
1547
1548static enum drm_connector_status
1549radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1550{
1551 struct drm_device *dev = encoder->dev;
1552 struct radeon_device *rdev = dev->dev_private;
1553 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1554 uint32_t bios_0_scratch;
1555
1556 if (!atombios_dac_load_detect(encoder)) {
1557		DRM_DEBUG("detect returned false\n");
1558 return connector_status_unknown;
1559 }
1560
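	/* the DAC_LoadDetection table latches its result into BIOS scratch
	 * register 0, which is decoded below per device type */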
1561 if (rdev->family >= CHIP_R600)
1562 bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
1563 else
1564 bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
1565
1566 DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch);
1567 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
1568 if (bios_0_scratch & ATOM_S0_CRT1_MASK)
1569 return connector_status_connected;
1570 } else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
1571 if (bios_0_scratch & ATOM_S0_CRT2_MASK)
1572 return connector_status_connected;
1573 } else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1574 if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
1575 return connector_status_connected;
1576 } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1577 if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
1578 return connector_status_connected; /* CTV */
1579 else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
1580 return connector_status_connected; /* STV */
1581 }
1582 return connector_status_disconnected;
1583}
1584
1585static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1586{
1587 radeon_atom_output_lock(encoder, true);
1588 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1589}
1590
1591static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
1592{
1593 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
1594 radeon_atom_output_lock(encoder, false);
1595}
1596
1597static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
1598 .dpms = radeon_atom_encoder_dpms,
1599 .mode_fixup = radeon_atom_mode_fixup,
1600 .prepare = radeon_atom_encoder_prepare,
1601 .mode_set = radeon_atom_encoder_mode_set,
1602 .commit = radeon_atom_encoder_commit,
1603 /* no detect for TMDS/LVDS yet */
1604};
1605
1606static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
1607 .dpms = radeon_atom_encoder_dpms,
1608 .mode_fixup = radeon_atom_mode_fixup,
1609 .prepare = radeon_atom_encoder_prepare,
1610 .mode_set = radeon_atom_encoder_mode_set,
1611 .commit = radeon_atom_encoder_commit,
1612 .detect = radeon_atom_dac_detect,
1613};
1614
1615void radeon_enc_destroy(struct drm_encoder *encoder)
1616{
1617 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1618 kfree(radeon_encoder->enc_priv);
1619 drm_encoder_cleanup(encoder);
1620 kfree(radeon_encoder);
1621}
1622
1623static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
1624 .destroy = radeon_enc_destroy,
1625};
1626
1627struct radeon_encoder_atom_dig *
1628radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1629{
1630 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
1631
1632 if (!dig)
1633 return NULL;
1634
1635 /* coherent mode by default */
1636 dig->coherent_mode = true;
1637
1638 return dig;
1639}
1640
1641void
1642radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
1643{
1644 struct drm_encoder *encoder;
1645 struct radeon_encoder *radeon_encoder;
1646
1647 /* see if we already added it */
1648 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1649 radeon_encoder = to_radeon_encoder(encoder);
1650 if (radeon_encoder->encoder_id == encoder_id) {
1651 radeon_encoder->devices |= supported_device;
1652 return;
1653 }
1654
1655 }
1656
1657 /* add a new one */
1658 radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
1659 if (!radeon_encoder)
1660 return;
1661
1662 encoder = &radeon_encoder->base;
1663 encoder->possible_crtcs = 0x3;
1664 encoder->possible_clones = 0;
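	/* 0x3: every atom encoder can be routed to either crtc; no clones
	 * are reported */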
1665
1666 radeon_encoder->enc_priv = NULL;
1667
1668 radeon_encoder->encoder_id = encoder_id;
1669 radeon_encoder->devices = supported_device;
1670
1671 switch (radeon_encoder->encoder_id) {
1672 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1673 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1674 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1675 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
1676 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1677 radeon_encoder->rmx_type = RMX_FULL;
1678 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
1679 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
1680 } else {
1681 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
1682 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
1683 }
1684 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1685 break;
1686 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1687 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
1688 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
1689 break;
1690 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1691 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1692 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1693 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
1694 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
1695 break;
1696 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1697 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1698 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1699 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1700 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1701 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1702 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1703 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
1704 radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
1705 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1706 break;
1707 }
1708}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644
index 000000000000..fa86d398945e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -0,0 +1,825 @@
1/*
2 * Copyright © 2007 David Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26 /*
27 * Modularization
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/errno.h>
33#include <linux/string.h>
34#include <linux/mm.h>
35#include <linux/tty.h>
36#include <linux/slab.h>
37#include <linux/delay.h>
38#include <linux/fb.h>
39#include <linux/init.h>
40
41#include "drmP.h"
42#include "drm.h"
43#include "drm_crtc.h"
44#include "drm_crtc_helper.h"
45#include "radeon_drm.h"
46#include "radeon.h"
47
48struct radeon_fb_device {
49 struct radeon_device *rdev;
50 struct drm_display_mode *mode;
51 struct radeon_framebuffer *rfb;
52 int crtc_count;
53 /* crtc currently bound to this */
54 uint32_t crtc_ids[2];
55};
56
57static int radeonfb_setcolreg(unsigned regno,
58 unsigned red,
59 unsigned green,
60 unsigned blue,
61 unsigned transp,
62 struct fb_info *info)
63{
64 struct radeon_fb_device *rfbdev = info->par;
65 struct drm_device *dev = rfbdev->rdev->ddev;
66 struct drm_crtc *crtc;
67 int i;
68
69 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
70 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
71 struct drm_mode_set *modeset = &radeon_crtc->mode_set;
72 struct drm_framebuffer *fb = modeset->fb;
73
74 for (i = 0; i < rfbdev->crtc_count; i++) {
75 if (crtc->base.id == rfbdev->crtc_ids[i]) {
76 break;
77 }
78 }
79 if (i == rfbdev->crtc_count) {
80 continue;
81 }
82 if (regno > 255) {
83 return 1;
84 }
85 if (fb->depth == 8) {
86 radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
87 return 0;
88 }
89
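		/* truecolor modes only use the first 16 pseudo palette entries
		 * (the fbcon text colors), packed here in the fb's pixel format */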
90 if (regno < 16) {
91 switch (fb->depth) {
92 case 15:
93 fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
94 ((green & 0xf800) >> 6) |
95 ((blue & 0xf800) >> 11);
96 break;
97 case 16:
98 fb->pseudo_palette[regno] = (red & 0xf800) |
99 ((green & 0xfc00) >> 5) |
100 ((blue & 0xf800) >> 11);
101 break;
102 case 24:
103 case 32:
104 fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
105 (green & 0xff00) |
106 ((blue & 0xff00) >> 8);
107 break;
108 }
109 }
110 }
111 return 0;
112}
113
114static int radeonfb_check_var(struct fb_var_screeninfo *var,
115 struct fb_info *info)
116{
117 struct radeon_fb_device *rfbdev = info->par;
118 struct radeon_framebuffer *rfb = rfbdev->rfb;
119 struct drm_framebuffer *fb = &rfb->base;
120 int depth;
121
122 if (var->pixclock == -1 || !var->pixclock) {
123 return -EINVAL;
124 }
125 /* Need to resize the fb object !!! */
126 if (var->xres > fb->width || var->yres > fb->height) {
127 DRM_ERROR("Requested width/height is greater than current fb "
128 "object %dx%d > %dx%d\n", var->xres, var->yres,
129 fb->width, fb->height);
130 DRM_ERROR("Need resizing code.\n");
131 return -EINVAL;
132 }
133
134 switch (var->bits_per_pixel) {
135 case 16:
136 depth = (var->green.length == 6) ? 16 : 15;
137 break;
138 case 32:
139 depth = (var->transp.length > 0) ? 32 : 24;
140 break;
141 default:
142 depth = var->bits_per_pixel;
143 break;
144 }
145
146 switch (depth) {
147 case 8:
148 var->red.offset = 0;
149 var->green.offset = 0;
150 var->blue.offset = 0;
151 var->red.length = 8;
152 var->green.length = 8;
153 var->blue.length = 8;
154 var->transp.length = 0;
155 var->transp.offset = 0;
156 break;
157 case 15:
158 var->red.offset = 10;
159 var->green.offset = 5;
160 var->blue.offset = 0;
161 var->red.length = 5;
162 var->green.length = 5;
163 var->blue.length = 5;
164 var->transp.length = 1;
165 var->transp.offset = 15;
166 break;
167 case 16:
168 var->red.offset = 11;
169 var->green.offset = 5;
170 var->blue.offset = 0;
171 var->red.length = 5;
172 var->green.length = 6;
173 var->blue.length = 5;
174 var->transp.length = 0;
175 var->transp.offset = 0;
176 break;
177 case 24:
178 var->red.offset = 16;
179 var->green.offset = 8;
180 var->blue.offset = 0;
181 var->red.length = 8;
182 var->green.length = 8;
183 var->blue.length = 8;
184 var->transp.length = 0;
185 var->transp.offset = 0;
186 break;
187 case 32:
188 var->red.offset = 16;
189 var->green.offset = 8;
190 var->blue.offset = 0;
191 var->red.length = 8;
192 var->green.length = 8;
193 var->blue.length = 8;
194 var->transp.length = 8;
195 var->transp.offset = 24;
196 break;
197 default:
198 return -EINVAL;
199 }
200 return 0;
201}
202
203/* this will let fbcon do the mode init */
204static int radeonfb_set_par(struct fb_info *info)
205{
206 struct radeon_fb_device *rfbdev = info->par;
207 struct drm_device *dev = rfbdev->rdev->ddev;
208 struct fb_var_screeninfo *var = &info->var;
209 struct drm_crtc *crtc;
210 int ret;
211 int i;
212
213 if (var->pixclock != -1) {
214		DRM_ERROR("PIXEL CLOCK SET\n");
215 return -EINVAL;
216 }
217
218 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
219 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
220
221 for (i = 0; i < rfbdev->crtc_count; i++) {
222 if (crtc->base.id == rfbdev->crtc_ids[i]) {
223 break;
224 }
225 }
226 if (i == rfbdev->crtc_count) {
227 continue;
228 }
229 if (crtc->fb == radeon_crtc->mode_set.fb) {
230 mutex_lock(&dev->mode_config.mutex);
231 ret = crtc->funcs->set_config(&radeon_crtc->mode_set);
232 mutex_unlock(&dev->mode_config.mutex);
233 if (ret) {
234 return ret;
235 }
236 }
237 }
238 return 0;
239}
240
241static int radeonfb_pan_display(struct fb_var_screeninfo *var,
242 struct fb_info *info)
243{
244 struct radeon_fb_device *rfbdev = info->par;
245 struct drm_device *dev = rfbdev->rdev->ddev;
246 struct drm_mode_set *modeset;
247 struct drm_crtc *crtc;
248 struct radeon_crtc *radeon_crtc;
249 int ret = 0;
250 int i;
251
252 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
253 for (i = 0; i < rfbdev->crtc_count; i++) {
254 if (crtc->base.id == rfbdev->crtc_ids[i]) {
255 break;
256 }
257 }
258
259 if (i == rfbdev->crtc_count) {
260 continue;
261 }
262
263 radeon_crtc = to_radeon_crtc(crtc);
264 modeset = &radeon_crtc->mode_set;
265
266 modeset->x = var->xoffset;
267 modeset->y = var->yoffset;
268
269 if (modeset->num_connectors) {
270 mutex_lock(&dev->mode_config.mutex);
271 ret = crtc->funcs->set_config(modeset);
272 mutex_unlock(&dev->mode_config.mutex);
273 if (!ret) {
274 info->var.xoffset = var->xoffset;
275 info->var.yoffset = var->yoffset;
276 }
277 }
278 }
279 return ret;
280}
281
282static void radeonfb_on(struct fb_info *info)
283{
284 struct radeon_fb_device *rfbdev = info->par;
285 struct drm_device *dev = rfbdev->rdev->ddev;
286 struct drm_crtc *crtc;
287 struct drm_encoder *encoder;
288 int i;
289
290 /*
291	 * For each CRTC in this fb, turn the CRTC on, then find all
292	 * associated encoders and turn them on as well.
293 */
294 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
295 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
296
297 for (i = 0; i < rfbdev->crtc_count; i++) {
298 if (crtc->base.id == rfbdev->crtc_ids[i]) {
299 break;
300 }
301 }
302
303 mutex_lock(&dev->mode_config.mutex);
304 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
305 mutex_unlock(&dev->mode_config.mutex);
306
307 /* Found a CRTC on this fb, now find encoders */
308 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
309 if (encoder->crtc == crtc) {
310 struct drm_encoder_helper_funcs *encoder_funcs;
311
312 encoder_funcs = encoder->helper_private;
313 mutex_lock(&dev->mode_config.mutex);
314 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
315 mutex_unlock(&dev->mode_config.mutex);
316 }
317 }
318 }
319}
320
321static void radeonfb_off(struct fb_info *info, int dpms_mode)
322{
323 struct radeon_fb_device *rfbdev = info->par;
324 struct drm_device *dev = rfbdev->rdev->ddev;
325 struct drm_crtc *crtc;
326 struct drm_encoder *encoder;
327 int i;
328
329 /*
330 * For each CRTC in this fb, find all associated encoders
331 * and turn them off, then turn off the CRTC.
332 */
333 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
334 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
335
336 for (i = 0; i < rfbdev->crtc_count; i++) {
337 if (crtc->base.id == rfbdev->crtc_ids[i]) {
338 break;
339 }
340 }
341
342 /* Found a CRTC on this fb, now find encoders */
343 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
344 if (encoder->crtc == crtc) {
345 struct drm_encoder_helper_funcs *encoder_funcs;
346
347 encoder_funcs = encoder->helper_private;
348 mutex_lock(&dev->mode_config.mutex);
349 encoder_funcs->dpms(encoder, dpms_mode);
350 mutex_unlock(&dev->mode_config.mutex);
351 }
352 }
353 if (dpms_mode == DRM_MODE_DPMS_OFF) {
354 mutex_lock(&dev->mode_config.mutex);
355 crtc_funcs->dpms(crtc, dpms_mode);
356 mutex_unlock(&dev->mode_config.mutex);
357 }
358 }
359}
360
361int radeonfb_blank(int blank, struct fb_info *info)
362{
363 switch (blank) {
364 case FB_BLANK_UNBLANK:
365 radeonfb_on(info);
366 break;
367 case FB_BLANK_NORMAL:
368 radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
369 break;
370 case FB_BLANK_HSYNC_SUSPEND:
371 radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
372 break;
373 case FB_BLANK_VSYNC_SUSPEND:
374 radeonfb_off(info, DRM_MODE_DPMS_SUSPEND);
375 break;
376 case FB_BLANK_POWERDOWN:
377 radeonfb_off(info, DRM_MODE_DPMS_OFF);
378 break;
379 }
380 return 0;
381}
382
383static struct fb_ops radeonfb_ops = {
384 .owner = THIS_MODULE,
385 .fb_check_var = radeonfb_check_var,
386 .fb_set_par = radeonfb_set_par,
387 .fb_setcolreg = radeonfb_setcolreg,
388 .fb_fillrect = cfb_fillrect,
389 .fb_copyarea = cfb_copyarea,
390 .fb_imageblit = cfb_imageblit,
391 .fb_pan_display = radeonfb_pan_display,
392 .fb_blank = radeonfb_blank,
393};
394
395/**
396 * Currently it is assumed that the old framebuffer is reused.
397 *
398 * LOCKING
399 * caller should hold the mode config lock.
400 *
401 */
402int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
403{
404 struct fb_info *info;
405 struct drm_framebuffer *fb;
406 struct drm_display_mode *mode = crtc->desired_mode;
407
408 fb = crtc->fb;
409 if (fb == NULL) {
410 return 1;
411 }
412 info = fb->fbdev;
413 if (info == NULL) {
414 return 1;
415 }
416 if (mode == NULL) {
417 return 1;
418 }
419 info->var.xres = mode->hdisplay;
420 info->var.right_margin = mode->hsync_start - mode->hdisplay;
421 info->var.hsync_len = mode->hsync_end - mode->hsync_start;
422 info->var.left_margin = mode->htotal - mode->hsync_end;
423 info->var.yres = mode->vdisplay;
424 info->var.lower_margin = mode->vsync_start - mode->vdisplay;
425 info->var.vsync_len = mode->vsync_end - mode->vsync_start;
426 info->var.upper_margin = mode->vtotal - mode->vsync_end;
427 info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
428 /* avoid overflow */
429 info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;
430
431 return 0;
432}
433EXPORT_SYMBOL(radeonfb_resize);
434
435static struct drm_mode_set panic_mode;
436
437 int radeonfb_panic(struct notifier_block *n, unsigned long unused,
438 void *panic_str)
439{
440 DRM_ERROR("panic occurred, switching back to text console\n");
441 drm_crtc_helper_set_config(&panic_mode);
442 return 0;
443}
444EXPORT_SYMBOL(radeonfb_panic);
445
446static struct notifier_block paniced = {
447 .notifier_call = radeonfb_panic,
448};
449
450static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
451{
452 int aligned = width;
453 int align_large = (ASIC_IS_AVIVO(rdev));
454 int pitch_mask = 0;
455
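	/* pitch_mask is in pixels; AVIVO parts need coarser pitch alignment
	 * than older asics, hence the two sets of masks per pixel size */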
456 switch (bpp / 8) {
457 case 1:
458 pitch_mask = align_large ? 255 : 127;
459 break;
460 case 2:
461 pitch_mask = align_large ? 127 : 31;
462 break;
463 case 3:
464 case 4:
465 pitch_mask = align_large ? 63 : 15;
466 break;
467 }
468
469 aligned += pitch_mask;
470 aligned &= ~pitch_mask;
471 return aligned;
472}
473
474int radeonfb_create(struct radeon_device *rdev,
475 uint32_t fb_width, uint32_t fb_height,
476 uint32_t surface_width, uint32_t surface_height,
477 struct radeon_framebuffer **rfb_p)
478{
479 struct fb_info *info;
480 struct radeon_fb_device *rfbdev;
481 struct drm_framebuffer *fb;
482 struct radeon_framebuffer *rfb;
483 struct drm_mode_fb_cmd mode_cmd;
484 struct drm_gem_object *gobj = NULL;
485 struct radeon_object *robj = NULL;
486 struct device *device = &rdev->pdev->dev;
487 int size, aligned_size, ret;
488 void *fbptr = NULL;
489
490 mode_cmd.width = surface_width;
491 mode_cmd.height = surface_height;
492 mode_cmd.bpp = 32;
493 /* need to align pitch with crtc limits */
494 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
495 mode_cmd.depth = 24;
496
497 size = mode_cmd.pitch * mode_cmd.height;
498 aligned_size = ALIGN(size, PAGE_SIZE);
499
500 ret = radeon_gem_object_create(rdev, aligned_size, 0,
501 RADEON_GEM_DOMAIN_VRAM,
502 false, ttm_bo_type_kernel,
503 false, &gobj);
504 if (ret) {
505 printk(KERN_ERR "failed to allocate framebuffer\n");
506 ret = -ENOMEM;
507 goto out;
508 }
509 robj = gobj->driver_private;
510
511 mutex_lock(&rdev->ddev->struct_mutex);
512 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
513 if (fb == NULL) {
514 DRM_ERROR("failed to allocate fb.\n");
515 ret = -ENOMEM;
516 goto out_unref;
517 }
518
519 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
520
521 rfb = to_radeon_framebuffer(fb);
522 *rfb_p = rfb;
523 rdev->fbdev_rfb = rfb;
524
525 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
526 if (info == NULL) {
527 ret = -ENOMEM;
528 goto out_unref;
529 }
530 rfbdev = info->par;
531
532 ret = radeon_object_kmap(robj, &fbptr);
533 if (ret) {
534 goto out_unref;
535 }
536
537 strcpy(info->fix.id, "radeondrmfb");
538 info->fix.type = FB_TYPE_PACKED_PIXELS;
539 info->fix.visual = FB_VISUAL_TRUECOLOR;
540 info->fix.type_aux = 0;
541 info->fix.xpanstep = 1; /* doing it in hw */
542 info->fix.ypanstep = 1; /* doing it in hw */
543 info->fix.ywrapstep = 0;
544	info->fix.accel = FB_ACCEL_NONE;
545 info->fix.type_aux = 0;
546 info->flags = FBINFO_DEFAULT;
547 info->fbops = &radeonfb_ops;
548 info->fix.line_length = fb->pitch;
549 info->screen_base = fbptr;
550 info->fix.smem_start = (unsigned long)fbptr;
551 info->fix.smem_len = size;
552 info->screen_base = fbptr;
553 info->screen_size = size;
554 info->pseudo_palette = fb->pseudo_palette;
555 info->var.xres_virtual = fb->width;
556 info->var.yres_virtual = fb->height;
557 info->var.bits_per_pixel = fb->bits_per_pixel;
558 info->var.xoffset = 0;
559 info->var.yoffset = 0;
560 info->var.activate = FB_ACTIVATE_NOW;
561 info->var.height = -1;
562 info->var.width = -1;
563 info->var.xres = fb_width;
564 info->var.yres = fb_height;
565 info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
566 info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
567 info->pixmap.size = 64*1024;
568 info->pixmap.buf_align = 8;
569 info->pixmap.access_align = 32;
570 info->pixmap.flags = FB_PIXMAP_SYSTEM;
571 info->pixmap.scan_align = 1;
572 if (info->screen_base == NULL) {
573 ret = -ENOSPC;
574 goto out_unref;
575 }
576 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
577 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
578 DRM_INFO("size %lu\n", (unsigned long)size);
579 DRM_INFO("fb depth is %d\n", fb->depth);
580 DRM_INFO(" pitch is %d\n", fb->pitch);
581
582 switch (fb->depth) {
583 case 8:
584 info->var.red.offset = 0;
585 info->var.green.offset = 0;
586 info->var.blue.offset = 0;
587 info->var.red.length = 8; /* 8bit DAC */
588 info->var.green.length = 8;
589 info->var.blue.length = 8;
590 info->var.transp.offset = 0;
591 info->var.transp.length = 0;
592 break;
593 case 15:
594 info->var.red.offset = 10;
595 info->var.green.offset = 5;
596 info->var.blue.offset = 0;
597 info->var.red.length = 5;
598 info->var.green.length = 5;
599 info->var.blue.length = 5;
600 info->var.transp.offset = 15;
601 info->var.transp.length = 1;
602 break;
603 case 16:
604 info->var.red.offset = 11;
605 info->var.green.offset = 5;
606 info->var.blue.offset = 0;
607 info->var.red.length = 5;
608 info->var.green.length = 6;
609 info->var.blue.length = 5;
610 info->var.transp.offset = 0;
611 break;
612 case 24:
613 info->var.red.offset = 16;
614 info->var.green.offset = 8;
615 info->var.blue.offset = 0;
616 info->var.red.length = 8;
617 info->var.green.length = 8;
618 info->var.blue.length = 8;
619 info->var.transp.offset = 0;
620 info->var.transp.length = 0;
621 break;
622 case 32:
623 info->var.red.offset = 16;
624 info->var.green.offset = 8;
625 info->var.blue.offset = 0;
626 info->var.red.length = 8;
627 info->var.green.length = 8;
628 info->var.blue.length = 8;
629 info->var.transp.offset = 24;
630 info->var.transp.length = 8;
631 break;
632 default:
633 break;
634 }
635
636 fb->fbdev = info;
637 rfbdev->rfb = rfb;
638 rfbdev->rdev = rdev;
639
640 mutex_unlock(&rdev->ddev->struct_mutex);
641 return 0;
642
643out_unref:
644	if (robj) {
645		radeon_object_kunmap(robj);
646	}
647	if (ret && fb) {
648		list_del(&fb->filp_head);
649		drm_framebuffer_cleanup(fb);
650		kfree(fb);
651	}
652	/* drop the single reference taken by radeon_gem_object_create */
653	drm_gem_object_unreference(gobj);
654 mutex_unlock(&rdev->ddev->struct_mutex);
655out:
656 return ret;
657}
658
659static int radeonfb_single_fb_probe(struct radeon_device *rdev)
660{
661 struct drm_crtc *crtc;
662 struct drm_connector *connector;
663 unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
664 unsigned int surface_width = 0, surface_height = 0;
665 int new_fb = 0;
666 int crtc_count = 0;
667 int ret, i, conn_count = 0;
668 struct radeon_framebuffer *rfb;
669 struct fb_info *info;
670 struct radeon_fb_device *rfbdev;
671 struct drm_mode_set *modeset = NULL;
672
673 /* first up get a count of crtcs now in use and new min/maxes width/heights */
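	/* the visible area is the smallest desired mode (so every active crtc
	 * can scan out the whole console) and the backing surface the largest */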
674 list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
675 if (drm_helper_crtc_in_use(crtc)) {
676 if (crtc->desired_mode) {
677 if (crtc->desired_mode->hdisplay < fb_width)
678 fb_width = crtc->desired_mode->hdisplay;
679
680 if (crtc->desired_mode->vdisplay < fb_height)
681 fb_height = crtc->desired_mode->vdisplay;
682
683 if (crtc->desired_mode->hdisplay > surface_width)
684 surface_width = crtc->desired_mode->hdisplay;
685
686 if (crtc->desired_mode->vdisplay > surface_height)
687 surface_height = crtc->desired_mode->vdisplay;
688 }
689 crtc_count++;
690 }
691 }
692
693 if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
694 /* hmm everyone went away - assume VGA cable just fell out
695 and will come back later. */
696 return 0;
697 }
698
699 /* do we have an fb already? */
700 if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) {
701 /* create an fb if we don't have one */
702 ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb);
703 if (ret) {
704 return -EINVAL;
705 }
706 new_fb = 1;
707 } else {
708 struct drm_framebuffer *fb;
709 fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head);
710 rfb = to_radeon_framebuffer(fb);
711
712 /* if someone hotplugs something bigger than we have already allocated, we are pwned.
713 As really we can't resize an fbdev that is in the wild currently due to fbdev
714 not really being designed for the lower layers moving stuff around under it.
715 - so in the grand style of things - punt. */
716 if ((fb->width < surface_width) || (fb->height < surface_height)) {
717 DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
718 return -EINVAL;
719 }
720 }
721
722 info = rfb->base.fbdev;
723 rdev->fbdev_info = info;
724 rfbdev = info->par;
725
726 crtc_count = 0;
727 /* okay we need to setup new connector sets in the crtcs */
728 list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
729 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
730 modeset = &radeon_crtc->mode_set;
731 modeset->fb = &rfb->base;
732 conn_count = 0;
733 list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) {
734 if (connector->encoder)
735 if (connector->encoder->crtc == modeset->crtc) {
736 modeset->connectors[conn_count] = connector;
737 conn_count++;
738 if (conn_count > RADEONFB_CONN_LIMIT)
739 BUG();
740 }
741 }
742
743 for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++)
744 modeset->connectors[i] = NULL;
745
746
747 rfbdev->crtc_ids[crtc_count++] = crtc->base.id;
748
749 modeset->num_connectors = conn_count;
750 if (modeset->crtc->desired_mode) {
751 if (modeset->mode) {
752 drm_mode_destroy(rdev->ddev, modeset->mode);
753 }
754 modeset->mode = drm_mode_duplicate(rdev->ddev,
755 modeset->crtc->desired_mode);
756 }
757 }
758 rfbdev->crtc_count = crtc_count;
759
760 if (new_fb) {
761 info->var.pixclock = -1;
762 if (register_framebuffer(info) < 0)
763 return -EINVAL;
764 } else {
765 radeonfb_set_par(info);
766 }
767 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
768 info->fix.id);
769
770 /* Switch back to kernel console on panic */
771 panic_mode = *modeset;
772 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
773 printk(KERN_INFO "registered panic notifier\n");
774
775 return 0;
776}
777
778int radeonfb_probe(struct drm_device *dev)
779{
780 int ret;
781
782 /* something has changed in the lower levels of hell - deal with it
783 here */
784
785 /* two modes : a) 1 fb to rule all crtcs.
786 b) one fb per crtc.
787 two actions 1) new connected device
788 2) device removed.
789 case a/1 : if the fb surface isn't big enough - resize the surface fb.
790 if the fb size isn't big enough - resize fb into surface.
791 if everything big enough configure the new crtc/etc.
792 case a/2 : undo the configuration
793 possibly resize down the fb to fit the new configuration.
794 case b/1 : see if it is on a new crtc - setup a new fb and add it.
795 case b/2 : teardown the new fb.
796 */
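	/* only case a), a single fb shared by all crtcs, is handled so far */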
797 ret = radeonfb_single_fb_probe(dev->dev_private);
798 return ret;
799}
800EXPORT_SYMBOL(radeonfb_probe);
801
802int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
803{
804 struct fb_info *info;
805 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
806 struct radeon_object *robj;
807
808 if (!fb) {
809 return -EINVAL;
810 }
811 info = fb->fbdev;
812 if (info) {
813 robj = rfb->obj->driver_private;
814 unregister_framebuffer(info);
815 radeon_object_kunmap(robj);
816 framebuffer_release(info);
817 }
818
819 printk(KERN_INFO "unregistered panic notifier\n");
820 atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
821 memset(&panic_mode, 0, sizeof(struct drm_mode_set));
822 return 0;
823}
824EXPORT_SYMBOL(radeonfb_remove);
825MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644
index 000000000000..96afbf5ae2ad
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -0,0 +1,387 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Dave Airlie
30 */
31#include <linux/seq_file.h>
32#include <asm/atomic.h>
33#include <linux/wait.h>
34#include <linux/list.h>
35#include <linux/kref.h>
36#include "drmP.h"
37#include "drm.h"
38#include "radeon_reg.h"
39#include "radeon.h"
40
41int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
42{
43 unsigned long irq_flags;
44
45 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
46 if (fence->emited) {
47 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
48 return 0;
49 }
50 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
51 if (!rdev->cp.ready) {
52		/* FIXME: CP is not running; assume everything is done right
53 * away
54 */
55 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
56 } else {
57 radeon_fence_ring_emit(rdev, fence);
58 }
59 fence->emited = true;
60 fence->timeout = jiffies + ((2000 * HZ) / 1000);
61 list_del(&fence->list);
62 list_add_tail(&fence->list, &rdev->fence_drv.emited);
63 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
64 return 0;
65}
66
67static bool radeon_fence_poll_locked(struct radeon_device *rdev)
68{
69 struct radeon_fence *fence;
70 struct list_head *i, *n;
71 uint32_t seq;
72 bool wake = false;
73
74 if (rdev == NULL) {
75 return true;
76 }
77 if (rdev->shutdown) {
78 return true;
79 }
80 seq = RREG32(rdev->fence_drv.scratch_reg);
81 rdev->fence_drv.last_seq = seq;
82 n = NULL;
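	/* find the emitted fence matching the sequence number the ring last
	 * wrote back to the scratch register; it and everything emitted
	 * before it have completed */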
83 list_for_each(i, &rdev->fence_drv.emited) {
84 fence = list_entry(i, struct radeon_fence, list);
85 if (fence->seq == seq) {
86 n = i;
87 break;
88 }
89 }
90	/* all fences previous to this one are considered signaled */
91 if (n) {
92 i = n;
93 do {
94 n = i->prev;
95 list_del(i);
96 list_add_tail(i, &rdev->fence_drv.signaled);
97 fence = list_entry(i, struct radeon_fence, list);
98 fence->signaled = true;
99 i = n;
100 } while (i != &rdev->fence_drv.emited);
101 wake = true;
102 }
103 return wake;
104}
105
106static void radeon_fence_destroy(struct kref *kref)
107{
108 unsigned long irq_flags;
109 struct radeon_fence *fence;
110
111 fence = container_of(kref, struct radeon_fence, kref);
112 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
113 list_del(&fence->list);
114 fence->emited = false;
115 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
116 kfree(fence);
117}
118
119int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
120{
121 unsigned long irq_flags;
122
123 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
124 if ((*fence) == NULL) {
125 return -ENOMEM;
126 }
127 kref_init(&((*fence)->kref));
128 (*fence)->rdev = rdev;
129 (*fence)->emited = false;
130 (*fence)->signaled = false;
131 (*fence)->seq = 0;
132 INIT_LIST_HEAD(&(*fence)->list);
133
134 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
135 list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
136 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
137 return 0;
138}
139
140
141bool radeon_fence_signaled(struct radeon_fence *fence)
142{
143 unsigned long irq_flags;
144 bool signaled = false;
145
146 /* check for NULL before dereferencing the fence */
147 if (fence == NULL) {
148 return true;
149 }
150 if (fence->rdev->gpu_lockup) {
151 return true;
152 }
153 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
154 signaled = fence->signaled;
155 /* if we are shutting down, report all fences as signaled */
156 if (fence->rdev->shutdown) {
157 signaled = true;
158 }
159 if (!fence->emited) {
160 WARN(1, "Querying an unemited fence : %p !\n", fence);
161 signaled = true;
162 }
163 if (!signaled) {
164 radeon_fence_poll_locked(fence->rdev);
165 signaled = fence->signaled;
166 }
167 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
168 return signaled;
169}
170
171int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
172{
173 struct radeon_device *rdev;
174 unsigned long cur_jiffies;
175 unsigned long timeout;
176 bool expired = false;
177 int r;
178
179
180 if (fence == NULL) {
181 WARN(1, "Querying an invalid fence : %p !\n", fence);
182 return 0;
183 }
184 rdev = fence->rdev;
185 if (radeon_fence_signaled(fence)) {
186 return 0;
187 }
188retry:
189 cur_jiffies = jiffies;
190 timeout = HZ / 100;
191 if (time_after(fence->timeout, cur_jiffies)) {
192 timeout = fence->timeout - cur_jiffies;
193 }
194 if (interruptible) {
195 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
196 radeon_fence_signaled(fence), timeout);
197 if (unlikely(r == -ERESTARTSYS)) {
198 return -ERESTART;
199 }
200 } else {
201 r = wait_event_timeout(rdev->fence_drv.queue,
202 radeon_fence_signaled(fence), timeout);
203 }
204 if (unlikely(!radeon_fence_signaled(fence))) {
205 if (unlikely(r == 0)) {
206 expired = true;
207 }
208 if (unlikely(expired)) {
209 timeout = 1;
210 if (time_after(cur_jiffies, fence->timeout)) {
211 timeout = cur_jiffies - fence->timeout;
212 }
213 timeout = jiffies_to_msecs(timeout);
214 if (timeout > 500) {
215 DRM_ERROR("fence(%p:0x%08X) %lums timeout "
216 "going to reset GPU\n",
217 fence, fence->seq, timeout);
218 radeon_gpu_reset(rdev);
219 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
220 }
221 }
222 goto retry;
223 }
224 if (unlikely(expired)) {
225 rdev->fence_drv.count_timeout++;
226 cur_jiffies = jiffies;
227 timeout = 1;
228 if (time_after(cur_jiffies, fence->timeout)) {
229 timeout = cur_jiffies - fence->timeout;
230 }
231 timeout = jiffies_to_msecs(timeout);
232 DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
233 fence, fence->seq, timeout);
234 DRM_ERROR("last signaled fence(0x%08X)\n",
235 rdev->fence_drv.last_seq);
236 }
237 return 0;
238}
239
240int radeon_fence_wait_next(struct radeon_device *rdev)
241{
242 unsigned long irq_flags;
243 struct radeon_fence *fence;
244 int r;
245
246 if (rdev->gpu_lockup) {
247 return 0;
248 }
249 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
250 if (list_empty(&rdev->fence_drv.emited)) {
251 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
252 return 0;
253 }
254 fence = list_entry(rdev->fence_drv.emited.next,
255 struct radeon_fence, list);
256 radeon_fence_ref(fence);
257 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
258 r = radeon_fence_wait(fence, false);
259 radeon_fence_unref(&fence);
260 return r;
261}
262
263int radeon_fence_wait_last(struct radeon_device *rdev)
264{
265 unsigned long irq_flags;
266 struct radeon_fence *fence;
267 int r;
268
269 if (rdev->gpu_lockup) {
270 return 0;
271 }
272 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
273 if (list_empty(&rdev->fence_drv.emited)) {
274 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
275 return 0;
276 }
277 fence = list_entry(rdev->fence_drv.emited.prev,
278 struct radeon_fence, list);
279 radeon_fence_ref(fence);
280 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
281 r = radeon_fence_wait(fence, false);
282 radeon_fence_unref(&fence);
283 return r;
284}
285
286struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
287{
288 kref_get(&fence->kref);
289 return fence;
290}
291
292void radeon_fence_unref(struct radeon_fence **fence)
293{
294 struct radeon_fence *tmp = *fence;
295
296 *fence = NULL;
297 if (tmp) {
298 kref_put(&tmp->kref, &radeon_fence_destroy);
299 }
300}
301
302void radeon_fence_process(struct radeon_device *rdev)
303{
304 unsigned long irq_flags;
305 bool wake;
306
307 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
308 wake = radeon_fence_poll_locked(rdev);
309 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
310 if (wake) {
311 wake_up_all(&rdev->fence_drv.queue);
312 }
313}
314
315int radeon_fence_driver_init(struct radeon_device *rdev)
316{
317 unsigned long irq_flags;
318 int r;
319
320 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
321 r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
322 if (r) {
323 DRM_ERROR("Fence failed to get a scratch register.");
324 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
325 return r;
326 }
327 WREG32(rdev->fence_drv.scratch_reg, 0);
328 atomic_set(&rdev->fence_drv.seq, 0);
329 INIT_LIST_HEAD(&rdev->fence_drv.created);
330 INIT_LIST_HEAD(&rdev->fence_drv.emited);
331 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
332 rdev->fence_drv.count_timeout = 0;
333 init_waitqueue_head(&rdev->fence_drv.queue);
334 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
335 if (radeon_debugfs_fence_init(rdev)) {
336 DRM_ERROR("Failed to register debugfs file for fence !\n");
337 }
338 return 0;
339}
340
341void radeon_fence_driver_fini(struct radeon_device *rdev)
342{
343 unsigned long irq_flags;
344
345 wake_up_all(&rdev->fence_drv.queue);
346 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
347 radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
348 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
349 DRM_INFO("radeon: fence finalized\n");
350}
351
352
353/*
354 * Fence debugfs
355 */
356#if defined(CONFIG_DEBUG_FS)
357static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
358{
359 struct drm_info_node *node = (struct drm_info_node *)m->private;
360 struct drm_device *dev = node->minor->dev;
361 struct radeon_device *rdev = dev->dev_private;
362 struct radeon_fence *fence;
363
364 seq_printf(m, "Last signaled fence 0x%08X\n",
365 RREG32(rdev->fence_drv.scratch_reg));
366 if (!list_empty(&rdev->fence_drv.emited)) {
367 fence = list_entry(rdev->fence_drv.emited.prev,
368 struct radeon_fence, list);
369 seq_printf(m, "Last emited fence %p with 0x%08X\n",
370 fence, fence->seq);
371 }
372 return 0;
373}
374
375static struct drm_info_list radeon_debugfs_fence_list[] = {
376 {"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
377};
378#endif
379
380int radeon_debugfs_fence_init(struct radeon_device *rdev)
381{
382#if defined(CONFIG_DEBUG_FS)
383 return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
384#else
385 return 0;
386#endif
387}
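The fence API above is consumed by the rest of the driver roughly as create -> emit on the CP ring -> wait -> unref. A minimal sketch of that flow (radeon_do_work_and_sync is a hypothetical caller, error unwinding trimmed):

/* Hypothetical caller of the fence API; not part of the patch. */
int radeon_do_work_and_sync(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_create(rdev, &fence);
	if (r)
		return r;
	/* ... queue GPU commands on the CP ring here ... */
	r = radeon_fence_emit(rdev, fence);	/* assigns seq, moves it to the emited list */
	if (r)
		goto out;
	r = radeon_fence_wait(fence, true);	/* interruptible wait until the seq is signaled */
out:
	radeon_fence_unref(&fence);		/* drop the kref; freed on the last put */
	return r;
}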
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
new file mode 100644
index 000000000000..90187d173847
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 */
24#ifndef RADEON_FIXED_H
25#define RADEON_FIXED_H
26
27typedef union rfixed {
28 u32 full;
29} fixed20_12;
30
31
32#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
33#define rfixed_const_half(A) (u32)(((A) << 12) + 2048)
34#define rfixed_const_666(A) (u32)(((A) << 12) + 2731)
35#define rfixed_const_8(A) (u32)(((A) << 12) + 3277)
36#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
37#define fixed_init(A) { .full = rfixed_const((A)) }
38#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
39#define rfixed_trunc(A) ((A).full >> 12)
40
41static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
42{
43 u64 tmp = ((u64)A.full << 13);
44
45 do_div(tmp, B.full);
46 tmp += 1;
47 tmp /= 2;
48 return lower_32_bits(tmp);
49}
50#endif
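A small worked example of the 20.12 helpers above (illustrative values only, not driver code): 300.0 divided by 7.5 should come out as 40.0, i.e. 40 << 12 after rfixed_div(), and rfixed_trunc() recovers the integer part.

/* Sketch of 20.12 fixed-point usage; the numbers are arbitrary. */
fixed20_12 a = fixed_init(300);		/* 300.0 -> 300 << 12 = 1228800 */
fixed20_12 b = fixed_init_half(7);	/* 7.5   -> (7 << 12) + 2048 = 30720 */
fixed20_12 q;

q.full = rfixed_div(a, b);		/* 40.0 -> 40 << 12 = 163840, rounded to nearest */
/* rfixed_trunc(q) == 40, the integer part of the quotient */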
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644
index 000000000000..d343a15316ec
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_drm.h"
30#include "radeon.h"
31#include "radeon_reg.h"
32
33/*
34 * Common GART table functions.
35 */
36int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
37{
38 void *ptr;
39
40 ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
41 &rdev->gart.table_addr);
42 if (ptr == NULL) {
43 return -ENOMEM;
44 }
45#ifdef CONFIG_X86
46 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
47 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
48 set_memory_uc((unsigned long)ptr,
49 rdev->gart.table_size >> PAGE_SHIFT);
50 }
51#endif
52 rdev->gart.table.ram.ptr = ptr;
53 memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
54 return 0;
55}
56
57void radeon_gart_table_ram_free(struct radeon_device *rdev)
58{
59 if (rdev->gart.table.ram.ptr == NULL) {
60 return;
61 }
62#ifdef CONFIG_X86
63 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
64 rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
65 set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
66 rdev->gart.table_size >> PAGE_SHIFT);
67 }
68#endif
69 pci_free_consistent(rdev->pdev, rdev->gart.table_size,
70 (void *)rdev->gart.table.ram.ptr,
71 rdev->gart.table_addr);
72 rdev->gart.table.ram.ptr = NULL;
73 rdev->gart.table_addr = 0;
74}
75
76int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
77{
78 uint64_t gpu_addr;
79 int r;
80
81 if (rdev->gart.table.vram.robj == NULL) {
82 r = radeon_object_create(rdev, NULL,
83 rdev->gart.table_size,
84 true,
85 RADEON_GEM_DOMAIN_VRAM,
86 false, &rdev->gart.table.vram.robj);
87 if (r) {
88 return r;
89 }
90 }
91 r = radeon_object_pin(rdev->gart.table.vram.robj,
92 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
93 if (r) {
94 radeon_object_unref(&rdev->gart.table.vram.robj);
95 return r;
96 }
97 r = radeon_object_kmap(rdev->gart.table.vram.robj,
98 (void **)&rdev->gart.table.vram.ptr);
99 if (r) {
100 radeon_object_unpin(rdev->gart.table.vram.robj);
101 radeon_object_unref(&rdev->gart.table.vram.robj);
102 DRM_ERROR("radeon: failed to map gart vram table.\n");
103 return r;
104 }
105 rdev->gart.table_addr = gpu_addr;
106 return 0;
107}
108
109void radeon_gart_table_vram_free(struct radeon_device *rdev)
110{
111 if (rdev->gart.table.vram.robj == NULL) {
112 return;
113 }
114 radeon_object_kunmap(rdev->gart.table.vram.robj);
115 radeon_object_unpin(rdev->gart.table.vram.robj);
116 radeon_object_unref(&rdev->gart.table.vram.robj);
117}
118
119
120
121
122/*
123 * Common gart functions.
124 */
125void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
126 int pages)
127{
128 unsigned t;
129 unsigned p;
130 int i, j;
131
132 if (!rdev->gart.ready) {
133 WARN(1, "trying to unbind memory to unitialized GART !\n");
134 return;
135 }
136 t = offset / 4096;
137 p = t / (PAGE_SIZE / 4096);
138 for (i = 0; i < pages; i++, p++) {
139 if (rdev->gart.pages[p]) {
140 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
141 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
142 rdev->gart.pages[p] = NULL;
143 rdev->gart.pages_addr[p] = 0;
144 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
145 radeon_gart_set_page(rdev, t, 0);
146 }
147 }
148 }
149 mb();
150 radeon_gart_tlb_flush(rdev);
151}
152
153int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
154 int pages, struct page **pagelist)
155{
156 unsigned t;
157 unsigned p;
158 uint64_t page_base;
159 int i, j;
160
161 if (!rdev->gart.ready) {
162 DRM_ERROR("trying to bind memory to unitialized GART !\n");
163 return -EINVAL;
164 }
165 t = offset / 4096;
166 p = t / (PAGE_SIZE / 4096);
167
168 for (i = 0; i < pages; i++, p++) {
169 /* we need to support large memory configurations */
170 /* assume that unbind has already been called on this range */
171 rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
172 0, PAGE_SIZE,
173 PCI_DMA_BIDIRECTIONAL);
174 if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
175 /* FIXME: failed to map page (return -ENOMEM?) */
176 radeon_gart_unbind(rdev, offset, pages);
177 return -ENOMEM;
178 }
179 rdev->gart.pages[p] = pagelist[i];
180 page_base = (uint32_t)rdev->gart.pages_addr[p];
181 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
182 radeon_gart_set_page(rdev, t, page_base);
183 page_base += 4096;
184 }
185 }
186 mb();
187 radeon_gart_tlb_flush(rdev);
188 return 0;
189}
190
191int radeon_gart_init(struct radeon_device *rdev)
192{
193 if (rdev->gart.pages) {
194 return 0;
195 }
196 /* We need PAGE_SIZE >= 4096 */
197 if (PAGE_SIZE < 4096) {
198 DRM_ERROR("Page size is smaller than GPU page size!\n");
199 return -EINVAL;
200 }
201 /* Compute table size */
202 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
203 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
204 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
205 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
206 /* Allocate pages table */
207 rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
208 GFP_KERNEL);
209 if (rdev->gart.pages == NULL) {
210 radeon_gart_fini(rdev);
211 return -ENOMEM;
212 }
213 rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
214 rdev->gart.num_cpu_pages, GFP_KERNEL);
215 if (rdev->gart.pages_addr == NULL) {
216 radeon_gart_fini(rdev);
217 return -ENOMEM;
218 }
219 return 0;
220}
221
222void radeon_gart_fini(struct radeon_device *rdev)
223{
224 if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
225 /* unbind pages */
226 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
227 }
228 rdev->gart.ready = false;
229 kfree(rdev->gart.pages);
230 kfree(rdev->gart.pages_addr);
231 rdev->gart.pages = NULL;
232 rdev->gart.pages_addr = NULL;
233}
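radeon_gart_bind() and radeon_gart_unbind() translate a byte offset inside the GART aperture into two indices: t, the GPU page-table slot (fixed 4 KiB granularity), and p, the CPU page index (PAGE_SIZE granularity). A sketch of that arithmetic with made-up numbers:

/* Illustrative only: a 64 KiB offset into the aperture. */
unsigned offset = 64 * 1024;
unsigned t = offset / 4096;		/* GPU page-table slot: 16 */
unsigned p = t / (PAGE_SIZE / 4096);	/* CPU page index: 16 when PAGE_SIZE == 4096 */
/* On a 64 KiB page kernel, p would be 1 and each CPU page fills
 * PAGE_SIZE / 4096 == 16 consecutive GPU page-table slots, which is
 * exactly what the inner j loop in bind/unbind walks. */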
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644
index 000000000000..eb516034235d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -0,0 +1,287 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "radeon_drm.h"
31#include "radeon.h"
32
33int radeon_gem_object_init(struct drm_gem_object *obj)
34{
35 /* we do nothing here */
36 return 0;
37}
38
39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{
41 struct radeon_object *robj = gobj->driver_private;
42
43 gobj->driver_private = NULL;
44 if (robj) {
45 radeon_object_unref(&robj);
46 }
47}
48
49int radeon_gem_object_create(struct radeon_device *rdev, int size,
50 int alignment, int initial_domain,
51 bool discardable, bool kernel,
52 bool interruptible,
53 struct drm_gem_object **obj)
54{
55 struct drm_gem_object *gobj;
56 struct radeon_object *robj;
57 int r;
58
59 *obj = NULL;
60 gobj = drm_gem_object_alloc(rdev->ddev, size);
61 if (!gobj) {
62 return -ENOMEM;
63 }
64 /* At least align on page size */
65 if (alignment < PAGE_SIZE) {
66 alignment = PAGE_SIZE;
67 }
68 r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
69 interruptible, &robj);
70 if (r) {
71 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
72 size, initial_domain, alignment);
73 mutex_lock(&rdev->ddev->struct_mutex);
74 drm_gem_object_unreference(gobj);
75 mutex_unlock(&rdev->ddev->struct_mutex);
76 return r;
77 }
78 gobj->driver_private = robj;
79 *obj = gobj;
80 return 0;
81}
82
83int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
84 uint64_t *gpu_addr)
85{
86 struct radeon_object *robj = obj->driver_private;
87 uint32_t flags;
88
89 switch (pin_domain) {
90 case RADEON_GEM_DOMAIN_VRAM:
91 flags = TTM_PL_FLAG_VRAM;
92 break;
93 case RADEON_GEM_DOMAIN_GTT:
94 flags = TTM_PL_FLAG_TT;
95 break;
96 default:
97 flags = TTM_PL_FLAG_SYSTEM;
98 break;
99 }
100 return radeon_object_pin(robj, flags, gpu_addr);
101}
102
103void radeon_gem_object_unpin(struct drm_gem_object *obj)
104{
105 struct radeon_object *robj = obj->driver_private;
106 radeon_object_unpin(robj);
107}
108
109int radeon_gem_set_domain(struct drm_gem_object *gobj,
110 uint32_t rdomain, uint32_t wdomain)
111{
112 struct radeon_object *robj;
113 uint32_t domain;
114 int r;
115
116 /* FIXME: reimplement */
117 robj = gobj->driver_private;
118 /* work out where to validate the buffer to */
119 domain = wdomain;
120 if (!domain) {
121 domain = rdomain;
122 }
123 if (!domain) {
124 /* Do nothing */
125 printk(KERN_WARNING "Set domain without a domain !\n");
126 return 0;
127 }
128 if (domain == RADEON_GEM_DOMAIN_CPU) {
129 /* Asking for CPU access; wait for the object to be idle */
130 r = radeon_object_wait(robj);
131 if (r) {
132 printk(KERN_ERR "Failed to wait for object !\n");
133 return r;
134 }
135 }
136 return 0;
137}
138
139int radeon_gem_init(struct radeon_device *rdev)
140{
141 INIT_LIST_HEAD(&rdev->gem.objects);
142 return 0;
143}
144
145void radeon_gem_fini(struct radeon_device *rdev)
146{
147 radeon_object_force_delete(rdev);
148}
149
150
151/*
152 * GEM ioctls.
153 */
154int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
155 struct drm_file *filp)
156{
157 struct radeon_device *rdev = dev->dev_private;
158 struct drm_radeon_gem_info *args = data;
159
160 args->vram_size = rdev->mc.vram_size;
161 /* FIXME: report something that makes sense */
162 args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
163 args->gart_size = rdev->mc.gtt_size;
164 return 0;
165}
166
167int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
168 struct drm_file *filp)
169{
170 /* TODO: implement */
171 DRM_ERROR("unimplemented %s\n", __func__);
172 return -ENOSYS;
173}
174
175int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
176 struct drm_file *filp)
177{
178 /* TODO: implement */
179 DRM_ERROR("unimplemented %s\n", __func__);
180 return -ENOSYS;
181}
182
183int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
184 struct drm_file *filp)
185{
186 struct radeon_device *rdev = dev->dev_private;
187 struct drm_radeon_gem_create *args = data;
188 struct drm_gem_object *gobj;
189 uint32_t handle;
190 int r;
191
192 /* create a gem object to contain this buffer object */
193 args->size = roundup(args->size, PAGE_SIZE);
194 r = radeon_gem_object_create(rdev, args->size, args->alignment,
195 args->initial_domain, false,
196 false, true, &gobj);
197 if (r) {
198 return r;
199 }
200 r = drm_gem_handle_create(filp, gobj, &handle);
201 if (r) {
202 mutex_lock(&dev->struct_mutex);
203 drm_gem_object_unreference(gobj);
204 mutex_unlock(&dev->struct_mutex);
205 return r;
206 }
207 mutex_lock(&dev->struct_mutex);
208 drm_gem_object_handle_unreference(gobj);
209 mutex_unlock(&dev->struct_mutex);
210 args->handle = handle;
211 return 0;
212}
213
214int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
215 struct drm_file *filp)
216{
217 /* transition the BO to a domain -
218 * just validate the BO into a certain domain */
219 struct drm_radeon_gem_set_domain *args = data;
220 struct drm_gem_object *gobj;
221 struct radeon_object *robj;
222 int r;
223
224 /* for now if someone requests domain CPU -
225 * just make sure the buffer is finished with */
226
227 /* just do a BO wait for now */
228 gobj = drm_gem_object_lookup(dev, filp, args->handle);
229 if (gobj == NULL) {
230 return -EINVAL;
231 }
232 robj = gobj->driver_private;
233
234 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
235
236 mutex_lock(&dev->struct_mutex);
237 drm_gem_object_unreference(gobj);
238 mutex_unlock(&dev->struct_mutex);
239 return r;
240}
241
242int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
243 struct drm_file *filp)
244{
245 struct drm_radeon_gem_mmap *args = data;
246 struct drm_gem_object *gobj;
247 struct radeon_object *robj;
248 int r;
249
250 gobj = drm_gem_object_lookup(dev, filp, args->handle);
251 if (gobj == NULL) {
252 return -EINVAL;
253 }
254 robj = gobj->driver_private;
255 r = radeon_object_mmap(robj, &args->addr_ptr);
256 mutex_lock(&dev->struct_mutex);
257 drm_gem_object_unreference(gobj);
258 mutex_unlock(&dev->struct_mutex);
259 return r;
260}
261
262int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
263 struct drm_file *filp)
264{
265 /* FIXME: implement */
266 return 0;
267}
268
269int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *filp)
271{
272 struct drm_radeon_gem_wait_idle *args = data;
273 struct drm_gem_object *gobj;
274 struct radeon_object *robj;
275 int r;
276
277 gobj = drm_gem_object_lookup(dev, filp, args->handle);
278 if (gobj == NULL) {
279 return -EINVAL;
280 }
281 robj = gobj->driver_private;
282 r = radeon_object_wait(robj);
283 mutex_lock(&dev->struct_mutex);
284 drm_gem_object_unreference(gobj);
285 mutex_unlock(&dev->struct_mutex);
286 return r;
287}
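From userspace the create path above is reached through the DRM_RADEON_GEM_CREATE ioctl. A hedged sketch, assuming the struct layout this series adds to include/drm/radeon_drm.h and a libdrm-style header install (error handling trimmed):

/* Hypothetical userspace snippet; fd is an already-open DRM device node. */
#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "radeon_drm.h"

static int create_vram_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_radeon_gem_create req = {
		.size = size,				/* the kernel rounds this up to PAGE_SIZE */
		.alignment = 4096,
		.initial_domain = RADEON_GEM_DOMAIN_VRAM,
	};
	int r = ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req);

	if (r == 0)
		*handle = req.handle;			/* GEM handle for later mmap/set_domain/cs */
	return r;
}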
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644
index 000000000000..71465ed2688a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30/**
31 * radeon_ddc_probe - probe for an EDID EEPROM on the connector's DDC bus
32 * Returns true if a device answers at the standard EDID slave address 0x50.
33 */
34bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
35{
36 u8 out_buf[] = { 0x0, 0x0};
37 u8 buf[2];
38 int ret;
39 struct i2c_msg msgs[] = {
40 {
41 .addr = 0x50,
42 .flags = 0,
43 .len = 1,
44 .buf = out_buf,
45 },
46 {
47 .addr = 0x50,
48 .flags = I2C_M_RD,
49 .len = 1,
50 .buf = buf,
51 }
52 };
53
54 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
55 if (ret == 2)
56 return true;
57
58 return false;
59}
60
61
62void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
63{
64 struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
65 uint32_t temp;
66 struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
67
68 /* RV410 appears to have a bug where the hw i2c in reset
69 * holds the i2c port in a bad state - switch hw i2c away before
71 * doing DDC - do this for all r200s/r300s/r400s for safety's sake
71 */
72 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
73 if (rec->a_clk_reg == RADEON_GPIO_MONID) {
74 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
75 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
76 } else {
77 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
78 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
79 }
80 }
81 if (lock_state) {
82 temp = RREG32(rec->a_clk_reg);
83 temp &= ~(rec->a_clk_mask);
84 WREG32(rec->a_clk_reg, temp);
85
86 temp = RREG32(rec->a_data_reg);
87 temp &= ~(rec->a_data_mask);
88 WREG32(rec->a_data_reg, temp);
89 }
90
91 temp = RREG32(rec->mask_clk_reg);
92 if (lock_state)
93 temp |= rec->mask_clk_mask;
94 else
95 temp &= ~rec->mask_clk_mask;
96 WREG32(rec->mask_clk_reg, temp);
97 temp = RREG32(rec->mask_clk_reg);
98
99 temp = RREG32(rec->mask_data_reg);
100 if (lock_state)
101 temp |= rec->mask_data_mask;
102 else
103 temp &= ~rec->mask_data_mask;
104 WREG32(rec->mask_data_reg, temp);
105 temp = RREG32(rec->mask_data_reg);
106}
107
108static int get_clock(void *i2c_priv)
109{
110 struct radeon_i2c_chan *i2c = i2c_priv;
111 struct radeon_device *rdev = i2c->dev->dev_private;
112 struct radeon_i2c_bus_rec *rec = &i2c->rec;
113 uint32_t val;
114
115 val = RREG32(rec->get_clk_reg);
116 val &= rec->get_clk_mask;
117
118 return (val != 0);
119}
120
121
122static int get_data(void *i2c_priv)
123{
124 struct radeon_i2c_chan *i2c = i2c_priv;
125 struct radeon_device *rdev = i2c->dev->dev_private;
126 struct radeon_i2c_bus_rec *rec = &i2c->rec;
127 uint32_t val;
128
129 val = RREG32(rec->get_data_reg);
130 val &= rec->get_data_mask;
131 return (val != 0);
132}
133
134static void set_clock(void *i2c_priv, int clock)
135{
136 struct radeon_i2c_chan *i2c = i2c_priv;
137 struct radeon_device *rdev = i2c->dev->dev_private;
138 struct radeon_i2c_bus_rec *rec = &i2c->rec;
139 uint32_t val;
140
141 val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
142 val |= clock ? 0 : rec->put_clk_mask;
143 WREG32(rec->put_clk_reg, val);
144}
145
146static void set_data(void *i2c_priv, int data)
147{
148 struct radeon_i2c_chan *i2c = i2c_priv;
149 struct radeon_device *rdev = i2c->dev->dev_private;
150 struct radeon_i2c_bus_rec *rec = &i2c->rec;
151 uint32_t val;
152
153 val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
154 val |= data ? 0 : rec->put_data_mask;
155 WREG32(rec->put_data_reg, val);
156}
157
158struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
159 struct radeon_i2c_bus_rec *rec,
160 const char *name)
161{
162 struct radeon_i2c_chan *i2c;
163 int ret;
164
165 i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
166 if (i2c == NULL)
167 return NULL;
168
169 i2c->adapter.owner = THIS_MODULE;
170 i2c->adapter.algo_data = &i2c->algo;
171 i2c->dev = dev;
172 i2c->algo.setsda = set_data;
173 i2c->algo.setscl = set_clock;
174 i2c->algo.getsda = get_data;
175 i2c->algo.getscl = get_clock;
176 i2c->algo.udelay = 20;
177 /* VESA says 2.2 ms is enough; 1 jiffy doesn't always
178 * make that, 2 jiffies is a lot more reliable */
179 i2c->algo.timeout = 2;
180 i2c->algo.data = i2c;
181 i2c->rec = *rec;
182 i2c_set_adapdata(&i2c->adapter, i2c);
183
184 ret = i2c_bit_add_bus(&i2c->adapter);
185 if (ret) {
186 DRM_INFO("Failed to register i2c %s\n", name);
187 goto out_free;
188 }
189
190 return i2c;
191out_free:
192 drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
193 return NULL;
194
195}
196
197void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
198{
199 if (!i2c)
200 return;
201
202 i2c_del_adapter(&i2c->adapter);
203 drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
204}
205
206struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
207{
208 return NULL;
209}
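The adapter registered by radeon_i2c_create() is an ordinary bit-banged i2c bus, so callers reach DDC devices with plain i2c_transfer() messages, exactly as radeon_ddc_probe() does for the EDID EEPROM at address 0x50. A sketch of reading the first EDID block over such a bus (hypothetical helper, no retries):

/* Hypothetical helper: read 128 bytes of EDID from slave address 0x50. */
static int radeon_read_edid_block(struct radeon_i2c_chan *i2c, u8 *edid)
{
	u8 start = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &start },	/* set EEPROM offset */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid   },	/* read block 0 */
	};

	return (i2c_transfer(&i2c->adapter, msgs, 2) == 2) ? 0 : -EIO;
}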
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644
index 000000000000..491d569deb0e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_drm.h"
30#include "radeon_reg.h"
31#include "radeon_microcode.h"
32#include "radeon.h"
33#include "atom.h"
34
35static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
36{
37 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
38 uint32_t irq_mask = RADEON_SW_INT_TEST;
39
40 if (irqs) {
41 WREG32(RADEON_GEN_INT_STATUS, irqs);
42 }
43 return irqs & irq_mask;
44}
45
46int r100_irq_set(struct radeon_device *rdev)
47{
48 uint32_t tmp = 0;
49
50 if (rdev->irq.sw_int) {
51 tmp |= RADEON_SW_INT_ENABLE;
52 }
53 /* TODO: go through the CRTCs and enable vblank interrupts as needed */
54 WREG32(RADEON_GEN_INT_CNTL, tmp);
55 return 0;
56}
57
58int r100_irq_process(struct radeon_device *rdev)
59{
60 uint32_t status;
61
62 status = r100_irq_ack(rdev);
63 if (!status) {
64 return IRQ_NONE;
65 }
66 while (status) {
67 /* SW interrupt */
68 if (status & RADEON_SW_INT_TEST) {
69 radeon_fence_process(rdev);
70 }
71 status = r100_irq_ack(rdev);
72 }
73 return IRQ_HANDLED;
74}
75
76int rs600_irq_set(struct radeon_device *rdev)
77{
78 uint32_t tmp = 0;
79
80 if (rdev->irq.sw_int) {
81 tmp |= RADEON_SW_INT_ENABLE;
82 }
83 WREG32(RADEON_GEN_INT_CNTL, tmp);
84 /* TODO: go through the CRTCs and enable vblank interrupts as needed */
85 WREG32(R500_DxMODE_INT_MASK, 0);
86 return 0;
87}
88
89irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
90{
91 struct drm_device *dev = (struct drm_device *) arg;
92 struct radeon_device *rdev = dev->dev_private;
93
94 return radeon_irq_process(rdev);
95}
96
97void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
98{
99 struct radeon_device *rdev = dev->dev_private;
100 unsigned i;
101
102 /* Disable *all* interrupts */
103 rdev->irq.sw_int = false;
104 for (i = 0; i < 2; i++) {
105 rdev->irq.crtc_vblank_int[i] = false;
106 }
107 radeon_irq_set(rdev);
108 /* Clear bits */
109 radeon_irq_process(rdev);
110}
111
112int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
113{
114 struct radeon_device *rdev = dev->dev_private;
115
116 dev->max_vblank_count = 0x001fffff;
117 rdev->irq.sw_int = true;
118 radeon_irq_set(rdev);
119 return 0;
120}
121
122void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
123{
124 struct radeon_device *rdev = dev->dev_private;
125 unsigned i;
126
127 if (rdev == NULL) {
128 return;
129 }
130 /* Disable *all* interrupts */
131 rdev->irq.sw_int = false;
132 for (i = 0; i < 2; i++) {
133 rdev->irq.crtc_vblank_int[i] = false;
134 }
135 radeon_irq_set(rdev);
136}
137
138int radeon_irq_kms_init(struct radeon_device *rdev)
139{
140 int r = 0;
141
142 r = drm_vblank_init(rdev->ddev, 2);
143 if (r) {
144 return r;
145 }
146 drm_irq_install(rdev->ddev);
147 rdev->irq.installed = true;
148 DRM_INFO("radeon: irq initialized.\n");
149 return 0;
150}
151
152void radeon_irq_kms_fini(struct radeon_device *rdev)
153{
154 if (rdev->irq.installed) {
155 rdev->irq.installed = false;
156 drm_irq_uninstall(rdev->ddev);
157 }
158}
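The r100 path above follows the usual ack-and-loop pattern and is the interrupt half of the fence machinery in radeon_fence.c. Schematically (descriptive only, not new driver code):

/*
 * Software-interrupt handshake:
 *
 *   ring emit: the per-ASIC radeon_fence_ring_emit() writes fence->seq to
 *              the fence scratch register and raises a software interrupt
 *   irq:       r100_irq_process() sees RADEON_SW_INT_TEST
 *                -> radeon_fence_process()
 *                -> radeon_fence_poll_locked() reads the scratch register
 *                   and moves completed fences to fence_drv.signaled
 *                -> wake_up_all(&rdev->fence_drv.queue)
 *   waiters:   radeon_fence_wait() sleeps on fence_drv.queue and re-checks
 *              radeon_fence_signaled() when woken
 */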
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644
index 000000000000..b0ce44b9f5ab
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "drm_sarea.h"
30#include "radeon.h"
31#include "radeon_drm.h"
32
33
34/*
35 * Driver load/unload
36 */
37int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
38{
39 struct radeon_device *rdev;
40 int r;
41
42 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
43 if (rdev == NULL) {
44 return -ENOMEM;
45 }
46 dev->dev_private = (void *)rdev;
47
48 /* update BUS flag */
49 if (drm_device_is_agp(dev)) {
50 flags |= RADEON_IS_AGP;
51 } else if (drm_device_is_pcie(dev)) {
52 flags |= RADEON_IS_PCIE;
53 } else {
54 flags |= RADEON_IS_PCI;
55 }
56
57 r = radeon_device_init(rdev, dev, dev->pdev, flags);
58 if (r) {
59 DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n");
60 radeon_device_fini(rdev);
61 return r;
62 }
63 return 0;
64}
65
66int radeon_driver_unload_kms(struct drm_device *dev)
67{
68 struct radeon_device *rdev = dev->dev_private;
69
70 radeon_device_fini(rdev);
71 kfree(rdev);
72 dev->dev_private = NULL;
73 return 0;
74}
75
76
77/*
78 * Userspace information ioctl
79 */
80int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
81{
82 struct radeon_device *rdev = dev->dev_private;
83 struct drm_radeon_info *info;
84 uint32_t *value_ptr;
85 uint32_t value;
86
87 info = data;
88 value_ptr = (uint32_t *)((unsigned long)info->value);
89 switch (info->request) {
90 case RADEON_INFO_DEVICE_ID:
91 value = dev->pci_device;
92 break;
93 case RADEON_INFO_NUM_GB_PIPES:
94 value = rdev->num_gb_pipes;
95 break;
96 default:
97 DRM_DEBUG("Invalid request %d\n", info->request);
98 return -EINVAL;
99 }
100 if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
101 DRM_ERROR("copy_to_user\n");
102 return -EFAULT;
103 }
104 return 0;
105}
106
107
108/*
109 * Outdated mess for old drm with Xorg being in charge (void function now).
110 */
111int radeon_driver_firstopen_kms(struct drm_device *dev)
112{
113 return 0;
114}
115
116
117void radeon_driver_lastclose_kms(struct drm_device *dev)
118{
119}
120
121int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
122{
123 return 0;
124}
125
126void radeon_driver_postclose_kms(struct drm_device *dev,
127 struct drm_file *file_priv)
128{
129}
130
131void radeon_driver_preclose_kms(struct drm_device *dev,
132 struct drm_file *file_priv)
133{
134}
135
136
137/*
138 * VBlank related functions.
139 */
140u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
141{
142 /* FIXME: implement */
143 return 0;
144}
145
146int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
147{
148 /* FIXME: implement */
149 return 0;
150}
151
152void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
153{
154 /* FIXME: implement */
155}
156
157
158/*
159 * For multiple master (like multiple X).
160 */
161struct drm_radeon_master_private {
162 drm_local_map_t *sarea;
163 drm_radeon_sarea_t *sarea_priv;
164};
165
166int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
167{
168 struct drm_radeon_master_private *master_priv;
169 unsigned long sareapage;
170 int ret;
171
172 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
173 if (master_priv == NULL) {
174 return -ENOMEM;
175 }
176 /* prebuild the SAREA */
177 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
178 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
179 _DRM_CONTAINS_LOCK|_DRM_DRIVER,
180 &master_priv->sarea);
181 if (ret) {
182 DRM_ERROR("SAREA setup failed\n");
183 return ret;
184 }
185 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
186 master_priv->sarea_priv->pfCurrentPage = 0;
187 master->driver_priv = master_priv;
188 return 0;
189}
190
191void radeon_master_destroy_kms(struct drm_device *dev,
192 struct drm_master *master)
193{
194 struct drm_radeon_master_private *master_priv = master->driver_priv;
195
196 if (master_priv == NULL) {
197 return;
198 }
199 if (master_priv->sarea) {
200 drm_rmmap_locked(dev, master_priv->sarea);
201 }
202 drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
203 master->driver_priv = NULL;
204}
205
206
207/*
208 * IOCTL.
209 */
210int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
211 struct drm_file *file_priv)
212{
213 /* Not valid in KMS. */
214 return -EINVAL;
215}
216
217#define KMS_INVALID_IOCTL(name) \
218int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
219{ \
220 DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
221 return -EINVAL; \
222}
223
224/*
225 * All these ioctls are invalid in kms world.
226 */
227KMS_INVALID_IOCTL(radeon_cp_init_kms)
228KMS_INVALID_IOCTL(radeon_cp_start_kms)
229KMS_INVALID_IOCTL(radeon_cp_stop_kms)
230KMS_INVALID_IOCTL(radeon_cp_reset_kms)
231KMS_INVALID_IOCTL(radeon_cp_idle_kms)
232KMS_INVALID_IOCTL(radeon_cp_resume_kms)
233KMS_INVALID_IOCTL(radeon_engine_reset_kms)
234KMS_INVALID_IOCTL(radeon_fullscreen_kms)
235KMS_INVALID_IOCTL(radeon_cp_swap_kms)
236KMS_INVALID_IOCTL(radeon_cp_clear_kms)
237KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
238KMS_INVALID_IOCTL(radeon_cp_indices_kms)
239KMS_INVALID_IOCTL(radeon_cp_texture_kms)
240KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
241KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
242KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
243KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
244KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
245KMS_INVALID_IOCTL(radeon_cp_flip_kms)
246KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
247KMS_INVALID_IOCTL(radeon_mem_free_kms)
248KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
249KMS_INVALID_IOCTL(radeon_irq_emit_kms)
250KMS_INVALID_IOCTL(radeon_irq_wait_kms)
251KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
252KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
253KMS_INVALID_IOCTL(radeon_surface_free_kms)
254
255
256struct drm_ioctl_desc radeon_ioctls_kms[] = {
257 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
258 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
259 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
260 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
261 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
262 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
263 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
264 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
265 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
266 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
267 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
268 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
269 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
270 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
271 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
272 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
273 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
274 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
275 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
276 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
277 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
278 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
279 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
280 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
281 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
282 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
283 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
284 /* KMS */
285 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
286 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
287 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
288 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
289 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
290 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
291 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
292 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
293 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
294};
295int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
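The DRM_RADEON_INFO entry at the end of the table is the KMS-era query path implemented by radeon_info_ioctl() above. A hedged userspace sketch, assuming the drm_radeon_info layout from this series in which the value field carries a user pointer:

/* Hypothetical userspace query of the PCI device id via DRM_RADEON_INFO. */
#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "radeon_drm.h"

static int radeon_query_device_id(int fd, uint32_t *dev_id)
{
	uint32_t value = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_DEVICE_ID,
		.value = (uint64_t)(uintptr_t)&value,	/* kernel copies the result here */
	};
	int r = ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);

	if (r == 0)
		*dev_id = value;
	return r;
}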
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644
index 000000000000..8086ecf7f03d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -0,0 +1,1276 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h>
28#include <drm/radeon_drm.h>
29#include "radeon_fixed.h"
30#include "radeon.h"
31
32void radeon_restore_common_regs(struct drm_device *dev)
33{
34 /* don't need this yet */
35}
36
37static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
38{
39 struct radeon_device *rdev = dev->dev_private;
40 int i = 0;
41
42 /* FIXME: Certain revisions of R300 can't recover here. Not sure of
43 the cause yet, but this workaround will mask the problem for now.
44 Other chips usually will pass at the very first test, so the
45 workaround shouldn't have any effect on them. */
46 for (i = 0;
47 (i < 10000 &&
48 RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
49 i++);
50}
51
52static void radeon_pll_write_update(struct drm_device *dev)
53{
54 struct radeon_device *rdev = dev->dev_private;
55
56 while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
57
58 WREG32_PLL_P(RADEON_PPLL_REF_DIV,
59 RADEON_PPLL_ATOMIC_UPDATE_W,
60 ~(RADEON_PPLL_ATOMIC_UPDATE_W));
61}
62
63static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
64{
65 struct radeon_device *rdev = dev->dev_private;
66 int i = 0;
67
68
69 /* FIXME: Certain revisions of R300 can't recover here. Not sure of
70 the cause yet, but this workaround will mask the problem for now.
71 Other chips usually will pass at the very first test, so the
72 workaround shouldn't have any effect on them. */
73 for (i = 0;
74 (i < 10000 &&
75 RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
76 i++);
77}
78
79static void radeon_pll2_write_update(struct drm_device *dev)
80{
81 struct radeon_device *rdev = dev->dev_private;
82
83 while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
84
85 WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
86 RADEON_P2PLL_ATOMIC_UPDATE_W,
87 ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
88}
89
90static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
91 uint16_t fb_div)
92{
93 unsigned int vcoFreq;
94
95 if (!ref_div)
96 return 1;
97
98 vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
99
100 /*
101 * This is horribly crude: the VCO frequency range is divided into
102 * 3 parts, each part having a fixed PLL gain value.
103 */
104 if (vcoFreq >= 30000)
105 /*
106 * [300..max] MHz : 7
107 */
108 return 7;
109 else if (vcoFreq >= 18000)
110 /*
111 * [180..300) MHz : 4
112 */
113 return 4;
114 else
115 /*
116 * [0..180) MHz : 1
117 */
118 return 1;
119}
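/*
 * Worked example (illustrative values, not taken from a real board): with a
 * 27 MHz reference clock expressed in 10 kHz units (ref_freq = 2700),
 * fb_div = 100 and ref_div = 12 give vcoFreq = 2700 * 100 / 12 = 22500,
 * i.e. 225 MHz, which lands in the [180..300) MHz band and selects a
 * PLL gain of 4.
 */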
120
121void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
122{
123 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
124 struct drm_device *dev = crtc->dev;
125 struct radeon_device *rdev = dev->dev_private;
126 uint32_t mask;
127
128 if (radeon_crtc->crtc_id)
129 mask = (RADEON_CRTC2_EN |
130 RADEON_CRTC2_DISP_DIS |
131 RADEON_CRTC2_VSYNC_DIS |
132 RADEON_CRTC2_HSYNC_DIS |
133 RADEON_CRTC2_DISP_REQ_EN_B);
134 else
135 mask = (RADEON_CRTC_DISPLAY_DIS |
136 RADEON_CRTC_VSYNC_DIS |
137 RADEON_CRTC_HSYNC_DIS);
138
139 switch (mode) {
140 case DRM_MODE_DPMS_ON:
141 if (radeon_crtc->crtc_id)
142 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
143 else {
144 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
145 RADEON_CRTC_DISP_REQ_EN_B));
146 WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
147 }
148 break;
149 case DRM_MODE_DPMS_STANDBY:
150 case DRM_MODE_DPMS_SUSPEND:
151 case DRM_MODE_DPMS_OFF:
152 if (radeon_crtc->crtc_id)
153 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
154 else {
155 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
156 RADEON_CRTC_DISP_REQ_EN_B));
157 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
158 }
159 break;
160 }
161
162 if (mode != DRM_MODE_DPMS_OFF) {
163 radeon_crtc_load_lut(crtc);
164 }
165}
166
167/* properly set crtc bpp when using atombios */
168void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
169{
170 struct drm_device *dev = crtc->dev;
171 struct radeon_device *rdev = dev->dev_private;
172 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
173 int format;
174 uint32_t crtc_gen_cntl;
175 uint32_t disp_merge_cntl;
176 uint32_t crtc_pitch;
177
178 switch (crtc->fb->bits_per_pixel) {
179 case 15: /* 555 */
180 format = 3;
181 break;
182 case 16: /* 565 */
183 format = 4;
184 break;
185 case 24: /* RGB */
186 format = 5;
187 break;
188 case 32: /* xRGB */
189 format = 6;
190 break;
191 default:
192 return;
193 }
194
195 crtc_pitch = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
196 ((crtc->fb->bits_per_pixel * 8) - 1)) /
197 (crtc->fb->bits_per_pixel * 8));
198 crtc_pitch |= crtc_pitch << 16;
199
200 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
201
202 switch (radeon_crtc->crtc_id) {
203 case 0:
204 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
205 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
206 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
207
208 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
209 crtc_gen_cntl |= (format << 8);
210 crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
211 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
212 break;
213 case 1:
214 disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
215 disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
216 WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
217
218 crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
219 crtc_gen_cntl |= (format << 8);
220 WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
221 WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
222 WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
223 break;
224 }
225}
226
227int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
228 struct drm_framebuffer *old_fb)
229{
230 struct drm_device *dev = crtc->dev;
231 struct radeon_device *rdev = dev->dev_private;
232 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
233 struct radeon_framebuffer *radeon_fb;
234 struct drm_gem_object *obj;
235 uint64_t base;
236 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
237 uint32_t crtc_pitch, pitch_pixels;
238
239 DRM_DEBUG("\n");
240
241 radeon_fb = to_radeon_framebuffer(crtc->fb);
242
243 obj = radeon_fb->obj;
244 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
245 return -EINVAL;
246 }
247 crtc_offset = (u32)base;
248 crtc_offset_cntl = 0;
249
250 pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
251 crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) +
252 ((crtc->fb->bits_per_pixel * 8) - 1)) /
253 (crtc->fb->bits_per_pixel * 8));
254 crtc_pitch |= crtc_pitch << 16;
255
256 /* TODO tiling */
257 if (0) {
258 if (ASIC_IS_R300(rdev))
259 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
260 R300_CRTC_MICRO_TILE_BUFFER_DIS |
261 R300_CRTC_MACRO_TILE_EN);
262 else
263 crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
264 } else {
265 if (ASIC_IS_R300(rdev))
266 crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
267 R300_CRTC_MICRO_TILE_BUFFER_DIS |
268 R300_CRTC_MACRO_TILE_EN);
269 else
270 crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
271 }
272
273
274 /* TODO more tiling */
275 if (0) {
276 if (ASIC_IS_R300(rdev)) {
277 crtc_tile_x0_y0 = x | (y << 16);
278 base &= ~0x7ff;
279 } else {
280 int byteshift = crtc->fb->bits_per_pixel >> 4;
281 int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11;
282 base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
283 crtc_offset_cntl |= (y % 16);
284 }
285 } else {
286 int offset = y * pitch_pixels + x;
287 switch (crtc->fb->bits_per_pixel) {
288 case 15:
289 case 16:
290 offset *= 2;
291 break;
292 case 24:
293 offset *= 3;
294 break;
295 case 32:
296 offset *= 4;
297 break;
298 default:
299 return -EINVAL;
300 }
301 base += offset;
302 }
303
304 base &= ~7;
305
306 /* update sarea TODO */
307
308 crtc_offset = (u32)base;
309
310 WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location);
311
312 if (ASIC_IS_R300(rdev)) {
313 if (radeon_crtc->crtc_id)
314 WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
315 else
316 WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
317 }
318 WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
319 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
320 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
321
322 if (old_fb && old_fb != crtc->fb) {
323 radeon_fb = to_radeon_framebuffer(old_fb);
324 radeon_gem_object_unpin(radeon_fb->obj);
325 }
326 return 0;
327}
328
329static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
330{
331 struct drm_device *dev = crtc->dev;
332 struct radeon_device *rdev = dev->dev_private;
333 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
334 int format;
335 int hsync_start;
336 int hsync_wid;
337 int vsync_wid;
338 uint32_t crtc_h_total_disp;
339 uint32_t crtc_h_sync_strt_wid;
340 uint32_t crtc_v_total_disp;
341 uint32_t crtc_v_sync_strt_wid;
342
343 DRM_DEBUG("\n");
344
345 switch (crtc->fb->bits_per_pixel) {
346 case 15: /* 555 */
347 format = 3;
348 break;
349 case 16: /* 565 */
350 format = 4;
351 break;
352 case 24: /* RGB */
353 format = 5;
354 break;
355 case 32: /* xRGB */
356 format = 6;
357 break;
358 default:
359 return false;
360 }
361
362 crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
363 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
364
365 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
366 if (!hsync_wid)
367 hsync_wid = 1;
368 hsync_start = mode->crtc_hsync_start - 8;
369
370 crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
371 | ((hsync_wid & 0x3f) << 16)
372 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
373 ? RADEON_CRTC_H_SYNC_POL
374 : 0));
375
376 /* This works for double scan mode. */
377 crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
378 | ((mode->crtc_vdisplay - 1) << 16));
379
380 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
381 if (!vsync_wid)
382 vsync_wid = 1;
383
384 crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
385 | ((vsync_wid & 0x1f) << 16)
386 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
387 ? RADEON_CRTC_V_SYNC_POL
388 : 0));
389
390 /* TODO -> Dell Server */
391 if (0) {
392 uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
393 uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
394 uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
395 uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
396
397 dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
398 dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
399
400		/* For CRT on DAC2, don't turn it on if the BIOS didn't
401		   enable it, even if it's detected.
402		*/
403 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
404 tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
405 tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
406
407 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
408 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
409 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
410 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
411 }
412
413 if (radeon_crtc->crtc_id) {
414 uint32_t crtc2_gen_cntl;
415 uint32_t disp2_merge_cntl;
416
417 /* check to see if TV DAC is enabled for another crtc and keep it enabled */
418 if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
419 crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
420 else
421 crtc2_gen_cntl = 0;
422
423 crtc2_gen_cntl |= ((format << 8)
424 | RADEON_CRTC2_VSYNC_DIS
425 | RADEON_CRTC2_HSYNC_DIS
426 | RADEON_CRTC2_DISP_DIS
427 | RADEON_CRTC2_DISP_REQ_EN_B
428 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
429 ? RADEON_CRTC2_DBL_SCAN_EN
430 : 0)
431 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
432 ? RADEON_CRTC2_CSYNC_EN
433 : 0)
434 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
435 ? RADEON_CRTC2_INTERLACE_EN
436 : 0));
437
438 disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
439 disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
440
441 WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
442 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
443 } else {
444 uint32_t crtc_gen_cntl;
445 uint32_t crtc_ext_cntl;
446 uint32_t disp_merge_cntl;
447
448 crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
449 | (format << 8)
450 | RADEON_CRTC_DISP_REQ_EN_B
451 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
452 ? RADEON_CRTC_DBL_SCAN_EN
453 : 0)
454 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
455 ? RADEON_CRTC_CSYNC_EN
456 : 0)
457 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
458 ? RADEON_CRTC_INTERLACE_EN
459 : 0));
460
461 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
462 crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
463 RADEON_CRTC_VSYNC_DIS |
464 RADEON_CRTC_HSYNC_DIS |
465 RADEON_CRTC_DISPLAY_DIS);
466
467 disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
468 disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
469
470 WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
471 WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
472 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
473 }
474
475 WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
476 WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
477 WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
478 WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);
479
480 return true;
481}
482
483static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
484{
485 struct drm_device *dev = crtc->dev;
486 struct radeon_device *rdev = dev->dev_private;
487 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
488 struct drm_encoder *encoder;
489 uint32_t feedback_div = 0;
490 uint32_t frac_fb_div = 0;
491 uint32_t reference_div = 0;
492 uint32_t post_divider = 0;
493 uint32_t freq = 0;
494 uint8_t pll_gain;
495 int pll_flags = RADEON_PLL_LEGACY;
496 bool use_bios_divs = false;
497 /* PLL registers */
498 uint32_t pll_ref_div = 0;
499 uint32_t pll_fb_post_div = 0;
500 uint32_t htotal_cntl = 0;
501
502 struct radeon_pll *pll;
503
504 struct {
505 int divider;
506 int bitvalue;
507 } *post_div, post_divs[] = {
508 /* From RAGE 128 VR/RAGE 128 GL Register
509 * Reference Manual (Technical Reference
510 * Manual P/N RRG-G04100-C Rev. 0.04), page
511 * 3-17 (PLL_DIV_[3:0]).
512 */
513 { 1, 0 }, /* VCLK_SRC */
514 { 2, 1 }, /* VCLK_SRC/2 */
515 { 4, 2 }, /* VCLK_SRC/4 */
516 { 8, 3 }, /* VCLK_SRC/8 */
517 { 3, 4 }, /* VCLK_SRC/3 */
518 { 16, 5 }, /* VCLK_SRC/16 */
519 { 6, 6 }, /* VCLK_SRC/6 */
520 { 12, 7 }, /* VCLK_SRC/12 */
521 { 0, 0 }
522 };
523
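	/* each CRTC has its own pixel PLL: CRTC2 is fed by P2PLL, CRTC1 by PPLL */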
524 if (radeon_crtc->crtc_id)
525 pll = &rdev->clock.p2pll;
526 else
527 pll = &rdev->clock.p1pll;
528
529 if (mode->clock > 200000) /* range limits??? */
530 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
531 else
532 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
533
534 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
535 if (encoder->crtc == crtc) {
536 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
537 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
538 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
539 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
540 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
541 if (lvds) {
542 if (lvds->use_bios_dividers) {
543 pll_ref_div = lvds->panel_ref_divider;
544 pll_fb_post_div = (lvds->panel_fb_divider |
545 (lvds->panel_post_divider << 16));
546 htotal_cntl = 0;
547 use_bios_divs = true;
548 }
549 }
550 pll_flags |= RADEON_PLL_USE_REF_DIV;
551 }
552 }
553 }
554
555 DRM_DEBUG("\n");
556
557 if (!use_bios_divs) {
558 radeon_compute_pll(pll, mode->clock,
559 &freq, &feedback_div, &frac_fb_div,
560 &reference_div, &post_divider,
561 pll_flags);
562
563 for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
564 if (post_div->divider == post_divider)
565 break;
566 }
567
568 if (!post_div->divider)
569 post_div = &post_divs[0];
570
571 DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
572 (unsigned)freq,
573 feedback_div,
574 reference_div,
575 post_divider);
576
577 pll_ref_div = reference_div;
578#if defined(__powerpc__) && (0) /* TODO */
579 /* apparently programming this otherwise causes a hang??? */
580 if (info->MacModel == RADEON_MAC_IBOOK)
581 pll_fb_post_div = 0x000600ad;
582 else
583#endif
584 pll_fb_post_div = (feedback_div | (post_div->bitvalue << 16));
585
586 htotal_cntl = mode->htotal & 0x7;
587
588 }
589
590 pll_gain = radeon_compute_pll_gain(pll->reference_freq,
591 pll_ref_div & 0x3ff,
592 pll_fb_post_div & 0x7ff);
593
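	/* program the pixel PLL: the pixel clock source is parked on the CPU clock
	 * while the dividers are updated, then switched back once the PLL has locked */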
594 if (radeon_crtc->crtc_id) {
595 uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
596 ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
597 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);
598
599 WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
600 RADEON_PIX2CLK_SRC_SEL_CPUCLK,
601 ~(RADEON_PIX2CLK_SRC_SEL_MASK));
602
603 WREG32_PLL_P(RADEON_P2PLL_CNTL,
604 RADEON_P2PLL_RESET
605 | RADEON_P2PLL_ATOMIC_UPDATE_EN
606 | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
607 ~(RADEON_P2PLL_RESET
608 | RADEON_P2PLL_ATOMIC_UPDATE_EN
609 | RADEON_P2PLL_PVG_MASK));
610
611 WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
612 pll_ref_div,
613 ~RADEON_P2PLL_REF_DIV_MASK);
614
615 WREG32_PLL_P(RADEON_P2PLL_DIV_0,
616 pll_fb_post_div,
617 ~RADEON_P2PLL_FB0_DIV_MASK);
618
619 WREG32_PLL_P(RADEON_P2PLL_DIV_0,
620 pll_fb_post_div,
621 ~RADEON_P2PLL_POST0_DIV_MASK);
622
623 radeon_pll2_write_update(dev);
624 radeon_pll2_wait_for_read_update_complete(dev);
625
626 WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);
627
628 WREG32_PLL_P(RADEON_P2PLL_CNTL,
629 0,
630 ~(RADEON_P2PLL_RESET
631 | RADEON_P2PLL_SLEEP
632 | RADEON_P2PLL_ATOMIC_UPDATE_EN));
633
634 DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
635 (unsigned)pll_ref_div,
636 (unsigned)pll_fb_post_div,
637 (unsigned)htotal_cntl,
638 RREG32_PLL(RADEON_P2PLL_CNTL));
639 DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
640 (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
641 (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
642 (unsigned)((pll_fb_post_div &
643 RADEON_P2PLL_POST0_DIV_MASK) >> 16));
644
645		mdelay(50); /* Let the clock lock */
646
647 WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
648 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
649 ~(RADEON_PIX2CLK_SRC_SEL_MASK));
650
651 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
652 } else {
653 if (rdev->flags & RADEON_IS_MOBILITY) {
654			/* A temporary workaround for the occasional blanking seen on certain laptop panels.
655			   This appears to be related to the PLL divider registers (failing to lock?).
656			   It occurs even when all dividers are the same as their old settings.
657			   In that case we really don't need to fiddle with the PLL registers at all,
658			   and by skipping the update we can avoid the blanking problem on those panels.
659			*/
660 if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
661 (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
662 (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
663 WREG32_P(RADEON_CLOCK_CNTL_INDEX,
664 RADEON_PLL_DIV_SEL,
665 ~(RADEON_PLL_DIV_SEL));
666 r100_pll_errata_after_index(rdev);
667 return;
668 }
669 }
670
671 WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
672 RADEON_VCLK_SRC_SEL_CPUCLK,
673 ~(RADEON_VCLK_SRC_SEL_MASK));
674 WREG32_PLL_P(RADEON_PPLL_CNTL,
675 RADEON_PPLL_RESET
676 | RADEON_PPLL_ATOMIC_UPDATE_EN
677 | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
678 | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
679 ~(RADEON_PPLL_RESET
680 | RADEON_PPLL_ATOMIC_UPDATE_EN
681 | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
682 | RADEON_PPLL_PVG_MASK));
683
684 WREG32_P(RADEON_CLOCK_CNTL_INDEX,
685 RADEON_PLL_DIV_SEL,
686 ~(RADEON_PLL_DIV_SEL));
687 r100_pll_errata_after_index(rdev);
688
689 if (ASIC_IS_R300(rdev) ||
690 (rdev->family == CHIP_RS300) ||
691 (rdev->family == CHIP_RS400) ||
692 (rdev->family == CHIP_RS480)) {
693 if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
694 /* When restoring console mode, use saved PPLL_REF_DIV
695 * setting.
696 */
697 WREG32_PLL_P(RADEON_PPLL_REF_DIV,
698 pll_ref_div,
699 0);
700 } else {
701 /* R300 uses ref_div_acc field as real ref divider */
702 WREG32_PLL_P(RADEON_PPLL_REF_DIV,
703 (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
704 ~R300_PPLL_REF_DIV_ACC_MASK);
705 }
706 } else
707 WREG32_PLL_P(RADEON_PPLL_REF_DIV,
708 pll_ref_div,
709 ~RADEON_PPLL_REF_DIV_MASK);
710
711 WREG32_PLL_P(RADEON_PPLL_DIV_3,
712 pll_fb_post_div,
713 ~RADEON_PPLL_FB3_DIV_MASK);
714
715 WREG32_PLL_P(RADEON_PPLL_DIV_3,
716 pll_fb_post_div,
717 ~RADEON_PPLL_POST3_DIV_MASK);
718
719 radeon_pll_write_update(dev);
720 radeon_pll_wait_for_read_update_complete(dev);
721
722 WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);
723
724 WREG32_PLL_P(RADEON_PPLL_CNTL,
725 0,
726 ~(RADEON_PPLL_RESET
727 | RADEON_PPLL_SLEEP
728 | RADEON_PPLL_ATOMIC_UPDATE_EN
729 | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
730
731 DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
732 pll_ref_div,
733 pll_fb_post_div,
734 (unsigned)htotal_cntl,
735 RREG32_PLL(RADEON_PPLL_CNTL));
736 DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
737 pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
738 pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
739 (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
740
741		mdelay(50); /* Let the clock lock */
742
743 WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
744 RADEON_VCLK_SRC_SEL_PPLLCLK,
745 ~(RADEON_VCLK_SRC_SEL_MASK));
746
747 }
748}
749
750static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
751 struct drm_display_mode *mode,
752 struct drm_display_mode *adjusted_mode)
753{
754 return true;
755}
756
757static int radeon_crtc_mode_set(struct drm_crtc *crtc,
758 struct drm_display_mode *mode,
759 struct drm_display_mode *adjusted_mode,
760 int x, int y, struct drm_framebuffer *old_fb)
761{
762
763 DRM_DEBUG("\n");
764
765 /* TODO TV */
766
767 radeon_crtc_set_base(crtc, x, y, old_fb);
768 radeon_set_crtc_timing(crtc, adjusted_mode);
769 radeon_set_pll(crtc, adjusted_mode);
770 radeon_init_disp_bandwidth(crtc->dev);
771
772 return 0;
773}
774
775static void radeon_crtc_prepare(struct drm_crtc *crtc)
776{
777 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
778}
779
780static void radeon_crtc_commit(struct drm_crtc *crtc)
781{
782 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
783}
784
785static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
786 .dpms = radeon_crtc_dpms,
787 .mode_fixup = radeon_crtc_mode_fixup,
788 .mode_set = radeon_crtc_mode_set,
789 .mode_set_base = radeon_crtc_set_base,
790 .prepare = radeon_crtc_prepare,
791 .commit = radeon_crtc_commit,
792};
793
794
795void radeon_legacy_init_crtc(struct drm_device *dev,
796 struct radeon_crtc *radeon_crtc)
797{
798 if (radeon_crtc->crtc_id == 1)
799 radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
800 drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
801}
802
803void radeon_init_disp_bw_legacy(struct drm_device *dev,
804 struct drm_display_mode *mode1,
805 uint32_t pixel_bytes1,
806 struct drm_display_mode *mode2,
807 uint32_t pixel_bytes2)
808{
809 struct radeon_device *rdev = dev->dev_private;
810 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
811 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
812 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
813 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
814 fixed20_12 memtcas_ff[8] = {
815 fixed_init(1),
816 fixed_init(2),
817 fixed_init(3),
818 fixed_init(0),
819 fixed_init_half(1),
820 fixed_init_half(2),
821 fixed_init(0),
822 };
823 fixed20_12 memtcas_rs480_ff[8] = {
824 fixed_init(0),
825 fixed_init(1),
826 fixed_init(2),
827 fixed_init(3),
828 fixed_init(0),
829 fixed_init_half(1),
830 fixed_init_half(2),
831 fixed_init_half(3),
832 };
833 fixed20_12 memtcas2_ff[8] = {
834 fixed_init(0),
835 fixed_init(1),
836 fixed_init(2),
837 fixed_init(3),
838 fixed_init(4),
839 fixed_init(5),
840 fixed_init(6),
841 fixed_init(7),
842 };
843 fixed20_12 memtrbs[8] = {
844 fixed_init(1),
845 fixed_init_half(1),
846 fixed_init(2),
847 fixed_init_half(2),
848 fixed_init(3),
849 fixed_init_half(3),
850 fixed_init(4),
851 fixed_init_half(4)
852 };
853 fixed20_12 memtrbs_r4xx[8] = {
854 fixed_init(4),
855 fixed_init(5),
856 fixed_init(6),
857 fixed_init(7),
858 fixed_init(8),
859 fixed_init(9),
860 fixed_init(10),
861 fixed_init(11)
862 };
863 fixed20_12 min_mem_eff;
864 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
865 fixed20_12 cur_latency_mclk, cur_latency_sclk;
866 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
867 disp_drain_rate2, read_return_rate;
868 fixed20_12 time_disp1_drop_priority;
869 int c;
870 int cur_size = 16; /* in octawords */
871 int critical_point = 0, critical_point2;
872/* uint32_t read_return_rate, time_disp1_drop_priority; */
873 int stop_req, max_stop_req;
874
875 min_mem_eff.full = rfixed_const_8(0);
876 /* get modes */
877 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
878 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
879 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
880 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
881 /* check crtc enables */
882 if (mode2)
883 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
884 if (mode1)
885 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
886 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
887 }
888
889	/*
890	 * determine if there is enough bandwidth for the current mode
891	 */
892 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
893 temp_ff.full = rfixed_const(100);
894 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
895 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
896 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
897
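	/* raw memory bandwidth: bus width in bytes, doubled for DDR, times the memory clock */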
898 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
899 temp_ff.full = rfixed_const(temp);
900 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
901
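	/* peak display bandwidth is the sum over active CRTCs of pixel clock times bytes per pixel */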
902 pix_clk.full = 0;
903 pix_clk2.full = 0;
904 peak_disp_bw.full = 0;
905 if (mode1) {
906 temp_ff.full = rfixed_const(1000);
907 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
908 pix_clk.full = rfixed_div(pix_clk, temp_ff);
909 temp_ff.full = rfixed_const(pixel_bytes1);
910 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
911 }
912 if (mode2) {
913 temp_ff.full = rfixed_const(1000);
914 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
915 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
916 temp_ff.full = rfixed_const(pixel_bytes2);
917 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
918 }
919
920 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
921 if (peak_disp_bw.full >= mem_bw.full) {
922		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
923			  "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
924 }
925
926 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
927 temp = RREG32(RADEON_MEM_TIMING_CNTL);
928 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
929 mem_trcd = ((temp >> 2) & 0x3) + 1;
930 mem_trp = ((temp & 0x3)) + 1;
931 mem_tras = ((temp & 0x70) >> 4) + 1;
932 } else if (rdev->family == CHIP_R300 ||
933 rdev->family == CHIP_R350) { /* r300, r350 */
934 mem_trcd = (temp & 0x7) + 1;
935 mem_trp = ((temp >> 8) & 0x7) + 1;
936 mem_tras = ((temp >> 11) & 0xf) + 4;
937 } else if (rdev->family == CHIP_RV350 ||
938 rdev->family <= CHIP_RV380) {
939 /* rv3x0 */
940 mem_trcd = (temp & 0x7) + 3;
941 mem_trp = ((temp >> 8) & 0x7) + 3;
942 mem_tras = ((temp >> 11) & 0xf) + 6;
943 } else if (rdev->family == CHIP_R420 ||
944 rdev->family == CHIP_R423 ||
945 rdev->family == CHIP_RV410) {
946 /* r4xx */
947 mem_trcd = (temp & 0xf) + 3;
948 if (mem_trcd > 15)
949 mem_trcd = 15;
950 mem_trp = ((temp >> 8) & 0xf) + 3;
951 if (mem_trp > 15)
952 mem_trp = 15;
953 mem_tras = ((temp >> 12) & 0x1f) + 6;
954 if (mem_tras > 31)
955 mem_tras = 31;
956 } else { /* RV200, R200 */
957 mem_trcd = (temp & 0x7) + 1;
958 mem_trp = ((temp >> 8) & 0x7) + 1;
959 mem_tras = ((temp >> 12) & 0xf) + 4;
960 }
961 /* convert to FF */
962 trcd_ff.full = rfixed_const(mem_trcd);
963 trp_ff.full = rfixed_const(mem_trp);
964 tras_ff.full = rfixed_const(mem_tras);
965
966	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
967 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
968 data = (temp & (7 << 20)) >> 20;
969 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
970 if (rdev->family == CHIP_RS480) /* don't think rs400 */
971 tcas_ff = memtcas_rs480_ff[data];
972 else
973 tcas_ff = memtcas_ff[data];
974 } else
975 tcas_ff = memtcas2_ff[data];
976
977 if (rdev->family == CHIP_RS400 ||
978 rdev->family == CHIP_RS480) {
979		/* extra CAS latency stored in bits 23-25, 0-4 clocks */
980 data = (temp >> 23) & 0x7;
981 if (data < 5)
982 tcas_ff.full += rfixed_const(data);
983 }
984
985 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
986 /* on the R300, Tcas is included in Trbs.
987 */
988 temp = RREG32(RADEON_MEM_CNTL);
989 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
990 if (data == 1) {
991 if (R300_MEM_USE_CD_CH_ONLY & temp) {
992 temp = RREG32(R300_MC_IND_INDEX);
993 temp &= ~R300_MC_IND_ADDR_MASK;
994 temp |= R300_MC_READ_CNTL_CD_mcind;
995 WREG32(R300_MC_IND_INDEX, temp);
996 temp = RREG32(R300_MC_IND_DATA);
997 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
998 } else {
999 temp = RREG32(R300_MC_READ_CNTL_AB);
1000 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1001 }
1002 } else {
1003 temp = RREG32(R300_MC_READ_CNTL_AB);
1004 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1005 }
1006 if (rdev->family == CHIP_RV410 ||
1007 rdev->family == CHIP_R420 ||
1008 rdev->family == CHIP_R423)
1009 trbs_ff = memtrbs_r4xx[data];
1010 else
1011 trbs_ff = memtrbs[data];
1012 tcas_ff.full += trbs_ff.full;
1013 }
1014
1015 sclk_eff_ff.full = sclk_ff.full;
1016
1017 if (rdev->flags & RADEON_IS_AGP) {
1018 fixed20_12 agpmode_ff;
1019 agpmode_ff.full = rfixed_const(radeon_agpmode);
1020 temp_ff.full = rfixed_const_666(16);
1021 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
1022 }
1023 /* TODO PCIE lanes may affect this - agpmode == 16?? */
1024
1025 if (ASIC_IS_R300(rdev)) {
1026 sclk_delay_ff.full = rfixed_const(250);
1027 } else {
1028 if ((rdev->family == CHIP_RV100) ||
1029 rdev->flags & RADEON_IS_IGP) {
1030 if (rdev->mc.vram_is_ddr)
1031 sclk_delay_ff.full = rfixed_const(41);
1032 else
1033 sclk_delay_ff.full = rfixed_const(33);
1034 } else {
1035 if (rdev->mc.vram_width == 128)
1036 sclk_delay_ff.full = rfixed_const(57);
1037 else
1038 sclk_delay_ff.full = rfixed_const(41);
1039 }
1040 }
1041
1042 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
1043
1044 if (rdev->mc.vram_is_ddr) {
1045 if (rdev->mc.vram_width == 32) {
1046 k1.full = rfixed_const(40);
1047 c = 3;
1048 } else {
1049 k1.full = rfixed_const(20);
1050 c = 1;
1051 }
1052 } else {
1053 k1.full = rfixed_const(40);
1054 c = 3;
1055 }
1056
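	/* memory controller latency in memory clocks: 2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1,
	 * converted to time by dividing by the memory clock (plus a small sclk-dependent term) */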
1057 temp_ff.full = rfixed_const(2);
1058 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
1059 temp_ff.full = rfixed_const(c);
1060 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
1061 temp_ff.full = rfixed_const(4);
1062 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
1063 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
1064 mc_latency_mclk.full += k1.full;
1065
1066 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
1067 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
1068
1069 /*
1070 HW cursor time assuming worst case of full size colour cursor.
1071 */
1072 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
1073 temp_ff.full += trcd_ff.full;
1074 if (temp_ff.full < tras_ff.full)
1075 temp_ff.full = tras_ff.full;
1076 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
1077
1078 temp_ff.full = rfixed_const(cur_size);
1079 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
1080 /*
1081 Find the total latency for the display data.
1082 */
1083 disp_latency_overhead.full = rfixed_const(80);
1084 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
1085 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
1086 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
1087
1088 if (mc_latency_mclk.full > mc_latency_sclk.full)
1089 disp_latency.full = mc_latency_mclk.full;
1090 else
1091 disp_latency.full = mc_latency_sclk.full;
1092
1093 /* setup Max GRPH_STOP_REQ default value */
1094 if (ASIC_IS_RV100(rdev))
1095 max_stop_req = 0x5c;
1096 else
1097 max_stop_req = 0x7c;
1098
1099 if (mode1) {
1100 /* CRTC1
1101 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
1102 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
1103 */
1104 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
1105
1106 if (stop_req > max_stop_req)
1107 stop_req = max_stop_req;
1108
1109 /*
1110 Find the drain rate of the display buffer.
1111 */
1112 temp_ff.full = rfixed_const((16/pixel_bytes1));
1113 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
1114
1115 /*
1116 Find the critical point of the display buffer.
1117 */
1118 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
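		/* add 0.5 before truncating so the critical point rounds to the nearest integer */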
1119 crit_point_ff.full += rfixed_const_half(0);
1120
1121 critical_point = rfixed_trunc(crit_point_ff);
1122
1123 if (rdev->disp_priority == 2) {
1124 critical_point = 0;
1125 }
1126
1127 /*
1128 The critical point should never be above max_stop_req-4. Setting
1129 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
1130 */
1131 if (max_stop_req - critical_point < 4)
1132 critical_point = 0;
1133
1134 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
1135			/* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
1136 critical_point = 0x10;
1137 }
1138
1139 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
1140 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
1141 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1142 temp &= ~(RADEON_GRPH_START_REQ_MASK);
1143 if ((rdev->family == CHIP_R350) &&
1144 (stop_req > 0x15)) {
1145 stop_req -= 0x10;
1146 }
1147 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1148 temp |= RADEON_GRPH_BUFFER_SIZE;
1149 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
1150 RADEON_GRPH_CRITICAL_AT_SOF |
1151 RADEON_GRPH_STOP_CNTL);
1152 /*
1153 Write the result into the register.
1154 */
1155 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1156 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1157
1158#if 0
1159 if ((rdev->family == CHIP_RS400) ||
1160 (rdev->family == CHIP_RS480)) {
1161 /* attempt to program RS400 disp regs correctly ??? */
1162 temp = RREG32(RS400_DISP1_REG_CNTL);
1163 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
1164 RS400_DISP1_STOP_REQ_LEVEL_MASK);
1165 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
1166 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1167 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1168 temp = RREG32(RS400_DMIF_MEM_CNTL1);
1169 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
1170 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
1171 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
1172 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
1173 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
1174 }
1175#endif
1176
1177		DRM_DEBUG("GRPH_BUFFER_CNTL to %x\n",
1178 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
1179 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
1180 }
1181
1182 if (mode2) {
1183 u32 grph2_cntl;
1184 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
1185
1186 if (stop_req > max_stop_req)
1187 stop_req = max_stop_req;
1188
1189 /*
1190 Find the drain rate of the display buffer.
1191 */
1192 temp_ff.full = rfixed_const((16/pixel_bytes2));
1193 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
1194
1195 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
1196 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
1197 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1198 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
1199 if ((rdev->family == CHIP_R350) &&
1200 (stop_req > 0x15)) {
1201 stop_req -= 0x10;
1202 }
1203 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1204 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
1205 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
1206 RADEON_GRPH_CRITICAL_AT_SOF |
1207 RADEON_GRPH_STOP_CNTL);
1208
1209 if ((rdev->family == CHIP_RS100) ||
1210 (rdev->family == CHIP_RS200))
1211 critical_point2 = 0;
1212 else {
1213 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
1214 temp_ff.full = rfixed_const(temp);
1215 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
1216 if (sclk_ff.full < temp_ff.full)
1217 temp_ff.full = sclk_ff.full;
1218
1219 read_return_rate.full = temp_ff.full;
1220
1221 if (mode1) {
1222 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
1223 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
1224 } else {
1225 time_disp1_drop_priority.full = 0;
1226 }
1227 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
1228 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
1229 crit_point_ff.full += rfixed_const_half(0);
1230
1231 critical_point2 = rfixed_trunc(crit_point_ff);
1232
1233 if (rdev->disp_priority == 2) {
1234 critical_point2 = 0;
1235 }
1236
1237 if (max_stop_req - critical_point2 < 4)
1238 critical_point2 = 0;
1239
1240 }
1241
1242 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
1243			/* some R300 cards have a problem with this set to 0 */
1244 critical_point2 = 0x10;
1245 }
1246
1247 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1248 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1249
1250 if ((rdev->family == CHIP_RS400) ||
1251 (rdev->family == CHIP_RS480)) {
1252#if 0
1253 /* attempt to program RS400 disp2 regs correctly ??? */
1254 temp = RREG32(RS400_DISP2_REQ_CNTL1);
1255 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
1256 RS400_DISP2_STOP_REQ_LEVEL_MASK);
1257 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
1258 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1259 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1260 temp = RREG32(RS400_DISP2_REQ_CNTL2);
1261 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
1262 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
1263 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
1264 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
1265 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
1266#endif
1267 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
1268 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
1269 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
1270 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
1271 }
1272
1273		DRM_DEBUG("GRPH2_BUFFER_CNTL to %x\n",
1274 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
1275 }
1276}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644
index 000000000000..2c2f42de1d4c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -0,0 +1,1288 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32
33static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder,
34 struct drm_display_mode *mode,
35 struct drm_display_mode *adjusted_mode)
36{
37 struct drm_device *dev = encoder->dev;
38 struct radeon_device *rdev = dev->dev_private;
39 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
40 int xres = mode->hdisplay;
41 int yres = mode->vdisplay;
42 bool hscale = true, vscale = true;
43 int hsync_wid;
44 int vsync_wid;
45 int hsync_start;
46 uint32_t scale, inc;
47 uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active;
48 uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp;
49 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
50
51 DRM_DEBUG("\n");
52
53 fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
54 (RADEON_VERT_STRETCH_RESERVED |
55 RADEON_VERT_AUTO_RATIO_INC);
56 fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
57 (RADEON_HORZ_FP_LOOP_STRETCH |
58 RADEON_HORZ_AUTO_RATIO_INC);
59
60 crtc_more_cntl = 0;
61 if ((rdev->family == CHIP_RS100) ||
62 (rdev->family == CHIP_RS200)) {
63		/* This is to work around an ASIC bug for RMX: some versions
64		   of the BIOS don't have this register initialized correctly. */
65 crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
66 }
67
68
69 fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
70 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
71
72 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
73 if (!hsync_wid)
74 hsync_wid = 1;
75 hsync_start = mode->crtc_hsync_start - 8;
76
77 fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
78 | ((hsync_wid & 0x3f) << 16)
79 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
80 ? RADEON_CRTC_H_SYNC_POL
81 : 0));
82
83 fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
84 | ((mode->crtc_vdisplay - 1) << 16));
85
86 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
87 if (!vsync_wid)
88 vsync_wid = 1;
89
90 fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
91 | ((vsync_wid & 0x1f) << 16)
92 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
93 ? RADEON_CRTC_V_SYNC_POL
94 : 0));
95
96 fp_horz_vert_active = 0;
97
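	/* clamp the requested mode to the panel's native resolution and decide whether scaling is needed */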
98 if (native_mode->panel_xres == 0 ||
99 native_mode->panel_yres == 0) {
100 hscale = false;
101 vscale = false;
102 } else {
103 if (xres > native_mode->panel_xres)
104 xres = native_mode->panel_xres;
105 if (yres > native_mode->panel_yres)
106 yres = native_mode->panel_yres;
107
108 if (xres == native_mode->panel_xres)
109 hscale = false;
110 if (yres == native_mode->panel_yres)
111 vscale = false;
112 }
113
114 if (radeon_encoder->flags & RADEON_USE_RMX) {
115 if (radeon_encoder->rmx_type != RMX_CENTER) {
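			/* full-panel stretch: the ratio field holds source size over native panel size, scaled to the hardware's fixed-point range */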
116 if (!hscale)
117 fp_horz_stretch |= ((xres/8-1) << 16);
118 else {
119 inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
120 scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
121 / native_mode->panel_xres + 1;
122 fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
123 RADEON_HORZ_STRETCH_BLEND |
124 RADEON_HORZ_STRETCH_ENABLE |
125 ((native_mode->panel_xres/8-1) << 16));
126 }
127
128 if (!vscale)
129 fp_vert_stretch |= ((yres-1) << 12);
130 else {
131 inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
132 scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
133 / native_mode->panel_yres + 1;
134 fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
135 RADEON_VERT_STRETCH_ENABLE |
136 RADEON_VERT_STRETCH_BLEND |
137 ((native_mode->panel_yres-1) << 12));
138 }
139 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
140 int blank_width;
141
142 fp_horz_stretch |= ((xres/8-1) << 16);
143 fp_vert_stretch |= ((yres-1) << 12);
144
145 crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
146 RADEON_CRTC_AUTO_VERT_CENTER_EN);
147
148 blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
149 if (blank_width > 110)
150 blank_width = 110;
151
152 fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
153 | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
154
155 hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
156 if (!hsync_wid)
157 hsync_wid = 1;
158
159 fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
160 | ((hsync_wid & 0x3f) << 16)
161 | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
162 ? RADEON_CRTC_H_SYNC_POL
163 : 0));
164
165 fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
166 | ((mode->crtc_vdisplay - 1) << 16));
167
168 vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
169 if (!vsync_wid)
170 vsync_wid = 1;
171
172 fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
173 | ((vsync_wid & 0x1f) << 16)
174 | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
175 ? RADEON_CRTC_V_SYNC_POL
176 : 0)));
177
178 fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
179 (((native_mode->panel_xres / 8) & 0x1ff) << 16));
180 }
181 } else {
182 fp_horz_stretch |= ((xres/8-1) << 16);
183 fp_vert_stretch |= ((yres-1) << 12);
184 }
185
186 WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
187 WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
188 WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
189 WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
190 WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
191 WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
192 WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
193 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
194
195}
196
197static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
198{
199 struct drm_device *dev = encoder->dev;
200 struct radeon_device *rdev = dev->dev_private;
201 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
202 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
203 int panel_pwr_delay = 2000;
204 DRM_DEBUG("\n");
205
206 if (radeon_encoder->enc_priv) {
207 if (rdev->is_atom_bios) {
208 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
209 panel_pwr_delay = lvds->panel_pwr_delay;
210 } else {
211 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
212 panel_pwr_delay = lvds->panel_pwr_delay;
213 }
214 }
215
216 switch (mode) {
217 case DRM_MODE_DPMS_ON:
218 disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
219 disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
220 WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
221 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
222 lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
223 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
224 udelay(1000);
225
226 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
227 lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
228 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
229
230 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
231 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
232 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
233		mdelay(panel_pwr_delay); /* delay is in ms; too long for udelay() */
234 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
235 break;
236 case DRM_MODE_DPMS_STANDBY:
237 case DRM_MODE_DPMS_SUSPEND:
238 case DRM_MODE_DPMS_OFF:
239 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
240 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
241 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
242 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
243 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
244		mdelay(panel_pwr_delay); /* delay is in ms; too long for udelay() */
245 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
246 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
247 break;
248 }
249
250 if (rdev->is_atom_bios)
251 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
252 else
253 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
254}
255
256static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
257{
258 struct radeon_device *rdev = encoder->dev->dev_private;
259
260 if (rdev->is_atom_bios)
261 radeon_atom_output_lock(encoder, true);
262 else
263 radeon_combios_output_lock(encoder, true);
264 radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
265}
266
267static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
268{
269 struct radeon_device *rdev = encoder->dev->dev_private;
270
271 radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
272 if (rdev->is_atom_bios)
273 radeon_atom_output_lock(encoder, false);
274 else
275 radeon_combios_output_lock(encoder, false);
276}
277
278static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
279 struct drm_display_mode *mode,
280 struct drm_display_mode *adjusted_mode)
281{
282 struct drm_device *dev = encoder->dev;
283 struct radeon_device *rdev = dev->dev_private;
284 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
285 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
286 uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
287
288 DRM_DEBUG("\n");
289
290 if (radeon_crtc->crtc_id == 0)
291 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
292
293 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
294 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
295
296 lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
297	if (!rdev->is_atom_bios) {
298 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
299 if (lvds) {
300 DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
301 lvds_gen_cntl = lvds->lvds_gen_cntl;
302 lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
303 (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
304 lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
305 (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
306 } else
307 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
308 } else
309 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
310 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
311 lvds_gen_cntl &= ~(RADEON_LVDS_ON |
312 RADEON_LVDS_BLON |
313 RADEON_LVDS_EN |
314 RADEON_LVDS_RST_FM);
315
316 if (ASIC_IS_R300(rdev))
317 lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);
318
319 if (radeon_crtc->crtc_id == 0) {
320 if (ASIC_IS_R300(rdev)) {
321 if (radeon_encoder->flags & RADEON_USE_RMX)
322 lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
323 } else
324 lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
325 } else {
326 if (ASIC_IS_R300(rdev))
327 lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
328 else
329 lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
330 }
331
332 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
333 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
334 WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);
335
336 if (rdev->family == CHIP_RV410)
337 WREG32(RADEON_CLOCK_CNTL_INDEX, 0);
338
339 if (rdev->is_atom_bios)
340 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
341 else
342 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
343}
344
345static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
346 struct drm_display_mode *mode,
347 struct drm_display_mode *adjusted_mode)
348{
349 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
350
351 drm_mode_set_crtcinfo(adjusted_mode, 0);
352
353 radeon_encoder->flags &= ~RADEON_USE_RMX;
354
355 if (radeon_encoder->rmx_type != RMX_OFF)
356 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
357
358 return true;
359}
360
361static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
362 .dpms = radeon_legacy_lvds_dpms,
363 .mode_fixup = radeon_legacy_lvds_mode_fixup,
364 .prepare = radeon_legacy_lvds_prepare,
365 .mode_set = radeon_legacy_lvds_mode_set,
366 .commit = radeon_legacy_lvds_commit,
367};
368
369
370static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
371 .destroy = radeon_enc_destroy,
372};
373
374static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
375 struct drm_display_mode *mode,
376 struct drm_display_mode *adjusted_mode)
377{
378
379 drm_mode_set_crtcinfo(adjusted_mode, 0);
380
381 return true;
382}
383
384static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
385{
386 struct drm_device *dev = encoder->dev;
387 struct radeon_device *rdev = dev->dev_private;
388 uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
389 uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
390 uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
391
392 DRM_DEBUG("\n");
393
394 switch (mode) {
395 case DRM_MODE_DPMS_ON:
396 crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
397 dac_cntl &= ~RADEON_DAC_PDWN;
398 dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
399 RADEON_DAC_PDWN_G |
400 RADEON_DAC_PDWN_B);
401 break;
402 case DRM_MODE_DPMS_STANDBY:
403 case DRM_MODE_DPMS_SUSPEND:
404 case DRM_MODE_DPMS_OFF:
405 crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
406 dac_cntl |= RADEON_DAC_PDWN;
407 dac_macro_cntl |= (RADEON_DAC_PDWN_R |
408 RADEON_DAC_PDWN_G |
409 RADEON_DAC_PDWN_B);
410 break;
411 }
412
413 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
414 WREG32(RADEON_DAC_CNTL, dac_cntl);
415 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
416
417 if (rdev->is_atom_bios)
418 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
419 else
420 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
421}
422
423static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
424{
425 struct radeon_device *rdev = encoder->dev->dev_private;
426
427 if (rdev->is_atom_bios)
428 radeon_atom_output_lock(encoder, true);
429 else
430 radeon_combios_output_lock(encoder, true);
431 radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
432}
433
434static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
435{
436 struct radeon_device *rdev = encoder->dev->dev_private;
437
438 radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
439
440 if (rdev->is_atom_bios)
441 radeon_atom_output_lock(encoder, false);
442 else
443 radeon_combios_output_lock(encoder, false);
444}
445
446static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
447 struct drm_display_mode *mode,
448 struct drm_display_mode *adjusted_mode)
449{
450 struct drm_device *dev = encoder->dev;
451 struct radeon_device *rdev = dev->dev_private;
452 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
453 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
454 uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
455
456 DRM_DEBUG("\n");
457
458 if (radeon_crtc->crtc_id == 0)
459 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
460
461 if (radeon_crtc->crtc_id == 0) {
462 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
463 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
464 ~(RADEON_DISP_DAC_SOURCE_MASK);
465 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
466 } else {
467 dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL);
468 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
469 }
470 } else {
471 if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
472 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
473 ~(RADEON_DISP_DAC_SOURCE_MASK);
474 disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
475 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
476 } else {
477 dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
478 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
479 }
480 }
481
482 dac_cntl = (RADEON_DAC_MASK_ALL |
483 RADEON_DAC_VGA_ADR_EN |
484 /* TODO 6-bits */
485 RADEON_DAC_8BIT_EN);
486
487 WREG32_P(RADEON_DAC_CNTL,
488 dac_cntl,
489 RADEON_DAC_RANGE_CNTL |
490 RADEON_DAC_BLANKING);
491
492 if (radeon_encoder->enc_priv) {
493 struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
494 dac_macro_cntl = p_dac->ps2_pdac_adj;
495 } else
496 dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
497 dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
498 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
499
500 if (rdev->is_atom_bios)
501 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
502 else
503 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
504}
505
506static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
507 struct drm_connector *connector)
508{
509 struct drm_device *dev = encoder->dev;
510 struct radeon_device *rdev = dev->dev_private;
511 uint32_t vclk_ecp_cntl, crtc_ext_cntl;
512 uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
513 enum drm_connector_status found = connector_status_disconnected;
514 bool color = true;
515
516 /* save the regs we need */
517 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
518 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
519 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
520 dac_cntl = RREG32(RADEON_DAC_CNTL);
521 dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
522
523 tmp = vclk_ecp_cntl &
524 ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
525 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
526
527 tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
528 WREG32(RADEON_CRTC_EXT_CNTL, tmp);
529
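	/* force a known level onto the DAC outputs and use the built-in comparator to sense whether a monitor load is attached */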
530 tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
531 RADEON_DAC_FORCE_DATA_EN;
532
533 if (color)
534 tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
535 else
536 tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
537
538 if (ASIC_IS_R300(rdev))
539 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
540 else
541 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
542
543 WREG32(RADEON_DAC_EXT_CNTL, tmp);
544
545 tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
546 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
547 WREG32(RADEON_DAC_CNTL, tmp);
548
549 tmp &= ~(RADEON_DAC_PDWN_R |
550 RADEON_DAC_PDWN_G |
551 RADEON_DAC_PDWN_B);
552
553 WREG32(RADEON_DAC_MACRO_CNTL, tmp);
554
555 udelay(2000);
556
557 if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
558 found = connector_status_connected;
559
560 /* restore the regs we used */
561 WREG32(RADEON_DAC_CNTL, dac_cntl);
562 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
563 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
564 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
565 WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
566
567 return found;
568}
569
570static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
571 .dpms = radeon_legacy_primary_dac_dpms,
572 .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
573 .prepare = radeon_legacy_primary_dac_prepare,
574 .mode_set = radeon_legacy_primary_dac_mode_set,
575 .commit = radeon_legacy_primary_dac_commit,
576 .detect = radeon_legacy_primary_dac_detect,
577};
578
579
580static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
581 .destroy = radeon_enc_destroy,
582};
583
584static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
585 struct drm_display_mode *mode,
586 struct drm_display_mode *adjusted_mode)
587{
588
589 drm_mode_set_crtcinfo(adjusted_mode, 0);
590
591 return true;
592}
593
594static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
595{
596 struct drm_device *dev = encoder->dev;
597 struct radeon_device *rdev = dev->dev_private;
598 uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
599 DRM_DEBUG("\n");
600
601 switch (mode) {
602 case DRM_MODE_DPMS_ON:
603 fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
604 break;
605 case DRM_MODE_DPMS_STANDBY:
606 case DRM_MODE_DPMS_SUSPEND:
607 case DRM_MODE_DPMS_OFF:
608 fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
609 break;
610 }
611
612 WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
613
614 if (rdev->is_atom_bios)
615 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
616 else
617 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
618}
619
620static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
621{
622 struct radeon_device *rdev = encoder->dev->dev_private;
623
624 if (rdev->is_atom_bios)
625 radeon_atom_output_lock(encoder, true);
626 else
627 radeon_combios_output_lock(encoder, true);
628 radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
629}
630
631static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
632{
633 struct radeon_device *rdev = encoder->dev->dev_private;
634
635 radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
636
637 if (rdev->is_atom_bios)
638		radeon_atom_output_lock(encoder, false);
639	else
640		radeon_combios_output_lock(encoder, false);
641}
642
643static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
644 struct drm_display_mode *mode,
645 struct drm_display_mode *adjusted_mode)
646{
647 struct drm_device *dev = encoder->dev;
648 struct radeon_device *rdev = dev->dev_private;
649 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
650 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
651 uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
652 int i;
653
654 DRM_DEBUG("\n");
655
656 if (radeon_crtc->crtc_id == 0)
657 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
658
659 tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
660 tmp &= 0xfffff;
661 if (rdev->family == CHIP_RV280) {
662 /* bit 22 of TMDS_PLL_CNTL is read-back inverted */
663 tmp ^= (1 << 22);
664 tmds_pll_cntl ^= (1 << 22);
665 }
666
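	/* pick the first BIOS-provided TMDS PLL setting whose frequency limit covers this pixel clock */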
667 if (radeon_encoder->enc_priv) {
668 struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;
669
670 for (i = 0; i < 4; i++) {
671 if (tmds->tmds_pll[i].freq == 0)
672 break;
673 if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
674				tmp = tmds->tmds_pll[i].value;
675 break;
676 }
677 }
678 }
679
680 if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
681 if (tmp & 0xfff00000)
682 tmds_pll_cntl = tmp;
683 else {
684 tmds_pll_cntl &= 0xfff00000;
685 tmds_pll_cntl |= tmp;
686 }
687 } else
688 tmds_pll_cntl = tmp;
689
690 tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
691 ~(RADEON_TMDS_TRANSMITTER_PLLRST);
692
693 if (rdev->family == CHIP_R200 ||
694 rdev->family == CHIP_R100 ||
695 ASIC_IS_R300(rdev))
696 tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
697 else /* RV chips got this bit reversed */
698 tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
699
700 fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
701 (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
702 RADEON_FP_CRTC_DONT_SHADOW_HEND));
703
704 fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
705
706 if (1) /* FIXME rgbBits == 8 */
707 fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
708 else
709 fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
710
711 if (radeon_crtc->crtc_id == 0) {
712 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
713 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
714 if (radeon_encoder->flags & RADEON_USE_RMX)
715 fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
716 else
717 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
718 } else
719 fp_gen_cntl |= RADEON_FP_SEL_CRTC1;
720 } else {
721 if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
722 fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
723 fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
724 } else
725 fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
726 }
727
728 WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
729 WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
730 WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
731
732 if (rdev->is_atom_bios)
733 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
734 else
735 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
736}
737
738static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
739 .dpms = radeon_legacy_tmds_int_dpms,
740 .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
741 .prepare = radeon_legacy_tmds_int_prepare,
742 .mode_set = radeon_legacy_tmds_int_mode_set,
743 .commit = radeon_legacy_tmds_int_commit,
744};
745
746
747static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
748 .destroy = radeon_enc_destroy,
749};
750
751static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
752 struct drm_display_mode *mode,
753 struct drm_display_mode *adjusted_mode)
754{
755
756 drm_mode_set_crtcinfo(adjusted_mode, 0);
757
758 return true;
759}
760
761static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
762{
763 struct drm_device *dev = encoder->dev;
764 struct radeon_device *rdev = dev->dev_private;
765 uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
766 DRM_DEBUG("\n");
767
768 switch (mode) {
769 case DRM_MODE_DPMS_ON:
770 fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
771 fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
772 break;
773 case DRM_MODE_DPMS_STANDBY:
774 case DRM_MODE_DPMS_SUSPEND:
775 case DRM_MODE_DPMS_OFF:
776 fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
777 fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
778 break;
779 }
780
781 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
782
783 if (rdev->is_atom_bios)
784 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
785 else
786 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
787}
788
789static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
790{
791 struct radeon_device *rdev = encoder->dev->dev_private;
792
793 if (rdev->is_atom_bios)
794 radeon_atom_output_lock(encoder, true);
795 else
796 radeon_combios_output_lock(encoder, true);
797 radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
798}
799
800static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
801{
802 struct radeon_device *rdev = encoder->dev->dev_private;
803 radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);
804
805 if (rdev->is_atom_bios)
806 radeon_atom_output_lock(encoder, false);
807 else
808 radeon_combios_output_lock(encoder, false);
809}
810
811static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
812 struct drm_display_mode *mode,
813 struct drm_display_mode *adjusted_mode)
814{
815 struct drm_device *dev = encoder->dev;
816 struct radeon_device *rdev = dev->dev_private;
817 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
818 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
819 uint32_t fp2_gen_cntl;
820
821 DRM_DEBUG("\n");
822
823 if (radeon_crtc->crtc_id == 0)
824 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
825
826 if (rdev->is_atom_bios) {
827 radeon_encoder->pixel_clock = adjusted_mode->clock;
828 atombios_external_tmds_setup(encoder, ATOM_ENABLE);
829 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
830 } else {
831 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
832
833 if (1) /* FIXME rgbBits == 8 */
834 fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */
835 else
836 fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */
837
838 fp2_gen_cntl &= ~(RADEON_FP2_ON |
839 RADEON_FP2_DVO_EN |
840 RADEON_FP2_DVO_RATE_SEL_SDR);
841
842 /* XXX: these are oem specific */
843 if (ASIC_IS_R300(rdev)) {
844 if ((dev->pdev->device == 0x4850) &&
845 (dev->pdev->subsystem_vendor == 0x1028) &&
846 (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
847 fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
848 else
849 fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;
850
851 /*if (mode->clock > 165000)
852 fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
853 }
854 }
855
856 if (radeon_crtc->crtc_id == 0) {
857 if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
858 fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
859 if (radeon_encoder->flags & RADEON_USE_RMX)
860 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
861 else
862 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
863 } else
864 fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
865 } else {
866 if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
867 fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
868 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
869 } else
870 fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
871 }
872
873 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
874
875 if (rdev->is_atom_bios)
876 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
877 else
878 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
879}
880
881static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
882 .dpms = radeon_legacy_tmds_ext_dpms,
883 .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
884 .prepare = radeon_legacy_tmds_ext_prepare,
885 .mode_set = radeon_legacy_tmds_ext_mode_set,
886 .commit = radeon_legacy_tmds_ext_commit,
887};
888
889
890static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
891 .destroy = radeon_enc_destroy,
892};
893
894static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
895 struct drm_display_mode *mode,
896 struct drm_display_mode *adjusted_mode)
897{
898
899 drm_mode_set_crtcinfo(adjusted_mode, 0);
900
901 return true;
902}
903
904static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
905{
906 struct drm_device *dev = encoder->dev;
907 struct radeon_device *rdev = dev->dev_private;
908 uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
909 /* uint32_t tv_master_cntl = 0; */
910
911 DRM_DEBUG("\n");
912
913 if (rdev->family == CHIP_R200)
914 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
915 else {
916 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
917 /* FIXME TV */
918 /* tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); */
919 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
920 }
921
922 switch (mode) {
923 case DRM_MODE_DPMS_ON:
924 if (rdev->family == CHIP_R200) {
925 fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
926 } else {
927 crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
928 /* tv_master_cntl |= RADEON_TV_ON; */
929 if (rdev->family == CHIP_R420 ||
930 rdev->family == CHIP_R423 ||
931 rdev->family == CHIP_RV410)
932 tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
933 R420_TV_DAC_GDACPD |
934 R420_TV_DAC_BDACPD |
935 RADEON_TV_DAC_BGSLEEP);
936 else
937 tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
938 RADEON_TV_DAC_GDACPD |
939 RADEON_TV_DAC_BDACPD |
940 RADEON_TV_DAC_BGSLEEP);
941 }
942 break;
943 case DRM_MODE_DPMS_STANDBY:
944 case DRM_MODE_DPMS_SUSPEND:
945 case DRM_MODE_DPMS_OFF:
946 if (rdev->family == CHIP_R200)
947 fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
948 else {
949 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
950 /* tv_master_cntl &= ~RADEON_TV_ON; */
951 if (rdev->family == CHIP_R420 ||
952 rdev->family == CHIP_R423 ||
953 rdev->family == CHIP_RV410)
954 tv_dac_cntl |= (R420_TV_DAC_RDACPD |
955 R420_TV_DAC_GDACPD |
956 R420_TV_DAC_BDACPD |
957 RADEON_TV_DAC_BGSLEEP);
958 else
959 tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
960 RADEON_TV_DAC_GDACPD |
961 RADEON_TV_DAC_BDACPD |
962 RADEON_TV_DAC_BGSLEEP);
963 }
964 break;
965 }
966
967 if (rdev->family == CHIP_R200) {
968 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
969 } else {
970 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
971 /* WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); */
972 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
973 }
974
975 if (rdev->is_atom_bios)
976 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
977 else
978 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
979}
980
981static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
982{
983 struct radeon_device *rdev = encoder->dev->dev_private;
984
985 if (rdev->is_atom_bios)
986 radeon_atom_output_lock(encoder, true);
987 else
988 radeon_combios_output_lock(encoder, true);
989 radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
990}
991
992static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
993{
994 struct radeon_device *rdev = encoder->dev->dev_private;
995
996 radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
997
998 if (rdev->is_atom_bios)
999 radeon_atom_output_lock(encoder, false);
1000 else
1001 radeon_combios_output_lock(encoder, false);
1002}
1003
1004static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
1005 struct drm_display_mode *mode,
1006 struct drm_display_mode *adjusted_mode)
1007{
1008 struct drm_device *dev = encoder->dev;
1009 struct radeon_device *rdev = dev->dev_private;
1010 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1011 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1012 uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
1013 uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0;
1014
1015 DRM_DEBUG("\n");
1016
1017 if (radeon_crtc->crtc_id == 0)
1018 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
1019
1020 if (rdev->family != CHIP_R200) {
1021 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1022 if (rdev->family == CHIP_R420 ||
1023 rdev->family == CHIP_R423 ||
1024 rdev->family == CHIP_RV410) {
1025 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
1026 RADEON_TV_DAC_BGADJ_MASK |
1027 R420_TV_DAC_DACADJ_MASK |
1028 R420_TV_DAC_RDACPD |
1029 R420_TV_DAC_GDACPD |
1030 R420_TV_DAC_BDACPD |
1031 R420_TV_DAC_TVENABLE);
1032 } else {
1033 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
1034 RADEON_TV_DAC_BGADJ_MASK |
1035 RADEON_TV_DAC_DACADJ_MASK |
1036 RADEON_TV_DAC_RDACPD |
1037 RADEON_TV_DAC_GDACPD |
1038 RADEON_TV_DAC_BDACPD);
1039 }
1040
1041 /* FIXME TV */
1042 if (radeon_encoder->enc_priv) {
1043 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
1044 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1045 RADEON_TV_DAC_NHOLD |
1046 RADEON_TV_DAC_STD_PS2 |
1047 tv_dac->ps2_tvdac_adj);
1048 } else
1049 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1050 RADEON_TV_DAC_NHOLD |
1051 RADEON_TV_DAC_STD_PS2);
1052
1053 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1054 }
1055
1056 if (ASIC_IS_R300(rdev)) {
1057 gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
1058 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
1059 } else if (rdev->family == CHIP_R200)
1060 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
1061 else
1062 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1063
1064 dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
1065
1066 if (radeon_crtc->crtc_id == 0) {
1067 if (ASIC_IS_R300(rdev)) {
1068 disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
1069 disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
1070 } else if (rdev->family == CHIP_R200) {
1071 fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
1072 RADEON_FP2_DVO_RATE_SEL_SDR);
1073 } else
1074 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
1075 } else {
1076 if (ASIC_IS_R300(rdev)) {
1077 disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
1078 disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1079 } else if (rdev->family == CHIP_R200) {
1080 fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
1081 RADEON_FP2_DVO_RATE_SEL_SDR);
1082 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
1083 } else
1084 disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
1085 }
1086
1087 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1088
1089 if (ASIC_IS_R300(rdev)) {
1090 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1091 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1092 } else if (rdev->family == CHIP_R200)
1093 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
1094 else
1095 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1096
1097 if (rdev->is_atom_bios)
1098 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1099 else
1100 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1101
1102}
1103
1104static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
1105 struct drm_connector *connector)
1106{
1107 struct drm_device *dev = encoder->dev;
1108 struct radeon_device *rdev = dev->dev_private;
1109 uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
1110 uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
1111 enum drm_connector_status found = connector_status_disconnected;
1112 bool color = true;
1113
1114 /* FIXME tv */
1115
1116 /* save the regs we need */
1117 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
1118 gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0;
1119 disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0;
1120 disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG);
1121 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1122 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1123 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
1124 dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
1125
1126 tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
1127 | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
1128 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
1129
1130 if (ASIC_IS_R300(rdev))
1131 WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
1132
1133 tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
1134 tmp |= RADEON_CRTC2_CRT2_ON |
1135 (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
1136
1137 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
1138
1139 if (ASIC_IS_R300(rdev)) {
1140 tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
1141 tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1142 WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
1143 } else {
1144 tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
1145 WREG32(RADEON_DISP_HW_DEBUG, tmp);
1146 }
1147
1148 tmp = RADEON_TV_DAC_NBLANK |
1149 RADEON_TV_DAC_NHOLD |
1150 RADEON_TV_MONITOR_DETECT_EN |
1151 RADEON_TV_DAC_STD_PS2;
1152
1153 WREG32(RADEON_TV_DAC_CNTL, tmp);
1154
1155 tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
1156 RADEON_DAC2_FORCE_DATA_EN;
1157
1158 if (color)
1159 tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
1160 else
1161 tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
1162
1163 if (ASIC_IS_R300(rdev))
1164 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
1165 else
1166 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
1167
1168 WREG32(RADEON_DAC_EXT_CNTL, tmp);
1169
1170 tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
1171 WREG32(RADEON_DAC_CNTL2, tmp);
1172
1173 udelay(10000);
1174
1175 if (ASIC_IS_R300(rdev)) {
1176 if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
1177 found = connector_status_connected;
1178 } else {
1179 if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
1180 found = connector_status_connected;
1181 }
1182
1183 /* restore regs we used */
1184 WREG32(RADEON_DAC_CNTL2, dac_cntl2);
1185 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
1186 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1187 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1188
1189 if (ASIC_IS_R300(rdev)) {
1190 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1191 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1192 } else {
1193 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1194 }
1195 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
1196
1197 /* return found; */
1198 return connector_status_disconnected;
1199
1200}
1201
1202static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
1203 .dpms = radeon_legacy_tv_dac_dpms,
1204 .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
1205 .prepare = radeon_legacy_tv_dac_prepare,
1206 .mode_set = radeon_legacy_tv_dac_mode_set,
1207 .commit = radeon_legacy_tv_dac_commit,
1208 .detect = radeon_legacy_tv_dac_detect,
1209};
1210
1211
1212static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
1213 .destroy = radeon_enc_destroy,
1214};
1215
1216void
1217radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
1218{
1219 struct radeon_device *rdev = dev->dev_private;
1220 struct drm_encoder *encoder;
1221 struct radeon_encoder *radeon_encoder;
1222
1223 /* see if we already added it */
1224 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1225 radeon_encoder = to_radeon_encoder(encoder);
1226 if (radeon_encoder->encoder_id == encoder_id) {
1227 radeon_encoder->devices |= supported_device;
1228 return;
1229 }
1230
1231 }
1232
1233 /* add a new one */
1234 radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
1235 if (!radeon_encoder)
1236 return;
1237
1238 encoder = &radeon_encoder->base;
1239 encoder->possible_crtcs = 0x3;
1240 encoder->possible_clones = 0;
1241
1242 radeon_encoder->enc_priv = NULL;
1243
1244 radeon_encoder->encoder_id = encoder_id;
1245 radeon_encoder->devices = supported_device;
1246
1247 switch (radeon_encoder->encoder_id) {
1248 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1249 drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
1250 drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
1251 if (rdev->is_atom_bios)
1252 radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
1253 else
1254 radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
1255 radeon_encoder->rmx_type = RMX_FULL;
1256 break;
1257 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
1258 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
1259 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
1260 if (rdev->is_atom_bios)
1261 radeon_encoder->enc_priv = radeon_atombios_get_tmds_info(radeon_encoder);
1262 else
1263 radeon_encoder->enc_priv = radeon_combios_get_tmds_info(radeon_encoder);
1264 break;
1265 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1266 drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
1267 drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
1268 if (rdev->is_atom_bios)
1269 radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
1270 else
1271 radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
1272 break;
1273 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1274 drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
1275 drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
1276 if (rdev->is_atom_bios)
1277 radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
1278 else
1279 radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
1280 break;
1281 case ENCODER_OBJECT_ID_INTERNAL_DVO1:
1282 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
1283 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
1284 if (!rdev->is_atom_bios)
1285 radeon_combios_get_ext_tmds_info(radeon_encoder);
1286 break;
1287 }
1288}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644
index 000000000000..9173b687462b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -0,0 +1,398 @@
1/*
2 * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
3 * VA Linux Systems Inc., Fremont, California.
4 * Copyright 2008 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Original Authors:
25 * Kevin E. Martin, Rickard E. Faith, Alan Hourihane
26 *
27 * Kernel port Author: Dave Airlie
28 */
29
30#ifndef RADEON_MODE_H
31#define RADEON_MODE_H
32
33#include <drm_crtc.h>
34#include <drm_mode.h>
35#include <drm_edid.h>
36#include <linux/i2c.h>
37#include <linux/i2c-id.h>
38#include <linux/i2c-algo-bit.h>
39
40#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
41#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
42#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
43#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
44
45enum radeon_connector_type {
46 CONNECTOR_NONE,
47 CONNECTOR_VGA,
48 CONNECTOR_DVI_I,
49 CONNECTOR_DVI_D,
50 CONNECTOR_DVI_A,
51 CONNECTOR_STV,
52 CONNECTOR_CTV,
53 CONNECTOR_LVDS,
54 CONNECTOR_DIGITAL,
55 CONNECTOR_SCART,
56 CONNECTOR_HDMI_TYPE_A,
57 CONNECTOR_HDMI_TYPE_B,
58 CONNECTOR_0XC,
59 CONNECTOR_0XD,
60 CONNECTOR_DIN,
61 CONNECTOR_DISPLAY_PORT,
62 CONNECTOR_UNSUPPORTED
63};
64
65enum radeon_dvi_type {
66 DVI_AUTO,
67 DVI_DIGITAL,
68 DVI_ANALOG
69};
70
71enum radeon_rmx_type {
72 RMX_OFF,
73 RMX_FULL,
74 RMX_CENTER,
75 RMX_ASPECT
76};
77
78enum radeon_tv_std {
79 TV_STD_NTSC,
80 TV_STD_PAL,
81 TV_STD_PAL_M,
82 TV_STD_PAL_60,
83 TV_STD_NTSC_J,
84 TV_STD_SCART_PAL,
85 TV_STD_SECAM,
86 TV_STD_PAL_CN,
87};
88
89struct radeon_i2c_bus_rec {
90 bool valid;
91 uint32_t mask_clk_reg;
92 uint32_t mask_data_reg;
93 uint32_t a_clk_reg;
94 uint32_t a_data_reg;
95 uint32_t put_clk_reg;
96 uint32_t put_data_reg;
97 uint32_t get_clk_reg;
98 uint32_t get_data_reg;
99 uint32_t mask_clk_mask;
100 uint32_t mask_data_mask;
101 uint32_t put_clk_mask;
102 uint32_t put_data_mask;
103 uint32_t get_clk_mask;
104 uint32_t get_data_mask;
105 uint32_t a_clk_mask;
106 uint32_t a_data_mask;
107};
108
109struct radeon_tmds_pll {
110 uint32_t freq;
111 uint32_t value;
112};
113
114#define RADEON_MAX_BIOS_CONNECTOR 16
115
116#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
117#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
118#define RADEON_PLL_USE_REF_DIV (1 << 2)
119#define RADEON_PLL_LEGACY (1 << 3)
120#define RADEON_PLL_PREFER_LOW_REF_DIV (1 << 4)
121#define RADEON_PLL_PREFER_HIGH_REF_DIV (1 << 5)
122#define RADEON_PLL_PREFER_LOW_FB_DIV (1 << 6)
123#define RADEON_PLL_PREFER_HIGH_FB_DIV (1 << 7)
124#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8)
125#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
126#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
127
128struct radeon_pll {
129 uint16_t reference_freq;
130 uint16_t reference_div;
131 uint32_t pll_in_min;
132 uint32_t pll_in_max;
133 uint32_t pll_out_min;
134 uint32_t pll_out_max;
135 uint16_t xclk;
136
137 uint32_t min_ref_div;
138 uint32_t max_ref_div;
139 uint32_t min_post_div;
140 uint32_t max_post_div;
141 uint32_t min_feedback_div;
142 uint32_t max_feedback_div;
143 uint32_t min_frac_feedback_div;
144 uint32_t max_frac_feedback_div;
145 uint32_t best_vco;
146};
147
148struct radeon_i2c_chan {
149 struct drm_device *dev;
150 struct i2c_adapter adapter;
151 struct i2c_algo_bit_data algo;
152 struct radeon_i2c_bus_rec rec;
153};
154
155/* mostly for macs, but really any system without connector tables */
156enum radeon_connector_table {
157 CT_NONE,
158 CT_GENERIC,
159 CT_IBOOK,
160 CT_POWERBOOK_EXTERNAL,
161 CT_POWERBOOK_INTERNAL,
162 CT_POWERBOOK_VGA,
163 CT_MINI_EXTERNAL,
164 CT_MINI_INTERNAL,
165 CT_IMAC_G5_ISIGHT,
166 CT_EMAC,
167};
168
169struct radeon_mode_info {
170 struct atom_context *atom_context;
171 enum radeon_connector_table connector_table;
172 bool mode_config_initialized;
173};
174
175struct radeon_crtc {
176 struct drm_crtc base;
177 int crtc_id;
178 u16 lut_r[256], lut_g[256], lut_b[256];
179 bool enabled;
180 bool can_tile;
181 uint32_t crtc_offset;
182 struct radeon_framebuffer *fbdev_fb;
183 struct drm_mode_set mode_set;
184 struct drm_gem_object *cursor_bo;
185 uint64_t cursor_addr;
186 int cursor_width;
187 int cursor_height;
188};
189
190#define RADEON_USE_RMX 1
191
192struct radeon_native_mode {
193 /* preferred mode */
194 uint32_t panel_xres, panel_yres;
195 uint32_t hoverplus, hsync_width;
196 uint32_t hblank;
197 uint32_t voverplus, vsync_width;
198 uint32_t vblank;
199 uint32_t dotclock;
200 uint32_t flags;
201};
202
203struct radeon_encoder_primary_dac {
204 /* legacy primary dac */
205 uint32_t ps2_pdac_adj;
206};
207
208struct radeon_encoder_lvds {
209 /* legacy lvds */
210 uint16_t panel_vcc_delay;
211 uint8_t panel_pwr_delay;
212 uint8_t panel_digon_delay;
213 uint8_t panel_blon_delay;
214 uint16_t panel_ref_divider;
215 uint8_t panel_post_divider;
216 uint16_t panel_fb_divider;
217 bool use_bios_dividers;
218 uint32_t lvds_gen_cntl;
219 /* panel mode */
220 struct radeon_native_mode native_mode;
221};
222
223struct radeon_encoder_tv_dac {
224 /* legacy tv dac */
225 uint32_t ps2_tvdac_adj;
226 uint32_t ntsc_tvdac_adj;
227 uint32_t pal_tvdac_adj;
228
229 enum radeon_tv_std tv_std;
230};
231
232struct radeon_encoder_int_tmds {
233 /* legacy int tmds */
234 struct radeon_tmds_pll tmds_pll[4];
235};
236
237struct radeon_encoder_atom_dig {
238 /* atom dig */
239 bool coherent_mode;
240 int dig_block;
241 /* atom lvds */
242 uint32_t lvds_misc;
243 uint16_t panel_pwr_delay;
244 /* panel mode */
245 struct radeon_native_mode native_mode;
246};
247
248struct radeon_encoder {
249 struct drm_encoder base;
250 uint32_t encoder_id;
251 uint32_t devices;
252 uint32_t flags;
253 uint32_t pixel_clock;
254 enum radeon_rmx_type rmx_type;
255 struct radeon_native_mode native_mode;
256 void *enc_priv;
257};
258
259struct radeon_connector_atom_dig {
260 uint32_t igp_lane_info;
261 bool linkb;
262};
263
264struct radeon_connector {
265 struct drm_connector base;
266 uint32_t connector_id;
267 uint32_t devices;
268 struct radeon_i2c_chan *ddc_bus;
269 int use_digital;
270 void *con_priv;
271};
272
273struct radeon_framebuffer {
274 struct drm_framebuffer base;
275 struct drm_gem_object *obj;
276};
277
278extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
279 struct radeon_i2c_bus_rec *rec,
280 const char *name);
281extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
282extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
283extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
284
285extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
286
287extern void radeon_compute_pll(struct radeon_pll *pll,
288 uint64_t freq,
289 uint32_t *dot_clock_p,
290 uint32_t *fb_div_p,
291 uint32_t *frac_fb_div_p,
292 uint32_t *ref_div_p,
293 uint32_t *post_div_p,
294 int flags);
295
296struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
297struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
298struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
299struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
300struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
301extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
302extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
303
304extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
305extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
306 struct drm_framebuffer *old_fb);
307extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
308 struct drm_display_mode *mode,
309 struct drm_display_mode *adjusted_mode,
310 int x, int y,
311 struct drm_framebuffer *old_fb);
312extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
313
314extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
315 struct drm_framebuffer *old_fb);
316extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
317
318extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
319 struct drm_file *file_priv,
320 uint32_t handle,
321 uint32_t width,
322 uint32_t height);
323extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
324 int x, int y);
325
326extern bool radeon_atom_get_clock_info(struct drm_device *dev);
327extern bool radeon_combios_get_clock_info(struct drm_device *dev);
328extern struct radeon_encoder_atom_dig *
329radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
330extern struct radeon_encoder_int_tmds *
331radeon_atombios_get_tmds_info(struct radeon_encoder *encoder);
332extern struct radeon_encoder_primary_dac *
333radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
334extern struct radeon_encoder_tv_dac *
335radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
336extern struct radeon_encoder_lvds *
337radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
338extern struct radeon_encoder_int_tmds *
339radeon_combios_get_tmds_info(struct radeon_encoder *encoder);
340extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
341extern struct radeon_encoder_tv_dac *
342radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
343extern struct radeon_encoder_primary_dac *
344radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
345extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
346extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
347extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
348extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
349extern void
350radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
351extern void
352radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
353extern void
354radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
355extern void
356radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
357extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
358 u16 blue, int regno);
359struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
360 struct drm_mode_fb_cmd *mode_cmd,
361 struct drm_gem_object *obj);
362
363int radeonfb_probe(struct drm_device *dev);
364
365int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
366bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
367bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
368void radeon_atombios_init_crtc(struct drm_device *dev,
369 struct radeon_crtc *radeon_crtc);
370void radeon_legacy_init_crtc(struct drm_device *dev,
371 struct radeon_crtc *radeon_crtc);
372void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
373
374void radeon_get_clock_info(struct drm_device *dev);
375
376extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
377extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
378
379void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
380 struct drm_display_mode *mode,
381 struct drm_display_mode *adjusted_mode);
382void radeon_enc_destroy(struct drm_encoder *encoder);
383void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
384void radeon_combios_asic_init(struct drm_device *dev);
385extern int radeon_static_clocks_init(struct drm_device *dev);
386void radeon_init_disp_bw_legacy(struct drm_device *dev,
387 struct drm_display_mode *mode1,
388 uint32_t pixel_bytes1,
389 struct drm_display_mode *mode2,
390 uint32_t pixel_bytes2);
391void radeon_init_disp_bw_avivo(struct drm_device *dev,
392 struct drm_display_mode *mode1,
393 uint32_t pixel_bytes1,
394 struct drm_display_mode *mode2,
395 uint32_t pixel_bytes2);
396void radeon_init_disp_bandwidth(struct drm_device *dev);
397
398#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 000000000000..983e8df5e000
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,511 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32#include <linux/list.h>
33#include <drm/drmP.h>
34#include "radeon_drm.h"
35#include "radeon.h"
36
37struct radeon_object {
38 struct ttm_buffer_object tobj;
39 struct list_head list;
40 struct radeon_device *rdev;
41 struct drm_gem_object *gobj;
42 struct ttm_bo_kmap_obj kmap;
43 unsigned pin_count;
44 uint64_t gpu_addr;
45 void *kptr;
46 bool is_iomem;
47};
48
49int radeon_ttm_init(struct radeon_device *rdev);
50void radeon_ttm_fini(struct radeon_device *rdev);
51
52/*
53 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
54 * functions call it.
55 */
56
57static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
58{
59 return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
60}
61
62static void radeon_object_unreserve(struct radeon_object *robj)
63{
64 ttm_bo_unreserve(&robj->tobj);
65}
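/*
 * A minimal usage sketch of the two helpers above, assuming a hypothetical
 * accessor function: every routine that touches radeon_object state is
 * expected to bracket its work with reserve/unreserve so that concurrent
 * users are serialized by ttm_bo_reserve().
 */
static int radeon_object_access_example(struct radeon_object *robj)
{
	int r;

	/* interruptible reserve: a pending signal can abort the wait */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0))
		return r;

	/* ... safely inspect or modify robj->tobj here ... */

	radeon_object_unreserve(robj);
	return 0;
}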
66
67static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
68{
69 struct radeon_object *robj;
70
71 robj = container_of(tobj, struct radeon_object, tobj);
72 list_del_init(&robj->list);
73 kfree(robj);
74}
75
76static inline void radeon_object_gpu_addr(struct radeon_object *robj)
77{
78 /* Default gpu address */
79 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
80 if (robj->tobj.mem.mm_node == NULL) {
81 return;
82 }
83 robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
84 switch (robj->tobj.mem.mem_type) {
85 case TTM_PL_VRAM:
86 robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
87 break;
88 case TTM_PL_TT:
89 robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
90 break;
91 default:
92 DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
93 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
94 return;
95 }
96}
97
98static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
99{
100 uint32_t flags = 0;
101 if (domain & RADEON_GEM_DOMAIN_VRAM) {
102 flags |= TTM_PL_FLAG_VRAM;
103 }
104 if (domain & RADEON_GEM_DOMAIN_GTT) {
105 flags |= TTM_PL_FLAG_TT;
106 }
107 if (domain & RADEON_GEM_DOMAIN_CPU) {
108 flags |= TTM_PL_FLAG_SYSTEM;
109 }
110 if (!flags) {
111 flags |= TTM_PL_FLAG_SYSTEM;
112 }
113 return flags;
114}
115
116int radeon_object_create(struct radeon_device *rdev,
117 struct drm_gem_object *gobj,
118 unsigned long size,
119 bool kernel,
120 uint32_t domain,
121 bool interruptible,
122 struct radeon_object **robj_ptr)
123{
124 struct radeon_object *robj;
125 enum ttm_bo_type type;
126 uint32_t flags;
127 int r;
128
129 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
130 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
131 }
132 if (kernel) {
133 type = ttm_bo_type_kernel;
134 } else {
135 type = ttm_bo_type_device;
136 }
137 *robj_ptr = NULL;
138 robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
139 if (robj == NULL) {
140 return -ENOMEM;
141 }
142 robj->rdev = rdev;
143 robj->gobj = gobj;
144 INIT_LIST_HEAD(&robj->list);
145
146 flags = radeon_object_flags_from_domain(domain);
147 r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
148 0, 0, false, NULL, size,
149 &radeon_ttm_object_object_destroy);
150 if (unlikely(r != 0)) {
151 /* ttm calls radeon_ttm_object_object_destroy if an error happens */
152 DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
153 size, flags, 0);
154 return r;
155 }
156 *robj_ptr = robj;
157 if (gobj) {
158 list_add_tail(&robj->list, &rdev->gem.objects);
159 }
160 return 0;
161}
162
163int radeon_object_kmap(struct radeon_object *robj, void **ptr)
164{
165 int r;
166
167 spin_lock(&robj->tobj.lock);
168 if (robj->kptr) {
169 if (ptr) {
170 *ptr = robj->kptr;
171 }
172 spin_unlock(&robj->tobj.lock);
173 return 0;
174 }
175 spin_unlock(&robj->tobj.lock);
176 r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
177 if (r) {
178 return r;
179 }
180 spin_lock(&robj->tobj.lock);
181 robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
182 spin_unlock(&robj->tobj.lock);
183 if (ptr) {
184 *ptr = robj->kptr;
185 }
186 return 0;
187}
188
189void radeon_object_kunmap(struct radeon_object *robj)
190{
191 spin_lock(&robj->tobj.lock);
192 if (robj->kptr == NULL) {
193 spin_unlock(&robj->tobj.lock);
194 return;
195 }
196 robj->kptr = NULL;
197 spin_unlock(&robj->tobj.lock);
198 ttm_bo_kunmap(&robj->kmap);
199}
200
201void radeon_object_unref(struct radeon_object **robj)
202{
203 struct ttm_buffer_object *tobj;
204
205 if ((*robj) == NULL) {
206 return;
207 }
208 tobj = &((*robj)->tobj);
209 ttm_bo_unref(&tobj);
210 if (tobj == NULL) {
211 *robj = NULL;
212 }
213}
214
215int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
216{
217 *offset = robj->tobj.addr_space_offset;
218 return 0;
219}
220
221int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
222 uint64_t *gpu_addr)
223{
224 uint32_t flags;
225 uint32_t tmp;
226 void *fbptr;
227 int r;
228
229 flags = radeon_object_flags_from_domain(domain);
230 spin_lock(&robj->tobj.lock);
231 if (robj->pin_count) {
232 robj->pin_count++;
233 if (gpu_addr != NULL) {
234 *gpu_addr = robj->gpu_addr;
235 }
236 spin_unlock(&robj->tobj.lock);
237 return 0;
238 }
239 spin_unlock(&robj->tobj.lock);
240 r = radeon_object_reserve(robj, false);
241 if (unlikely(r != 0)) {
242 DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
243 return r;
244 }
245 if (robj->rdev->fbdev_robj == robj) {
246 mutex_lock(&robj->rdev->fbdev_info->lock);
247 radeon_object_kunmap(robj);
248 }
249 tmp = robj->tobj.mem.placement;
250 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
251 robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
252 r = ttm_buffer_object_validate(&robj->tobj,
253 robj->tobj.proposed_placement,
254 false, false);
255 radeon_object_gpu_addr(robj);
256 if (gpu_addr != NULL) {
257 *gpu_addr = robj->gpu_addr;
258 }
259 robj->pin_count = 1;
260 if (unlikely(r != 0)) {
261 DRM_ERROR("radeon: failed to pin object.\n");
262 }
263 radeon_object_unreserve(robj);
264 if (robj->rdev->fbdev_robj == robj) {
265 if (!r) {
266 r = radeon_object_kmap(robj, &fbptr);
267 }
268 if (!r) {
269 robj->rdev->fbdev_info->screen_base = fbptr;
270 robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
271 }
272 mutex_unlock(&robj->rdev->fbdev_info->lock);
273 }
274 return r;
275}
276
277void radeon_object_unpin(struct radeon_object *robj)
278{
279 uint32_t flags;
280 void *fbptr;
281 int r;
282
283 spin_lock(&robj->tobj.lock);
284 if (!robj->pin_count) {
285 spin_unlock(&robj->tobj.lock);
286 printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
287 return;
288 }
289 robj->pin_count--;
290 if (robj->pin_count) {
291 spin_unlock(&robj->tobj.lock);
292 return;
293 }
294 spin_unlock(&robj->tobj.lock);
295 r = radeon_object_reserve(robj, false);
296 if (unlikely(r != 0)) {
297 DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
298 return;
299 }
300 if (robj->rdev->fbdev_robj == robj) {
301 mutex_lock(&robj->rdev->fbdev_info->lock);
302 radeon_object_kunmap(robj);
303 }
304 flags = robj->tobj.mem.placement;
305 robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
306 r = ttm_buffer_object_validate(&robj->tobj,
307 robj->tobj.proposed_placement,
308 false, false);
309 if (unlikely(r != 0)) {
310 DRM_ERROR("radeon: failed to unpin buffer.\n");
311 }
312 radeon_object_unreserve(robj);
313 if (robj->rdev->fbdev_robj == robj) {
314 if (!r) {
315 r = radeon_object_kmap(robj, &fbptr);
316 }
317 if (!r) {
318 robj->rdev->fbdev_info->screen_base = fbptr;
319 robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
320 }
321 mutex_unlock(&robj->rdev->fbdev_info->lock);
322 }
323}
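/*
 * A minimal lifecycle sketch built from the helpers defined in this file;
 * the function name, buffer size and VRAM domain choice are assumptions
 * made purely for illustration.
 */
static int radeon_object_lifecycle_example(struct radeon_device *rdev)
{
	struct radeon_object *robj;
	uint64_t gpu_addr;
	void *ptr;
	int r;

	/* allocate a 64KB kernel-owned buffer allowed to live in VRAM */
	r = radeon_object_create(rdev, NULL, 64 * 1024, true,
				 RADEON_GEM_DOMAIN_VRAM, false, &robj);
	if (r)
		return r;

	/* pin it so gpu_addr stays valid, then map it for CPU access */
	r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r)
		goto out_unref;
	r = radeon_object_kmap(robj, &ptr);
	if (r)
		goto out_unpin;

	/* ... fill the buffer through ptr, hand gpu_addr to the hardware ... */

	radeon_object_kunmap(robj);
out_unpin:
	radeon_object_unpin(robj);
out_unref:
	radeon_object_unref(&robj);
	return r;
}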
324
325int radeon_object_wait(struct radeon_object *robj)
326{
327 int r = 0;
328
329 /* FIXME: should use block reservation instead */
330 r = radeon_object_reserve(robj, true);
331 if (unlikely(r != 0)) {
332 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
333 return r;
334 }
335 spin_lock(&robj->tobj.lock);
336 if (robj->tobj.sync_obj) {
337 r = ttm_bo_wait(&robj->tobj, true, false, false);
338 }
339 spin_unlock(&robj->tobj.lock);
340 radeon_object_unreserve(robj);
341 return r;
342}
343
344int radeon_object_evict_vram(struct radeon_device *rdev)
345{
346 if (rdev->flags & RADEON_IS_IGP) {
347 /* Useless to evict on IGP chips */
348 return 0;
349 }
350 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
351}
352
353void radeon_object_force_delete(struct radeon_device *rdev)
354{
355 struct radeon_object *robj, *n;
356 struct drm_gem_object *gobj;
357
358 if (list_empty(&rdev->gem.objects)) {
359 return;
360 }
361 DRM_ERROR("Userspace still has active objects!\n");
362 list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
363 mutex_lock(&rdev->ddev->struct_mutex);
364 gobj = robj->gobj;
365 DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
366 gobj, robj, (unsigned long)gobj->size,
367 *((unsigned long *)&gobj->refcount));
368 list_del_init(&robj->list);
369 radeon_object_unref(&robj);
370 gobj->driver_private = NULL;
371 drm_gem_object_unreference(gobj);
372 mutex_unlock(&rdev->ddev->struct_mutex);
373 }
374}
375
376int radeon_object_init(struct radeon_device *rdev)
377{
378 return radeon_ttm_init(rdev);
379}
380
381void radeon_object_fini(struct radeon_device *rdev)
382{
383 radeon_ttm_fini(rdev);
384}
385
386void radeon_object_list_add_object(struct radeon_object_list *lobj,
387 struct list_head *head)
388{
389 if (lobj->wdomain) {
390 list_add(&lobj->list, head);
391 } else {
392 list_add_tail(&lobj->list, head);
393 }
394}
395
396int radeon_object_list_reserve(struct list_head *head)
397{
398 struct radeon_object_list *lobj;
399 struct list_head *i;
400 int r;
401
402 list_for_each(i, head) {
403 lobj = list_entry(i, struct radeon_object_list, list);
404 if (!lobj->robj->pin_count) {
405 r = radeon_object_reserve(lobj->robj, true);
406 if (unlikely(r != 0)) {
407 DRM_ERROR("radeon: failed to reserve object.\n");
408 return r;
409 }
410 } else {
411 }
412 }
413 return 0;
414}
415
416void radeon_object_list_unreserve(struct list_head *head)
417{
418 struct radeon_object_list *lobj;
419 struct list_head *i;
420
421 list_for_each(i, head) {
422 lobj = list_entry(i, struct radeon_object_list, list);
423 if (!lobj->robj->pin_count) {
424 radeon_object_unreserve(lobj->robj);
425 } else {
426 }
427 }
428}
429
430int radeon_object_list_validate(struct list_head *head, void *fence)
431{
432 struct radeon_object_list *lobj;
433 struct radeon_object *robj;
434 struct radeon_fence *old_fence = NULL;
435 struct list_head *i;
436 uint32_t flags;
437 int r;
438
439 r = radeon_object_list_reserve(head);
440 if (unlikely(r != 0)) {
441 radeon_object_list_unreserve(head);
442 return r;
443 }
444 list_for_each(i, head) {
445 lobj = list_entry(i, struct radeon_object_list, list);
446 robj = lobj->robj;
447 if (lobj->wdomain) {
448 flags = radeon_object_flags_from_domain(lobj->wdomain);
449 flags |= TTM_PL_FLAG_TT;
450 } else {
451 flags = radeon_object_flags_from_domain(lobj->rdomain);
452 flags |= TTM_PL_FLAG_TT;
453 flags |= TTM_PL_FLAG_VRAM;
454 }
455 if (!robj->pin_count) {
456 robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
457 r = ttm_buffer_object_validate(&robj->tobj,
458 robj->tobj.proposed_placement,
459 true, false);
460 if (unlikely(r)) {
461 radeon_object_list_unreserve(head);
462 DRM_ERROR("radeon: failed to validate.\n");
463 return r;
464 }
465 radeon_object_gpu_addr(robj);
466 }
467 lobj->gpu_offset = robj->gpu_addr;
468 if (fence) {
469 old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
470 robj->tobj.sync_obj = radeon_fence_ref(fence);
471 robj->tobj.sync_obj_arg = NULL;
472 }
473 if (old_fence) {
474 radeon_fence_unref(&old_fence);
475 }
476 }
477 return 0;
478}
479
480void radeon_object_list_unvalidate(struct list_head *head)
481{
482 struct radeon_object_list *lobj;
483 struct radeon_fence *old_fence = NULL;
484 struct list_head *i;
485
486 list_for_each(i, head) {
487 lobj = list_entry(i, struct radeon_object_list, list);
488 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
489 lobj->robj->tobj.sync_obj = NULL;
490 if (old_fence) {
491 radeon_fence_unref(&old_fence);
492 }
493 }
494 radeon_object_list_unreserve(head);
495}
496
497void radeon_object_list_clean(struct list_head *head)
498{
499 radeon_object_list_unreserve(head);
500}
501
502int radeon_object_fbdev_mmap(struct radeon_object *robj,
503 struct vm_area_struct *vma)
504{
505 return ttm_fbdev_mmap(vma, &robj->tobj);
506}
507
508unsigned long radeon_object_size(struct radeon_object *robj)
509{
510 return robj->tobj.num_pages << PAGE_SHIFT;
511}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644
index 000000000000..473e4775dc5a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_OBJECT_H__
29#define __RADEON_OBJECT_H__
30
31#include <ttm/ttm_bo_api.h>
32#include <ttm/ttm_bo_driver.h>
33#include <ttm/ttm_placement.h>
34#include <ttm/ttm_module.h>
35
36/*
37 * TTM.
38 */
39struct radeon_mman {
40 struct ttm_global_reference mem_global_ref;
41 bool mem_global_referenced;
42 struct ttm_bo_device bdev;
43};
44
45#endif
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644
index 000000000000..6d3d90406a24
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -0,0 +1,3570 @@
1/*
2 * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
3 * VA Linux Systems Inc., Fremont, California.
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation on the rights to use, copy, modify, merge,
11 * publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
23 * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 */
28
29/*
30 * Authors:
31 * Kevin E. Martin <martin@xfree86.org>
32 * Rickard E. Faith <faith@valinux.com>
33 * Alan Hourihane <alanh@fairlite.demon.co.uk>
34 *
35 * References:
36 *
37 * !!!! FIXME !!!!
38 * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
39 * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
40 * 1999.
41 *
42 * !!!! FIXME !!!!
43 * RAGE 128 Software Development Manual (Technical Reference Manual P/N
44 * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
45 *
46 */
47
48/* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
49 * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
50 * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! */
51#ifndef _RADEON_REG_H_
52#define _RADEON_REG_H_
53
54#include "r300_reg.h"
55#include "r500_reg.h"
56#include "r600_reg.h"
57
58
59#define RADEON_MC_AGP_LOCATION 0x014c
60#define RADEON_MC_AGP_START_MASK 0x0000FFFF
61#define RADEON_MC_AGP_START_SHIFT 0
62#define RADEON_MC_AGP_TOP_MASK 0xFFFF0000
63#define RADEON_MC_AGP_TOP_SHIFT 16
64#define RADEON_MC_FB_LOCATION 0x0148
65#define RADEON_MC_FB_START_MASK 0x0000FFFF
66#define RADEON_MC_FB_START_SHIFT 0
67#define RADEON_MC_FB_TOP_MASK 0xFFFF0000
68#define RADEON_MC_FB_TOP_SHIFT 16
69#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
70#define RADEON_AGP_BASE 0x0170
71
72#define ATI_DATATYPE_VQ 0
73#define ATI_DATATYPE_CI4 1
74#define ATI_DATATYPE_CI8 2
75#define ATI_DATATYPE_ARGB1555 3
76#define ATI_DATATYPE_RGB565 4
77#define ATI_DATATYPE_RGB888 5
78#define ATI_DATATYPE_ARGB8888 6
79#define ATI_DATATYPE_RGB332 7
80#define ATI_DATATYPE_Y8 8
81#define ATI_DATATYPE_RGB8 9
82#define ATI_DATATYPE_CI16 10
83#define ATI_DATATYPE_VYUY_422 11
84#define ATI_DATATYPE_YVYU_422 12
85#define ATI_DATATYPE_AYUV_444 14
86#define ATI_DATATYPE_ARGB4444 15
87
88 /* Registers for 2D/Video/Overlay */
89#define RADEON_ADAPTER_ID 0x0f2c /* PCI */
90#define RADEON_AGP_BASE 0x0170
91#define RADEON_AGP_CNTL 0x0174
92# define RADEON_AGP_APER_SIZE_256MB (0x00 << 0)
93# define RADEON_AGP_APER_SIZE_128MB (0x20 << 0)
94# define RADEON_AGP_APER_SIZE_64MB (0x30 << 0)
95# define RADEON_AGP_APER_SIZE_32MB (0x38 << 0)
96# define RADEON_AGP_APER_SIZE_16MB (0x3c << 0)
97# define RADEON_AGP_APER_SIZE_8MB (0x3e << 0)
98# define RADEON_AGP_APER_SIZE_4MB (0x3f << 0)
99# define RADEON_AGP_APER_SIZE_MASK (0x3f << 0)
100#define RADEON_STATUS_PCI_CONFIG 0x06
101# define RADEON_CAP_LIST 0x100000
102#define RADEON_CAPABILITIES_PTR_PCI_CONFIG 0x34 /* offset in PCI config*/
103# define RADEON_CAP_PTR_MASK 0xfc /* mask off reserved bits of CAP_PTR */
104# define RADEON_CAP_ID_NULL 0x00 /* End of capability list */
105# define RADEON_CAP_ID_AGP 0x02 /* AGP capability ID */
106# define RADEON_CAP_ID_EXP 0x10 /* PCI Express */
107#define RADEON_AGP_COMMAND 0x0f60 /* PCI */
108#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/
109# define RADEON_AGP_ENABLE (1<<8)
110#define RADEON_AGP_PLL_CNTL 0x000b /* PLL */
111#define RADEON_AGP_STATUS 0x0f5c /* PCI */
112# define RADEON_AGP_1X_MODE 0x01
113# define RADEON_AGP_2X_MODE 0x02
114# define RADEON_AGP_4X_MODE 0x04
115# define RADEON_AGP_FW_MODE 0x10
116# define RADEON_AGP_MODE_MASK 0x17
117# define RADEON_AGPv3_MODE 0x08
118# define RADEON_AGPv3_4X_MODE 0x01
119# define RADEON_AGPv3_8X_MODE 0x02
120#define RADEON_ATTRDR 0x03c1 /* VGA */
121#define RADEON_ATTRDW 0x03c0 /* VGA */
122#define RADEON_ATTRX 0x03c0 /* VGA */
123#define RADEON_AUX_SC_CNTL 0x1660
124# define RADEON_AUX1_SC_EN (1 << 0)
125# define RADEON_AUX1_SC_MODE_OR (0 << 1)
126# define RADEON_AUX1_SC_MODE_NAND (1 << 1)
127# define RADEON_AUX2_SC_EN (1 << 2)
128# define RADEON_AUX2_SC_MODE_OR (0 << 3)
129# define RADEON_AUX2_SC_MODE_NAND (1 << 3)
130# define RADEON_AUX3_SC_EN (1 << 4)
131# define RADEON_AUX3_SC_MODE_OR (0 << 5)
132# define RADEON_AUX3_SC_MODE_NAND (1 << 5)
133#define RADEON_AUX1_SC_BOTTOM 0x1670
134#define RADEON_AUX1_SC_LEFT 0x1664
135#define RADEON_AUX1_SC_RIGHT 0x1668
136#define RADEON_AUX1_SC_TOP 0x166c
137#define RADEON_AUX2_SC_BOTTOM 0x1680
138#define RADEON_AUX2_SC_LEFT 0x1674
139#define RADEON_AUX2_SC_RIGHT 0x1678
140#define RADEON_AUX2_SC_TOP 0x167c
141#define RADEON_AUX3_SC_BOTTOM 0x1690
142#define RADEON_AUX3_SC_LEFT 0x1684
143#define RADEON_AUX3_SC_RIGHT 0x1688
144#define RADEON_AUX3_SC_TOP 0x168c
145#define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8
146#define RADEON_AUX_WINDOW_VERT_CNTL 0x02dc
147
148#define RADEON_BASE_CODE 0x0f0b
149#define RADEON_BIOS_0_SCRATCH 0x0010
150# define RADEON_FP_PANEL_SCALABLE (1 << 16)
151# define RADEON_FP_PANEL_SCALE_EN (1 << 17)
152# define RADEON_FP_CHIP_SCALE_EN (1 << 18)
153# define RADEON_DRIVER_BRIGHTNESS_EN (1 << 26)
154# define RADEON_DISPLAY_ROT_MASK (3 << 28)
155# define RADEON_DISPLAY_ROT_00 (0 << 28)
156# define RADEON_DISPLAY_ROT_90 (1 << 28)
157# define RADEON_DISPLAY_ROT_180 (2 << 28)
158# define RADEON_DISPLAY_ROT_270 (3 << 28)
159#define RADEON_BIOS_1_SCRATCH 0x0014
160#define RADEON_BIOS_2_SCRATCH 0x0018
161#define RADEON_BIOS_3_SCRATCH 0x001c
162#define RADEON_BIOS_4_SCRATCH 0x0020
163# define RADEON_CRT1_ATTACHED_MASK (3 << 0)
164# define RADEON_CRT1_ATTACHED_MONO (1 << 0)
165# define RADEON_CRT1_ATTACHED_COLOR (2 << 0)
166# define RADEON_LCD1_ATTACHED (1 << 2)
167# define RADEON_DFP1_ATTACHED (1 << 3)
168# define RADEON_TV1_ATTACHED_MASK (3 << 4)
169# define RADEON_TV1_ATTACHED_COMP (1 << 4)
170# define RADEON_TV1_ATTACHED_SVIDEO (2 << 4)
171# define RADEON_CRT2_ATTACHED_MASK (3 << 8)
172# define RADEON_CRT2_ATTACHED_MONO (1 << 8)
173# define RADEON_CRT2_ATTACHED_COLOR (2 << 8)
174# define RADEON_DFP2_ATTACHED (1 << 11)
175#define RADEON_BIOS_5_SCRATCH 0x0024
176# define RADEON_LCD1_ON (1 << 0)
177# define RADEON_CRT1_ON (1 << 1)
178# define RADEON_TV1_ON (1 << 2)
179# define RADEON_DFP1_ON (1 << 3)
180# define RADEON_CRT2_ON (1 << 5)
181# define RADEON_CV1_ON (1 << 6)
182# define RADEON_DFP2_ON (1 << 7)
183# define RADEON_LCD1_CRTC_MASK (1 << 8)
184# define RADEON_LCD1_CRTC_SHIFT 8
185# define RADEON_CRT1_CRTC_MASK (1 << 9)
186# define RADEON_CRT1_CRTC_SHIFT 9
187# define RADEON_TV1_CRTC_MASK (1 << 10)
188# define RADEON_TV1_CRTC_SHIFT 10
189# define RADEON_DFP1_CRTC_MASK (1 << 11)
190# define RADEON_DFP1_CRTC_SHIFT 11
191# define RADEON_CRT2_CRTC_MASK (1 << 12)
192# define RADEON_CRT2_CRTC_SHIFT 12
193# define RADEON_CV1_CRTC_MASK (1 << 13)
194# define RADEON_CV1_CRTC_SHIFT 13
195# define RADEON_DFP2_CRTC_MASK (1 << 14)
196# define RADEON_DFP2_CRTC_SHIFT 14
197# define RADEON_ACC_REQ_LCD1 (1 << 16)
198# define RADEON_ACC_REQ_CRT1 (1 << 17)
199# define RADEON_ACC_REQ_TV1 (1 << 18)
200# define RADEON_ACC_REQ_DFP1 (1 << 19)
201# define RADEON_ACC_REQ_CRT2 (1 << 21)
202# define RADEON_ACC_REQ_TV2 (1 << 22)
203# define RADEON_ACC_REQ_DFP2 (1 << 23)
204#define RADEON_BIOS_6_SCRATCH 0x0028
205# define RADEON_ACC_MODE_CHANGE (1 << 2)
206# define RADEON_EXT_DESKTOP_MODE (1 << 3)
207# define RADEON_LCD_DPMS_ON (1 << 20)
208# define RADEON_CRT_DPMS_ON (1 << 21)
209# define RADEON_TV_DPMS_ON (1 << 22)
210# define RADEON_DFP_DPMS_ON (1 << 23)
211# define RADEON_DPMS_MASK (3 << 24)
212# define RADEON_DPMS_ON (0 << 24)
213# define RADEON_DPMS_STANDBY (1 << 24)
214# define RADEON_DPMS_SUSPEND (2 << 24)
215# define RADEON_DPMS_OFF (3 << 24)
216# define RADEON_SCREEN_BLANKING (1 << 26)
217# define RADEON_DRIVER_CRITICAL (1 << 27)
218# define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
219#define RADEON_BIOS_7_SCRATCH 0x002c
220# define RADEON_SYS_HOTKEY (1 << 10)
221# define RADEON_DRV_LOADED (1 << 12)
222#define RADEON_BIOS_ROM 0x0f30 /* PCI */
223#define RADEON_BIST 0x0f0f /* PCI */
224#define RADEON_BRUSH_DATA0 0x1480
225#define RADEON_BRUSH_DATA1 0x1484
226#define RADEON_BRUSH_DATA10 0x14a8
227#define RADEON_BRUSH_DATA11 0x14ac
228#define RADEON_BRUSH_DATA12 0x14b0
229#define RADEON_BRUSH_DATA13 0x14b4
230#define RADEON_BRUSH_DATA14 0x14b8
231#define RADEON_BRUSH_DATA15 0x14bc
232#define RADEON_BRUSH_DATA16 0x14c0
233#define RADEON_BRUSH_DATA17 0x14c4
234#define RADEON_BRUSH_DATA18 0x14c8
235#define RADEON_BRUSH_DATA19 0x14cc
236#define RADEON_BRUSH_DATA2 0x1488
237#define RADEON_BRUSH_DATA20 0x14d0
238#define RADEON_BRUSH_DATA21 0x14d4
239#define RADEON_BRUSH_DATA22 0x14d8
240#define RADEON_BRUSH_DATA23 0x14dc
241#define RADEON_BRUSH_DATA24 0x14e0
242#define RADEON_BRUSH_DATA25 0x14e4
243#define RADEON_BRUSH_DATA26 0x14e8
244#define RADEON_BRUSH_DATA27 0x14ec
245#define RADEON_BRUSH_DATA28 0x14f0
246#define RADEON_BRUSH_DATA29 0x14f4
247#define RADEON_BRUSH_DATA3 0x148c
248#define RADEON_BRUSH_DATA30 0x14f8
249#define RADEON_BRUSH_DATA31 0x14fc
250#define RADEON_BRUSH_DATA32 0x1500
251#define RADEON_BRUSH_DATA33 0x1504
252#define RADEON_BRUSH_DATA34 0x1508
253#define RADEON_BRUSH_DATA35 0x150c
254#define RADEON_BRUSH_DATA36 0x1510
255#define RADEON_BRUSH_DATA37 0x1514
256#define RADEON_BRUSH_DATA38 0x1518
257#define RADEON_BRUSH_DATA39 0x151c
258#define RADEON_BRUSH_DATA4 0x1490
259#define RADEON_BRUSH_DATA40 0x1520
260#define RADEON_BRUSH_DATA41 0x1524
261#define RADEON_BRUSH_DATA42 0x1528
262#define RADEON_BRUSH_DATA43 0x152c
263#define RADEON_BRUSH_DATA44 0x1530
264#define RADEON_BRUSH_DATA45 0x1534
265#define RADEON_BRUSH_DATA46 0x1538
266#define RADEON_BRUSH_DATA47 0x153c
267#define RADEON_BRUSH_DATA48 0x1540
268#define RADEON_BRUSH_DATA49 0x1544
269#define RADEON_BRUSH_DATA5 0x1494
270#define RADEON_BRUSH_DATA50 0x1548
271#define RADEON_BRUSH_DATA51 0x154c
272#define RADEON_BRUSH_DATA52 0x1550
273#define RADEON_BRUSH_DATA53 0x1554
274#define RADEON_BRUSH_DATA54 0x1558
275#define RADEON_BRUSH_DATA55 0x155c
276#define RADEON_BRUSH_DATA56 0x1560
277#define RADEON_BRUSH_DATA57 0x1564
278#define RADEON_BRUSH_DATA58 0x1568
279#define RADEON_BRUSH_DATA59 0x156c
280#define RADEON_BRUSH_DATA6 0x1498
281#define RADEON_BRUSH_DATA60 0x1570
282#define RADEON_BRUSH_DATA61 0x1574
283#define RADEON_BRUSH_DATA62 0x1578
284#define RADEON_BRUSH_DATA63 0x157c
285#define RADEON_BRUSH_DATA7 0x149c
286#define RADEON_BRUSH_DATA8 0x14a0
287#define RADEON_BRUSH_DATA9 0x14a4
288#define RADEON_BRUSH_SCALE 0x1470
289#define RADEON_BRUSH_Y_X 0x1474
290#define RADEON_BUS_CNTL 0x0030
291# define RADEON_BUS_MASTER_DIS (1 << 6)
292# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
293# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
294# define RADEON_BUS_RD_ABORT_EN (1 << 25)
295# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
296# define RADEON_BUS_WRT_BURST (1 << 29)
297# define RADEON_BUS_READ_BURST (1 << 30)
298#define RADEON_BUS_CNTL1 0x0034
299# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
300
301/* #define RADEON_PCIE_INDEX 0x0030 */
302/* #define RADEON_PCIE_DATA 0x0034 */
303#define RADEON_PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE */
304# define RADEON_PCIE_LC_LINK_WIDTH_SHIFT 0
305# define RADEON_PCIE_LC_LINK_WIDTH_MASK 0x7
306# define RADEON_PCIE_LC_LINK_WIDTH_X0 0
307# define RADEON_PCIE_LC_LINK_WIDTH_X1 1
308# define RADEON_PCIE_LC_LINK_WIDTH_X2 2
309# define RADEON_PCIE_LC_LINK_WIDTH_X4 3
310# define RADEON_PCIE_LC_LINK_WIDTH_X8 4
311# define RADEON_PCIE_LC_LINK_WIDTH_X12 5
312# define RADEON_PCIE_LC_LINK_WIDTH_X16 6
313# define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT 4
314# define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK 0x70
315# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
316# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
317# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
318
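/*
 * Illustrative sketch (not part of the original header, assumes <linux/types.h>):
 * decoding the negotiated lane count from the RD field of
 * PCIE_LC_LINK_WIDTH_CNTL, using only the masks and encodings defined above.
 * The register value itself has to be fetched through the indirect PCIE
 * index/data pair noted elsewhere in this file.
 */
static int radeon_pcie_lanes_from_lc(u32 lc_link_width_cntl)
{
	switch ((lc_link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >>
		RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:  return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:  return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:  return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:  return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12: return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X16: return 16;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	default:
		return 0;	/* no lanes reported */
	}
}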
319#define RADEON_CACHE_CNTL 0x1724
320#define RADEON_CACHE_LINE 0x0f0c /* PCI */
321#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */
322#define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */
323#define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */
324# define RADEON_DONT_USE_XTALIN (1 << 4)
325# define RADEON_SCLK_DYN_START_CNTL (1 << 15)
326#define RADEON_CLOCK_CNTL_DATA 0x000c
327#define RADEON_CLOCK_CNTL_INDEX 0x0008
328# define RADEON_PLL_WR_EN (1 << 7)
329# define RADEON_PLL_DIV_SEL (3 << 8)
330# define RADEON_PLL2_DIV_SEL_MASK (~(3 << 8))
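/*
 * Illustrative sketch (not part of the original header): registers marked
 * "PLL" in this file are not memory mapped directly; they are reached
 * through the CLOCK_CNTL_INDEX/CLOCK_CNTL_DATA pair, and writes must set
 * RADEON_PLL_WR_EN in the index.  readl()/writel() and the 'mmio' base are
 * assumed from <linux/io.h>; the function names are placeholders.
 */
static u32 radeon_pll_rreg(void __iomem *mmio, unsigned int pll_reg)
{
	writel(pll_reg & 0x3f, mmio + RADEON_CLOCK_CNTL_INDEX);
	return readl(mmio + RADEON_CLOCK_CNTL_DATA);
}

static void radeon_pll_wreg(void __iomem *mmio, unsigned int pll_reg, u32 val)
{
	writel((pll_reg & 0x3f) | RADEON_PLL_WR_EN,
	       mmio + RADEON_CLOCK_CNTL_INDEX);
	writel(val, mmio + RADEON_CLOCK_CNTL_DATA);
}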
331#define RADEON_CLK_PWRMGT_CNTL 0x0014
332# define RADEON_ENGIN_DYNCLK_MODE (1 << 12)
333# define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13)
334# define RADEON_ACTIVE_HILO_LAT_SHIFT 13
335# define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
336# define RADEON_MC_BUSY (1 << 16)
337# define RADEON_DLL_READY (1 << 19)
338# define RADEON_CG_NO1_DEBUG_0 (1 << 24)
339# define RADEON_CG_NO1_DEBUG_MASK (0x1f << 24)
340# define RADEON_DYN_STOP_MODE_MASK (7 << 21)
341# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
342# define RADEON_TVCLK_TURNOFF (1 << 31)
343#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
344# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
345#define RADEON_CLR_CMP_CLR_3D 0x1a24
346#define RADEON_CLR_CMP_CLR_DST 0x15c8
347#define RADEON_CLR_CMP_CLR_SRC 0x15c4
348#define RADEON_CLR_CMP_CNTL 0x15c0
349# define RADEON_SRC_CMP_EQ_COLOR (4 << 0)
350# define RADEON_SRC_CMP_NEQ_COLOR (5 << 0)
351# define RADEON_CLR_CMP_SRC_SOURCE (1 << 24)
352#define RADEON_CLR_CMP_MASK 0x15cc
353# define RADEON_CLR_CMP_MSK 0xffffffff
354#define RADEON_CLR_CMP_MASK_3D 0x1A28
355#define RADEON_COMMAND 0x0f04 /* PCI */
356#define RADEON_COMPOSITE_SHADOW_ID 0x1a0c
357#define RADEON_CONFIG_APER_0_BASE 0x0100
358#define RADEON_CONFIG_APER_1_BASE 0x0104
359#define RADEON_CONFIG_APER_SIZE 0x0108
360#define RADEON_CONFIG_BONDS 0x00e8
361#define RADEON_CONFIG_CNTL 0x00e0
362# define RADEON_CFG_ATI_REV_A11 (0 << 16)
363# define RADEON_CFG_ATI_REV_A12 (1 << 16)
364# define RADEON_CFG_ATI_REV_A13 (2 << 16)
365# define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16)
366#define RADEON_CONFIG_MEMSIZE 0x00f8
367#define RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114
368#define RADEON_CONFIG_REG_1_BASE 0x010c
369#define RADEON_CONFIG_REG_APER_SIZE 0x0110
370#define RADEON_CONFIG_XSTRAP 0x00e4
371#define RADEON_CONSTANT_COLOR_C 0x1d34
372# define RADEON_CONSTANT_COLOR_MASK 0x00ffffff
373# define RADEON_CONSTANT_COLOR_ONE 0x00ffffff
374# define RADEON_CONSTANT_COLOR_ZERO 0x00000000
375#define RADEON_CRC_CMDFIFO_ADDR 0x0740
376#define RADEON_CRC_CMDFIFO_DOUT 0x0744
377#define RADEON_GRPH_BUFFER_CNTL 0x02f0
378# define RADEON_GRPH_START_REQ_MASK (0x7f)
379# define RADEON_GRPH_START_REQ_SHIFT 0
380# define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8)
381# define RADEON_GRPH_STOP_REQ_SHIFT 8
382# define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16)
383# define RADEON_GRPH_CRITICAL_POINT_SHIFT 16
384# define RADEON_GRPH_CRITICAL_CNTL (1<<28)
385# define RADEON_GRPH_BUFFER_SIZE (1<<29)
386# define RADEON_GRPH_CRITICAL_AT_SOF (1<<30)
387# define RADEON_GRPH_STOP_CNTL (1<<31)
388#define RADEON_GRPH2_BUFFER_CNTL 0x03f0
389# define RADEON_GRPH2_START_REQ_MASK (0x7f)
390# define RADEON_GRPH2_START_REQ_SHIFT 0
391# define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8)
392# define RADEON_GRPH2_STOP_REQ_SHIFT 8
393# define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16)
394# define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16
395# define RADEON_GRPH2_CRITICAL_CNTL (1<<28)
396# define RADEON_GRPH2_BUFFER_SIZE (1<<29)
397# define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30)
398# define RADEON_GRPH2_STOP_CNTL (1<<31)
399#define RADEON_CRTC_CRNT_FRAME 0x0214
400#define RADEON_CRTC_EXT_CNTL 0x0054
401# define RADEON_CRTC_VGA_XOVERSCAN (1 << 0)
402# define RADEON_VGA_ATI_LINEAR (1 << 3)
403# define RADEON_XCRT_CNT_EN (1 << 6)
404# define RADEON_CRTC_HSYNC_DIS (1 << 8)
405# define RADEON_CRTC_VSYNC_DIS (1 << 9)
406# define RADEON_CRTC_DISPLAY_DIS (1 << 10)
407# define RADEON_CRTC_SYNC_TRISTAT (1 << 11)
408# define RADEON_CRTC_CRT_ON (1 << 15)
409#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055
410# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0)
411# define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1)
412# define RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2)
413#define RADEON_CRTC_GEN_CNTL 0x0050
414# define RADEON_CRTC_DBL_SCAN_EN (1 << 0)
415# define RADEON_CRTC_INTERLACE_EN (1 << 1)
416# define RADEON_CRTC_CSYNC_EN (1 << 4)
417# define RADEON_CRTC_ICON_EN (1 << 15)
418# define RADEON_CRTC_CUR_EN (1 << 16)
419# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
420# define RADEON_CRTC_CUR_MODE_SHIFT 20
421# define RADEON_CRTC_CUR_MODE_MONO 0
422# define RADEON_CRTC_CUR_MODE_24BPP 2
423# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
424# define RADEON_CRTC_EN (1 << 25)
425# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
426#define RADEON_CRTC2_GEN_CNTL 0x03f8
427# define RADEON_CRTC2_DBL_SCAN_EN (1 << 0)
428# define RADEON_CRTC2_INTERLACE_EN (1 << 1)
429# define RADEON_CRTC2_SYNC_TRISTAT (1 << 4)
430# define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5)
431# define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6)
432# define RADEON_CRTC2_CRT2_ON (1 << 7)
433# define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
434# define RADEON_CRTC2_PIX_WIDTH_MASK (0xf << 8)
435# define RADEON_CRTC2_ICON_EN (1 << 15)
436# define RADEON_CRTC2_CUR_EN (1 << 16)
437# define RADEON_CRTC2_CUR_MODE_MASK (7 << 20)
438# define RADEON_CRTC2_DISP_DIS (1 << 23)
439# define RADEON_CRTC2_EN (1 << 25)
440# define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26)
441# define RADEON_CRTC2_CSYNC_EN (1 << 27)
442# define RADEON_CRTC2_HSYNC_DIS (1 << 28)
443# define RADEON_CRTC2_VSYNC_DIS (1 << 29)
444#define RADEON_CRTC_MORE_CNTL 0x27c
445# define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
446# define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
447# define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
448# define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
449#define RADEON_CRTC_GUI_TRIG_VLINE 0x0218
450#define RADEON_CRTC_H_SYNC_STRT_WID 0x0204
451# define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0)
452# define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3)
453# define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
454# define RADEON_CRTC_H_SYNC_WID (0x3f << 16)
455# define RADEON_CRTC_H_SYNC_WID_SHIFT 16
456# define RADEON_CRTC_H_SYNC_POL (1 << 23)
457#define RADEON_CRTC2_H_SYNC_STRT_WID 0x0304
458# define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0)
459# define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3)
460# define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
461# define RADEON_CRTC2_H_SYNC_WID (0x3f << 16)
462# define RADEON_CRTC2_H_SYNC_WID_SHIFT 16
463# define RADEON_CRTC2_H_SYNC_POL (1 << 23)
464#define RADEON_CRTC_H_TOTAL_DISP 0x0200
465# define RADEON_CRTC_H_TOTAL (0x03ff << 0)
466# define RADEON_CRTC_H_TOTAL_SHIFT 0
467# define RADEON_CRTC_H_DISP (0x01ff << 16)
468# define RADEON_CRTC_H_DISP_SHIFT 16
469#define RADEON_CRTC2_H_TOTAL_DISP 0x0300
470# define RADEON_CRTC2_H_TOTAL (0x03ff << 0)
471# define RADEON_CRTC2_H_TOTAL_SHIFT 0
472# define RADEON_CRTC2_H_DISP (0x01ff << 16)
473# define RADEON_CRTC2_H_DISP_SHIFT 16
474
475#define RADEON_CRTC_OFFSET_RIGHT 0x0220
476#define RADEON_CRTC_OFFSET 0x0224
477# define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
478# define RADEON_CRTC_OFFSET__OFFSET_LOCK (1<<31)
479
480#define RADEON_CRTC2_OFFSET 0x0324
481# define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
482# define RADEON_CRTC2_OFFSET__OFFSET_LOCK (1<<31)
483#define RADEON_CRTC_OFFSET_CNTL 0x0228
484# define RADEON_CRTC_TILE_LINE_SHIFT 0
485# define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT 4
486# define R300_CRTC_X_Y_MODE_EN_RIGHT (1 << 6)
487# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK (3 << 7)
488# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO (0 << 7)
489# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
490# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
491# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS (3 << 7)
492# define R300_CRTC_X_Y_MODE_EN (1 << 9)
493# define R300_CRTC_MICRO_TILE_BUFFER_MASK (3 << 10)
494# define R300_CRTC_MICRO_TILE_BUFFER_AUTO (0 << 10)
495# define R300_CRTC_MICRO_TILE_BUFFER_SINGLE (1 << 10)
496# define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE (2 << 10)
497# define R300_CRTC_MICRO_TILE_BUFFER_DIS (3 << 10)
498# define R300_CRTC_MICRO_TILE_EN_RIGHT (1 << 12)
499# define R300_CRTC_MICRO_TILE_EN (1 << 13)
500# define R300_CRTC_MACRO_TILE_EN_RIGHT (1 << 14)
501# define R300_CRTC_MACRO_TILE_EN (1 << 15)
502# define RADEON_CRTC_TILE_EN_RIGHT (1 << 14)
503# define RADEON_CRTC_TILE_EN (1 << 15)
504# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
505# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
506
507#define R300_CRTC_TILE_X0_Y0 0x0350
508#define R300_CRTC2_TILE_X0_Y0 0x0358
509
510#define RADEON_CRTC2_OFFSET_CNTL 0x0328
511# define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
512# define RADEON_CRTC2_TILE_EN (1 << 15)
513#define RADEON_CRTC_PITCH 0x022c
514# define RADEON_CRTC_PITCH__SHIFT 0
515# define RADEON_CRTC_PITCH__RIGHT_SHIFT 16
516
517#define RADEON_CRTC2_PITCH 0x032c
518#define RADEON_CRTC_STATUS 0x005c
519# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
520# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
521#define RADEON_CRTC2_STATUS 0x03fc
522# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
523# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
524#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
525# define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0)
526# define RADEON_CRTC_V_SYNC_STRT_SHIFT 0
527# define RADEON_CRTC_V_SYNC_WID (0x1f << 16)
528# define RADEON_CRTC_V_SYNC_WID_SHIFT 16
529# define RADEON_CRTC_V_SYNC_POL (1 << 23)
530#define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c
531# define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0)
532# define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
533# define RADEON_CRTC2_V_SYNC_WID (0x1f << 16)
534# define RADEON_CRTC2_V_SYNC_WID_SHIFT 16
535# define RADEON_CRTC2_V_SYNC_POL (1 << 23)
536#define RADEON_CRTC_V_TOTAL_DISP 0x0208
537# define RADEON_CRTC_V_TOTAL (0x07ff << 0)
538# define RADEON_CRTC_V_TOTAL_SHIFT 0
539# define RADEON_CRTC_V_DISP (0x07ff << 16)
540# define RADEON_CRTC_V_DISP_SHIFT 16
541#define RADEON_CRTC2_V_TOTAL_DISP 0x0308
542# define RADEON_CRTC2_V_TOTAL (0x07ff << 0)
543# define RADEON_CRTC2_V_TOTAL_SHIFT 0
544# define RADEON_CRTC2_V_DISP (0x07ff << 16)
545# define RADEON_CRTC2_V_DISP_SHIFT 16
546#define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210
547# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
548#define RADEON_CRTC2_CRNT_FRAME 0x0314
549#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
550#define RADEON_CRTC2_STATUS 0x03fc
551#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
552#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
553#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
554#define RADEON_CUR_CLR0 0x026c
555#define RADEON_CUR_CLR1 0x0270
556#define RADEON_CUR_HORZ_VERT_OFF 0x0268
557#define RADEON_CUR_HORZ_VERT_POSN 0x0264
558#define RADEON_CUR_OFFSET 0x0260
559# define RADEON_CUR_LOCK (1 << 31)
560#define RADEON_CUR2_CLR0 0x036c
561#define RADEON_CUR2_CLR1 0x0370
562#define RADEON_CUR2_HORZ_VERT_OFF 0x0368
563#define RADEON_CUR2_HORZ_VERT_POSN 0x0364
564#define RADEON_CUR2_OFFSET 0x0360
565# define RADEON_CUR2_LOCK (1 << 31)
566
567#define RADEON_DAC_CNTL 0x0058
568# define RADEON_DAC_RANGE_CNTL (3 << 0)
569# define RADEON_DAC_RANGE_CNTL_PS2 (2 << 0)
570# define RADEON_DAC_RANGE_CNTL_MASK 0x03
571# define RADEON_DAC_BLANKING (1 << 2)
572# define RADEON_DAC_CMP_EN (1 << 3)
573# define RADEON_DAC_CMP_OUTPUT (1 << 7)
574# define RADEON_DAC_8BIT_EN (1 << 8)
575# define RADEON_DAC_TVO_EN (1 << 10)
576# define RADEON_DAC_VGA_ADR_EN (1 << 13)
577# define RADEON_DAC_PDWN (1 << 15)
578# define RADEON_DAC_MASK_ALL (0xff << 24)
579#define RADEON_DAC_CNTL2 0x007c
580# define RADEON_DAC2_TV_CLK_SEL (0 << 1)
581# define RADEON_DAC2_DAC_CLK_SEL (1 << 0)
582# define RADEON_DAC2_DAC2_CLK_SEL (1 << 1)
583# define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5)
584# define RADEON_DAC2_CMP_EN (1 << 7)
585# define RADEON_DAC2_CMP_OUT_R (1 << 8)
586# define RADEON_DAC2_CMP_OUT_G (1 << 9)
587# define RADEON_DAC2_CMP_OUT_B (1 << 10)
588# define RADEON_DAC2_CMP_OUTPUT (1 << 11)
589#define RADEON_DAC_EXT_CNTL 0x0280
590# define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
591# define RADEON_DAC2_FORCE_DATA_EN (1 << 1)
592# define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4)
593# define RADEON_DAC_FORCE_DATA_EN (1 << 5)
594# define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
595# define RADEON_DAC_FORCE_DATA_SEL_R (0 << 6)
596# define RADEON_DAC_FORCE_DATA_SEL_G (1 << 6)
597# define RADEON_DAC_FORCE_DATA_SEL_B (2 << 6)
598# define RADEON_DAC_FORCE_DATA_SEL_RGB (3 << 6)
599# define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00
600# define RADEON_DAC_FORCE_DATA_SHIFT 8
601#define RADEON_DAC_MACRO_CNTL 0x0d04
602# define RADEON_DAC_PDWN_R (1 << 16)
603# define RADEON_DAC_PDWN_G (1 << 17)
604# define RADEON_DAC_PDWN_B (1 << 18)
605#define RADEON_DISP_PWR_MAN 0x0d08
606# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0)
607# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4)
608# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8)
609# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8)
610# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8)
611# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
612# define RADEON_DISP_D3_RST (1 << 16)
613# define RADEON_DISP_D3_REG_RST (1 << 17)
614# define RADEON_DISP_D3_GRPH_RST (1 << 18)
615# define RADEON_DISP_D3_SUBPIC_RST (1 << 19)
616# define RADEON_DISP_D3_OV0_RST (1 << 20)
617# define RADEON_DISP_D1D2_GRPH_RST (1 << 21)
618# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22)
619# define RADEON_DISP_D1D2_OV0_RST (1 << 23)
620# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24)
621# define RADEON_TV_ENABLE_RST (1 << 25)
622# define RADEON_AUTO_PWRUP_EN (1 << 26)
623#define RADEON_TV_DAC_CNTL 0x088c
624# define RADEON_TV_DAC_NBLANK (1 << 0)
625# define RADEON_TV_DAC_NHOLD (1 << 1)
626# define RADEON_TV_DAC_PEDESTAL (1 << 2)
627# define RADEON_TV_MONITOR_DETECT_EN (1 << 4)
628# define RADEON_TV_DAC_CMPOUT (1 << 5)
629# define RADEON_TV_DAC_STD_MASK (3 << 8)
630# define RADEON_TV_DAC_STD_PAL (0 << 8)
631# define RADEON_TV_DAC_STD_NTSC (1 << 8)
632# define RADEON_TV_DAC_STD_PS2 (2 << 8)
633# define RADEON_TV_DAC_STD_RS343 (3 << 8)
634# define RADEON_TV_DAC_BGSLEEP (1 << 6)
635# define RADEON_TV_DAC_BGADJ_MASK (0xf << 16)
636# define RADEON_TV_DAC_BGADJ_SHIFT 16
637# define RADEON_TV_DAC_DACADJ_MASK (0xf << 20)
638# define RADEON_TV_DAC_DACADJ_SHIFT 20
639# define RADEON_TV_DAC_RDACPD (1 << 24)
640# define RADEON_TV_DAC_GDACPD (1 << 25)
641# define RADEON_TV_DAC_BDACPD (1 << 26)
642# define RADEON_TV_DAC_RDACDET (1 << 29)
643# define RADEON_TV_DAC_GDACDET (1 << 30)
644# define RADEON_TV_DAC_BDACDET (1 << 31)
645# define R420_TV_DAC_DACADJ_MASK (0x1f << 20)
646# define R420_TV_DAC_RDACPD (1 << 25)
647# define R420_TV_DAC_GDACPD (1 << 26)
648# define R420_TV_DAC_BDACPD (1 << 27)
649# define R420_TV_DAC_TVENABLE (1 << 28)
650#define RADEON_DISP_HW_DEBUG 0x0d14
651# define RADEON_CRT2_DISP1_SEL (1 << 5)
652#define RADEON_DISP_OUTPUT_CNTL 0x0d64
653# define RADEON_DISP_DAC_SOURCE_MASK 0x03
654# define RADEON_DISP_DAC2_SOURCE_MASK 0x0c
655# define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
656# define RADEON_DISP_DAC_SOURCE_RMX 0x02
657# define RADEON_DISP_DAC_SOURCE_LTU 0x03
658# define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
659# define RADEON_DISP_TVDAC_SOURCE_MASK (0x03 << 2)
660# define RADEON_DISP_TVDAC_SOURCE_CRTC 0x0
661# define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
662# define RADEON_DISP_TVDAC_SOURCE_RMX (0x02 << 2)
663# define RADEON_DISP_TVDAC_SOURCE_LTU (0x03 << 2)
664# define RADEON_DISP_TRANS_MATRIX_MASK (0x03 << 4)
665# define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
666# define RADEON_DISP_TRANS_MATRIX_GRAPHICS (0x01 << 4)
667# define RADEON_DISP_TRANS_MATRIX_VIDEO (0x02 << 4)
668# define RADEON_DISP_TV_SOURCE_CRTC (1 << 16) /* crtc1 or crtc2 */
669# define RADEON_DISP_TV_SOURCE_LTU (0 << 16) /* linear transform unit */
670#define RADEON_DISP_TV_OUT_CNTL 0x0d6c
671# define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
672# define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
673#define RADEON_DAC_CRC_SIG 0x02cc
674#define RADEON_DAC_DATA 0x03c9 /* VGA */
675#define RADEON_DAC_MASK 0x03c6 /* VGA */
676#define RADEON_DAC_R_INDEX 0x03c7 /* VGA */
677#define RADEON_DAC_W_INDEX 0x03c8 /* VGA */
678#define RADEON_DDA_CONFIG 0x02e0
679#define RADEON_DDA_ON_OFF 0x02e4
680#define RADEON_DEFAULT_OFFSET 0x16e0
681#define RADEON_DEFAULT_PITCH 0x16e4
682#define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8
683# define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0)
684# define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
685#define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820
686#define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824
687#define RADEON_DEVICE_ID 0x0f02 /* PCI */
688#define RADEON_DISP_MISC_CNTL 0x0d00
689# define RADEON_SOFT_RESET_GRPH_PP (1 << 0)
690#define RADEON_DISP_MERGE_CNTL 0x0d60
691# define RADEON_DISP_ALPHA_MODE_MASK 0x03
692# define RADEON_DISP_ALPHA_MODE_KEY 0
693# define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
694# define RADEON_DISP_ALPHA_MODE_GLOBAL 2
695# define RADEON_DISP_RGB_OFFSET_EN (1 << 8)
696# define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16)
697# define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24)
698# define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
699#define RADEON_DISP2_MERGE_CNTL 0x0d68
700# define RADEON_DISP2_RGB_OFFSET_EN (1 << 8)
701#define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80
702#define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84
703#define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88
704#define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c
705#define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90
706#define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98
707#define RADEON_DP_BRUSH_BKGD_CLR 0x1478
708#define RADEON_DP_BRUSH_FRGD_CLR 0x147c
709#define RADEON_DP_CNTL 0x16c0
710# define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0)
711# define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1)
712# define RADEON_DP_DST_TILE_LINEAR (0 << 3)
713# define RADEON_DP_DST_TILE_MACRO (1 << 3)
714# define RADEON_DP_DST_TILE_MICRO (2 << 3)
715# define RADEON_DP_DST_TILE_BOTH (3 << 3)
716#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0
717# define RADEON_DST_Y_MAJOR (1 << 2)
718# define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
719# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
720#define RADEON_DP_DATATYPE 0x16c4
721# define RADEON_HOST_BIG_ENDIAN_EN (1 << 29)
722#define RADEON_DP_GUI_MASTER_CNTL 0x146c
723# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
724# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
725# define RADEON_GMC_SRC_CLIPPING (1 << 2)
726# define RADEON_GMC_DST_CLIPPING (1 << 3)
727# define RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4)
728# define RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4)
729# define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4)
730# define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4)
731# define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4)
732# define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4)
733# define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4)
734# define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4)
735# define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4)
736# define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4)
737# define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4)
738# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
739# define RADEON_GMC_BRUSH_NONE (15 << 4)
740# define RADEON_GMC_DST_8BPP_CI (2 << 8)
741# define RADEON_GMC_DST_15BPP (3 << 8)
742# define RADEON_GMC_DST_16BPP (4 << 8)
743# define RADEON_GMC_DST_24BPP (5 << 8)
744# define RADEON_GMC_DST_32BPP (6 << 8)
745# define RADEON_GMC_DST_8BPP_RGB (7 << 8)
746# define RADEON_GMC_DST_Y8 (8 << 8)
747# define RADEON_GMC_DST_RGB8 (9 << 8)
748# define RADEON_GMC_DST_VYUY (11 << 8)
749# define RADEON_GMC_DST_YVYU (12 << 8)
750# define RADEON_GMC_DST_AYUV444 (14 << 8)
751# define RADEON_GMC_DST_ARGB4444 (15 << 8)
752# define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8)
753# define RADEON_GMC_DST_DATATYPE_SHIFT 8
754# define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12)
755# define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12)
756# define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12)
757# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
758# define RADEON_GMC_BYTE_PIX_ORDER (1 << 14)
759# define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14)
760# define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14)
761# define RADEON_GMC_CONVERSION_TEMP (1 << 15)
762# define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15)
763# define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15)
764# define RADEON_GMC_ROP3_MASK (0xff << 16)
765# define RADEON_DP_SRC_SOURCE_MASK (7 << 24)
766# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
767# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
768# define RADEON_GMC_3D_FCN_EN (1 << 27)
769# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
770# define RADEON_GMC_AUX_CLIP_DIS (1 << 29)
771# define RADEON_GMC_WR_MSK_DIS (1 << 30)
772# define RADEON_GMC_LD_BRUSH_Y_X (1 << 31)
773# define RADEON_ROP3_ZERO 0x00000000
774# define RADEON_ROP3_DSa 0x00880000
775# define RADEON_ROP3_SDna 0x00440000
776# define RADEON_ROP3_S 0x00cc0000
777# define RADEON_ROP3_DSna 0x00220000
778# define RADEON_ROP3_D 0x00aa0000
779# define RADEON_ROP3_DSx 0x00660000
780# define RADEON_ROP3_DSo 0x00ee0000
781# define RADEON_ROP3_DSon 0x00110000
782# define RADEON_ROP3_DSxn 0x00990000
783# define RADEON_ROP3_Dn 0x00550000
784# define RADEON_ROP3_SDno 0x00dd0000
785# define RADEON_ROP3_Sn 0x00330000
786# define RADEON_ROP3_DSno 0x00bb0000
787# define RADEON_ROP3_DSan 0x00770000
788# define RADEON_ROP3_ONE 0x00ff0000
789# define RADEON_ROP3_DPa 0x00a00000
790# define RADEON_ROP3_PDna 0x00500000
791# define RADEON_ROP3_P 0x00f00000
792# define RADEON_ROP3_DPna 0x000a0000
793# define RADEON_ROP3_D 0x00aa0000
794# define RADEON_ROP3_DPx 0x005a0000
795# define RADEON_ROP3_DPo 0x00fa0000
796# define RADEON_ROP3_DPon 0x00050000
797# define RADEON_ROP3_PDxn 0x00a50000
798# define RADEON_ROP3_PDno 0x00f50000
799# define RADEON_ROP3_Pn 0x000f0000
800# define RADEON_ROP3_DPno 0x00af0000
801# define RADEON_ROP3_DPan 0x005f0000
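/*
 * Illustrative sketch (not part of the original header, assumes <linux/types.h>):
 * one plausible way to compose DP_GUI_MASTER_CNTL for a 32bpp solid-colour
 * rectangle fill, combining the brush, datatype and ROP fields defined above.
 * The colour itself would go into DP_BRUSH_FRGD_CLR and the rectangle into
 * the DST_* registers; this only shows the control word.
 */
static u32 radeon_gmc_solid_fill_32bpp(void)
{
	return RADEON_GMC_DST_PITCH_OFFSET_CNTL |	/* pitch/offset from DST_PITCH_OFFSET */
	       RADEON_GMC_BRUSH_SOLID_COLOR |		/* brush is a constant colour */
	       RADEON_GMC_SRC_DATATYPE_COLOR |
	       RADEON_GMC_DST_32BPP |			/* destination pixel format */
	       RADEON_ROP3_P |				/* ROP: pattern (brush) to destination */
	       RADEON_GMC_CLR_CMP_CNTL_DIS |		/* no colour-compare keying */
	       RADEON_GMC_WR_MSK_DIS;			/* no per-plane write mask */
}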
802#define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84
803#define RADEON_DP_MIX 0x16c8
804#define RADEON_DP_SRC_BKGD_CLR 0x15dc
805#define RADEON_DP_SRC_FRGD_CLR 0x15d8
806#define RADEON_DP_WRITE_MASK 0x16cc
807#define RADEON_DST_BRES_DEC 0x1630
808#define RADEON_DST_BRES_ERR 0x1628
809#define RADEON_DST_BRES_INC 0x162c
810#define RADEON_DST_BRES_LNTH 0x1634
811#define RADEON_DST_BRES_LNTH_SUB 0x1638
812#define RADEON_DST_HEIGHT 0x1410
813#define RADEON_DST_HEIGHT_WIDTH 0x143c
814#define RADEON_DST_HEIGHT_WIDTH_8 0x158c
815#define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4
816#define RADEON_DST_HEIGHT_Y 0x15a0
817#define RADEON_DST_LINE_START 0x1600
818#define RADEON_DST_LINE_END 0x1604
819#define RADEON_DST_LINE_PATCOUNT 0x1608
820# define RADEON_BRES_CNTL_SHIFT 8
821#define RADEON_DST_OFFSET 0x1404
822#define RADEON_DST_PITCH 0x1408
823#define RADEON_DST_PITCH_OFFSET 0x142c
824#define RADEON_DST_PITCH_OFFSET_C 0x1c80
825# define RADEON_PITCH_SHIFT 21
826# define RADEON_DST_TILE_LINEAR (0 << 30)
827# define RADEON_DST_TILE_MACRO (1 << 30)
828# define RADEON_DST_TILE_MICRO (2 << 30)
829# define RADEON_DST_TILE_BOTH (3 << 30)
830#define RADEON_DST_WIDTH 0x140c
831#define RADEON_DST_WIDTH_HEIGHT 0x1598
832#define RADEON_DST_WIDTH_X 0x1588
833#define RADEON_DST_WIDTH_X_INCY 0x159c
834#define RADEON_DST_X 0x141c
835#define RADEON_DST_X_SUB 0x15a4
836#define RADEON_DST_X_Y 0x1594
837#define RADEON_DST_Y 0x1420
838#define RADEON_DST_Y_SUB 0x15a8
839#define RADEON_DST_Y_X 0x1438
840
841#define RADEON_FCP_CNTL 0x0910
842# define RADEON_FCP0_SRC_PCICLK 0
843# define RADEON_FCP0_SRC_PCLK 1
844# define RADEON_FCP0_SRC_PCLKb 2
845# define RADEON_FCP0_SRC_HREF 3
846# define RADEON_FCP0_SRC_GND 4
847# define RADEON_FCP0_SRC_HREFb 5
848#define RADEON_FLUSH_1 0x1704
849#define RADEON_FLUSH_2 0x1708
850#define RADEON_FLUSH_3 0x170c
851#define RADEON_FLUSH_4 0x1710
852#define RADEON_FLUSH_5 0x1714
853#define RADEON_FLUSH_6 0x1718
854#define RADEON_FLUSH_7 0x171c
855#define RADEON_FOG_3D_TABLE_START 0x1810
856#define RADEON_FOG_3D_TABLE_END 0x1814
857#define RADEON_FOG_3D_TABLE_DENSITY 0x181c
858#define RADEON_FOG_TABLE_INDEX 0x1a14
859#define RADEON_FOG_TABLE_DATA 0x1a18
860#define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250
861#define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254
862# define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff
863# define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000
864# define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff
865# define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000
866# define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8
867# define RADEON_FP_H_SYNC_WID_MASK 0x003f0000
868# define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff
869# define RADEON_FP_V_SYNC_WID_MASK 0x001f0000
870# define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000
871# define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010
872# define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000
873# define RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010
874# define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
875# define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010
876# define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000
877# define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010
878#define RADEON_FP_GEN_CNTL 0x0284
879# define RADEON_FP_FPON (1 << 0)
880# define RADEON_FP_BLANK_EN (1 << 1)
881# define RADEON_FP_TMDS_EN (1 << 2)
882# define RADEON_FP_PANEL_FORMAT (1 << 3)
883# define RADEON_FP_EN_TMDS (1 << 7)
884# define RADEON_FP_DETECT_SENSE (1 << 8)
885# define R200_FP_SOURCE_SEL_MASK (3 << 10)
886# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
887# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
888# define R200_FP_SOURCE_SEL_RMX (2 << 10)
889# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
890# define RADEON_FP_SEL_CRTC1 (0 << 13)
891# define RADEON_FP_SEL_CRTC2 (1 << 13)
892# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
893# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
894# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
895# define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18)
896# define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
897# define RADEON_FP_DFP_SYNC_SEL (1 << 21)
898# define RADEON_FP_CRTC_LOCK_8DOT (1 << 22)
899# define RADEON_FP_CRT_SYNC_SEL (1 << 23)
900# define RADEON_FP_USE_SHADOW_EN (1 << 24)
901# define RADEON_FP_CRT_SYNC_ALT (1 << 26)
902#define RADEON_FP2_GEN_CNTL 0x0288
903# define RADEON_FP2_BLANK_EN (1 << 1)
904# define RADEON_FP2_ON (1 << 2)
905# define RADEON_FP2_PANEL_FORMAT (1 << 3)
906# define RADEON_FP2_DETECT_SENSE (1 << 8)
907# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
908# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
909# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
910# define R200_FP2_SOURCE_SEL_RMX (2 << 10)
911# define R200_FP2_SOURCE_SEL_TRANS_UNIT (3 << 10)
912# define RADEON_FP2_SRC_SEL_MASK (3 << 13)
913# define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13)
914# define RADEON_FP2_FP_POL (1 << 16)
915# define RADEON_FP2_LP_POL (1 << 17)
916# define RADEON_FP2_SCK_POL (1 << 18)
917# define RADEON_FP2_LCD_CNTL_MASK (7 << 19)
918# define RADEON_FP2_PAD_FLOP_EN (1 << 22)
919# define RADEON_FP2_CRC_EN (1 << 23)
920# define RADEON_FP2_CRC_READ_EN (1 << 24)
921# define RADEON_FP2_DVO_EN (1 << 25)
922# define RADEON_FP2_DVO_RATE_SEL_SDR (1 << 26)
923# define R200_FP2_DVO_RATE_SEL_SDR (1 << 27)
924# define R300_FP2_DVO_CLOCK_MODE_SINGLE (1 << 28)
925# define R300_FP2_DVO_DUAL_CHANNEL_EN (1 << 29)
926#define RADEON_FP_H_SYNC_STRT_WID 0x02c4
927#define RADEON_FP_H2_SYNC_STRT_WID 0x03c4
928#define RADEON_FP_HORZ_STRETCH 0x028c
929#define RADEON_FP_HORZ2_STRETCH 0x038c
930# define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
931# define RADEON_HORZ_STRETCH_RATIO_MAX 4096
932# define RADEON_HORZ_PANEL_SIZE (0x1ff << 16)
933# define RADEON_HORZ_PANEL_SHIFT 16
934# define RADEON_HORZ_STRETCH_PIXREP (0 << 25)
935# define RADEON_HORZ_STRETCH_BLEND (1 << 26)
936# define RADEON_HORZ_STRETCH_ENABLE (1 << 25)
937# define RADEON_HORZ_AUTO_RATIO (1 << 27)
938# define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28)
939# define RADEON_HORZ_AUTO_RATIO_INC (1 << 31)
940#define RADEON_FP_HORZ_VERT_ACTIVE 0x0278
941#define RADEON_FP_V_SYNC_STRT_WID 0x02c8
942#define RADEON_FP_VERT_STRETCH 0x0290
943#define RADEON_FP_V2_SYNC_STRT_WID 0x03c8
944#define RADEON_FP_VERT2_STRETCH 0x0390
945# define RADEON_VERT_PANEL_SIZE (0xfff << 12)
946# define RADEON_VERT_PANEL_SHIFT 12
947# define RADEON_VERT_STRETCH_RATIO_MASK 0xfff
948# define RADEON_VERT_STRETCH_RATIO_SHIFT 0
949# define RADEON_VERT_STRETCH_RATIO_MAX 4096
950# define RADEON_VERT_STRETCH_ENABLE (1 << 25)
951# define RADEON_VERT_STRETCH_LINEREP (0 << 26)
952# define RADEON_VERT_STRETCH_BLEND (1 << 26)
953# define RADEON_VERT_AUTO_RATIO_EN (1 << 27)
954# define RADEON_VERT_AUTO_RATIO_INC (1 << 31)
955# define RADEON_VERT_STRETCH_RESERVED 0x71000000
956#define RS400_FP_2ND_GEN_CNTL 0x0384
957# define RS400_FP_2ND_ON (1 << 0)
958# define RS400_FP_2ND_BLANK_EN (1 << 1)
959# define RS400_TMDS_2ND_EN (1 << 2)
960# define RS400_PANEL_FORMAT_2ND (1 << 3)
961# define RS400_FP_2ND_EN_TMDS (1 << 7)
962# define RS400_FP_2ND_DETECT_SENSE (1 << 8)
963# define RS400_FP_2ND_SOURCE_SEL_MASK (3 << 10)
964# define RS400_FP_2ND_SOURCE_SEL_CRTC1 (0 << 10)
965# define RS400_FP_2ND_SOURCE_SEL_CRTC2 (1 << 10)
966# define RS400_FP_2ND_SOURCE_SEL_RMX (2 << 10)
967# define RS400_FP_2ND_DETECT_EN (1 << 12)
968# define RS400_HPD_2ND_SEL (1 << 13)
969#define RS400_FP2_2_GEN_CNTL 0x0388
970# define RS400_FP2_2_BLANK_EN (1 << 1)
971# define RS400_FP2_2_ON (1 << 2)
972# define RS400_FP2_2_PANEL_FORMAT (1 << 3)
973# define RS400_FP2_2_DETECT_SENSE (1 << 8)
974# define RS400_FP2_2_SOURCE_SEL_MASK (3 << 10)
975# define RS400_FP2_2_SOURCE_SEL_CRTC1 (0 << 10)
976# define RS400_FP2_2_SOURCE_SEL_CRTC2 (1 << 10)
977# define RS400_FP2_2_SOURCE_SEL_RMX (2 << 10)
978# define RS400_FP2_2_DVO2_EN (1 << 25)
979#define RS400_TMDS2_CNTL 0x0394
980#define RS400_TMDS2_TRANSMITTER_CNTL 0x03a4
981# define RS400_TMDS2_PLLEN (1 << 0)
982# define RS400_TMDS2_PLLRST (1 << 1)
983
984#define RADEON_GEN_INT_CNTL 0x0040
985# define RADEON_SW_INT_ENABLE (1 << 25)
986#define RADEON_GEN_INT_STATUS 0x0044
987# define RADEON_VSYNC_INT_AK (1 << 2)
988# define RADEON_VSYNC_INT (1 << 2)
989# define RADEON_VSYNC2_INT_AK (1 << 6)
990# define RADEON_VSYNC2_INT (1 << 6)
991# define RADEON_SW_INT_FIRE (1 << 26)
992# define RADEON_SW_INT_TEST (1 << 25)
993# define RADEON_SW_INT_TEST_ACK (1 << 25)
994#define RADEON_GENENB 0x03c3 /* VGA */
995#define RADEON_GENFC_RD 0x03ca /* VGA */
996#define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */
997#define RADEON_GENMO_RD 0x03cc /* VGA */
998#define RADEON_GENMO_WT 0x03c2 /* VGA */
999#define RADEON_GENS0 0x03c2 /* VGA */
1000#define RADEON_GENS1 0x03da /* VGA, 0x03ba */
1001#define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */ /* DDC3 */
1002#define RADEON_GPIO_MONIDB 0x006c
1003#define RADEON_GPIO_CRT2_DDC 0x006c
1004#define RADEON_GPIO_DVI_DDC 0x0064 /* DDC2 */
1005#define RADEON_GPIO_VGA_DDC 0x0060 /* DDC1 */
1006# define RADEON_GPIO_A_0 (1 << 0)
1007# define RADEON_GPIO_A_1 (1 << 1)
1008# define RADEON_GPIO_Y_0 (1 << 8)
1009# define RADEON_GPIO_Y_1 (1 << 9)
1010# define RADEON_GPIO_Y_SHIFT_0 8
1011# define RADEON_GPIO_Y_SHIFT_1 9
1012# define RADEON_GPIO_EN_0 (1 << 16)
1013# define RADEON_GPIO_EN_1 (1 << 17)
1014# define RADEON_GPIO_MASK_0 (1 << 24) /*??*/
1015# define RADEON_GPIO_MASK_1 (1 << 25) /*??*/
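/*
 * Illustrative sketch (not part of the original header): the DDC GPIO
 * registers above behave like open-drain pads - a line is driven low by
 * setting its EN bit while A stays 0, and released/read by clearing EN and
 * sampling the Y bit.  Treating bit 0 as the data line (and bit 1 as the
 * clock line) is an assumption here; readl()/writel() come from <linux/io.h>.
 */
static int radeon_ddc_get_sda(void __iomem *mmio, u32 ddc_reg)
{
	return (readl(mmio + ddc_reg) & RADEON_GPIO_Y_0) != 0;
}

static void radeon_ddc_set_sda(void __iomem *mmio, u32 ddc_reg, int high)
{
	u32 val = readl(mmio + ddc_reg) & ~(RADEON_GPIO_A_0 | RADEON_GPIO_EN_0);

	if (!high)
		val |= RADEON_GPIO_EN_0;	/* enable the output, A stays 0 -> drive low */
	writel(val, mmio + ddc_reg);
}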
1016#define RADEON_GRPH8_DATA 0x03cf /* VGA */
1017#define RADEON_GRPH8_IDX 0x03ce /* VGA */
1018#define RADEON_GUI_SCRATCH_REG0 0x15e0
1019#define RADEON_GUI_SCRATCH_REG1 0x15e4
1020#define RADEON_GUI_SCRATCH_REG2 0x15e8
1021#define RADEON_GUI_SCRATCH_REG3 0x15ec
1022#define RADEON_GUI_SCRATCH_REG4 0x15f0
1023#define RADEON_GUI_SCRATCH_REG5 0x15f4
1024
1025#define RADEON_HEADER 0x0f0e /* PCI */
1026#define RADEON_HOST_DATA0 0x17c0
1027#define RADEON_HOST_DATA1 0x17c4
1028#define RADEON_HOST_DATA2 0x17c8
1029#define RADEON_HOST_DATA3 0x17cc
1030#define RADEON_HOST_DATA4 0x17d0
1031#define RADEON_HOST_DATA5 0x17d4
1032#define RADEON_HOST_DATA6 0x17d8
1033#define RADEON_HOST_DATA7 0x17dc
1034#define RADEON_HOST_DATA_LAST 0x17e0
1035#define RADEON_HOST_PATH_CNTL 0x0130
1036# define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24)
1037# define RADEON_HDP_READ_BUFFER_INVALIDATE (1 << 27)
1038# define RADEON_HDP_SOFT_RESET (1 << 26)
1039# define RADEON_HDP_APER_CNTL (1 << 23)
1040#define RADEON_HTOTAL_CNTL 0x0009 /* PLL */
1041# define RADEON_HTOT_CNTL_VGA_EN (1 << 28)
1042#define RADEON_HTOTAL2_CNTL 0x002e /* PLL */
1043
1044 /* Multimedia I2C bus */
1045#define RADEON_I2C_CNTL_0 0x0090
1046#define RADEON_I2C_DONE (1<<0)
1047#define RADEON_I2C_NACK (1<<1)
1048#define RADEON_I2C_HALT (1<<2)
1049#define RADEON_I2C_SOFT_RST (1<<5)
1050#define RADEON_I2C_DRIVE_EN (1<<6)
1051#define RADEON_I2C_DRIVE_SEL (1<<7)
1052#define RADEON_I2C_START (1<<8)
1053#define RADEON_I2C_STOP (1<<9)
1054#define RADEON_I2C_RECEIVE (1<<10)
1055#define RADEON_I2C_ABORT (1<<11)
1056#define RADEON_I2C_GO (1<<12)
1057#define RADEON_I2C_CNTL_1 0x0094
1058#define RADEON_I2C_SEL (1<<16)
1059#define RADEON_I2C_EN (1<<17)
1060#define RADEON_I2C_DATA 0x0098
1061
1062#define RADEON_DVI_I2C_CNTL_0 0x02e0
1063# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
1064# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
1065# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
1066# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
1067#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
1068#define RADEON_DVI_I2C_DATA 0x02e8
1069
1070#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
1071#define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */
1072#define RADEON_IO_BASE 0x0f14 /* PCI */
1073
1074#define RADEON_LATENCY 0x0f0d /* PCI */
1075#define RADEON_LEAD_BRES_DEC 0x1608
1076#define RADEON_LEAD_BRES_LNTH 0x161c
1077#define RADEON_LEAD_BRES_LNTH_SUB 0x1624
1078#define RADEON_LVDS_GEN_CNTL 0x02d0
1079# define RADEON_LVDS_ON (1 << 0)
1080# define RADEON_LVDS_DISPLAY_DIS (1 << 1)
1081# define RADEON_LVDS_PANEL_TYPE (1 << 2)
1082# define RADEON_LVDS_PANEL_FORMAT (1 << 3)
1083# define RADEON_LVDS_NO_FM (0 << 4)
1084# define RADEON_LVDS_2_GREY (1 << 4)
1085# define RADEON_LVDS_4_GREY (2 << 4)
1086# define RADEON_LVDS_RST_FM (1 << 6)
1087# define RADEON_LVDS_EN (1 << 7)
1088# define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
1089# define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
1090# define RADEON_LVDS_BL_MOD_EN (1 << 16)
1091# define RADEON_LVDS_BL_CLK_SEL (1 << 17)
1092# define RADEON_LVDS_DIGON (1 << 18)
1093# define RADEON_LVDS_BLON (1 << 19)
1094# define RADEON_LVDS_FP_POL_LOW (1 << 20)
1095# define RADEON_LVDS_LP_POL_LOW (1 << 21)
1096# define RADEON_LVDS_DTM_POL_LOW (1 << 22)
1097# define RADEON_LVDS_SEL_CRTC2 (1 << 23)
1098# define RADEON_LVDS_FPDI_EN (1 << 27)
1099# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28
1100#define RADEON_LVDS_PLL_CNTL 0x02d4
1101# define RADEON_HSYNC_DELAY_SHIFT 28
1102# define RADEON_HSYNC_DELAY_MASK (0xf << 28)
1103# define RADEON_LVDS_PLL_EN (1 << 16)
1104# define RADEON_LVDS_PLL_RESET (1 << 17)
1105# define R300_LVDS_SRC_SEL_MASK (3 << 18)
1106# define R300_LVDS_SRC_SEL_CRTC1 (0 << 18)
1107# define R300_LVDS_SRC_SEL_CRTC2 (1 << 18)
1108# define R300_LVDS_SRC_SEL_RMX (2 << 18)
1109#define RADEON_LVDS_SS_GEN_CNTL 0x02ec
1110# define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT 16
1111# define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT 20
1112
1113#define RADEON_MAX_LATENCY 0x0f3f /* PCI */
1114#define RADEON_DISPLAY_BASE_ADDR 0x23c
1115#define RADEON_DISPLAY2_BASE_ADDR 0x33c
1116#define RADEON_OV0_BASE_ADDR 0x43c
1117#define RADEON_NB_TOM 0x15c
1118#define R300_MC_INIT_MISC_LAT_TIMER 0x180
1119# define R300_MC_DISP0R_INIT_LAT_SHIFT 8
1120# define R300_MC_DISP0R_INIT_LAT_MASK 0xf
1121# define R300_MC_DISP1R_INIT_LAT_SHIFT 12
1122# define R300_MC_DISP1R_INIT_LAT_MASK 0xf
1123#define RADEON_MCLK_CNTL 0x0012 /* PLL */
1124# define RADEON_MCLKA_SRC_SEL_MASK 0x7
1125# define RADEON_FORCEON_MCLKA (1 << 16)
1126# define RADEON_FORCEON_MCLKB (1 << 17)
1127# define RADEON_FORCEON_YCLKA (1 << 18)
1128# define RADEON_FORCEON_YCLKB (1 << 19)
1129# define RADEON_FORCEON_MC (1 << 20)
1130# define RADEON_FORCEON_AIC (1 << 21)
1131# define R300_DISABLE_MC_MCLKA (1 << 21)
1132# define R300_DISABLE_MC_MCLKB (1 << 21)
1133#define RADEON_MCLK_MISC 0x001f /* PLL */
1134# define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
1135# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
1136# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
1137# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
1138#define RADEON_LCD_GPIO_MASK 0x01a0
1139#define RADEON_GPIOPAD_EN 0x01a0
1140#define RADEON_LCD_GPIO_Y_REG 0x01a4
1141#define RADEON_MDGPIO_A_REG 0x01ac
1142#define RADEON_MDGPIO_EN_REG 0x01b0
1143#define RADEON_MDGPIO_MASK 0x0198
1144#define RADEON_GPIOPAD_MASK 0x0198
1145#define RADEON_GPIOPAD_A 0x019c
1146#define RADEON_MDGPIO_Y_REG 0x01b4
1147#define RADEON_MEM_ADDR_CONFIG 0x0148
1148#define RADEON_MEM_BASE 0x0f10 /* PCI */
1149#define RADEON_MEM_CNTL 0x0140
1150# define RADEON_MEM_NUM_CHANNELS_MASK 0x01
1151# define RADEON_MEM_USE_B_CH_ONLY (1 << 1)
1152# define RV100_HALF_MODE (1 << 3)
1153# define R300_MEM_NUM_CHANNELS_MASK 0x03
1154# define R300_MEM_USE_CD_CH_ONLY (1 << 2)
1155#define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */
1156#define RADEON_MEM_INIT_LAT_TIMER 0x0154
1157#define RADEON_MEM_INTF_CNTL 0x014c
1158#define RADEON_MEM_SDRAM_MODE_REG 0x0158
1159# define RADEON_SDRAM_MODE_MASK 0xffff0000
1160# define RADEON_B3MEM_RESET_MASK 0x6fffffff
1161# define RADEON_MEM_CFG_TYPE_DDR (1 << 30)
1162#define RADEON_MEM_STR_CNTL 0x0150
1163# define RADEON_MEM_PWRUP_COMPL_A (1 << 0)
1164# define RADEON_MEM_PWRUP_COMPL_B (1 << 1)
1165# define R300_MEM_PWRUP_COMPL_C (1 << 2)
1166# define R300_MEM_PWRUP_COMPL_D (1 << 3)
1167# define RADEON_MEM_PWRUP_COMPLETE 0x03
1168# define R300_MEM_PWRUP_COMPLETE 0x0f
1169#define RADEON_MC_STATUS 0x0150
1170# define RADEON_MC_IDLE (1 << 2)
1171# define R300_MC_IDLE (1 << 4)
1172#define RADEON_MEM_VGA_RP_SEL 0x003c
1173#define RADEON_MEM_VGA_WP_SEL 0x0038
1174#define RADEON_MIN_GRANT 0x0f3e /* PCI */
1175#define RADEON_MM_DATA 0x0004
1176#define RADEON_MM_INDEX 0x0000
1177# define RADEON_MM_APER (1 << 31)
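/*
 * Illustrative sketch (not part of the original header): registers that lie
 * beyond the directly mapped MMIO aperture can be reached through the
 * MM_INDEX/MM_DATA window - write the register offset to MM_INDEX, then read
 * or write MM_DATA.  readl()/writel() and the 'mmio' base are assumptions
 * from <linux/io.h>.
 */
static u32 radeon_mm_indirect_rreg(void __iomem *mmio, u32 reg)
{
	writel(reg, mmio + RADEON_MM_INDEX);
	return readl(mmio + RADEON_MM_DATA);
}

static void radeon_mm_indirect_wreg(void __iomem *mmio, u32 reg, u32 val)
{
	writel(reg, mmio + RADEON_MM_INDEX);
	writel(val, mmio + RADEON_MM_DATA);
}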
1178#define RADEON_MPLL_CNTL 0x000e /* PLL */
1179#define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */
1180#define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */
1181#define RADEON_SEPROM_CNTL1 0x01c0
1182# define RADEON_SCK_PRESCALE_SHIFT 24
1183# define RADEON_SCK_PRESCALE_MASK (0xff << 24)
1184#define R300_MC_IND_INDEX 0x01f8
1185# define R300_MC_IND_ADDR_MASK 0x3f
1186# define R300_MC_IND_WR_EN (1 << 8)
1187#define R300_MC_IND_DATA 0x01fc
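/*
 * Illustrative sketch (not part of the original header): on R300-class parts
 * the memory-controller registers are indirect as well - the MC register
 * number goes into MC_IND_INDEX (with MC_IND_WR_EN set for writes) and the
 * value moves through MC_IND_DATA.  Function names and accessors are
 * placeholders.
 */
static u32 r300_mc_ind_rreg(void __iomem *mmio, u32 mc_reg)
{
	writel(mc_reg & R300_MC_IND_ADDR_MASK, mmio + R300_MC_IND_INDEX);
	return readl(mmio + R300_MC_IND_DATA);
}

static void r300_mc_ind_wreg(void __iomem *mmio, u32 mc_reg, u32 val)
{
	writel((mc_reg & R300_MC_IND_ADDR_MASK) | R300_MC_IND_WR_EN,
	       mmio + R300_MC_IND_INDEX);
	writel(val, mmio + R300_MC_IND_DATA);
}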
1188#define R300_MC_READ_CNTL_AB 0x017c
1189# define R300_MEM_RBS_POSITION_A_MASK 0x03
1190#define R300_MC_READ_CNTL_CD_mcind 0x24
1191# define R300_MEM_RBS_POSITION_C_MASK 0x03
1192
1193#define RADEON_N_VIF_COUNT 0x0248
1194
1195#define RADEON_OV0_AUTO_FLIP_CNTL 0x0470
1196# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM 0x00000007
1197# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD 0x00000008
1198# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD 0x00000010
1199# define RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
1200# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE 0x00000040
1201# define RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT 0x00000300
1202# define RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN 0x00010000
1203# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN 0x00040000
1204# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN 0x00080000
1205# define RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE 0x00800000
1206
1207#define RADEON_OV0_COLOUR_CNTL 0x04E0
1208#define RADEON_OV0_DEINTERLACE_PATTERN 0x0474
1209#define RADEON_OV0_EXCLUSIVE_HORZ 0x0408
1210# define RADEON_EXCL_HORZ_START_MASK 0x000000ff
1211# define RADEON_EXCL_HORZ_END_MASK 0x0000ff00
1212# define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000
1213# define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000
1214#define RADEON_OV0_EXCLUSIVE_VERT 0x040C
1215# define RADEON_EXCL_VERT_START_MASK 0x000003ff
1216# define RADEON_EXCL_VERT_END_MASK 0x03ff0000
1217#define RADEON_OV0_FILTER_CNTL 0x04A0
1218# define RADEON_FILTER_PROGRAMMABLE_COEF 0x0
1219# define RADEON_FILTER_HC_COEF_HORZ_Y 0x1
1220# define RADEON_FILTER_HC_COEF_HORZ_UV 0x2
1221# define RADEON_FILTER_HC_COEF_VERT_Y 0x4
1222# define RADEON_FILTER_HC_COEF_VERT_UV 0x8
1223# define RADEON_FILTER_HARDCODED_COEF 0xf
1224# define RADEON_FILTER_COEF_MASK 0xf
1225
1226#define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0
1227#define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4
1228#define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8
1229#define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC
1230#define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0
1231#define RADEON_OV0_FLAG_CNTL 0x04DC
1232#define RADEON_OV0_GAMMA_000_00F 0x0d40
1233#define RADEON_OV0_GAMMA_010_01F 0x0d44
1234#define RADEON_OV0_GAMMA_020_03F 0x0d48
1235#define RADEON_OV0_GAMMA_040_07F 0x0d4c
1236#define RADEON_OV0_GAMMA_080_0BF 0x0e00
1237#define RADEON_OV0_GAMMA_0C0_0FF 0x0e04
1238#define RADEON_OV0_GAMMA_100_13F 0x0e08
1239#define RADEON_OV0_GAMMA_140_17F 0x0e0c
1240#define RADEON_OV0_GAMMA_180_1BF 0x0e10
1241#define RADEON_OV0_GAMMA_1C0_1FF 0x0e14
1242#define RADEON_OV0_GAMMA_200_23F 0x0e18
1243#define RADEON_OV0_GAMMA_240_27F 0x0e1c
1244#define RADEON_OV0_GAMMA_280_2BF 0x0e20
1245#define RADEON_OV0_GAMMA_2C0_2FF 0x0e24
1246#define RADEON_OV0_GAMMA_300_33F 0x0e28
1247#define RADEON_OV0_GAMMA_340_37F 0x0e2c
1248#define RADEON_OV0_GAMMA_380_3BF 0x0d50
1249#define RADEON_OV0_GAMMA_3C0_3FF 0x0d54
1250#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC
1251#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0
1252#define RADEON_OV0_H_INC 0x0480
1253#define RADEON_OV0_KEY_CNTL 0x04F4
1254# define RADEON_VIDEO_KEY_FN_MASK 0x00000003L
1255# define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L
1256# define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L
1257# define RADEON_VIDEO_KEY_FN_EQ 0x00000002L
1258# define RADEON_VIDEO_KEY_FN_NE 0x00000003L
1259# define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L
1260# define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
1261# define RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L
1262# define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L
1263# define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L
1264# define RADEON_CMP_MIX_MASK 0x00000100L
1265# define RADEON_CMP_MIX_OR 0x00000000L
1266# define RADEON_CMP_MIX_AND 0x00000100L
1267#define RADEON_OV0_LIN_TRANS_A 0x0d20
1268#define RADEON_OV0_LIN_TRANS_B 0x0d24
1269#define RADEON_OV0_LIN_TRANS_C 0x0d28
1270#define RADEON_OV0_LIN_TRANS_D 0x0d2c
1271#define RADEON_OV0_LIN_TRANS_E 0x0d30
1272#define RADEON_OV0_LIN_TRANS_F 0x0d34
1273#define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430
1274# define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL
1275# define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L
1276#define RADEON_OV0_P1_H_ACCUM_INIT 0x0488
1277#define RADEON_OV0_P1_V_ACCUM_INIT 0x0428
1278# define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
1279# define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L
1280#define RADEON_OV0_P1_X_START_END 0x0494
1281#define RADEON_OV0_P2_X_START_END 0x0498
1282#define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434
1283# define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL
1284# define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L
1285#define RADEON_OV0_P23_H_ACCUM_INIT 0x048C
1286#define RADEON_OV0_P23_V_ACCUM_INIT 0x042C
1287#define RADEON_OV0_P3_X_START_END 0x049C
1288#define RADEON_OV0_REG_LOAD_CNTL 0x0410
1289# define RADEON_REG_LD_CTL_LOCK 0x00000001L
1290# define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L
1291# define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
1292# define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L
1293# define RADEON_REG_LD_CTL_FLIP_READBACK 0x00000010L
1294#define RADEON_OV0_SCALE_CNTL 0x0420
1295# define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L
1296# define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L
1297# define RADEON_SCALER_SIGNED_UV 0x00000010L
1298# define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L
1299# define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L
1300# define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L
1301# define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L
1302# define RADEON_SCALER_GAMMA_SEL_G14 0x00000060L
1303# define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
1304# define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L
1305# define RADEON_SCALER_SOURCE_15BPP 0x00000300L
1306# define RADEON_SCALER_SOURCE_16BPP 0x00000400L
1307# define RADEON_SCALER_SOURCE_32BPP 0x00000600L
1308# define RADEON_SCALER_SOURCE_YUV9 0x00000900L
1309# define RADEON_SCALER_SOURCE_YUV12 0x00000A00L
1310# define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L
1311# define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L
1312# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L
1313# define RADEON_SCALER_TEMPORAL_DEINT 0x00002000L
1314# define RADEON_SCALER_CRTC_SEL 0x00004000L
1315# define RADEON_SCALER_SMART_SWITCH 0x00008000L
1316# define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L
1317# define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L
1318# define RADEON_SCALER_DIS_LIMIT 0x08000000L
1319# define RADEON_SCALER_LIN_TRANS_BYPASS 0x10000000L
1320# define RADEON_SCALER_INT_EMU 0x20000000L
1321# define RADEON_SCALER_ENABLE 0x40000000L
1322# define RADEON_SCALER_SOFT_RESET 0x80000000L
1323#define RADEON_OV0_STEP_BY 0x0484
1324#define RADEON_OV0_TEST 0x04F8
1325#define RADEON_OV0_V_INC 0x0424
1326#define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460
1327#define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464
1328#define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440
1329# define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L
1330# define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L
1331# define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L
1332# define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
1333#define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444
1334# define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L
1335# define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L
1336# define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L
1337# define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
1338#define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448
1339# define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L
1340# define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L
1341# define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L
1342# define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
1343#define RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C
1344#define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450
1345#define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454
1346#define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8
1347#define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4
1348#define RADEON_OV0_Y_X_START 0x0400
1349#define RADEON_OV0_Y_X_END 0x0404
1350#define RADEON_OV1_Y_X_START 0x0600
1351#define RADEON_OV1_Y_X_END 0x0604
1352#define RADEON_OVR_CLR 0x0230
1353#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
1354#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
1355
1356/* first capture unit */
1357
1358#define RADEON_CAP0_BUF0_OFFSET 0x0920
1359#define RADEON_CAP0_BUF1_OFFSET 0x0924
1360#define RADEON_CAP0_BUF0_EVEN_OFFSET 0x0928
1361#define RADEON_CAP0_BUF1_EVEN_OFFSET 0x092C
1362
1363#define RADEON_CAP0_BUF_PITCH 0x0930
1364#define RADEON_CAP0_V_WINDOW 0x0934
1365#define RADEON_CAP0_H_WINDOW 0x0938
1366#define RADEON_CAP0_VBI0_OFFSET 0x093C
1367#define RADEON_CAP0_VBI1_OFFSET 0x0940
1368#define RADEON_CAP0_VBI_V_WINDOW 0x0944
1369#define RADEON_CAP0_VBI_H_WINDOW 0x0948
1370#define RADEON_CAP0_PORT_MODE_CNTL 0x094C
1371#define RADEON_CAP0_TRIG_CNTL 0x0950
1372#define RADEON_CAP0_DEBUG 0x0954
1373#define RADEON_CAP0_CONFIG 0x0958
1374# define RADEON_CAP0_CONFIG_CONTINUOS 0x00000001
1375# define RADEON_CAP0_CONFIG_START_FIELD_EVEN 0x00000002
1376# define RADEON_CAP0_CONFIG_START_BUF_GET 0x00000004
1377# define RADEON_CAP0_CONFIG_START_BUF_SET 0x00000008
1378# define RADEON_CAP0_CONFIG_BUF_TYPE_ALT 0x00000010
1379# define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME 0x00000020
1380# define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
1381# define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE 0x00000080
1382# define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE 0x00000100
1383# define RADEON_CAP0_CONFIG_MIRROR_EN 0x00000200
1384# define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN 0x00000400
1385# define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV 0x00000800
1386# define RADEON_CAP0_CONFIG_ANC_DECODE_EN 0x00001000
1387# define RADEON_CAP0_CONFIG_VBI_EN 0x00002000
1388# define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN 0x00004000
1389# define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
1390# define RADEON_CAP0_CONFIG_FAKE_FIELD_EN 0x00010000
1391# define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE 0x00020000
1392# define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
1393# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2 0x00080000
1394# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4 0x00100000
1395# define RADEON_CAP0_CONFIG_VERT_DIVIDE_2 0x00200000
1396# define RADEON_CAP0_CONFIG_VERT_DIVIDE_4 0x00400000
1397# define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE 0x00000000
1398# define RADEON_CAP0_CONFIG_FORMAT_CCIR656 0x00800000
1399# define RADEON_CAP0_CONFIG_FORMAT_ZV 0x01000000
1400# define RADEON_CAP0_CONFIG_FORMAT_VIP 0x01800000
1401# define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT 0x02000000
1402# define RADEON_CAP0_CONFIG_HORZ_DECIMATOR 0x04000000
1403# define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422 0x00000000
1404# define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422 0x20000000
1405# define RADEON_CAP0_CONFIG_VBI_DIVIDE_2 0x40000000
1406# define RADEON_CAP0_CONFIG_VBI_DIVIDE_4 0x80000000
1407#define RADEON_CAP0_ANC_ODD_OFFSET 0x095C
1408#define RADEON_CAP0_ANC_EVEN_OFFSET 0x0960
1409#define RADEON_CAP0_ANC_H_WINDOW 0x0964
1410#define RADEON_CAP0_VIDEO_SYNC_TEST 0x0968
1411#define RADEON_CAP0_ONESHOT_BUF_OFFSET 0x096C
1412#define RADEON_CAP0_BUF_STATUS 0x0970
1413/* #define RADEON_CAP0_DWNSC_XRATIO 0x0978 */
1414/* #define RADEON_CAP0_XSHARPNESS 0x097C */
1415#define RADEON_CAP0_VBI2_OFFSET 0x0980
1416#define RADEON_CAP0_VBI3_OFFSET 0x0984
1417#define RADEON_CAP0_ANC2_OFFSET 0x0988
1418#define RADEON_CAP0_ANC3_OFFSET 0x098C
1419#define RADEON_VID_BUFFER_CONTROL 0x0900
1420
1421/* second capture unit */
1422
1423#define RADEON_CAP1_BUF0_OFFSET 0x0990
1424#define RADEON_CAP1_BUF1_OFFSET 0x0994
1425#define RADEON_CAP1_BUF0_EVEN_OFFSET 0x0998
1426#define RADEON_CAP1_BUF1_EVEN_OFFSET 0x099C
1427
1428#define RADEON_CAP1_BUF_PITCH 0x09A0
1429#define RADEON_CAP1_V_WINDOW 0x09A4
1430#define RADEON_CAP1_H_WINDOW 0x09A8
1431#define RADEON_CAP1_VBI_ODD_OFFSET 0x09AC
1432#define RADEON_CAP1_VBI_EVEN_OFFSET 0x09B0
1433#define RADEON_CAP1_VBI_V_WINDOW 0x09B4
1434#define RADEON_CAP1_VBI_H_WINDOW 0x09B8
1435#define RADEON_CAP1_PORT_MODE_CNTL 0x09BC
1436#define RADEON_CAP1_TRIG_CNTL 0x09C0
1437#define RADEON_CAP1_DEBUG 0x09C4
1438#define RADEON_CAP1_CONFIG 0x09C8
1439#define RADEON_CAP1_ANC_ODD_OFFSET 0x09CC
1440#define RADEON_CAP1_ANC_EVEN_OFFSET 0x09D0
1441#define RADEON_CAP1_ANC_H_WINDOW 0x09D4
1442#define RADEON_CAP1_VIDEO_SYNC_TEST 0x09D8
1443#define RADEON_CAP1_ONESHOT_BUF_OFFSET 0x09DC
1444#define RADEON_CAP1_BUF_STATUS 0x09E0
1445#define RADEON_CAP1_DWNSC_XRATIO 0x09E8
1446#define RADEON_CAP1_XSHARPNESS 0x09EC
1447
1448/* misc multimedia registers */
1449
1450#define RADEON_IDCT_RUNS 0x1F80
1451#define RADEON_IDCT_LEVELS 0x1F84
1452#define RADEON_IDCT_CONTROL 0x1FBC
1453#define RADEON_IDCT_AUTH_CONTROL 0x1F88
1454#define RADEON_IDCT_AUTH 0x1F8C
1455
1456#define RADEON_P2PLL_CNTL 0x002a /* P2PLL */
1457# define RADEON_P2PLL_RESET (1 << 0)
1458# define RADEON_P2PLL_SLEEP (1 << 1)
1459# define RADEON_P2PLL_PVG_MASK (7 << 11)
1460# define RADEON_P2PLL_PVG_SHIFT 11
1461# define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16)
1462# define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
1463# define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18)
1464#define RADEON_P2PLL_DIV_0 0x002c
1465# define RADEON_P2PLL_FB0_DIV_MASK 0x07ff
1466# define RADEON_P2PLL_POST0_DIV_MASK 0x00070000
1467#define RADEON_P2PLL_REF_DIV 0x002B /* PLL */
1468# define RADEON_P2PLL_REF_DIV_MASK 0x03ff
1469# define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
1470# define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
1471# define R300_PPLL_REF_DIV_ACC_MASK (0x3ff << 18)
1472# define R300_PPLL_REF_DIV_ACC_SHIFT 18
1473#define RADEON_PALETTE_DATA 0x00b4
1474#define RADEON_PALETTE_30_DATA 0x00b8
1475#define RADEON_PALETTE_INDEX 0x00b0
1476#define RADEON_PCI_GART_PAGE 0x017c
1477#define RADEON_PIXCLKS_CNTL 0x002d
1478# define RADEON_PIX2CLK_SRC_SEL_MASK 0x03
1479# define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00
1480# define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
1481# define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02
1482# define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
1483# define RADEON_PIX2CLK_ALWAYS_ONb (1<<6)
1484# define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7)
1485# define RADEON_PIXCLK_TV_SRC_SEL (1 << 8)
1486# define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
1487# define R300_DVOCLK_ALWAYS_ONb (1 << 10)
1488# define RADEON_PIXCLK_BLEND_ALWAYS_ONb (1 << 11)
1489# define RADEON_PIXCLK_GV_ALWAYS_ONb (1 << 12)
1490# define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
1491# define R300_PIXCLK_DVO_ALWAYS_ONb (1 << 13)
1492# define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14)
1493# define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15)
1494# define R300_PIXCLK_TRANS_ALWAYS_ONb (1 << 16)
1495# define R300_PIXCLK_TVO_ALWAYS_ONb (1 << 17)
1496# define R300_P2G2CLK_ALWAYS_ONb (1 << 18)
1497# define R300_P2G2CLK_DAC_ALWAYS_ONb (1 << 19)
1498# define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
1499#define RADEON_PLANE_3D_MASK_C 0x1d44
1500#define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */
1501# define RADEON_PLL_MASK_READ_B (1 << 9)
1502#define RADEON_PMI_CAP_ID 0x0f5c /* PCI */
1503#define RADEON_PMI_DATA 0x0f63 /* PCI */
1504#define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */
1505#define RADEON_PMI_PMC_REG 0x0f5e /* PCI */
1506#define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */
1507#define RADEON_PMI_REGISTER 0x0f5c /* PCI */
1508#define RADEON_PPLL_CNTL 0x0002 /* PLL */
1509# define RADEON_PPLL_RESET (1 << 0)
1510# define RADEON_PPLL_SLEEP (1 << 1)
1511# define RADEON_PPLL_PVG_MASK (7 << 11)
1512# define RADEON_PPLL_PVG_SHIFT 11
1513# define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16)
1514# define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
1515# define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18)
1516#define RADEON_PPLL_DIV_0 0x0004 /* PLL */
1517#define RADEON_PPLL_DIV_1 0x0005 /* PLL */
1518#define RADEON_PPLL_DIV_2 0x0006 /* PLL */
1519#define RADEON_PPLL_DIV_3 0x0007 /* PLL */
1520# define RADEON_PPLL_FB3_DIV_MASK 0x07ff
1521# define RADEON_PPLL_POST3_DIV_MASK 0x00070000
1522#define RADEON_PPLL_REF_DIV 0x0003 /* PLL */
1523# define RADEON_PPLL_REF_DIV_MASK 0x03ff
1524# define RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
1525# define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
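/*
 * Illustrative sketch (not part of the original header): the pixel PLL output
 * follows the usual ref * fb_div / (ref_div * post_div) relation, with
 * ref_div taken from PPLL_REF_DIV, fb_div from the low bits of PPLL_DIV_n,
 * and the post divider decoded from the POST_DIV field (that 3-bit field is
 * an encoded selector, not the divider value itself, so the decode is left
 * to the caller here rather than guessing the encoding table).
 */
static unsigned int radeon_ppll_out_khz(unsigned int ref_khz,
					unsigned int ref_div,
					unsigned int fb_div,
					unsigned int post_div)
{
	if (!ref_div || !post_div)
		return 0;
	return (unsigned int)(((unsigned long long)ref_khz * fb_div) /
			      (ref_div * post_div));
}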
1526#define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */
1527
1528#define RADEON_RBBM_GUICNTL 0x172c
1529# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
1530# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
1531# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
1532# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
1533#define RADEON_RBBM_SOFT_RESET 0x00f0
1534# define RADEON_SOFT_RESET_CP (1 << 0)
1535# define RADEON_SOFT_RESET_HI (1 << 1)
1536# define RADEON_SOFT_RESET_SE (1 << 2)
1537# define RADEON_SOFT_RESET_RE (1 << 3)
1538# define RADEON_SOFT_RESET_PP (1 << 4)
1539# define RADEON_SOFT_RESET_E2 (1 << 5)
1540# define RADEON_SOFT_RESET_RB (1 << 6)
1541# define RADEON_SOFT_RESET_HDP (1 << 7)
1542#define RADEON_RBBM_STATUS 0x0e40
1543# define RADEON_RBBM_FIFOCNT_MASK 0x007f
1544# define RADEON_RBBM_ACTIVE (1 << 31)
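/*
 * Illustrative sketch (not part of the original header): a minimal engine
 * idle poll built on RBBM_STATUS - the command FIFO count sits in the low
 * bits and RBBM_ACTIVE stays set while the GUI engine is busy.  udelay()
 * comes from <linux/delay.h>; the timeout policy is arbitrary.
 */
static int radeon_wait_rbbm_idle(void __iomem *mmio, unsigned int tries)
{
	while (tries--) {
		if (!(readl(mmio + RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE))
			return 0;	/* engine idle */
		udelay(1);
	}
	return -1;			/* still busy after the given number of polls */
}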
1545#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
1546# define RADEON_RB2D_DC_FLUSH (3 << 0)
1547# define RADEON_RB2D_DC_FREE (3 << 2)
1548# define RADEON_RB2D_DC_FLUSH_ALL 0xf
1549# define RADEON_RB2D_DC_BUSY (1 << 31)
1550#define RADEON_RB2D_DSTCACHE_MODE 0x3428
1551#define RADEON_DSTCACHE_CTLSTAT 0x1714
1552
1553#define RADEON_RB3D_ZCACHE_MODE 0x3250
1554#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
1555# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
1556#define RADEON_RB3D_DSTCACHE_MODE 0x3258
1557# define RADEON_RB3D_DC_CACHE_ENABLE (0)
1558# define RADEON_RB3D_DC_2D_CACHE_DISABLE (1)
1559# define RADEON_RB3D_DC_3D_CACHE_DISABLE (2)
1560# define RADEON_RB3D_DC_CACHE_DISABLE (3)
1561# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128 (1 << 2)
1562# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128 (2 << 2)
1563# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH (1 << 8)
1564# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH (2 << 8)
1565# define R200_RB3D_DC_2D_CACHE_AUTOFREE (1 << 10)
1566# define R200_RB3D_DC_3D_CACHE_AUTOFREE (2 << 10)
1567# define RADEON_RB3D_DC_FORCE_RMW (1 << 16)
1568# define RADEON_RB3D_DC_DISABLE_RI_FILL (1 << 24)
1569# define RADEON_RB3D_DC_DISABLE_RI_READ (1 << 25)
1570
1571#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325C
1572# define RADEON_RB3D_DC_FLUSH (3 << 0)
1573# define RADEON_RB3D_DC_FREE (3 << 2)
1574# define RADEON_RB3D_DC_FLUSH_ALL 0xf
1575# define RADEON_RB3D_DC_BUSY (1 << 31)
1576
1577#define RADEON_REG_BASE 0x0f18 /* PCI */
1578#define RADEON_REGPROG_INF 0x0f09 /* PCI */
1579#define RADEON_REVISION_ID 0x0f08 /* PCI */
1580
1581#define RADEON_SC_BOTTOM 0x164c
1582#define RADEON_SC_BOTTOM_RIGHT 0x16f0
1583#define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c
1584#define RADEON_SC_LEFT 0x1640
1585#define RADEON_SC_RIGHT 0x1644
1586#define RADEON_SC_TOP 0x1648
1587#define RADEON_SC_TOP_LEFT 0x16ec
1588#define RADEON_SC_TOP_LEFT_C 0x1c88
1589# define RADEON_SC_SIGN_MASK_LO 0x8000
1590# define RADEON_SC_SIGN_MASK_HI 0x80000000
1591#define RADEON_M_SPLL_REF_FB_DIV 0x000a /* PLL */
1592# define RADEON_M_SPLL_REF_DIV_SHIFT 0
1593# define RADEON_M_SPLL_REF_DIV_MASK 0xff
1594# define RADEON_MPLL_FB_DIV_SHIFT 8
1595# define RADEON_MPLL_FB_DIV_MASK 0xff
1596# define RADEON_SPLL_FB_DIV_SHIFT 16
1597# define RADEON_SPLL_FB_DIV_MASK 0xff
1598#define RADEON_SPLL_CNTL 0x000c /* PLL */
1599# define RADEON_SPLL_SLEEP (1 << 0)
1600# define RADEON_SPLL_RESET (1 << 1)
1601# define RADEON_SPLL_PCP_MASK 0x7
1602# define RADEON_SPLL_PCP_SHIFT 8
1603# define RADEON_SPLL_PVG_MASK 0x7
1604# define RADEON_SPLL_PVG_SHIFT 11
1605# define RADEON_SPLL_PDC_MASK 0x3
1606# define RADEON_SPLL_PDC_SHIFT 14
1607#define RADEON_SCLK_CNTL 0x000d /* PLL */
1608# define RADEON_SCLK_SRC_SEL_MASK 0x0007
1609# define RADEON_DYN_STOP_LAT_MASK 0x00007ff8
1610# define RADEON_CP_MAX_DYN_STOP_LAT 0x0008
1611# define RADEON_SCLK_FORCEON_MASK 0xffff8000
1612# define RADEON_SCLK_FORCE_DISP2 (1<<15)
1613# define RADEON_SCLK_FORCE_CP (1<<16)
1614# define RADEON_SCLK_FORCE_HDP (1<<17)
1615# define RADEON_SCLK_FORCE_DISP1 (1<<18)
1616# define RADEON_SCLK_FORCE_TOP (1<<19)
1617# define RADEON_SCLK_FORCE_E2 (1<<20)
1618# define RADEON_SCLK_FORCE_SE (1<<21)
1619# define RADEON_SCLK_FORCE_IDCT (1<<22)
1620# define RADEON_SCLK_FORCE_VIP (1<<23)
1621# define RADEON_SCLK_FORCE_RE (1<<24)
1622# define RADEON_SCLK_FORCE_PB (1<<25)
1623# define RADEON_SCLK_FORCE_TAM (1<<26)
1624# define RADEON_SCLK_FORCE_TDM (1<<27)
1625# define RADEON_SCLK_FORCE_RB (1<<28)
1626# define RADEON_SCLK_FORCE_TV_SCLK (1<<29)
1627# define RADEON_SCLK_FORCE_SUBPIC (1<<30)
1628# define RADEON_SCLK_FORCE_OV0 (1<<31)
1629# define R300_SCLK_FORCE_VAP (1<<21)
1630# define R300_SCLK_FORCE_SR (1<<25)
1631# define R300_SCLK_FORCE_PX (1<<26)
1632# define R300_SCLK_FORCE_TX (1<<27)
1633# define R300_SCLK_FORCE_US (1<<28)
1634# define R300_SCLK_FORCE_SU (1<<30)
1635#define R300_SCLK_CNTL2 0x1e /* PLL */
1636# define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
1637# define R300_SCLK_GA_MAX_DYN_STOP_LAT (1<<11)
1638# define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
1639# define R300_SCLK_FORCE_TCL (1<<13)
1640# define R300_SCLK_FORCE_CBA (1<<14)
1641# define R300_SCLK_FORCE_GA (1<<15)
1642#define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */
1643# define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
1644# define RADEON_SCLK_MORE_FORCEON 0x0700
1645#define RADEON_SDRAM_MODE_REG 0x0158
1646#define RADEON_SEQ8_DATA 0x03c5 /* VGA */
1647#define RADEON_SEQ8_IDX 0x03c4 /* VGA */
1648#define RADEON_SNAPSHOT_F_COUNT 0x0244
1649#define RADEON_SNAPSHOT_VH_COUNTS 0x0240
1650#define RADEON_SNAPSHOT_VIF_COUNT 0x024c
1651#define RADEON_SRC_OFFSET 0x15ac
1652#define RADEON_SRC_PITCH 0x15b0
1653#define RADEON_SRC_PITCH_OFFSET 0x1428
1654#define RADEON_SRC_SC_BOTTOM 0x165c
1655#define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4
1656#define RADEON_SRC_SC_RIGHT 0x1654
1657#define RADEON_SRC_X 0x1414
1658#define RADEON_SRC_X_Y 0x1590
1659#define RADEON_SRC_Y 0x1418
1660#define RADEON_SRC_Y_X 0x1434
1661#define RADEON_STATUS 0x0f06 /* PCI */
1662#define RADEON_SUBPIC_CNTL 0x0540 /* ? */
1663#define RADEON_SUB_CLASS 0x0f0a /* PCI */
1664#define RADEON_SURFACE_CNTL 0x0b00
1665# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
1666# define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
1667# define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
1668# define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
1669# define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
1670#define RADEON_SURFACE0_INFO 0x0b0c
1671# define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
1672# define RADEON_SURF_TILE_COLOR_BOTH (1 << 16)
1673# define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
1674# define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
1675# define R200_SURF_TILE_NONE (0 << 16)
1676# define R200_SURF_TILE_COLOR_MACRO (1 << 16)
1677# define R200_SURF_TILE_COLOR_MICRO (2 << 16)
1678# define R200_SURF_TILE_COLOR_BOTH (3 << 16)
1679# define R200_SURF_TILE_DEPTH_32BPP (4 << 16)
1680# define R200_SURF_TILE_DEPTH_16BPP (5 << 16)
1681# define R300_SURF_TILE_NONE (0 << 16)
1682# define R300_SURF_TILE_COLOR_MACRO (1 << 16)
1683# define R300_SURF_TILE_DEPTH_32BPP (2 << 16)
1684# define RADEON_SURF_AP0_SWP_16BPP (1 << 20)
1685# define RADEON_SURF_AP0_SWP_32BPP (1 << 21)
1686# define RADEON_SURF_AP1_SWP_16BPP (1 << 22)
1687# define RADEON_SURF_AP1_SWP_32BPP (1 << 23)
1688#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
1689#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
1690#define RADEON_SURFACE1_INFO 0x0b1c
1691#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
1692#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
1693#define RADEON_SURFACE2_INFO 0x0b2c
1694#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
1695#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
1696#define RADEON_SURFACE3_INFO 0x0b3c
1697#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
1698#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
1699#define RADEON_SURFACE4_INFO 0x0b4c
1700#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
1701#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
1702#define RADEON_SURFACE5_INFO 0x0b5c
1703#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
1704#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
1705#define RADEON_SURFACE6_INFO 0x0b6c
1706#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
1707#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
1708#define RADEON_SURFACE7_INFO 0x0b7c
1709#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
1710#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
1711#define RADEON_SW_SEMAPHORE 0x013c
1712
1713#define RADEON_TEST_DEBUG_CNTL 0x0120
1714#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
1715
1716#define RADEON_TEST_DEBUG_MUX 0x0124
1717#define RADEON_TEST_DEBUG_OUT 0x012c
1718#define RADEON_TMDS_PLL_CNTL 0x02a8
1719#define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4
1720# define RADEON_TMDS_TRANSMITTER_PLLEN 1
1721# define RADEON_TMDS_TRANSMITTER_PLLRST 2
1722#define RADEON_TRAIL_BRES_DEC 0x1614
1723#define RADEON_TRAIL_BRES_ERR 0x160c
1724#define RADEON_TRAIL_BRES_INC 0x1610
1725#define RADEON_TRAIL_X 0x1618
1726#define RADEON_TRAIL_X_SUB 0x1620
1727
1728#define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */
1729# define RADEON_VCLK_SRC_SEL_MASK 0x03
1730# define RADEON_VCLK_SRC_SEL_CPUCLK 0x00
1731# define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
1732# define RADEON_VCLK_SRC_SEL_BYTECLK 0x02
1733# define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03
1734# define RADEON_PIXCLK_ALWAYS_ONb (1<<6)
1735# define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
1736# define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
1737
1738#define RADEON_VENDOR_ID 0x0f00 /* PCI */
1739#define RADEON_VGA_DDA_CONFIG 0x02e8
1740#define RADEON_VGA_DDA_ON_OFF 0x02ec
1741#define RADEON_VID_BUFFER_CONTROL 0x0900
1742#define RADEON_VIDEOMUX_CNTL 0x0190
1743
1744/* VIP bus */
1745#define RADEON_VIPH_CH0_DATA 0x0c00
1746#define RADEON_VIPH_CH1_DATA 0x0c04
1747#define RADEON_VIPH_CH2_DATA 0x0c08
1748#define RADEON_VIPH_CH3_DATA 0x0c0c
1749#define RADEON_VIPH_CH0_ADDR 0x0c10
1750#define RADEON_VIPH_CH1_ADDR 0x0c14
1751#define RADEON_VIPH_CH2_ADDR 0x0c18
1752#define RADEON_VIPH_CH3_ADDR 0x0c1c
1753#define RADEON_VIPH_CH0_SBCNT 0x0c20
1754#define RADEON_VIPH_CH1_SBCNT 0x0c24
1755#define RADEON_VIPH_CH2_SBCNT 0x0c28
1756#define RADEON_VIPH_CH3_SBCNT 0x0c2c
1757#define RADEON_VIPH_CH0_ABCNT 0x0c30
1758#define RADEON_VIPH_CH1_ABCNT 0x0c34
1759#define RADEON_VIPH_CH2_ABCNT 0x0c38
1760#define RADEON_VIPH_CH3_ABCNT 0x0c3c
1761#define RADEON_VIPH_CONTROL 0x0c40
1762# define RADEON_VIP_BUSY 0
1763# define RADEON_VIP_IDLE 1
1764# define RADEON_VIP_RESET 2
1765# define RADEON_VIPH_EN (1 << 21)
1766#define RADEON_VIPH_DV_LAT 0x0c44
1767#define RADEON_VIPH_BM_CHUNK 0x0c48
1768#define RADEON_VIPH_DV_INT 0x0c4c
1769#define RADEON_VIPH_TIMEOUT_STAT 0x0c50
1770#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
1771#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK 0x00000010
1772#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
1773
1774#define RADEON_VIPH_REG_DATA 0x0084
1775#define RADEON_VIPH_REG_ADDR 0x0080
1776
1777
1778#define RADEON_WAIT_UNTIL 0x1720
1779# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
1780# define RADEON_WAIT_RE_CRTC_VLINE (1 << 1)
1781# define RADEON_WAIT_FE_CRTC_VLINE (1 << 2)
1782# define RADEON_WAIT_CRTC_VLINE (1 << 3)
1783# define RADEON_WAIT_DMA_VID_IDLE (1 << 8)
1784# define RADEON_WAIT_DMA_GUI_IDLE (1 << 9)
1785# define RADEON_WAIT_CMDFIFO (1 << 10) /* wait for CMDFIFO_ENTRIES */
1786# define RADEON_WAIT_OV0_FLIP (1 << 11)
1787# define RADEON_WAIT_AGP_FLUSH (1 << 13)
1788# define RADEON_WAIT_2D_IDLE (1 << 14)
1789# define RADEON_WAIT_3D_IDLE (1 << 15)
1790# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
1791# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
1792# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
1793# define RADEON_CMDFIFO_ENTRIES_SHIFT 10
1794# define RADEON_CMDFIFO_ENTRIES_MASK 0x7f
1795# define RADEON_WAIT_VAP_IDLE (1 << 28)
1796# define RADEON_WAIT_BOTH_CRTC_PFLIP (1 << 30)
1797# define RADEON_ENG_DISPLAY_SELECT_CRTC0 (0 << 31)
1798# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1 << 31)
1799
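/*
 * Editor's illustrative sketch, not part of the original header: WAIT_UNTIL
 * is normally programmed through the command processor rather than by a
 * direct register write.  Assuming the driver's PACKET0()/radeon_ring_write()
 * helpers, waiting for the 2D and 3D engines to drain might look like:
 *
 *   radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
 *   radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN |
 *                           RADEON_WAIT_3D_IDLECLEAN |
 *                           RADEON_WAIT_HOST_IDLECLEAN);
 */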
1800#define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */
1801#define RADEON_XCLK_CNTL 0x000d /* PLL */
1802#define RADEON_XDLL_CNTL 0x000c /* PLL */
1803#define RADEON_XPLL_CNTL 0x000b /* PLL */
1804
1805
1806
1807 /* Registers for 3D/TCL */
1808#define RADEON_PP_BORDER_COLOR_0 0x1d40
1809#define RADEON_PP_BORDER_COLOR_1 0x1d44
1810#define RADEON_PP_BORDER_COLOR_2 0x1d48
1811#define RADEON_PP_CNTL 0x1c38
1812# define RADEON_STIPPLE_ENABLE (1 << 0)
1813# define RADEON_SCISSOR_ENABLE (1 << 1)
1814# define RADEON_PATTERN_ENABLE (1 << 2)
1815# define RADEON_SHADOW_ENABLE (1 << 3)
1816# define RADEON_TEX_ENABLE_MASK (0xf << 4)
1817# define RADEON_TEX_0_ENABLE (1 << 4)
1818# define RADEON_TEX_1_ENABLE (1 << 5)
1819# define RADEON_TEX_2_ENABLE (1 << 6)
1820# define RADEON_TEX_3_ENABLE (1 << 7)
1821# define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
1822# define RADEON_TEX_BLEND_0_ENABLE (1 << 12)
1823# define RADEON_TEX_BLEND_1_ENABLE (1 << 13)
1824# define RADEON_TEX_BLEND_2_ENABLE (1 << 14)
1825# define RADEON_TEX_BLEND_3_ENABLE (1 << 15)
1826# define RADEON_PLANAR_YUV_ENABLE (1 << 20)
1827# define RADEON_SPECULAR_ENABLE (1 << 21)
1828# define RADEON_FOG_ENABLE (1 << 22)
1829# define RADEON_ALPHA_TEST_ENABLE (1 << 23)
1830# define RADEON_ANTI_ALIAS_NONE (0 << 24)
1831# define RADEON_ANTI_ALIAS_LINE (1 << 24)
1832# define RADEON_ANTI_ALIAS_POLY (2 << 24)
1833# define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24)
1834# define RADEON_BUMP_MAP_ENABLE (1 << 26)
1835# define RADEON_BUMPED_MAP_T0 (0 << 27)
1836# define RADEON_BUMPED_MAP_T1 (1 << 27)
1837# define RADEON_BUMPED_MAP_T2 (2 << 27)
1838# define RADEON_TEX_3D_ENABLE_0 (1 << 29)
1839# define RADEON_TEX_3D_ENABLE_1 (1 << 30)
1840# define RADEON_MC_ENABLE (1 << 31)
1841#define RADEON_PP_FOG_COLOR 0x1c18
1842# define RADEON_FOG_COLOR_MASK 0x00ffffff
1843# define RADEON_FOG_VERTEX (0 << 24)
1844# define RADEON_FOG_TABLE (1 << 24)
1845# define RADEON_FOG_USE_DEPTH (0 << 25)
1846# define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
1847# define RADEON_FOG_USE_SPEC_ALPHA (3 << 25)
1848#define RADEON_PP_LUM_MATRIX 0x1d00
1849#define RADEON_PP_MISC 0x1c14
1850# define RADEON_REF_ALPHA_MASK 0x000000ff
1851# define RADEON_ALPHA_TEST_FAIL (0 << 8)
1852# define RADEON_ALPHA_TEST_LESS (1 << 8)
1853# define RADEON_ALPHA_TEST_LEQUAL (2 << 8)
1854# define RADEON_ALPHA_TEST_EQUAL (3 << 8)
1855# define RADEON_ALPHA_TEST_GEQUAL (4 << 8)
1856# define RADEON_ALPHA_TEST_GREATER (5 << 8)
1857# define RADEON_ALPHA_TEST_NEQUAL (6 << 8)
1858# define RADEON_ALPHA_TEST_PASS (7 << 8)
1859# define RADEON_ALPHA_TEST_OP_MASK (7 << 8)
1860# define RADEON_CHROMA_FUNC_FAIL (0 << 16)
1861# define RADEON_CHROMA_FUNC_PASS (1 << 16)
1862# define RADEON_CHROMA_FUNC_NEQUAL (2 << 16)
1863# define RADEON_CHROMA_FUNC_EQUAL (3 << 16)
1864# define RADEON_CHROMA_KEY_NEAREST (0 << 18)
1865# define RADEON_CHROMA_KEY_ZERO (1 << 18)
1866# define RADEON_SHADOW_ID_AUTO_INC (1 << 20)
1867# define RADEON_SHADOW_FUNC_EQUAL (0 << 21)
1868# define RADEON_SHADOW_FUNC_NEQUAL (1 << 21)
1869# define RADEON_SHADOW_PASS_1 (0 << 22)
1870# define RADEON_SHADOW_PASS_2 (1 << 22)
1871# define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24)
1872# define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24)
1873#define RADEON_PP_ROT_MATRIX_0 0x1d58
1874#define RADEON_PP_ROT_MATRIX_1 0x1d5c
1875#define RADEON_PP_TXFILTER_0 0x1c54
1876#define RADEON_PP_TXFILTER_1 0x1c6c
1877#define RADEON_PP_TXFILTER_2 0x1c84
1878# define RADEON_MAG_FILTER_NEAREST (0 << 0)
1879# define RADEON_MAG_FILTER_LINEAR (1 << 0)
1880# define RADEON_MAG_FILTER_MASK (1 << 0)
1881# define RADEON_MIN_FILTER_NEAREST (0 << 1)
1882# define RADEON_MIN_FILTER_LINEAR (1 << 1)
1883# define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
1884# define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
1885# define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
1886# define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
1887# define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1)
1888# define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1)
1889# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
1890# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
1891# define RADEON_MIN_FILTER_MASK (15 << 1)
1892# define RADEON_MAX_ANISO_1_TO_1 (0 << 5)
1893# define RADEON_MAX_ANISO_2_TO_1 (1 << 5)
1894# define RADEON_MAX_ANISO_4_TO_1 (2 << 5)
1895# define RADEON_MAX_ANISO_8_TO_1 (3 << 5)
1896# define RADEON_MAX_ANISO_16_TO_1 (4 << 5)
1897# define RADEON_MAX_ANISO_MASK (7 << 5)
1898# define RADEON_LOD_BIAS_MASK (0xff << 8)
1899# define RADEON_LOD_BIAS_SHIFT 8
1900# define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16)
1901# define RADEON_MAX_MIP_LEVEL_SHIFT 16
1902# define RADEON_YUV_TO_RGB (1 << 20)
1903# define RADEON_YUV_TEMPERATURE_COOL (0 << 21)
1904# define RADEON_YUV_TEMPERATURE_HOT (1 << 21)
1905# define RADEON_YUV_TEMPERATURE_MASK (1 << 21)
1906# define RADEON_WRAPEN_S (1 << 22)
1907# define RADEON_CLAMP_S_WRAP (0 << 23)
1908# define RADEON_CLAMP_S_MIRROR (1 << 23)
1909# define RADEON_CLAMP_S_CLAMP_LAST (2 << 23)
1910# define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
1911# define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23)
1912# define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
1913# define RADEON_CLAMP_S_CLAMP_GL (6 << 23)
1914# define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
1915# define RADEON_CLAMP_S_MASK (7 << 23)
1916# define RADEON_WRAPEN_T (1 << 26)
1917# define RADEON_CLAMP_T_WRAP (0 << 27)
1918# define RADEON_CLAMP_T_MIRROR (1 << 27)
1919# define RADEON_CLAMP_T_CLAMP_LAST (2 << 27)
1920# define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
1921# define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27)
1922# define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
1923# define RADEON_CLAMP_T_CLAMP_GL (6 << 27)
1924# define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
1925# define RADEON_CLAMP_T_MASK (7 << 27)
1926# define RADEON_BORDER_MODE_OGL (0 << 31)
1927# define RADEON_BORDER_MODE_D3D (1 << 31)
1928#define RADEON_PP_TXFORMAT_0 0x1c58
1929#define RADEON_PP_TXFORMAT_1 0x1c70
1930#define RADEON_PP_TXFORMAT_2 0x1c88
1931# define RADEON_TXFORMAT_I8 (0 << 0)
1932# define RADEON_TXFORMAT_AI88 (1 << 0)
1933# define RADEON_TXFORMAT_RGB332 (2 << 0)
1934# define RADEON_TXFORMAT_ARGB1555 (3 << 0)
1935# define RADEON_TXFORMAT_RGB565 (4 << 0)
1936# define RADEON_TXFORMAT_ARGB4444 (5 << 0)
1937# define RADEON_TXFORMAT_ARGB8888 (6 << 0)
1938# define RADEON_TXFORMAT_RGBA8888 (7 << 0)
1939# define RADEON_TXFORMAT_Y8 (8 << 0)
1940# define RADEON_TXFORMAT_VYUY422 (10 << 0)
1941# define RADEON_TXFORMAT_YVYU422 (11 << 0)
1942# define RADEON_TXFORMAT_DXT1 (12 << 0)
1943# define RADEON_TXFORMAT_DXT23 (14 << 0)
1944# define RADEON_TXFORMAT_DXT45 (15 << 0)
1945# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0)
1946# define RADEON_TXFORMAT_FORMAT_SHIFT 0
1947# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5)
1948# define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6)
1949# define RADEON_TXFORMAT_NON_POWER2 (1 << 7)
1950# define RADEON_TXFORMAT_WIDTH_MASK (15 << 8)
1951# define RADEON_TXFORMAT_WIDTH_SHIFT 8
1952# define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12)
1953# define RADEON_TXFORMAT_HEIGHT_SHIFT 12
1954# define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16)
1955# define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16
1956# define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
1957# define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20
1958# define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
1959# define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24)
1960# define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
1961# define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
1962# define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26)
1963# define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26)
1964# define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26)
1965# define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26)
1966# define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
1967# define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
1968# define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
1969# define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1 << 31)
1970#define RADEON_PP_CUBIC_FACES_0 0x1d24
1971#define RADEON_PP_CUBIC_FACES_1 0x1d28
1972#define RADEON_PP_CUBIC_FACES_2 0x1d2c
1973# define RADEON_FACE_WIDTH_1_SHIFT 0
1974# define RADEON_FACE_HEIGHT_1_SHIFT 4
1975# define RADEON_FACE_WIDTH_1_MASK (0xf << 0)
1976# define RADEON_FACE_HEIGHT_1_MASK (0xf << 4)
1977# define RADEON_FACE_WIDTH_2_SHIFT 8
1978# define RADEON_FACE_HEIGHT_2_SHIFT 12
1979# define RADEON_FACE_WIDTH_2_MASK (0xf << 8)
1980# define RADEON_FACE_HEIGHT_2_MASK (0xf << 12)
1981# define RADEON_FACE_WIDTH_3_SHIFT 16
1982# define RADEON_FACE_HEIGHT_3_SHIFT 20
1983# define RADEON_FACE_WIDTH_3_MASK (0xf << 16)
1984# define RADEON_FACE_HEIGHT_3_MASK (0xf << 20)
1985# define RADEON_FACE_WIDTH_4_SHIFT 24
1986# define RADEON_FACE_HEIGHT_4_SHIFT 28
1987# define RADEON_FACE_WIDTH_4_MASK (0xf << 24)
1988# define RADEON_FACE_HEIGHT_4_MASK (0xf << 28)
1989
1990#define RADEON_PP_TXOFFSET_0 0x1c5c
1991#define RADEON_PP_TXOFFSET_1 0x1c74
1992#define RADEON_PP_TXOFFSET_2 0x1c8c
1993# define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0)
1994# define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0)
1995# define RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0)
1996# define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
1997# define RADEON_TXO_MACRO_LINEAR (0 << 2)
1998# define RADEON_TXO_MACRO_TILE (1 << 2)
1999# define RADEON_TXO_MICRO_LINEAR (0 << 3)
2000# define RADEON_TXO_MICRO_TILE_X2 (1 << 3)
2001# define RADEON_TXO_MICRO_TILE_OPT (2 << 3)
2002# define RADEON_TXO_OFFSET_MASK 0xffffffe0
2003# define RADEON_TXO_OFFSET_SHIFT 5
2004
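/*
 * Editor's illustrative sketch, not part of the original header: the texture
 * base address occupies bits [31:5] of PP_TXOFFSET, so it must be 32-byte
 * aligned; the low bits carry the endian-swap and tiling flags.  For a
 * hypothetical GPU address 'gpu_addr':
 *
 *   pp_txoffset = (gpu_addr & RADEON_TXO_OFFSET_MASK) | RADEON_TXO_MACRO_TILE;
 */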
2005#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
2006#define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4
2007#define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8
2008#define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc
2009#define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0
2010#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
2011#define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04
2012#define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08
2013#define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c
2014#define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10
2015#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
2016#define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18
2017#define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c
2018#define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20
2019#define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24
2020
2021#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
2022#define RADEON_PP_TEX_SIZE_1 0x1d0c
2023#define RADEON_PP_TEX_SIZE_2 0x1d14
2024# define RADEON_TEX_USIZE_MASK (0x7ff << 0)
2025# define RADEON_TEX_USIZE_SHIFT 0
2026# define RADEON_TEX_VSIZE_MASK (0x7ff << 16)
2027# define RADEON_TEX_VSIZE_SHIFT 16
2028# define RADEON_SIGNED_RGB_MASK (1 << 30)
2029# define RADEON_SIGNED_RGB_SHIFT 30
2030# define RADEON_SIGNED_ALPHA_MASK (1 << 31)
2031# define RADEON_SIGNED_ALPHA_SHIFT 31
2032#define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */
2033#define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */
2034#define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */
2035/* note: bits 13-5: 32 byte aligned stride of texture map */
2036
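/*
 * Editor's illustrative sketch, not part of the original header: per the note
 * above, the stride held in bits 13:5 must be a multiple of 32 bytes, so a
 * driver would first round the pitch up, e.g.
 *
 *   pitch_bytes = (width_px * bytes_per_px + 31) & ~31u;
 *
 * How the aligned value is biased before being written into the field is not
 * specified here.
 */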
2037#define RADEON_PP_TXCBLEND_0 0x1c60
2038#define RADEON_PP_TXCBLEND_1 0x1c78
2039#define RADEON_PP_TXCBLEND_2 0x1c90
2040# define RADEON_COLOR_ARG_A_SHIFT 0
2041# define RADEON_COLOR_ARG_A_MASK (0x1f << 0)
2042# define RADEON_COLOR_ARG_A_ZERO (0 << 0)
2043# define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0)
2044# define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0)
2045# define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0)
2046# define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0)
2047# define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0)
2048# define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0)
2049# define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0)
2050# define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0)
2051# define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0)
2052# define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0)
2053# define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0)
2054# define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0)
2055# define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0)
2056# define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0)
2057# define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0)
2058# define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0)
2059# define RADEON_COLOR_ARG_B_SHIFT 5
2060# define RADEON_COLOR_ARG_B_MASK (0x1f << 5)
2061# define RADEON_COLOR_ARG_B_ZERO (0 << 5)
2062# define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5)
2063# define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5)
2064# define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5)
2065# define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5)
2066# define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5)
2067# define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5)
2068# define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5)
2069# define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5)
2070# define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5)
2071# define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5)
2072# define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5)
2073# define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5)
2074# define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5)
2075# define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 5)
2076# define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5)
2077# define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5)
2078# define RADEON_COLOR_ARG_C_SHIFT 10
2079# define RADEON_COLOR_ARG_C_MASK (0x1f << 10)
2080# define RADEON_COLOR_ARG_C_ZERO (0 << 10)
2081# define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10)
2082# define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10)
2083# define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10)
2084# define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10)
2085# define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10)
2086# define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10)
2087# define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10)
2088# define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10)
2089# define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10)
2090# define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10)
2091# define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10)
2092# define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10)
2093# define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10)
2094# define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10)
2095# define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10)
2096# define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10)
2097# define RADEON_COMP_ARG_A (1 << 15)
2098# define RADEON_COMP_ARG_A_SHIFT 15
2099# define RADEON_COMP_ARG_B (1 << 16)
2100# define RADEON_COMP_ARG_B_SHIFT 16
2101# define RADEON_COMP_ARG_C (1 << 17)
2102# define RADEON_COMP_ARG_C_SHIFT 17
2103# define RADEON_BLEND_CTL_MASK (7 << 18)
2104# define RADEON_BLEND_CTL_ADD (0 << 18)
2105# define RADEON_BLEND_CTL_SUBTRACT (1 << 18)
2106# define RADEON_BLEND_CTL_ADDSIGNED (2 << 18)
2107# define RADEON_BLEND_CTL_BLEND (3 << 18)
2108# define RADEON_BLEND_CTL_DOT3 (4 << 18)
2109# define RADEON_SCALE_SHIFT 21
2110# define RADEON_SCALE_MASK (3 << 21)
2111# define RADEON_SCALE_1X (0 << 21)
2112# define RADEON_SCALE_2X (1 << 21)
2113# define RADEON_SCALE_4X (2 << 21)
2114# define RADEON_CLAMP_TX (1 << 23)
2115# define RADEON_T0_EQ_TCUR (1 << 24)
2116# define RADEON_T1_EQ_TCUR (1 << 25)
2117# define RADEON_T2_EQ_TCUR (1 << 26)
2118# define RADEON_T3_EQ_TCUR (1 << 27)
2119# define RADEON_COLOR_ARG_MASK 0x1f
2120# define RADEON_COMP_ARG_SHIFT 15
2121#define RADEON_PP_TXABLEND_0 0x1c64
2122#define RADEON_PP_TXABLEND_1 0x1c7c
2123#define RADEON_PP_TXABLEND_2 0x1c94
2124# define RADEON_ALPHA_ARG_A_SHIFT 0
2125# define RADEON_ALPHA_ARG_A_MASK (0xf << 0)
2126# define RADEON_ALPHA_ARG_A_ZERO (0 << 0)
2127# define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0)
2128# define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0)
2129# define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0)
2130# define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0)
2131# define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0)
2132# define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0)
2133# define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0)
2134# define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0)
2135# define RADEON_ALPHA_ARG_B_SHIFT 4
2136# define RADEON_ALPHA_ARG_B_MASK (0xf << 4)
2137# define RADEON_ALPHA_ARG_B_ZERO (0 << 4)
2138# define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4)
2139# define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4)
2140# define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4)
2141# define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4)
2142# define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4)
2143# define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4)
2144# define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4)
2145# define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4)
2146# define RADEON_ALPHA_ARG_C_SHIFT 8
2147# define RADEON_ALPHA_ARG_C_MASK (0xf << 8)
2148# define RADEON_ALPHA_ARG_C_ZERO (0 << 8)
2149# define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8)
2150# define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8)
2151# define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8)
2152# define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8)
2153# define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8)
2154# define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8)
2155# define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8)
2156# define RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8)
2157# define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 9)
2158# define RADEON_ALPHA_ARG_MASK 0xf
2159
2160#define RADEON_PP_TFACTOR_0 0x1c68
2161#define RADEON_PP_TFACTOR_1 0x1c80
2162#define RADEON_PP_TFACTOR_2 0x1c98
2163
2164#define RADEON_RB3D_BLENDCNTL 0x1c20
2165# define RADEON_COMB_FCN_MASK (3 << 12)
2166# define RADEON_COMB_FCN_ADD_CLAMP (0 << 12)
2167# define RADEON_COMB_FCN_ADD_NOCLAMP (1 << 12)
2168# define RADEON_COMB_FCN_SUB_CLAMP (2 << 12)
2169# define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12)
2170# define RADEON_SRC_BLEND_GL_ZERO (32 << 16)
2171# define RADEON_SRC_BLEND_GL_ONE (33 << 16)
2172# define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16)
2173# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
2174# define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16)
2175# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
2176# define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
2177# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
2178# define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16)
2179# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
2180# define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
2181# define RADEON_SRC_BLEND_MASK (63 << 16)
2182# define RADEON_DST_BLEND_GL_ZERO (32 << 24)
2183# define RADEON_DST_BLEND_GL_ONE (33 << 24)
2184# define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24)
2185# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
2186# define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24)
2187# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
2188# define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24)
2189# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
2190# define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24)
2191# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
2192# define RADEON_DST_BLEND_MASK (63 << 24)
2193#define RADEON_RB3D_CNTL 0x1c3c
2194# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
2195# define RADEON_PLANE_MASK_ENABLE (1 << 1)
2196# define RADEON_DITHER_ENABLE (1 << 2)
2197# define RADEON_ROUND_ENABLE (1 << 3)
2198# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
2199# define RADEON_DITHER_INIT (1 << 5)
2200# define RADEON_ROP_ENABLE (1 << 6)
2201# define RADEON_STENCIL_ENABLE (1 << 7)
2202# define RADEON_Z_ENABLE (1 << 8)
2203# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
2204# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10
2205
2206# define RADEON_COLOR_FORMAT_ARGB1555 3
2207# define RADEON_COLOR_FORMAT_RGB565 4
2208# define RADEON_COLOR_FORMAT_ARGB8888 6
2209# define RADEON_COLOR_FORMAT_RGB332 7
2210# define RADEON_COLOR_FORMAT_Y8 8
2211# define RADEON_COLOR_FORMAT_RGB8 9
2212# define RADEON_COLOR_FORMAT_YUV422_VYUY 11
2213# define RADEON_COLOR_FORMAT_YUV422_YVYU 12
2214# define RADEON_COLOR_FORMAT_aYUV444 14
2215# define RADEON_COLOR_FORMAT_ARGB4444 15
2216
2217# define RADEON_CLRCMP_FLIP_ENABLE (1 << 14)
2218#define RADEON_RB3D_COLOROFFSET 0x1c40
2219# define RADEON_COLOROFFSET_MASK 0xfffffff0
2220#define RADEON_RB3D_COLORPITCH 0x1c48
2221# define RADEON_COLORPITCH_MASK 0x000001ff8
2222# define RADEON_COLOR_TILE_ENABLE (1 << 16)
2223# define RADEON_COLOR_MICROTILE_ENABLE (1 << 17)
2224# define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18)
2225# define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18)
2226# define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
2227#define RADEON_RB3D_DEPTHOFFSET 0x1c24
2228#define RADEON_RB3D_DEPTHPITCH 0x1c28
2229# define RADEON_DEPTHPITCH_MASK 0x00001ff8
2230# define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18)
2231# define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18)
2232# define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
2233#define RADEON_RB3D_PLANEMASK 0x1d84
2234#define RADEON_RB3D_ROPCNTL 0x1d80
2235# define RADEON_ROP_MASK (15 << 8)
2236# define RADEON_ROP_CLEAR (0 << 8)
2237# define RADEON_ROP_NOR (1 << 8)
2238# define RADEON_ROP_AND_INVERTED (2 << 8)
2239# define RADEON_ROP_COPY_INVERTED (3 << 8)
2240# define RADEON_ROP_AND_REVERSE (4 << 8)
2241# define RADEON_ROP_INVERT (5 << 8)
2242# define RADEON_ROP_XOR (6 << 8)
2243# define RADEON_ROP_NAND (7 << 8)
2244# define RADEON_ROP_AND (8 << 8)
2245# define RADEON_ROP_EQUIV (9 << 8)
2246# define RADEON_ROP_NOOP (10 << 8)
2247# define RADEON_ROP_OR_INVERTED (11 << 8)
2248# define RADEON_ROP_COPY (12 << 8)
2249# define RADEON_ROP_OR_REVERSE (13 << 8)
2250# define RADEON_ROP_OR (14 << 8)
2251# define RADEON_ROP_SET (15 << 8)
2252#define RADEON_RB3D_STENCILREFMASK 0x1d7c
2253# define RADEON_STENCIL_REF_SHIFT 0
2254# define RADEON_STENCIL_REF_MASK (0xff << 0)
2255# define RADEON_STENCIL_MASK_SHIFT 16
2256# define RADEON_STENCIL_VALUE_MASK (0xff << 16)
2257# define RADEON_STENCIL_WRITEMASK_SHIFT 24
2258# define RADEON_STENCIL_WRITE_MASK (0xff << 24)
2259#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
2260# define RADEON_DEPTH_FORMAT_MASK (0xf << 0)
2261# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
2262# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
2263# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0)
2264# define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0)
2265# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0)
2266# define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0)
2267# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0)
2268# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0)
2269# define RADEON_Z_TEST_NEVER (0 << 4)
2270# define RADEON_Z_TEST_LESS (1 << 4)
2271# define RADEON_Z_TEST_LEQUAL (2 << 4)
2272# define RADEON_Z_TEST_EQUAL (3 << 4)
2273# define RADEON_Z_TEST_GEQUAL (4 << 4)
2274# define RADEON_Z_TEST_GREATER (5 << 4)
2275# define RADEON_Z_TEST_NEQUAL (6 << 4)
2276# define RADEON_Z_TEST_ALWAYS (7 << 4)
2277# define RADEON_Z_TEST_MASK (7 << 4)
2278# define RADEON_STENCIL_TEST_NEVER (0 << 12)
2279# define RADEON_STENCIL_TEST_LESS (1 << 12)
2280# define RADEON_STENCIL_TEST_LEQUAL (2 << 12)
2281# define RADEON_STENCIL_TEST_EQUAL (3 << 12)
2282# define RADEON_STENCIL_TEST_GEQUAL (4 << 12)
2283# define RADEON_STENCIL_TEST_GREATER (5 << 12)
2284# define RADEON_STENCIL_TEST_NEQUAL (6 << 12)
2285# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
2286# define RADEON_STENCIL_TEST_MASK (0x7 << 12)
2287# define RADEON_STENCIL_FAIL_KEEP (0 << 16)
2288# define RADEON_STENCIL_FAIL_ZERO (1 << 16)
2289# define RADEON_STENCIL_FAIL_REPLACE (2 << 16)
2290# define RADEON_STENCIL_FAIL_INC (3 << 16)
2291# define RADEON_STENCIL_FAIL_DEC (4 << 16)
2292# define RADEON_STENCIL_FAIL_INVERT (5 << 16)
2293# define RADEON_STENCIL_FAIL_MASK (0x7 << 16)
2294# define RADEON_STENCIL_ZPASS_KEEP (0 << 20)
2295# define RADEON_STENCIL_ZPASS_ZERO (1 << 20)
2296# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
2297# define RADEON_STENCIL_ZPASS_INC (3 << 20)
2298# define RADEON_STENCIL_ZPASS_DEC (4 << 20)
2299# define RADEON_STENCIL_ZPASS_INVERT (5 << 20)
2300# define RADEON_STENCIL_ZPASS_MASK (0x7 << 20)
2301# define RADEON_STENCIL_ZFAIL_KEEP (0 << 24)
2302# define RADEON_STENCIL_ZFAIL_ZERO (1 << 24)
2303# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
2304# define RADEON_STENCIL_ZFAIL_INC (3 << 24)
2305# define RADEON_STENCIL_ZFAIL_DEC (4 << 24)
2306# define RADEON_STENCIL_ZFAIL_INVERT (5 << 24)
2307# define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24)
2308# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
2309# define RADEON_FORCE_Z_DIRTY (1 << 29)
2310# define RADEON_Z_WRITE_ENABLE (1 << 30)
2311#define RADEON_RE_LINE_PATTERN 0x1cd0
2312# define RADEON_LINE_PATTERN_MASK 0x0000ffff
2313# define RADEON_LINE_REPEAT_COUNT_SHIFT 16
2314# define RADEON_LINE_PATTERN_START_SHIFT 24
2315# define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
2316# define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28)
2317# define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29)
2318#define RADEON_RE_LINE_STATE 0x1cd4
2319# define RADEON_LINE_CURRENT_PTR_SHIFT 0
2320# define RADEON_LINE_CURRENT_COUNT_SHIFT 8
2321#define RADEON_RE_MISC 0x26c4
2322# define RADEON_STIPPLE_COORD_MASK 0x1f
2323# define RADEON_STIPPLE_X_OFFSET_SHIFT 0
2324# define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0)
2325# define RADEON_STIPPLE_Y_OFFSET_SHIFT 8
2326# define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8)
2327# define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
2328# define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16)
2329#define RADEON_RE_SOLID_COLOR 0x1c1c
2330#define RADEON_RE_TOP_LEFT 0x26c0
2331# define RADEON_RE_LEFT_SHIFT 0
2332# define RADEON_RE_TOP_SHIFT 16
2333#define RADEON_RE_WIDTH_HEIGHT 0x1c44
2334# define RADEON_RE_WIDTH_SHIFT 0
2335# define RADEON_RE_HEIGHT_SHIFT 16
2336
2337#define RADEON_SE_CNTL 0x1c4c
2338# define RADEON_FFACE_CULL_CW (0 << 0)
2339# define RADEON_FFACE_CULL_CCW (1 << 0)
2340# define RADEON_FFACE_CULL_DIR_MASK (1 << 0)
2341# define RADEON_BFACE_CULL (0 << 1)
2342# define RADEON_BFACE_SOLID (3 << 1)
2343# define RADEON_FFACE_CULL (0 << 3)
2344# define RADEON_FFACE_SOLID (3 << 3)
2345# define RADEON_FFACE_CULL_MASK (3 << 3)
2346# define RADEON_BADVTX_CULL_DISABLE (1 << 5)
2347# define RADEON_FLAT_SHADE_VTX_0 (0 << 6)
2348# define RADEON_FLAT_SHADE_VTX_1 (1 << 6)
2349# define RADEON_FLAT_SHADE_VTX_2 (2 << 6)
2350# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
2351# define RADEON_DIFFUSE_SHADE_SOLID (0 << 8)
2352# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
2353# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
2354# define RADEON_DIFFUSE_SHADE_MASK (3 << 8)
2355# define RADEON_ALPHA_SHADE_SOLID (0 << 10)
2356# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
2357# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
2358# define RADEON_ALPHA_SHADE_MASK (3 << 10)
2359# define RADEON_SPECULAR_SHADE_SOLID (0 << 12)
2360# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
2361# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
2362# define RADEON_SPECULAR_SHADE_MASK (3 << 12)
2363# define RADEON_FOG_SHADE_SOLID (0 << 14)
2364# define RADEON_FOG_SHADE_FLAT (1 << 14)
2365# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
2366# define RADEON_FOG_SHADE_MASK (3 << 14)
2367# define RADEON_ZBIAS_ENABLE_POINT (1 << 16)
2368# define RADEON_ZBIAS_ENABLE_LINE (1 << 17)
2369# define RADEON_ZBIAS_ENABLE_TRI (1 << 18)
2370# define RADEON_WIDELINE_ENABLE (1 << 20)
2371# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
2372# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
2373# define RADEON_VTX_PIX_CENTER_D3D (0 << 27)
2374# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
2375# define RADEON_ROUND_MODE_TRUNC (0 << 28)
2376# define RADEON_ROUND_MODE_ROUND (1 << 28)
2377# define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28)
2378# define RADEON_ROUND_MODE_ROUND_ODD (3 << 28)
2379# define RADEON_ROUND_PREC_16TH_PIX (0 << 30)
2380# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
2381# define RADEON_ROUND_PREC_4TH_PIX (2 << 30)
2382# define RADEON_ROUND_PREC_HALF_PIX (3 << 30)
2383#define R200_RE_CNTL 0x1c50
2384# define R200_STIPPLE_ENABLE 0x1
2385# define R200_SCISSOR_ENABLE 0x2
2386# define R200_PATTERN_ENABLE 0x4
2387# define R200_PERSPECTIVE_ENABLE 0x8
2388# define R200_POINT_SMOOTH 0x20
2389# define R200_VTX_STQ0_D3D 0x00010000
2390# define R200_VTX_STQ1_D3D 0x00040000
2391# define R200_VTX_STQ2_D3D 0x00100000
2392# define R200_VTX_STQ3_D3D 0x00400000
2393# define R200_VTX_STQ4_D3D 0x01000000
2394# define R200_VTX_STQ5_D3D 0x04000000
2395#define RADEON_SE_CNTL_STATUS 0x2140
2396# define RADEON_VC_NO_SWAP (0 << 0)
2397# define RADEON_VC_16BIT_SWAP (1 << 0)
2398# define RADEON_VC_32BIT_SWAP (2 << 0)
2399# define RADEON_VC_HALF_DWORD_SWAP (3 << 0)
2400# define RADEON_TCL_BYPASS (1 << 8)
2401#define RADEON_SE_COORD_FMT 0x1c50
2402# define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0)
2403# define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1)
2404# define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8)
2405# define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9)
2406# define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10)
2407# define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11)
2408# define RADEON_VTX_W0_NORMALIZE (1 << 12)
2409# define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16)
2410# define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
2411# define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
2412# define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
2413# define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
2414# define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26)
2415# define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26)
2416#define RADEON_SE_LINE_WIDTH 0x1db8
2417#define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c
2418# define RADEON_LIGHTING_ENABLE (1 << 0)
2419# define RADEON_LIGHT_IN_MODELSPACE (1 << 1)
2420# define RADEON_LOCAL_VIEWER (1 << 2)
2421# define RADEON_NORMALIZE_NORMALS (1 << 3)
2422# define RADEON_RESCALE_NORMALS (1 << 4)
2423# define RADEON_SPECULAR_LIGHTS (1 << 5)
2424# define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6)
2425# define RADEON_LIGHT_ALPHA (1 << 7)
2426# define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8)
2427# define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
2428# define RADEON_LM_SOURCE_STATE_PREMULT 0
2429# define RADEON_LM_SOURCE_STATE_MULT 1
2430# define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2
2431# define RADEON_LM_SOURCE_VERTEX_SPECULAR 3
2432# define RADEON_EMISSIVE_SOURCE_SHIFT 16
2433# define RADEON_AMBIENT_SOURCE_SHIFT 18
2434# define RADEON_DIFFUSE_SOURCE_SHIFT 20
2435# define RADEON_SPECULAR_SOURCE_SHIFT 22
2436#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220
2437#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224
2438#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228
2439#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c
2440#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230
2441#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234
2442#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238
2443#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c
2444#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
2445#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
2446#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218
2447#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
2448#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240
2449#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244
2450#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248
2451#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c
2452#define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c
2453# define RADEON_MODELVIEW_0_SHIFT 0
2454# define RADEON_MODELVIEW_1_SHIFT 4
2455# define RADEON_MODELVIEW_2_SHIFT 8
2456# define RADEON_MODELVIEW_3_SHIFT 12
2457# define RADEON_IT_MODELVIEW_0_SHIFT 16
2458# define RADEON_IT_MODELVIEW_1_SHIFT 20
2459# define RADEON_IT_MODELVIEW_2_SHIFT 24
2460# define RADEON_IT_MODELVIEW_3_SHIFT 28
2461#define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260
2462# define RADEON_MODELPROJECT_0_SHIFT 0
2463# define RADEON_MODELPROJECT_1_SHIFT 4
2464# define RADEON_MODELPROJECT_2_SHIFT 8
2465# define RADEON_MODELPROJECT_3_SHIFT 12
2466# define RADEON_TEXMAT_0_SHIFT 16
2467# define RADEON_TEXMAT_1_SHIFT 20
2468# define RADEON_TEXMAT_2_SHIFT 24
2469# define RADEON_TEXMAT_3_SHIFT 28
2470
2471
2472#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
2473# define RADEON_TCL_VTX_W0 (1 << 0)
2474# define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1)
2475# define RADEON_TCL_VTX_FP_ALPHA (1 << 2)
2476# define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3)
2477# define RADEON_TCL_VTX_FP_SPEC (1 << 4)
2478# define RADEON_TCL_VTX_FP_FOG (1 << 5)
2479# define RADEON_TCL_VTX_PK_SPEC (1 << 6)
2480# define RADEON_TCL_VTX_ST0 (1 << 7)
2481# define RADEON_TCL_VTX_ST1 (1 << 8)
2482# define RADEON_TCL_VTX_Q1 (1 << 9)
2483# define RADEON_TCL_VTX_ST2 (1 << 10)
2484# define RADEON_TCL_VTX_Q2 (1 << 11)
2485# define RADEON_TCL_VTX_ST3 (1 << 12)
2486# define RADEON_TCL_VTX_Q3 (1 << 13)
2487# define RADEON_TCL_VTX_Q0 (1 << 14)
2488# define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
2489# define RADEON_TCL_VTX_NORM0 (1 << 18)
2490# define RADEON_TCL_VTX_XY1 (1 << 27)
2491# define RADEON_TCL_VTX_Z1 (1 << 28)
2492# define RADEON_TCL_VTX_W1 (1 << 29)
2493# define RADEON_TCL_VTX_NORM1 (1 << 30)
2494# define RADEON_TCL_VTX_Z0 (1 << 31)
2495
2496#define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258
2497# define RADEON_TCL_COMPUTE_XYZW (1 << 0)
2498# define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1)
2499# define RADEON_TCL_COMPUTE_SPECULAR (1 << 2)
2500# define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
2501# define RADEON_TCL_FORCE_INORDER_PROC (1 << 4)
2502# define RADEON_TCL_TEX_INPUT_TEX_0 0
2503# define RADEON_TCL_TEX_INPUT_TEX_1 1
2504# define RADEON_TCL_TEX_INPUT_TEX_2 2
2505# define RADEON_TCL_TEX_INPUT_TEX_3 3
2506# define RADEON_TCL_TEX_COMPUTED_TEX_0 8
2507# define RADEON_TCL_TEX_COMPUTED_TEX_1 9
2508# define RADEON_TCL_TEX_COMPUTED_TEX_2 10
2509# define RADEON_TCL_TEX_COMPUTED_TEX_3 11
2510# define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16
2511# define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20
2512# define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24
2513# define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28
2514
2515#define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270
2516# define RADEON_LIGHT_0_ENABLE (1 << 0)
2517# define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1)
2518# define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2)
2519# define RADEON_LIGHT_0_IS_LOCAL (1 << 3)
2520# define RADEON_LIGHT_0_IS_SPOT (1 << 4)
2521# define RADEON_LIGHT_0_DUAL_CONE (1 << 5)
2522# define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6)
2523# define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7)
2524# define RADEON_LIGHT_0_SHIFT 0
2525# define RADEON_LIGHT_1_ENABLE (1 << 16)
2526# define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17)
2527# define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18)
2528# define RADEON_LIGHT_1_IS_LOCAL (1 << 19)
2529# define RADEON_LIGHT_1_IS_SPOT (1 << 20)
2530# define RADEON_LIGHT_1_DUAL_CONE (1 << 21)
2531# define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22)
2532# define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
2533# define RADEON_LIGHT_1_SHIFT 16
2534#define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274
2535# define RADEON_LIGHT_2_SHIFT 0
2536# define RADEON_LIGHT_3_SHIFT 16
2537#define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278
2538# define RADEON_LIGHT_4_SHIFT 0
2539# define RADEON_LIGHT_5_SHIFT 16
2540#define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c
2541# define RADEON_LIGHT_6_SHIFT 0
2542# define RADEON_LIGHT_7_SHIFT 16
2543
2544#define RADEON_SE_TCL_SHININESS 0x2250
2545
2546#define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268
2547# define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0)
2548# define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1)
2549# define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2)
2550# define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3)
2551# define RADEON_TEXMAT_0_ENABLE (1 << 4)
2552# define RADEON_TEXMAT_1_ENABLE (1 << 5)
2553# define RADEON_TEXMAT_2_ENABLE (1 << 6)
2554# define RADEON_TEXMAT_3_ENABLE (1 << 7)
2555# define RADEON_TEXGEN_INPUT_MASK 0xf
2556# define RADEON_TEXGEN_INPUT_TEXCOORD_0 0
2557# define RADEON_TEXGEN_INPUT_TEXCOORD_1 1
2558# define RADEON_TEXGEN_INPUT_TEXCOORD_2 2
2559# define RADEON_TEXGEN_INPUT_TEXCOORD_3 3
2560# define RADEON_TEXGEN_INPUT_OBJ 4
2561# define RADEON_TEXGEN_INPUT_EYE 5
2562# define RADEON_TEXGEN_INPUT_EYE_NORMAL 6
2563# define RADEON_TEXGEN_INPUT_EYE_REFLECT 7
2564# define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
2565# define RADEON_TEXGEN_0_INPUT_SHIFT 16
2566# define RADEON_TEXGEN_1_INPUT_SHIFT 20
2567# define RADEON_TEXGEN_2_INPUT_SHIFT 24
2568# define RADEON_TEXGEN_3_INPUT_SHIFT 28
2569
2570#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264
2571# define RADEON_UCP_IN_CLIP_SPACE (1 << 0)
2572# define RADEON_UCP_IN_MODEL_SPACE (1 << 1)
2573# define RADEON_UCP_ENABLE_0 (1 << 2)
2574# define RADEON_UCP_ENABLE_1 (1 << 3)
2575# define RADEON_UCP_ENABLE_2 (1 << 4)
2576# define RADEON_UCP_ENABLE_3 (1 << 5)
2577# define RADEON_UCP_ENABLE_4 (1 << 6)
2578# define RADEON_UCP_ENABLE_5 (1 << 7)
2579# define RADEON_TCL_FOG_MASK (3 << 8)
2580# define RADEON_TCL_FOG_DISABLE (0 << 8)
2581# define RADEON_TCL_FOG_EXP (1 << 8)
2582# define RADEON_TCL_FOG_EXP2 (2 << 8)
2583# define RADEON_TCL_FOG_LINEAR (3 << 8)
2584# define RADEON_RNG_BASED_FOG (1 << 10)
2585# define RADEON_LIGHT_TWOSIDE (1 << 11)
2586# define RADEON_BLEND_OP_COUNT_MASK (7 << 12)
2587# define RADEON_BLEND_OP_COUNT_SHIFT 12
2588# define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16)
2589# define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17)
2590# define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (1 << 18)
2591# define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
2592# define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (1 << 19)
2593# define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
2594# define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (1 << 20)
2595# define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
2596# define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (1 << 21)
2597# define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
2598# define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22)
2599# define RADEON_CULL_FRONT_IS_CW (0 << 28)
2600# define RADEON_CULL_FRONT_IS_CCW (1 << 28)
2601# define RADEON_CULL_FRONT (1 << 29)
2602# define RADEON_CULL_BACK (1 << 30)
2603# define RADEON_FORCE_W_TO_ONE (1 << 31)
2604
2605#define RADEON_SE_VPORT_XSCALE 0x1d98
2606#define RADEON_SE_VPORT_XOFFSET 0x1d9c
2607#define RADEON_SE_VPORT_YSCALE 0x1da0
2608#define RADEON_SE_VPORT_YOFFSET 0x1da4
2609#define RADEON_SE_VPORT_ZSCALE 0x1da8
2610#define RADEON_SE_VPORT_ZOFFSET 0x1dac
2611#define RADEON_SE_ZBIAS_FACTOR 0x1db0
2612#define RADEON_SE_ZBIAS_CONSTANT 0x1db4
2613
2614#define RADEON_SE_VTX_FMT 0x2080
2615# define RADEON_SE_VTX_FMT_XY 0x00000000
2616# define RADEON_SE_VTX_FMT_W0 0x00000001
2617# define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002
2618# define RADEON_SE_VTX_FMT_FPALPHA 0x00000004
2619# define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008
2620# define RADEON_SE_VTX_FMT_FPSPEC 0x00000010
2621# define RADEON_SE_VTX_FMT_FPFOG 0x00000020
2622# define RADEON_SE_VTX_FMT_PKSPEC 0x00000040
2623# define RADEON_SE_VTX_FMT_ST0 0x00000080
2624# define RADEON_SE_VTX_FMT_ST1 0x00000100
2625# define RADEON_SE_VTX_FMT_Q1 0x00000200
2626# define RADEON_SE_VTX_FMT_ST2 0x00000400
2627# define RADEON_SE_VTX_FMT_Q2 0x00000800
2628# define RADEON_SE_VTX_FMT_ST3 0x00001000
2629# define RADEON_SE_VTX_FMT_Q3 0x00002000
2630# define RADEON_SE_VTX_FMT_Q0 0x00004000
2631# define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000
2632# define RADEON_SE_VTX_FMT_N0 0x00040000
2633# define RADEON_SE_VTX_FMT_XY1 0x08000000
2634# define RADEON_SE_VTX_FMT_Z1 0x10000000
2635# define RADEON_SE_VTX_FMT_W1 0x20000000
2636# define RADEON_SE_VTX_FMT_N1 0x40000000
2637# define RADEON_SE_VTX_FMT_Z 0x80000000
2638
2639#define RADEON_SE_VF_CNTL 0x2084
2640# define RADEON_VF_PRIM_TYPE_POINT_LIST 1
2641# define RADEON_VF_PRIM_TYPE_LINE_LIST 2
2642# define RADEON_VF_PRIM_TYPE_LINE_STRIP 3
2643# define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST 4
2644# define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN 5
2645# define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP 6
2646# define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG 7
2647# define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST 8
2648# define RADEON_VF_PRIM_TYPE_POINT_LIST_3 9
2649# define RADEON_VF_PRIM_TYPE_LINE_LIST_3 10
2650# define RADEON_VF_PRIM_TYPE_SPIRIT_LIST 11
2651# define RADEON_VF_PRIM_TYPE_LINE_LOOP 12
2652# define RADEON_VF_PRIM_TYPE_QUAD_LIST 13
2653# define RADEON_VF_PRIM_TYPE_QUAD_STRIP 14
2654# define RADEON_VF_PRIM_TYPE_POLYGON 15
2655# define RADEON_VF_PRIM_WALK_STATE (0<<4)
2656# define RADEON_VF_PRIM_WALK_INDEX (1<<4)
2657# define RADEON_VF_PRIM_WALK_LIST (2<<4)
2658# define RADEON_VF_PRIM_WALK_DATA (3<<4)
2659# define RADEON_VF_COLOR_ORDER_RGBA (1<<6)
2660# define RADEON_VF_RADEON_MODE (1<<8)
2661# define RADEON_VF_TCL_OUTPUT_CTL_ENA (1<<9)
2662# define RADEON_VF_PROG_STREAM_ENA (1<<10)
2663# define RADEON_VF_INDEX_SIZE_SHIFT 11
2664# define RADEON_VF_NUM_VERTICES_SHIFT 16
2665
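/*
 * Editor's illustrative sketch, not part of the original header: the VF_CNTL
 * fields above are typically combined into a single draw-control word, e.g.
 * for a triangle-list draw of 'nverts' vertices taken from a vertex list:
 *
 *   vf_cntl = RADEON_VF_PRIM_TYPE_TRIANGLE_LIST |
 *             RADEON_VF_PRIM_WALK_LIST |
 *             (nverts << RADEON_VF_NUM_VERTICES_SHIFT);
 *
 * The appropriate walk mode depends on where the vertex data actually lives
 * (state, indices, vertex list or immediate data).
 */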
2666#define RADEON_SE_PORT_DATA0 0x2000
2667
2668#define R200_SE_VAP_CNTL 0x2080
2669# define R200_VAP_TCL_ENABLE 0x00000001
2670# define R200_VAP_SINGLE_BUF_STATE_ENABLE 0x00000010
2671# define R200_VAP_FORCE_W_TO_ONE 0x00010000
2672# define R200_VAP_D3D_TEX_DEFAULT 0x00020000
2673# define R200_VAP_VF_MAX_VTX_NUM__SHIFT 18
2674# define R200_VAP_VF_MAX_VTX_NUM (9 << 18)
2675# define R200_VAP_DX_CLIP_SPACE_DEF 0x00400000
2676#define R200_VF_MAX_VTX_INDX 0x210c
2677#define R200_VF_MIN_VTX_INDX 0x2110
2678#define R200_SE_VTE_CNTL 0x20b0
2679# define R200_VPORT_X_SCALE_ENA 0x00000001
2680# define R200_VPORT_X_OFFSET_ENA 0x00000002
2681# define R200_VPORT_Y_SCALE_ENA 0x00000004
2682# define R200_VPORT_Y_OFFSET_ENA 0x00000008
2683# define R200_VPORT_Z_SCALE_ENA 0x00000010
2684# define R200_VPORT_Z_OFFSET_ENA 0x00000020
2685# define R200_VTX_XY_FMT 0x00000100
2686# define R200_VTX_Z_FMT 0x00000200
2687# define R200_VTX_W0_FMT 0x00000400
2688# define R200_VTX_W0_NORMALIZE 0x00000800
2689# define R200_VTX_ST_DENORMALIZED 0x00001000
2690#define R200_SE_VAP_CNTL_STATUS 0x2140
2691# define R200_VC_NO_SWAP (0 << 0)
2692# define R200_VC_16BIT_SWAP (1 << 0)
2693# define R200_VC_32BIT_SWAP (2 << 0)
2694#define R200_PP_TXFILTER_0 0x2c00
2695#define R200_PP_TXFILTER_1 0x2c20
2696#define R200_PP_TXFILTER_2 0x2c40
2697#define R200_PP_TXFILTER_3 0x2c60
2698#define R200_PP_TXFILTER_4 0x2c80
2699#define R200_PP_TXFILTER_5 0x2ca0
2700# define R200_MAG_FILTER_NEAREST (0 << 0)
2701# define R200_MAG_FILTER_LINEAR (1 << 0)
2702# define R200_MAG_FILTER_MASK (1 << 0)
2703# define R200_MIN_FILTER_NEAREST (0 << 1)
2704# define R200_MIN_FILTER_LINEAR (1 << 1)
2705# define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
2706# define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
2707# define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
2708# define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
2709# define R200_MIN_FILTER_ANISO_NEAREST (8 << 1)
2710# define R200_MIN_FILTER_ANISO_LINEAR (9 << 1)
2711# define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
2712# define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
2713# define R200_MIN_FILTER_MASK (15 << 1)
2714# define R200_MAX_ANISO_1_TO_1 (0 << 5)
2715# define R200_MAX_ANISO_2_TO_1 (1 << 5)
2716# define R200_MAX_ANISO_4_TO_1 (2 << 5)
2717# define R200_MAX_ANISO_8_TO_1 (3 << 5)
2718# define R200_MAX_ANISO_16_TO_1 (4 << 5)
2719# define R200_MAX_ANISO_MASK (7 << 5)
2720# define R200_MAX_MIP_LEVEL_MASK (0x0f << 16)
2721# define R200_MAX_MIP_LEVEL_SHIFT 16
2722# define R200_YUV_TO_RGB (1 << 20)
2723# define R200_YUV_TEMPERATURE_COOL (0 << 21)
2724# define R200_YUV_TEMPERATURE_HOT (1 << 21)
2725# define R200_YUV_TEMPERATURE_MASK (1 << 21)
2726# define R200_WRAPEN_S (1 << 22)
2727# define R200_CLAMP_S_WRAP (0 << 23)
2728# define R200_CLAMP_S_MIRROR (1 << 23)
2729# define R200_CLAMP_S_CLAMP_LAST (2 << 23)
2730# define R200_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
2731# define R200_CLAMP_S_CLAMP_BORDER (4 << 23)
2732# define R200_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
2733# define R200_CLAMP_S_CLAMP_GL (6 << 23)
2734# define R200_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
2735# define R200_CLAMP_S_MASK (7 << 23)
2736# define R200_WRAPEN_T (1 << 26)
2737# define R200_CLAMP_T_WRAP (0 << 27)
2738# define R200_CLAMP_T_MIRROR (1 << 27)
2739# define R200_CLAMP_T_CLAMP_LAST (2 << 27)
2740# define R200_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
2741# define R200_CLAMP_T_CLAMP_BORDER (4 << 27)
2742# define R200_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
2743# define R200_CLAMP_T_CLAMP_GL (6 << 27)
2744# define R200_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
2745# define R200_CLAMP_T_MASK (7 << 27)
2746# define R200_KILL_LT_ZERO (1 << 30)
2747# define R200_BORDER_MODE_OGL (0 << 31)
2748# define R200_BORDER_MODE_D3D (1 << 31)
2749#define R200_PP_TXFORMAT_0 0x2c04
2750#define R200_PP_TXFORMAT_1 0x2c24
2751#define R200_PP_TXFORMAT_2 0x2c44
2752#define R200_PP_TXFORMAT_3 0x2c64
2753#define R200_PP_TXFORMAT_4 0x2c84
2754#define R200_PP_TXFORMAT_5 0x2ca4
2755# define R200_TXFORMAT_I8 (0 << 0)
2756# define R200_TXFORMAT_AI88 (1 << 0)
2757# define R200_TXFORMAT_RGB332 (2 << 0)
2758# define R200_TXFORMAT_ARGB1555 (3 << 0)
2759# define R200_TXFORMAT_RGB565 (4 << 0)
2760# define R200_TXFORMAT_ARGB4444 (5 << 0)
2761# define R200_TXFORMAT_ARGB8888 (6 << 0)
2762# define R200_TXFORMAT_RGBA8888 (7 << 0)
2763# define R200_TXFORMAT_Y8 (8 << 0)
2764# define R200_TXFORMAT_AVYU4444 (9 << 0)
2765# define R200_TXFORMAT_VYUY422 (10 << 0)
2766# define R200_TXFORMAT_YVYU422 (11 << 0)
2767# define R200_TXFORMAT_DXT1 (12 << 0)
2768# define R200_TXFORMAT_DXT23 (14 << 0)
2769# define R200_TXFORMAT_DXT45 (15 << 0)
2770# define R200_TXFORMAT_ABGR8888 (22 << 0)
2771# define R200_TXFORMAT_FORMAT_MASK (31 << 0)
2772# define R200_TXFORMAT_FORMAT_SHIFT 0
2773# define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6)
2774# define R200_TXFORMAT_NON_POWER2 (1 << 7)
2775# define R200_TXFORMAT_WIDTH_MASK (15 << 8)
2776# define R200_TXFORMAT_WIDTH_SHIFT 8
2777# define R200_TXFORMAT_HEIGHT_MASK (15 << 12)
2778# define R200_TXFORMAT_HEIGHT_SHIFT 12
2779# define R200_TXFORMAT_F5_WIDTH_MASK (15 << 16) /* cube face 5 */
2780# define R200_TXFORMAT_F5_WIDTH_SHIFT 16
2781# define R200_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
2782# define R200_TXFORMAT_F5_HEIGHT_SHIFT 20
2783# define R200_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
2784# define R200_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
2785# define R200_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
2786# define R200_TXFORMAT_ST_ROUTE_STQ3 (3 << 24)
2787# define R200_TXFORMAT_ST_ROUTE_STQ4 (4 << 24)
2788# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
2789# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
2790# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
2791# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
2792# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
2793# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
2794#define R200_PP_TXFORMAT_X_0 0x2c08
2795#define R200_PP_TXFORMAT_X_1 0x2c28
2796#define R200_PP_TXFORMAT_X_2 0x2c48
2797#define R200_PP_TXFORMAT_X_3 0x2c68
2798#define R200_PP_TXFORMAT_X_4 0x2c88
2799#define R200_PP_TXFORMAT_X_5 0x2ca8
2800
2801#define R200_PP_TXSIZE_0 0x2c0c /* NPOT only */
2802#define R200_PP_TXSIZE_1 0x2c2c /* NPOT only */
2803#define R200_PP_TXSIZE_2 0x2c4c /* NPOT only */
2804#define R200_PP_TXSIZE_3 0x2c6c /* NPOT only */
2805#define R200_PP_TXSIZE_4 0x2c8c /* NPOT only */
2806#define R200_PP_TXSIZE_5 0x2cac /* NPOT only */
2807
2808#define R200_PP_TXPITCH_0 0x2c10 /* NPOT only */
2809#define R200_PP_TXPITCH_1 0x2c30 /* NPOT only */
2810#define R200_PP_TXPITCH_2 0x2c50 /* NPOT only */
2811#define R200_PP_TXPITCH_3 0x2c70 /* NPOT only */
2812#define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */
2813#define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */
2814
2815#define R200_PP_TXOFFSET_0 0x2d00
2816# define R200_TXO_ENDIAN_NO_SWAP (0 << 0)
2817# define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0)
2818# define R200_TXO_ENDIAN_WORD_SWAP (2 << 0)
2819# define R200_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
2820# define R200_TXO_MACRO_LINEAR (0 << 2)
2821# define R200_TXO_MACRO_TILE (1 << 2)
2822# define R200_TXO_MICRO_LINEAR (0 << 3)
2823# define R200_TXO_MICRO_TILE (1 << 3)
2824# define R200_TXO_OFFSET_MASK 0xffffffe0
2825# define R200_TXO_OFFSET_SHIFT 5
2826#define R200_PP_TXOFFSET_1 0x2d18
2827#define R200_PP_TXOFFSET_2 0x2d30
2828#define R200_PP_TXOFFSET_3 0x2d48
2829#define R200_PP_TXOFFSET_4 0x2d60
2830#define R200_PP_TXOFFSET_5 0x2d78
2831
2832#define R200_PP_TFACTOR_0 0x2ee0
2833#define R200_PP_TFACTOR_1 0x2ee4
2834#define R200_PP_TFACTOR_2 0x2ee8
2835#define R200_PP_TFACTOR_3 0x2eec
2836#define R200_PP_TFACTOR_4 0x2ef0
2837#define R200_PP_TFACTOR_5 0x2ef4
2838
2839#define R200_PP_TXCBLEND_0 0x2f00
2840# define R200_TXC_ARG_A_ZERO (0)
2841# define R200_TXC_ARG_A_CURRENT_COLOR (2)
2842# define R200_TXC_ARG_A_CURRENT_ALPHA (3)
2843# define R200_TXC_ARG_A_DIFFUSE_COLOR (4)
2844# define R200_TXC_ARG_A_DIFFUSE_ALPHA (5)
2845# define R200_TXC_ARG_A_SPECULAR_COLOR (6)
2846# define R200_TXC_ARG_A_SPECULAR_ALPHA (7)
2847# define R200_TXC_ARG_A_TFACTOR_COLOR (8)
2848# define R200_TXC_ARG_A_TFACTOR_ALPHA (9)
2849# define R200_TXC_ARG_A_R0_COLOR (10)
2850# define R200_TXC_ARG_A_R0_ALPHA (11)
2851# define R200_TXC_ARG_A_R1_COLOR (12)
2852# define R200_TXC_ARG_A_R1_ALPHA (13)
2853# define R200_TXC_ARG_A_R2_COLOR (14)
2854# define R200_TXC_ARG_A_R2_ALPHA (15)
2855# define R200_TXC_ARG_A_R3_COLOR (16)
2856# define R200_TXC_ARG_A_R3_ALPHA (17)
2857# define R200_TXC_ARG_A_R4_COLOR (18)
2858# define R200_TXC_ARG_A_R4_ALPHA (19)
2859# define R200_TXC_ARG_A_R5_COLOR (20)
2860# define R200_TXC_ARG_A_R5_ALPHA (21)
2861# define R200_TXC_ARG_A_TFACTOR1_COLOR (26)
2862# define R200_TXC_ARG_A_TFACTOR1_ALPHA (27)
2863# define R200_TXC_ARG_A_MASK (31 << 0)
2864# define R200_TXC_ARG_A_SHIFT 0
2865# define R200_TXC_ARG_B_ZERO (0 << 5)
2866# define R200_TXC_ARG_B_CURRENT_COLOR (2 << 5)
2867# define R200_TXC_ARG_B_CURRENT_ALPHA (3 << 5)
2868# define R200_TXC_ARG_B_DIFFUSE_COLOR (4 << 5)
2869# define R200_TXC_ARG_B_DIFFUSE_ALPHA (5 << 5)
2870# define R200_TXC_ARG_B_SPECULAR_COLOR (6 << 5)
2871# define R200_TXC_ARG_B_SPECULAR_ALPHA (7 << 5)
2872# define R200_TXC_ARG_B_TFACTOR_COLOR (8 << 5)
2873# define R200_TXC_ARG_B_TFACTOR_ALPHA (9 << 5)
2874# define R200_TXC_ARG_B_R0_COLOR (10 << 5)
2875# define R200_TXC_ARG_B_R0_ALPHA (11 << 5)
2876# define R200_TXC_ARG_B_R1_COLOR (12 << 5)
2877# define R200_TXC_ARG_B_R1_ALPHA (13 << 5)
2878# define R200_TXC_ARG_B_R2_COLOR (14 << 5)
2879# define R200_TXC_ARG_B_R2_ALPHA (15 << 5)
2880# define R200_TXC_ARG_B_R3_COLOR (16 << 5)
2881# define R200_TXC_ARG_B_R3_ALPHA (17 << 5)
2882# define R200_TXC_ARG_B_R4_COLOR (18 << 5)
2883# define R200_TXC_ARG_B_R4_ALPHA (19 << 5)
2884# define R200_TXC_ARG_B_R5_COLOR (20 << 5)
2885# define R200_TXC_ARG_B_R5_ALPHA (21 << 5)
2886# define R200_TXC_ARG_B_TFACTOR1_COLOR (26 << 5)
2887# define R200_TXC_ARG_B_TFACTOR1_ALPHA (27 << 5)
2888# define R200_TXC_ARG_B_MASK (31 << 5)
2889# define R200_TXC_ARG_B_SHIFT 5
2890# define R200_TXC_ARG_C_ZERO (0 << 10)
2891# define R200_TXC_ARG_C_CURRENT_COLOR (2 << 10)
2892# define R200_TXC_ARG_C_CURRENT_ALPHA (3 << 10)
2893# define R200_TXC_ARG_C_DIFFUSE_COLOR (4 << 10)
2894# define R200_TXC_ARG_C_DIFFUSE_ALPHA (5 << 10)
2895# define R200_TXC_ARG_C_SPECULAR_COLOR (6 << 10)
2896# define R200_TXC_ARG_C_SPECULAR_ALPHA (7 << 10)
2897# define R200_TXC_ARG_C_TFACTOR_COLOR (8 << 10)
2898# define R200_TXC_ARG_C_TFACTOR_ALPHA (9 << 10)
2899# define R200_TXC_ARG_C_R0_COLOR (10 << 10)
2900# define R200_TXC_ARG_C_R0_ALPHA (11 << 10)
2901# define R200_TXC_ARG_C_R1_COLOR (12 << 10)
2902# define R200_TXC_ARG_C_R1_ALPHA (13 << 10)
2903# define R200_TXC_ARG_C_R2_COLOR (14 << 10)
2904# define R200_TXC_ARG_C_R2_ALPHA (15 << 10)
2905# define R200_TXC_ARG_C_R3_COLOR (16 << 10)
2906# define R200_TXC_ARG_C_R3_ALPHA (17 << 10)
2907# define R200_TXC_ARG_C_R4_COLOR (18 << 10)
2908# define R200_TXC_ARG_C_R4_ALPHA (19 << 10)
2909# define R200_TXC_ARG_C_R5_COLOR (20 << 10)
2910# define R200_TXC_ARG_C_R5_ALPHA (21 << 10)
2911# define R200_TXC_ARG_C_TFACTOR1_COLOR (26 << 10)
2912# define R200_TXC_ARG_C_TFACTOR1_ALPHA (27 << 10)
2913# define R200_TXC_ARG_C_MASK (31 << 10)
2914# define R200_TXC_ARG_C_SHIFT 10
2915# define R200_TXC_COMP_ARG_A (1 << 16)
2916# define R200_TXC_COMP_ARG_A_SHIFT (16)
2917# define R200_TXC_BIAS_ARG_A (1 << 17)
2918# define R200_TXC_SCALE_ARG_A (1 << 18)
2919# define R200_TXC_NEG_ARG_A (1 << 19)
2920# define R200_TXC_COMP_ARG_B (1 << 20)
2921# define R200_TXC_COMP_ARG_B_SHIFT (20)
2922# define R200_TXC_BIAS_ARG_B (1 << 21)
2923# define R200_TXC_SCALE_ARG_B (1 << 22)
2924# define R200_TXC_NEG_ARG_B (1 << 23)
2925# define R200_TXC_COMP_ARG_C (1 << 24)
2926# define R200_TXC_COMP_ARG_C_SHIFT (24)
2927# define R200_TXC_BIAS_ARG_C (1 << 25)
2928# define R200_TXC_SCALE_ARG_C (1 << 26)
2929# define R200_TXC_NEG_ARG_C (1 << 27)
2930# define R200_TXC_OP_MADD (0 << 28)
2931# define R200_TXC_OP_CND0 (2 << 28)
2932# define R200_TXC_OP_LERP (3 << 28)
2933# define R200_TXC_OP_DOT3 (4 << 28)
2934# define R200_TXC_OP_DOT4 (5 << 28)
2935# define R200_TXC_OP_CONDITIONAL (6 << 28)
2936# define R200_TXC_OP_DOT2_ADD (7 << 28)
2937# define R200_TXC_OP_MASK (7 << 28)
2938#define R200_PP_TXCBLEND2_0 0x2f04
2939# define R200_TXC_TFACTOR_SEL_SHIFT 0
2940# define R200_TXC_TFACTOR_SEL_MASK 0x7
2941# define R200_TXC_TFACTOR1_SEL_SHIFT 4
2942# define R200_TXC_TFACTOR1_SEL_MASK (0x7 << 4)
2943# define R200_TXC_SCALE_SHIFT 8
2944# define R200_TXC_SCALE_MASK (7 << 8)
2945# define R200_TXC_SCALE_1X (0 << 8)
2946# define R200_TXC_SCALE_2X (1 << 8)
2947# define R200_TXC_SCALE_4X (2 << 8)
2948# define R200_TXC_SCALE_8X (3 << 8)
2949# define R200_TXC_SCALE_INV2 (5 << 8)
2950# define R200_TXC_SCALE_INV4 (6 << 8)
2951# define R200_TXC_SCALE_INV8 (7 << 8)
2952# define R200_TXC_CLAMP_SHIFT 12
2953# define R200_TXC_CLAMP_MASK (3 << 12)
2954# define R200_TXC_CLAMP_WRAP (0 << 12)
2955# define R200_TXC_CLAMP_0_1 (1 << 12)
2956# define R200_TXC_CLAMP_8_8 (2 << 12)
2957# define R200_TXC_OUTPUT_REG_MASK (7 << 16)
2958# define R200_TXC_OUTPUT_REG_NONE (0 << 16)
2959# define R200_TXC_OUTPUT_REG_R0 (1 << 16)
2960# define R200_TXC_OUTPUT_REG_R1 (2 << 16)
2961# define R200_TXC_OUTPUT_REG_R2 (3 << 16)
2962# define R200_TXC_OUTPUT_REG_R3 (4 << 16)
2963# define R200_TXC_OUTPUT_REG_R4 (5 << 16)
2964# define R200_TXC_OUTPUT_REG_R5 (6 << 16)
2965# define R200_TXC_OUTPUT_MASK_MASK (7 << 20)
2966# define R200_TXC_OUTPUT_MASK_RGB (0 << 20)
2967# define R200_TXC_OUTPUT_MASK_RG (1 << 20)
2968# define R200_TXC_OUTPUT_MASK_RB (2 << 20)
2969# define R200_TXC_OUTPUT_MASK_R (3 << 20)
2970# define R200_TXC_OUTPUT_MASK_GB (4 << 20)
2971# define R200_TXC_OUTPUT_MASK_G (5 << 20)
2972# define R200_TXC_OUTPUT_MASK_B (6 << 20)
2973# define R200_TXC_OUTPUT_MASK_NONE (7 << 20)
2974# define R200_TXC_REPL_NORMAL 0
2975# define R200_TXC_REPL_RED 1
2976# define R200_TXC_REPL_GREEN 2
2977# define R200_TXC_REPL_BLUE 3
2978# define R200_TXC_REPL_ARG_A_SHIFT 26
2979# define R200_TXC_REPL_ARG_A_MASK (3 << 26)
2980# define R200_TXC_REPL_ARG_B_SHIFT 28
2981# define R200_TXC_REPL_ARG_B_MASK (3 << 28)
2982# define R200_TXC_REPL_ARG_C_SHIFT 30
2983# define R200_TXC_REPL_ARG_C_MASK (3 << 30)
2984#define R200_PP_TXABLEND_0 0x2f08
2985# define R200_TXA_ARG_A_ZERO (0)
2986# define R200_TXA_ARG_A_CURRENT_ALPHA (2) /* guess */
2987# define R200_TXA_ARG_A_CURRENT_BLUE (3) /* guess */
2988# define R200_TXA_ARG_A_DIFFUSE_ALPHA (4)
2989# define R200_TXA_ARG_A_DIFFUSE_BLUE (5)
2990# define R200_TXA_ARG_A_SPECULAR_ALPHA (6)
2991# define R200_TXA_ARG_A_SPECULAR_BLUE (7)
2992# define R200_TXA_ARG_A_TFACTOR_ALPHA (8)
2993# define R200_TXA_ARG_A_TFACTOR_BLUE (9)
2994# define R200_TXA_ARG_A_R0_ALPHA (10)
2995# define R200_TXA_ARG_A_R0_BLUE (11)
2996# define R200_TXA_ARG_A_R1_ALPHA (12)
2997# define R200_TXA_ARG_A_R1_BLUE (13)
2998# define R200_TXA_ARG_A_R2_ALPHA (14)
2999# define R200_TXA_ARG_A_R2_BLUE (15)
3000# define R200_TXA_ARG_A_R3_ALPHA (16)
3001# define R200_TXA_ARG_A_R3_BLUE (17)
3002# define R200_TXA_ARG_A_R4_ALPHA (18)
3003# define R200_TXA_ARG_A_R4_BLUE (19)
3004# define R200_TXA_ARG_A_R5_ALPHA (20)
3005# define R200_TXA_ARG_A_R5_BLUE (21)
3006# define R200_TXA_ARG_A_TFACTOR1_ALPHA (26)
3007# define R200_TXA_ARG_A_TFACTOR1_BLUE (27)
3008# define R200_TXA_ARG_A_MASK (31 << 0)
3009# define R200_TXA_ARG_A_SHIFT 0
3010# define R200_TXA_ARG_B_ZERO (0 << 5)
3011# define R200_TXA_ARG_B_CURRENT_ALPHA (2 << 5) /* guess */
3012# define R200_TXA_ARG_B_CURRENT_BLUE (3 << 5) /* guess */
3013# define R200_TXA_ARG_B_DIFFUSE_ALPHA (4 << 5)
3014# define R200_TXA_ARG_B_DIFFUSE_BLUE (5 << 5)
3015# define R200_TXA_ARG_B_SPECULAR_ALPHA (6 << 5)
3016# define R200_TXA_ARG_B_SPECULAR_BLUE (7 << 5)
3017# define R200_TXA_ARG_B_TFACTOR_ALPHA (8 << 5)
3018# define R200_TXA_ARG_B_TFACTOR_BLUE (9 << 5)
3019# define R200_TXA_ARG_B_R0_ALPHA (10 << 5)
3020# define R200_TXA_ARG_B_R0_BLUE (11 << 5)
3021# define R200_TXA_ARG_B_R1_ALPHA (12 << 5)
3022# define R200_TXA_ARG_B_R1_BLUE (13 << 5)
3023# define R200_TXA_ARG_B_R2_ALPHA (14 << 5)
3024# define R200_TXA_ARG_B_R2_BLUE (15 << 5)
3025# define R200_TXA_ARG_B_R3_ALPHA (16 << 5)
3026# define R200_TXA_ARG_B_R3_BLUE (17 << 5)
3027# define R200_TXA_ARG_B_R4_ALPHA (18 << 5)
3028# define R200_TXA_ARG_B_R4_BLUE (19 << 5)
3029# define R200_TXA_ARG_B_R5_ALPHA (20 << 5)
3030# define R200_TXA_ARG_B_R5_BLUE (21 << 5)
3031# define R200_TXA_ARG_B_TFACTOR1_ALPHA (26 << 5)
3032# define R200_TXA_ARG_B_TFACTOR1_BLUE (27 << 5)
3033# define R200_TXA_ARG_B_MASK (31 << 5)
3034# define R200_TXA_ARG_B_SHIFT 5
3035# define R200_TXA_ARG_C_ZERO (0 << 10)
3036# define R200_TXA_ARG_C_CURRENT_ALPHA (2 << 10) /* guess */
3037# define R200_TXA_ARG_C_CURRENT_BLUE (3 << 10) /* guess */
3038# define R200_TXA_ARG_C_DIFFUSE_ALPHA (4 << 10)
3039# define R200_TXA_ARG_C_DIFFUSE_BLUE (5 << 10)
3040# define R200_TXA_ARG_C_SPECULAR_ALPHA (6 << 10)
3041# define R200_TXA_ARG_C_SPECULAR_BLUE (7 << 10)
3042# define R200_TXA_ARG_C_TFACTOR_ALPHA (8 << 10)
3043# define R200_TXA_ARG_C_TFACTOR_BLUE (9 << 10)
3044# define R200_TXA_ARG_C_R0_ALPHA (10 << 10)
3045# define R200_TXA_ARG_C_R0_BLUE (11 << 10)
3046# define R200_TXA_ARG_C_R1_ALPHA (12 << 10)
3047# define R200_TXA_ARG_C_R1_BLUE (13 << 10)
3048# define R200_TXA_ARG_C_R2_ALPHA (14 << 10)
3049# define R200_TXA_ARG_C_R2_BLUE (15 << 10)
3050# define R200_TXA_ARG_C_R3_ALPHA (16 << 10)
3051# define R200_TXA_ARG_C_R3_BLUE (17 << 10)
3052# define R200_TXA_ARG_C_R4_ALPHA (18 << 10)
3053# define R200_TXA_ARG_C_R4_BLUE (19 << 10)
3054# define R200_TXA_ARG_C_R5_ALPHA (20 << 10)
3055# define R200_TXA_ARG_C_R5_BLUE (21 << 10)
3056# define R200_TXA_ARG_C_TFACTOR1_ALPHA (26 << 10)
3057# define R200_TXA_ARG_C_TFACTOR1_BLUE (27 << 10)
3058# define R200_TXA_ARG_C_MASK (31 << 10)
3059# define R200_TXA_ARG_C_SHIFT 10
3060# define R200_TXA_COMP_ARG_A (1 << 16)
3061# define R200_TXA_COMP_ARG_A_SHIFT (16)
3062# define R200_TXA_BIAS_ARG_A (1 << 17)
3063# define R200_TXA_SCALE_ARG_A (1 << 18)
3064# define R200_TXA_NEG_ARG_A (1 << 19)
3065# define R200_TXA_COMP_ARG_B (1 << 20)
3066# define R200_TXA_COMP_ARG_B_SHIFT (20)
3067# define R200_TXA_BIAS_ARG_B (1 << 21)
3068# define R200_TXA_SCALE_ARG_B (1 << 22)
3069# define R200_TXA_NEG_ARG_B (1 << 23)
3070# define R200_TXA_COMP_ARG_C (1 << 24)
3071# define R200_TXA_COMP_ARG_C_SHIFT (24)
3072# define R200_TXA_BIAS_ARG_C (1 << 25)
3073# define R200_TXA_SCALE_ARG_C (1 << 26)
3074# define R200_TXA_NEG_ARG_C (1 << 27)
3075# define R200_TXA_OP_MADD (0 << 28)
3076# define R200_TXA_OP_CND0 (2 << 28)
3077# define R200_TXA_OP_LERP (3 << 28)
3078# define R200_TXA_OP_CONDITIONAL (6 << 28)
3079# define R200_TXA_OP_MASK (7 << 28)
3080#define R200_PP_TXABLEND2_0 0x2f0c
3081# define R200_TXA_TFACTOR_SEL_SHIFT 0
3082# define R200_TXA_TFACTOR_SEL_MASK 0x7
3083# define R200_TXA_TFACTOR1_SEL_SHIFT 4
3084# define R200_TXA_TFACTOR1_SEL_MASK (0x7 << 4)
3085# define R200_TXA_SCALE_SHIFT 8
3086# define R200_TXA_SCALE_MASK (7 << 8)
3087# define R200_TXA_SCALE_1X (0 << 8)
3088# define R200_TXA_SCALE_2X (1 << 8)
3089# define R200_TXA_SCALE_4X (2 << 8)
3090# define R200_TXA_SCALE_8X (3 << 8)
3091# define R200_TXA_SCALE_INV2 (5 << 8)
3092# define R200_TXA_SCALE_INV4 (6 << 8)
3093# define R200_TXA_SCALE_INV8 (7 << 8)
3094# define R200_TXA_CLAMP_SHIFT 12
3095# define R200_TXA_CLAMP_MASK (3 << 12)
3096# define R200_TXA_CLAMP_WRAP (0 << 12)
3097# define R200_TXA_CLAMP_0_1 (1 << 12)
3098# define R200_TXA_CLAMP_8_8 (2 << 12)
3099# define R200_TXA_OUTPUT_REG_MASK (7 << 16)
3100# define R200_TXA_OUTPUT_REG_NONE (0 << 16)
3101# define R200_TXA_OUTPUT_REG_R0 (1 << 16)
3102# define R200_TXA_OUTPUT_REG_R1 (2 << 16)
3103# define R200_TXA_OUTPUT_REG_R2 (3 << 16)
3104# define R200_TXA_OUTPUT_REG_R3 (4 << 16)
3105# define R200_TXA_OUTPUT_REG_R4 (5 << 16)
3106# define R200_TXA_OUTPUT_REG_R5 (6 << 16)
3107# define R200_TXA_DOT_ALPHA (1 << 20)
3108# define R200_TXA_REPL_NORMAL 0
3109# define R200_TXA_REPL_RED 1
3110# define R200_TXA_REPL_GREEN 2
3111# define R200_TXA_REPL_ARG_A_SHIFT 26
3112# define R200_TXA_REPL_ARG_A_MASK (3 << 26)
3113# define R200_TXA_REPL_ARG_B_SHIFT 28
3114# define R200_TXA_REPL_ARG_B_MASK (3 << 28)
3115# define R200_TXA_REPL_ARG_C_SHIFT 30
3116# define R200_TXA_REPL_ARG_C_MASK (3 << 30)
3117
3118#define R200_SE_VTX_FMT_0 0x2088
3119# define R200_VTX_XY 0 /* always have xy */
3120# define R200_VTX_Z0 (1<<0)
3121# define R200_VTX_W0 (1<<1)
3122# define R200_VTX_WEIGHT_COUNT_SHIFT (2)
3123# define R200_VTX_PV_MATRIX_SEL (1<<5)
3124# define R200_VTX_N0 (1<<6)
3125# define R200_VTX_POINT_SIZE (1<<7)
3126# define R200_VTX_DISCRETE_FOG (1<<8)
3127# define R200_VTX_SHININESS_0 (1<<9)
3128# define R200_VTX_SHININESS_1 (1<<10)
3129# define R200_VTX_COLOR_NOT_PRESENT 0
3130# define R200_VTX_PK_RGBA 1
3131# define R200_VTX_FP_RGB 2
3132# define R200_VTX_FP_RGBA 3
3133# define R200_VTX_COLOR_MASK 3
3134# define R200_VTX_COLOR_0_SHIFT 11
3135# define R200_VTX_COLOR_1_SHIFT 13
3136# define R200_VTX_COLOR_2_SHIFT 15
3137# define R200_VTX_COLOR_3_SHIFT 17
3138# define R200_VTX_COLOR_4_SHIFT 19
3139# define R200_VTX_COLOR_5_SHIFT 21
3140# define R200_VTX_COLOR_6_SHIFT 23
3141# define R200_VTX_COLOR_7_SHIFT 25
3142# define R200_VTX_XY1 (1<<28)
3143# define R200_VTX_Z1 (1<<29)
3144# define R200_VTX_W1 (1<<30)
3145# define R200_VTX_N1 (1<<31)
3146#define R200_SE_VTX_FMT_1 0x208c
3147# define R200_VTX_TEX0_COMP_CNT_SHIFT 0
3148# define R200_VTX_TEX1_COMP_CNT_SHIFT 3
3149# define R200_VTX_TEX2_COMP_CNT_SHIFT 6
3150# define R200_VTX_TEX3_COMP_CNT_SHIFT 9
3151# define R200_VTX_TEX4_COMP_CNT_SHIFT 12
3152# define R200_VTX_TEX5_COMP_CNT_SHIFT 15
3153
3154#define R200_SE_TCL_OUTPUT_VTX_FMT_0 0x2090
3155#define R200_SE_TCL_OUTPUT_VTX_FMT_1 0x2094
3156#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
3157# define R200_OUTPUT_XYZW (1<<0)
3158# define R200_OUTPUT_COLOR_0 (1<<8)
3159# define R200_OUTPUT_COLOR_1 (1<<9)
3160# define R200_OUTPUT_TEX_0 (1<<16)
3161# define R200_OUTPUT_TEX_1 (1<<17)
3162# define R200_OUTPUT_TEX_2 (1<<18)
3163# define R200_OUTPUT_TEX_3 (1<<19)
3164# define R200_OUTPUT_TEX_4 (1<<20)
3165# define R200_OUTPUT_TEX_5 (1<<21)
3166# define R200_OUTPUT_TEX_MASK (0x3f<<16)
3167# define R200_OUTPUT_DISCRETE_FOG (1<<24)
3168# define R200_OUTPUT_PT_SIZE (1<<25)
3169# define R200_FORCE_INORDER_PROC (1<<31)
3170#define R200_PP_CNTL_X 0x2cc4
3171#define R200_PP_TXMULTI_CTL_0 0x2c1c
3172#define R200_SE_VTX_STATE_CNTL 0x2180
3173# define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
3174
3175 /* Registers for CP and Microcode Engine */
3176#define RADEON_CP_ME_RAM_ADDR 0x07d4
3177#define RADEON_CP_ME_RAM_RADDR 0x07d8
3178#define RADEON_CP_ME_RAM_DATAH 0x07dc
3179#define RADEON_CP_ME_RAM_DATAL 0x07e0
3180
3181#define RADEON_CP_RB_BASE 0x0700
3182#define RADEON_CP_RB_CNTL 0x0704
3183# define RADEON_RB_BUFSZ_SHIFT 0
3184# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
3185# define RADEON_RB_BLKSZ_SHIFT 8
3186# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
3187# define RADEON_MAX_FETCH_SHIFT 18
3188# define RADEON_MAX_FETCH_MASK (0x3 << 18)
3189# define RADEON_RB_NO_UPDATE (1 << 27)
3190# define RADEON_RB_RPTR_WR_ENA (1 << 31)
3191#define RADEON_CP_RB_RPTR_ADDR 0x070c
3192#define RADEON_CP_RB_RPTR 0x0710
3193#define RADEON_CP_RB_WPTR 0x0714
3194#define RADEON_CP_RB_RPTR_WR 0x071c
3195
3196#define RADEON_CP_IB_BASE 0x0738
3197#define RADEON_CP_IB_BUFSZ 0x073c
3198
3199#define RADEON_CP_CSQ_CNTL 0x0740
3200# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
3201# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
3202# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
3203# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
3204# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
3205# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
3206# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
3207
3208#define R300_CP_RESYNC_ADDR 0x778
3209#define R300_CP_RESYNC_DATA 0x77c
3210
3211#define RADEON_CP_CSQ_STAT 0x07f8
3212# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
3213# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)
3214# define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16)
3215# define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24)
3216#define RADEON_CP_CSQ2_STAT 0x07fc
3217#define RADEON_CP_CSQ_ADDR 0x07f0
3218#define RADEON_CP_CSQ_DATA 0x07f4
3219#define RADEON_CP_CSQ_APER_PRIMARY 0x1000
3220#define RADEON_CP_CSQ_APER_INDIRECT 0x1300
3221
3222#define RADEON_CP_RB_WPTR_DELAY 0x0718
3223# define RADEON_PRE_WRITE_TIMER_SHIFT 0
3224# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
3225#define RADEON_CP_CSQ_MODE 0x0744
3226# define RADEON_INDIRECT2_START_SHIFT 0
3227# define RADEON_INDIRECT2_START_MASK (0x7f << 0)
3228# define RADEON_INDIRECT1_START_SHIFT 8
3229# define RADEON_INDIRECT1_START_MASK (0x7f << 8)
3230
3231#define RADEON_AIC_CNTL 0x01d0
3232# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
3233# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
3234#define RADEON_AIC_LO_ADDR 0x01dc
3235#define RADEON_AIC_PT_BASE 0x01d8
3236#define RADEON_AIC_HI_ADDR 0x01e0
3237
3238
3239
3240 /* Constants */
3241/* #define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0 */
3242/* #define RADEON_LAST_CLEAR_REG RADEON_GUI_SCRATCH_REG2 */
3243
3244
3245
3246 /* CP packet types */
3247#define RADEON_CP_PACKET0 0x00000000
3248#define RADEON_CP_PACKET1 0x40000000
3249#define RADEON_CP_PACKET2 0x80000000
3250#define RADEON_CP_PACKET3 0xC0000000
3251# define RADEON_CP_PACKET_MASK 0xC0000000
3252# define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
3253# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
3254# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
3255# define R300_CP_PACKET0_REG_MASK 0x00001fff
3256# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
3257# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
3258
3259#define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000
3260
3261#define RADEON_CP_PACKET3_NOP 0xC0001000
3262#define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900
3263#define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00
3264#define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00
3265#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300
3266#define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400
3267#define RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600
3268#define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800
3269#define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900
3270#define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00
3271#define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00
3272#define R200_CP_PACKET3_3D_DRAW_IMMD_2 0xc0003500
3273#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00
3274#define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100
3275#define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200
3276#define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300
3277#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400
3278#define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500
3279#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800
3280#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00
3281#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00
3282#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00
3283
3284
3285#define RADEON_CP_VC_FRMT_XY 0x00000000
3286#define RADEON_CP_VC_FRMT_W0 0x00000001
3287#define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002
3288#define RADEON_CP_VC_FRMT_FPALPHA 0x00000004
3289#define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008
3290#define RADEON_CP_VC_FRMT_FPSPEC 0x00000010
3291#define RADEON_CP_VC_FRMT_FPFOG 0x00000020
3292#define RADEON_CP_VC_FRMT_PKSPEC 0x00000040
3293#define RADEON_CP_VC_FRMT_ST0 0x00000080
3294#define RADEON_CP_VC_FRMT_ST1 0x00000100
3295#define RADEON_CP_VC_FRMT_Q1 0x00000200
3296#define RADEON_CP_VC_FRMT_ST2 0x00000400
3297#define RADEON_CP_VC_FRMT_Q2 0x00000800
3298#define RADEON_CP_VC_FRMT_ST3 0x00001000
3299#define RADEON_CP_VC_FRMT_Q3 0x00002000
3300#define RADEON_CP_VC_FRMT_Q0 0x00004000
3301#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000
3302#define RADEON_CP_VC_FRMT_N0 0x00040000
3303#define RADEON_CP_VC_FRMT_XY1 0x08000000
3304#define RADEON_CP_VC_FRMT_Z1 0x10000000
3305#define RADEON_CP_VC_FRMT_W1 0x20000000
3306#define RADEON_CP_VC_FRMT_N1 0x40000000
3307#define RADEON_CP_VC_FRMT_Z 0x80000000
3308
3309#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000
3310#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001
3311#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002
3312#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003
3313#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
3314#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
3315#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
3316#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007
3317#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008
3318#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
3319#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a
3320#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010
3321#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020
3322#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030
3323#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000
3324#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040
3325#define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080
3326#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000
3327#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100
3328#define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000
3329#define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200
3330#define RADEON_CP_VC_CNTL_NUM_SHIFT 16
3331
3332#define RADEON_VS_MATRIX_0_ADDR 0
3333#define RADEON_VS_MATRIX_1_ADDR 4
3334#define RADEON_VS_MATRIX_2_ADDR 8
3335#define RADEON_VS_MATRIX_3_ADDR 12
3336#define RADEON_VS_MATRIX_4_ADDR 16
3337#define RADEON_VS_MATRIX_5_ADDR 20
3338#define RADEON_VS_MATRIX_6_ADDR 24
3339#define RADEON_VS_MATRIX_7_ADDR 28
3340#define RADEON_VS_MATRIX_8_ADDR 32
3341#define RADEON_VS_MATRIX_9_ADDR 36
3342#define RADEON_VS_MATRIX_10_ADDR 40
3343#define RADEON_VS_MATRIX_11_ADDR 44
3344#define RADEON_VS_MATRIX_12_ADDR 48
3345#define RADEON_VS_MATRIX_13_ADDR 52
3346#define RADEON_VS_MATRIX_14_ADDR 56
3347#define RADEON_VS_MATRIX_15_ADDR 60
3348#define RADEON_VS_LIGHT_AMBIENT_ADDR 64
3349#define RADEON_VS_LIGHT_DIFFUSE_ADDR 72
3350#define RADEON_VS_LIGHT_SPECULAR_ADDR 80
3351#define RADEON_VS_LIGHT_DIRPOS_ADDR 88
3352#define RADEON_VS_LIGHT_HWVSPOT_ADDR 96
3353#define RADEON_VS_LIGHT_ATTENUATION_ADDR 104
3354#define RADEON_VS_MATRIX_EYE2CLIP_ADDR 112
3355#define RADEON_VS_UCP_ADDR 116
3356#define RADEON_VS_GLOBAL_AMBIENT_ADDR 122
3357#define RADEON_VS_FOG_PARAM_ADDR 123
3358#define RADEON_VS_EYE_VECTOR_ADDR 124
3359
3360#define RADEON_SS_LIGHT_DCD_ADDR 0
3361#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8
3362#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16
3363#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24
3364#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32
3365#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48
3366#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49
3367#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50
3368#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51
3369#define RADEON_SS_SHININESS 60
3370
3371#define RADEON_TV_MASTER_CNTL 0x0800
3372# define RADEON_TV_ASYNC_RST (1 << 0)
3373# define RADEON_CRT_ASYNC_RST (1 << 1)
3374# define RADEON_RESTART_PHASE_FIX (1 << 3)
3375# define RADEON_TV_FIFO_ASYNC_RST (1 << 4)
3376# define RADEON_VIN_ASYNC_RST (1 << 5)
3377# define RADEON_AUD_ASYNC_RST (1 << 6)
3378# define RADEON_DVS_ASYNC_RST (1 << 7)
3379# define RADEON_CRT_FIFO_CE_EN (1 << 9)
3380# define RADEON_TV_FIFO_CE_EN (1 << 10)
3381# define RADEON_RE_SYNC_NOW_SEL_MASK (3 << 14)
3382# define RADEON_TVCLK_ALWAYS_ONb (1 << 30)
3383# define RADEON_TV_ON (1 << 31)
3384#define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888
3385# define RADEON_Y_RED_EN (1 << 0)
3386# define RADEON_C_GRN_EN (1 << 1)
3387# define RADEON_CMP_BLU_EN (1 << 2)
3388# define RADEON_DAC_DITHER_EN (1 << 3)
3389# define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4)
3390# define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8)
3391# define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12)
3392# define RADEON_TV_FORCE_DAC_DATA_SHIFT 16
3393#define RADEON_TV_RGB_CNTL 0x0804
3394# define RADEON_SWITCH_TO_BLUE (1 << 4)
3395# define RADEON_RGB_DITHER_EN (1 << 5)
3396# define RADEON_RGB_SRC_SEL_MASK (3 << 8)
3397# define RADEON_RGB_SRC_SEL_CRTC1 (0 << 8)
3398# define RADEON_RGB_SRC_SEL_RMX (1 << 8)
3399# define RADEON_RGB_SRC_SEL_CRTC2 (2 << 8)
3400# define RADEON_RGB_CONVERT_BY_PASS (1 << 10)
3401# define RADEON_UVRAM_READ_MARGIN_SHIFT 16
3402# define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20
3403# define RADEON_TVOUT_SCALE_EN (1 << 26)
3404#define RADEON_TV_SYNC_CNTL 0x0808
3405# define RADEON_SYNC_OE (1 << 0)
3406# define RADEON_SYNC_OUT (1 << 1)
3407# define RADEON_SYNC_IN (1 << 2)
3408# define RADEON_SYNC_PUB (1 << 3)
3409# define RADEON_SYNC_PD (1 << 4)
3410# define RADEON_TV_SYNC_IO_DRIVE (1 << 5)
3411#define RADEON_TV_HTOTAL 0x080c
3412#define RADEON_TV_HDISP 0x0810
3413#define RADEON_TV_HSTART 0x0818
3414#define RADEON_TV_HCOUNT 0x081C
3415#define RADEON_TV_VTOTAL 0x0820
3416#define RADEON_TV_VDISP 0x0824
3417#define RADEON_TV_VCOUNT 0x0828
3418#define RADEON_TV_FTOTAL 0x082c
3419#define RADEON_TV_FCOUNT 0x0830
3420#define RADEON_TV_FRESTART 0x0834
3421#define RADEON_TV_HRESTART 0x0838
3422#define RADEON_TV_VRESTART 0x083c
3423#define RADEON_TV_HOST_READ_DATA 0x0840
3424#define RADEON_TV_HOST_WRITE_DATA 0x0844
3425#define RADEON_TV_HOST_RD_WT_CNTL 0x0848
3426# define RADEON_HOST_FIFO_RD (1 << 12)
3427# define RADEON_HOST_FIFO_RD_ACK (1 << 13)
3428# define RADEON_HOST_FIFO_WT (1 << 14)
3429# define RADEON_HOST_FIFO_WT_ACK (1 << 15)
3430#define RADEON_TV_VSCALER_CNTL1 0x084c
3431# define RADEON_UV_INC_MASK 0xffff
3432# define RADEON_UV_INC_SHIFT 0
3433# define RADEON_Y_W_EN (1 << 24)
3434# define RADEON_RESTART_FIELD (1 << 29) /* restart on field 0 */
3435# define RADEON_Y_DEL_W_SIG_SHIFT 26
3436#define RADEON_TV_TIMING_CNTL 0x0850
3437# define RADEON_H_INC_MASK 0xfff
3438# define RADEON_H_INC_SHIFT 0
3439# define RADEON_REQ_Y_FIRST (1 << 19)
3440# define RADEON_FORCE_BURST_ALWAYS (1 << 21)
3441# define RADEON_UV_POST_SCALE_BYPASS (1 << 23)
3442# define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
3443#define RADEON_TV_VSCALER_CNTL2 0x0854
3444# define RADEON_DITHER_MODE (1 << 0)
3445# define RADEON_Y_OUTPUT_DITHER_EN (1 << 1)
3446# define RADEON_UV_OUTPUT_DITHER_EN (1 << 2)
3447# define RADEON_UV_TO_BUF_DITHER_EN (1 << 3)
3448#define RADEON_TV_Y_FALL_CNTL 0x0858
3449# define RADEON_Y_FALL_PING_PONG (1 << 16)
3450# define RADEON_Y_COEF_EN (1 << 17)
3451#define RADEON_TV_Y_RISE_CNTL 0x085c
3452# define RADEON_Y_RISE_PING_PONG (1 << 16)
3453#define RADEON_TV_Y_SAW_TOOTH_CNTL 0x0860
3454#define RADEON_TV_UPSAMP_AND_GAIN_CNTL 0x0864
3455# define RADEON_YUPSAMP_EN (1 << 0)
3456# define RADEON_UVUPSAMP_EN (1 << 2)
3457#define RADEON_TV_GAIN_LIMIT_SETTINGS 0x0868
3458# define RADEON_Y_GAIN_LIMIT_SHIFT 0
3459# define RADEON_UV_GAIN_LIMIT_SHIFT 16
3460#define RADEON_TV_LINEAR_GAIN_SETTINGS 0x086c
3461# define RADEON_Y_GAIN_SHIFT 0
3462# define RADEON_UV_GAIN_SHIFT 16
3463#define RADEON_TV_MODULATOR_CNTL1 0x0870
3464# define RADEON_YFLT_EN (1 << 2)
3465# define RADEON_UVFLT_EN (1 << 3)
3466# define RADEON_ALT_PHASE_EN (1 << 6)
3467# define RADEON_SYNC_TIP_LEVEL (1 << 7)
3468# define RADEON_BLANK_LEVEL_SHIFT 8
3469# define RADEON_SET_UP_LEVEL_SHIFT 16
3470# define RADEON_SLEW_RATE_LIMIT (1 << 23)
3471# define RADEON_CY_FILT_BLEND_SHIFT 28
3472#define RADEON_TV_MODULATOR_CNTL2 0x0874
3473# define RADEON_TV_U_BURST_LEVEL_MASK 0x1ff
3474# define RADEON_TV_V_BURST_LEVEL_MASK 0x1ff
3475# define RADEON_TV_V_BURST_LEVEL_SHIFT 16
3476#define RADEON_TV_CRC_CNTL 0x0890
3477#define RADEON_TV_UV_ADR 0x08ac
3478# define RADEON_MAX_UV_ADR_MASK 0x000000ff
3479# define RADEON_MAX_UV_ADR_SHIFT 0
3480# define RADEON_TABLE1_BOT_ADR_MASK 0x0000ff00
3481# define RADEON_TABLE1_BOT_ADR_SHIFT 8
3482# define RADEON_TABLE3_TOP_ADR_MASK 0x00ff0000
3483# define RADEON_TABLE3_TOP_ADR_SHIFT 16
3484# define RADEON_HCODE_TABLE_SEL_MASK 0x06000000
3485# define RADEON_HCODE_TABLE_SEL_SHIFT 25
3486# define RADEON_VCODE_TABLE_SEL_MASK 0x18000000
3487# define RADEON_VCODE_TABLE_SEL_SHIFT 27
3488# define RADEON_TV_MAX_FIFO_ADDR 0x1a7
3489# define RADEON_TV_MAX_FIFO_ADDR_INTERNAL 0x1ff
3490#define RADEON_TV_PLL_FINE_CNTL 0x0020 /* PLL */
3491#define RADEON_TV_PLL_CNTL 0x0021 /* PLL */
3492# define RADEON_TV_M0LO_MASK 0xff
3493# define RADEON_TV_M0HI_MASK 0x7
3494# define RADEON_TV_M0HI_SHIFT 18
3495# define RADEON_TV_N0LO_MASK 0x1ff
3496# define RADEON_TV_N0LO_SHIFT 8
3497# define RADEON_TV_N0HI_MASK 0x3
3498# define RADEON_TV_N0HI_SHIFT 21
3499# define RADEON_TV_P_MASK 0xf
3500# define RADEON_TV_P_SHIFT 24
3501# define RADEON_TV_SLIP_EN (1 << 23)
3502# define RADEON_TV_DTO_EN (1 << 28)
3503#define RADEON_TV_PLL_CNTL1 0x0022 /* PLL */
3504# define RADEON_TVPLL_RESET (1 << 1)
3505# define RADEON_TVPLL_SLEEP (1 << 3)
3506# define RADEON_TVPLL_REFCLK_SEL (1 << 4)
3507# define RADEON_TVPCP_SHIFT 8
3508# define RADEON_TVPCP_MASK (7 << 8)
3509# define RADEON_TVPVG_SHIFT 11
3510# define RADEON_TVPVG_MASK (7 << 11)
3511# define RADEON_TVPDC_SHIFT 14
3512# define RADEON_TVPDC_MASK (3 << 14)
3513# define RADEON_TVPLL_TEST_DIS (1 << 31)
3514# define RADEON_TVCLK_SRC_SEL_TVPLL (1 << 30)
3515
3516#define RS400_DISP2_REQ_CNTL1 0xe30
3517# define RS400_DISP2_START_REQ_LEVEL_SHIFT 0
3518# define RS400_DISP2_START_REQ_LEVEL_MASK 0x3ff
3519# define RS400_DISP2_STOP_REQ_LEVEL_SHIFT 12
3520# define RS400_DISP2_STOP_REQ_LEVEL_MASK 0x3ff
3521# define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT 22
3522# define RS400_DISP2_ALLOW_FID_LEVEL_MASK 0x3ff
3523#define RS400_DISP2_REQ_CNTL2 0xe34
3524# define RS400_DISP2_CRITICAL_POINT_START_SHIFT 12
3525# define RS400_DISP2_CRITICAL_POINT_START_MASK 0x3ff
3526# define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT 22
3527# define RS400_DISP2_CRITICAL_POINT_STOP_MASK 0x3ff
3528#define RS400_DMIF_MEM_CNTL1 0xe38
3529# define RS400_DISP2_START_ADR_SHIFT 0
3530# define RS400_DISP2_START_ADR_MASK 0x3ff
3531# define RS400_DISP1_CRITICAL_POINT_START_SHIFT 12
3532# define RS400_DISP1_CRITICAL_POINT_START_MASK 0x3ff
3533# define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT 22
3534# define RS400_DISP1_CRITICAL_POINT_STOP_MASK 0x3ff
3535#define RS400_DISP1_REQ_CNTL1 0xe3c
3536# define RS400_DISP1_START_REQ_LEVEL_SHIFT 0
3537# define RS400_DISP1_START_REQ_LEVEL_MASK 0x3ff
3538# define RS400_DISP1_STOP_REQ_LEVEL_SHIFT 12
3539# define RS400_DISP1_STOP_REQ_LEVEL_MASK 0x3ff
3540# define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT 22
3541# define RS400_DISP1_ALLOW_FID_LEVEL_MASK 0x3ff
3542
3543#define RADEON_PCIE_INDEX 0x0030
3544#define RADEON_PCIE_DATA 0x0034
3545#define RADEON_PCIE_TX_GART_CNTL 0x10
3546# define RADEON_PCIE_TX_GART_EN (1 << 0)
3547# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
3548# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
3549# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
3550# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
3551# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
3552# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
3553# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
3554#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
3555#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
3556#define RADEON_PCIE_TX_GART_BASE 0x13
3557#define RADEON_PCIE_TX_GART_START_LO 0x14
3558#define RADEON_PCIE_TX_GART_START_HI 0x15
3559#define RADEON_PCIE_TX_GART_END_LO 0x16
3560#define RADEON_PCIE_TX_GART_END_HI 0x17
3561#define RADEON_PCIE_TX_GART_ERROR 0x18
3562
3563#define RADEON_SCRATCH_REG0 0x15e0
3564#define RADEON_SCRATCH_REG1 0x15e4
3565#define RADEON_SCRATCH_REG2 0x15e8
3566#define RADEON_SCRATCH_REG3 0x15ec
3567#define RADEON_SCRATCH_REG4 0x15f0
3568#define RADEON_SCRATCH_REG5 0x15f4
3569
3570#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 000000000000..a853261d1881
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,485 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "radeon_drm.h"
31#include "radeon_reg.h"
32#include "radeon.h"
33#include "atom.h"
34
35int radeon_debugfs_ib_init(struct radeon_device *rdev);
36
37/*
38 * IB.
39 */
40int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
41{
42 struct radeon_fence *fence;
43 struct radeon_ib *nib;
44 unsigned long i;
45 int r = 0;
46
47 *ib = NULL;
48 r = radeon_fence_create(rdev, &fence);
49 if (r) {
50 DRM_ERROR("failed to create fence for new IB\n");
51 return r;
52 }
53 mutex_lock(&rdev->ib_pool.mutex);
54 i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
55 if (i < RADEON_IB_POOL_SIZE) {
56 set_bit(i, rdev->ib_pool.alloc_bm);
57 rdev->ib_pool.ibs[i].length_dw = 0;
58 *ib = &rdev->ib_pool.ibs[i];
59 goto out;
60 }
61 if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
62 /* nothing we can do here */
63 DRM_ERROR("all IBs allocated, none scheduled.\n");
64 r = -EINVAL;
65 goto out;
66 }
67 /* get the first ib on the scheduled list */
68 nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
69 struct radeon_ib, list);
70 if (nib->fence == NULL) {
71 /* nothing we can do here */
72 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
73 r = -EINVAL;
74 goto out;
75 }
76 r = radeon_fence_wait(nib->fence, false);
77 if (r) {
78 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
79 (unsigned long)nib->gpu_addr, nib->length_dw);
80 DRM_ERROR("radeon: GPU lockup detected, failed to get an IB\n");
81 goto out;
82 }
83 radeon_fence_unref(&nib->fence);
84 nib->length_dw = 0;
85 list_del(&nib->list);
86 INIT_LIST_HEAD(&nib->list);
87 *ib = nib;
88out:
89 mutex_unlock(&rdev->ib_pool.mutex);
90 if (r) {
91 radeon_fence_unref(&fence);
92 } else {
93 (*ib)->fence = fence;
94 }
95 return r;
96}
97
98void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
99{
100 struct radeon_ib *tmp = *ib;
101
102 *ib = NULL;
103 if (tmp == NULL) {
104 return;
105 }
106 mutex_lock(&rdev->ib_pool.mutex);
107 if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
108 /* IB is scheduled & not signaled, don't do anything */
109 mutex_unlock(&rdev->ib_pool.mutex);
110 return;
111 }
112 list_del(&tmp->list);
113 INIT_LIST_HEAD(&tmp->list);
114 if (tmp->fence) {
115 radeon_fence_unref(&tmp->fence);
116 }
117 tmp->length_dw = 0;
118 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
119 mutex_unlock(&rdev->ib_pool.mutex);
120}
121
122static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
123{
124 while ((ib->length_dw & rdev->cp.align_mask)) {
125 ib->ptr[ib->length_dw++] = PACKET2(0);
126 }
127}
128
129static void radeon_ib_cpu_flush(struct radeon_device *rdev,
130 struct radeon_ib *ib)
131{
132 unsigned long tmp;
133 unsigned i;
134
135 /* Force a CPU cache flush; ugly but seems reliable */
136 for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
137 tmp = readl(&ib->ptr[i]);
138 }
139}
140
141int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
142{
143 int r = 0;
144
145 mutex_lock(&rdev->ib_pool.mutex);
146 radeon_ib_align(rdev, ib);
147 radeon_ib_cpu_flush(rdev, ib);
148 if (!ib->length_dw || !rdev->cp.ready) {
149 /* TODO: nothing in the IB or CP not ready; report this properly. */
150 mutex_unlock(&rdev->ib_pool.mutex);
151 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
152 return -EINVAL;
153 }
154 /* 64 dwords should be enough for the fence too */
155 r = radeon_ring_lock(rdev, 64);
156 if (r) {
157 DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
158 mutex_unlock(&rdev->ib_pool.mutex);
159 return r;
160 }
161 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
162 radeon_ring_write(rdev, ib->gpu_addr);
163 radeon_ring_write(rdev, ib->length_dw);
164 radeon_fence_emit(rdev, ib->fence);
165 radeon_ring_unlock_commit(rdev);
166 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
167 mutex_unlock(&rdev->ib_pool.mutex);
168 return 0;
169}
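/*
 * Rough usage sketch (radeon_ib_test() below is the in-tree example):
 * grab an IB with radeon_ib_get(), fill ib->ptr[] with packets while
 * advancing ib->length_dw, hand it to the CP with radeon_ib_schedule(),
 * and release it with radeon_ib_free() once its fence has signaled.
 */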
170
171int radeon_ib_pool_init(struct radeon_device *rdev)
172{
173 void *ptr;
174 uint64_t gpu_addr;
175 int i;
176 int r = 0;
177
178 /* Allocate 1M object buffer */
179 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
180 r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
181 true, RADEON_GEM_DOMAIN_GTT,
182 false, &rdev->ib_pool.robj);
183 if (r) {
184 DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
185 return r;
186 }
187 r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
188 if (r) {
189 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
190 return r;
191 }
192 r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
193 if (r) {
194 DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
195 return r;
196 }
197 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
198 unsigned offset;
199
200 offset = i * 64 * 1024;
201 rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
202 rdev->ib_pool.ibs[i].ptr = ptr + offset;
203 rdev->ib_pool.ibs[i].idx = i;
204 rdev->ib_pool.ibs[i].length_dw = 0;
205 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
206 }
207 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
208 rdev->ib_pool.ready = true;
209 DRM_INFO("radeon: ib pool ready.\n");
210 if (radeon_debugfs_ib_init(rdev)) {
211 DRM_ERROR("Failed to register debugfs file for IB!\n");
212 }
213 return r;
214}
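/*
 * Pool layout sketch: a single GTT object of RADEON_IB_POOL_SIZE slots of
 * 64 KiB each (the "1M" comment above suggests 16 slots; the constant itself
 * lives in radeon.h), with IB i starting at byte offset i * 64 * 1024.
 */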
215
216void radeon_ib_pool_fini(struct radeon_device *rdev)
217{
218 if (!rdev->ib_pool.ready) {
219 return;
220 }
221 mutex_lock(&rdev->ib_pool.mutex);
222 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
223 if (rdev->ib_pool.robj) {
224 radeon_object_kunmap(rdev->ib_pool.robj);
225 radeon_object_unref(&rdev->ib_pool.robj);
226 rdev->ib_pool.robj = NULL;
227 }
228 mutex_unlock(&rdev->ib_pool.mutex);
229}
230
231int radeon_ib_test(struct radeon_device *rdev)
232{
233 struct radeon_ib *ib;
234 uint32_t scratch;
235 uint32_t tmp = 0;
236 unsigned i;
237 int r;
238
239 r = radeon_scratch_get(rdev, &scratch);
240 if (r) {
241 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
242 return r;
243 }
244 WREG32(scratch, 0xCAFEDEAD);
245 r = radeon_ib_get(rdev, &ib);
246 if (r) {
247 return r;
248 }
249 ib->ptr[0] = PACKET0(scratch, 0);
250 ib->ptr[1] = 0xDEADBEEF;
251 ib->ptr[2] = PACKET2(0);
252 ib->ptr[3] = PACKET2(0);
253 ib->ptr[4] = PACKET2(0);
254 ib->ptr[5] = PACKET2(0);
255 ib->ptr[6] = PACKET2(0);
256 ib->ptr[7] = PACKET2(0);
257 ib->length_dw = 8;
258 r = radeon_ib_schedule(rdev, ib);
259 if (r) {
260 radeon_scratch_free(rdev, scratch);
261 radeon_ib_free(rdev, &ib);
262 return r;
263 }
264 r = radeon_fence_wait(ib->fence, false);
265 if (r) {
266 return r;
267 }
268 for (i = 0; i < rdev->usec_timeout; i++) {
269 tmp = RREG32(scratch);
270 if (tmp == 0xDEADBEEF) {
271 break;
272 }
273 DRM_UDELAY(1);
274 }
275 if (i < rdev->usec_timeout) {
276 DRM_INFO("ib test succeeded in %u usecs\n", i);
277 } else {
278 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
279 scratch, tmp);
280 r = -EINVAL;
281 }
282 radeon_scratch_free(rdev, scratch);
283 radeon_ib_free(rdev, &ib);
284 return r;
285}
286
287
288/*
289 * Ring.
290 */
291void radeon_ring_free_size(struct radeon_device *rdev)
292{
293 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
294 /* This works because ring_size is a power of 2 */
295 rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
296 rdev->cp.ring_free_dw -= rdev->cp.wptr;
297 rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
298 if (!rdev->cp.ring_free_dw) {
299 rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
300 }
301}
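/*
 * Worked example, assuming a 64 KiB ring (16384 dwords, ptr_mask 0x3fff):
 * rptr = 100, wptr = 16380 gives (100 + 16384 - 16380) & 0x3fff = 104 free
 * dwords; rptr == wptr would give 0, which the fixup above treats as a
 * completely empty ring.
 */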
302
303int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
304{
305 int r;
306
307 /* Align requested size with padding so unlock_commit can
308 * pad safely */
309 ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
310 mutex_lock(&rdev->cp.mutex);
311 while (ndw > (rdev->cp.ring_free_dw - 1)) {
312 radeon_ring_free_size(rdev);
313 if (ndw < rdev->cp.ring_free_dw) {
314 break;
315 }
316 r = radeon_fence_wait_next(rdev);
317 if (r) {
318 mutex_unlock(&rdev->cp.mutex);
319 return r;
320 }
321 }
322 rdev->cp.count_dw = ndw;
323 rdev->cp.wptr_old = rdev->cp.wptr;
324 return 0;
325}
326
327void radeon_ring_unlock_commit(struct radeon_device *rdev)
328{
329 unsigned count_dw_pad;
330 unsigned i;
331
332 /* We pad to match fetch size */
333 count_dw_pad = (rdev->cp.align_mask + 1) -
334 (rdev->cp.wptr & rdev->cp.align_mask);
335 for (i = 0; i < count_dw_pad; i++) {
336 radeon_ring_write(rdev, PACKET2(0));
337 }
338 DRM_MEMORYBARRIER();
339 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
340 (void)RREG32(RADEON_CP_RB_WPTR);
341 mutex_unlock(&rdev->cp.mutex);
342}
343
344void radeon_ring_unlock_undo(struct radeon_device *rdev)
345{
346 rdev->cp.wptr = rdev->cp.wptr_old;
347 mutex_unlock(&rdev->cp.mutex);
348}
349
350int radeon_ring_test(struct radeon_device *rdev)
351{
352 uint32_t scratch;
353 uint32_t tmp = 0;
354 unsigned i;
355 int r;
356
357 r = radeon_scratch_get(rdev, &scratch);
358 if (r) {
359 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
360 return r;
361 }
362 WREG32(scratch, 0xCAFEDEAD);
363 r = radeon_ring_lock(rdev, 2);
364 if (r) {
365 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
366 radeon_scratch_free(rdev, scratch);
367 return r;
368 }
369 radeon_ring_write(rdev, PACKET0(scratch, 0));
370 radeon_ring_write(rdev, 0xDEADBEEF);
371 radeon_ring_unlock_commit(rdev);
372 for (i = 0; i < rdev->usec_timeout; i++) {
373 tmp = RREG32(scratch);
374 if (tmp == 0xDEADBEEF) {
375 break;
376 }
377 DRM_UDELAY(1);
378 }
379 if (i < rdev->usec_timeout) {
380 DRM_INFO("ring test succeeded in %d usecs\n", i);
381 } else {
382 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
383 scratch, tmp);
384 r = -EINVAL;
385 }
386 radeon_scratch_free(rdev, scratch);
387 return r;
388}
389
390int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
391{
392 int r;
393
394 rdev->cp.ring_size = ring_size;
395 /* Allocate ring buffer */
396 if (rdev->cp.ring_obj == NULL) {
397 r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
398 true,
399 RADEON_GEM_DOMAIN_GTT,
400 false,
401 &rdev->cp.ring_obj);
402 if (r) {
403 DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
404 mutex_unlock(&rdev->cp.mutex);
405 return r;
406 }
407 r = radeon_object_pin(rdev->cp.ring_obj,
408 RADEON_GEM_DOMAIN_GTT,
409 &rdev->cp.gpu_addr);
410 if (r) {
411 DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
412 mutex_unlock(&rdev->cp.mutex);
413 return r;
414 }
415 r = radeon_object_kmap(rdev->cp.ring_obj,
416 (void **)&rdev->cp.ring);
417 if (r) {
418 DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
419 mutex_unlock(&rdev->cp.mutex);
420 return r;
421 }
422 }
423 rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
424 rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
425 return 0;
426}
427
428void radeon_ring_fini(struct radeon_device *rdev)
429{
430 mutex_lock(&rdev->cp.mutex);
431 if (rdev->cp.ring_obj) {
432 radeon_object_kunmap(rdev->cp.ring_obj);
433 radeon_object_unpin(rdev->cp.ring_obj);
434 radeon_object_unref(&rdev->cp.ring_obj);
435 rdev->cp.ring = NULL;
436 rdev->cp.ring_obj = NULL;
437 }
438 mutex_unlock(&rdev->cp.mutex);
439}
440
441
442/*
443 * Debugfs info
444 */
445#if defined(CONFIG_DEBUG_FS)
446static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
447{
448 struct drm_info_node *node = (struct drm_info_node *) m->private;
449 struct radeon_ib *ib = node->info_ent->data;
450 unsigned i;
451
452 if (ib == NULL) {
453 return 0;
454 }
455 seq_printf(m, "IB %04lu\n", ib->idx);
456 seq_printf(m, "IB fence %p\n", ib->fence);
457 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
458 for (i = 0; i < ib->length_dw; i++) {
459 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
460 }
461 return 0;
462}
463
464static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
465static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
466#endif
467
468int radeon_debugfs_ib_init(struct radeon_device *rdev)
469{
470#if defined(CONFIG_DEBUG_FS)
471 unsigned i;
472
473 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
474 sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
475 radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
476 radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
477 radeon_debugfs_ib_list[i].driver_features = 0;
478 radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
479 }
480 return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
481 RADEON_IB_POOL_SIZE);
482#else
483 return 0;
484#endif
485}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644
index 000000000000..4c087c1510d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -0,0 +1,653 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32#include <ttm/ttm_bo_api.h>
33#include <ttm/ttm_bo_driver.h>
34#include <ttm/ttm_placement.h>
35#include <ttm/ttm_module.h>
36#include <drm/drmP.h>
37#include <drm/radeon_drm.h>
38#include "radeon_reg.h"
39#include "radeon.h"
40
41#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
42
43static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
44{
45 struct radeon_mman *mman;
46 struct radeon_device *rdev;
47
48 mman = container_of(bdev, struct radeon_mman, bdev);
49 rdev = container_of(mman, struct radeon_device, mman);
50 return rdev;
51}
52
53
54/*
55 * Global memory.
56 */
57static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
58{
59 return ttm_mem_global_init(ref->object);
60}
61
62static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
63{
64 ttm_mem_global_release(ref->object);
65}
66
67static int radeon_ttm_global_init(struct radeon_device *rdev)
68{
69 struct ttm_global_reference *global_ref;
70 int r;
71
72 rdev->mman.mem_global_referenced = false;
73 global_ref = &rdev->mman.mem_global_ref;
74 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
75 global_ref->size = sizeof(struct ttm_mem_global);
76 global_ref->init = &radeon_ttm_mem_global_init;
77 global_ref->release = &radeon_ttm_mem_global_release;
78 r = ttm_global_item_ref(global_ref);
79 if (r != 0) {
80 DRM_ERROR("Failed referencing a global TTM memory object.\n");
81 return r;
82 }
83 rdev->mman.mem_global_referenced = true;
84 return 0;
85}
86
87static void radeon_ttm_global_fini(struct radeon_device *rdev)
88{
89 if (rdev->mman.mem_global_referenced) {
90 ttm_global_item_unref(&rdev->mman.mem_global_ref);
91 rdev->mman.mem_global_referenced = false;
92 }
93}
94
95struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
96
97static struct ttm_backend*
98radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
99{
100 struct radeon_device *rdev;
101
102 rdev = radeon_get_rdev(bdev);
103#if __OS_HAS_AGP
104 if (rdev->flags & RADEON_IS_AGP) {
105 return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
106 } else
107#endif
108 {
109 return radeon_ttm_backend_create(rdev);
110 }
111}
112
113static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
114{
115 return 0;
116}
117
118static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
119 struct ttm_mem_type_manager *man)
120{
121 struct radeon_device *rdev;
122
123 rdev = radeon_get_rdev(bdev);
124
125 switch (type) {
126 case TTM_PL_SYSTEM:
127 /* System memory */
128 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
129 man->available_caching = TTM_PL_MASK_CACHING;
130 man->default_caching = TTM_PL_FLAG_CACHED;
131 break;
132 case TTM_PL_TT:
133 man->gpu_offset = 0;
134 man->available_caching = TTM_PL_MASK_CACHING;
135 man->default_caching = TTM_PL_FLAG_CACHED;
136#if __OS_HAS_AGP
137 if (rdev->flags & RADEON_IS_AGP) {
138 if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
139 DRM_ERROR("AGP is not enabled for memory type %u\n",
140 (unsigned)type);
141 return -EINVAL;
142 }
143 man->io_offset = rdev->mc.agp_base;
144 man->io_size = rdev->mc.gtt_size;
145 man->io_addr = NULL;
146 man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
147 TTM_MEMTYPE_FLAG_MAPPABLE;
148 man->available_caching = TTM_PL_FLAG_UNCACHED |
149 TTM_PL_FLAG_WC;
150 man->default_caching = TTM_PL_FLAG_WC;
151 } else
152#endif
153 {
154 man->io_offset = 0;
155 man->io_size = 0;
156 man->io_addr = NULL;
157 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
158 TTM_MEMTYPE_FLAG_CMA;
159 }
160 break;
161 case TTM_PL_VRAM:
162 /* "On-card" video ram */
163 man->gpu_offset = 0;
164 man->flags = TTM_MEMTYPE_FLAG_FIXED |
165 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
166 TTM_MEMTYPE_FLAG_MAPPABLE;
167 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
168 man->default_caching = TTM_PL_FLAG_WC;
169 man->io_addr = NULL;
170 man->io_offset = rdev->mc.aper_base;
171 man->io_size = rdev->mc.aper_size;
172 break;
173 default:
174 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
175 return -EINVAL;
176 }
177 return 0;
178}
179
180static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
181{
182 uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
183
184 switch (bo->mem.mem_type) {
185 default:
186 return (cur_placement & ~TTM_PL_MASK_CACHING) |
187 TTM_PL_FLAG_SYSTEM |
188 TTM_PL_FLAG_CACHED;
189 }
190}
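/* Eviction sketch: whatever the buffer currently is, it is pushed out to
 * cacheable system memory; the memory-type and caching bits are replaced
 * with SYSTEM | CACHED while any remaining placement flags are kept. */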
191
192static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
193{
194 return 0;
195}
196
197static void radeon_move_null(struct ttm_buffer_object *bo,
198 struct ttm_mem_reg *new_mem)
199{
200 struct ttm_mem_reg *old_mem = &bo->mem;
201
202 BUG_ON(old_mem->mm_node != NULL);
203 *old_mem = *new_mem;
204 new_mem->mm_node = NULL;
205}
206
207static int radeon_move_blit(struct ttm_buffer_object *bo,
208 bool evict, int no_wait,
209 struct ttm_mem_reg *new_mem,
210 struct ttm_mem_reg *old_mem)
211{
212 struct radeon_device *rdev;
213 uint64_t old_start, new_start;
214 struct radeon_fence *fence;
215 int r;
216
217 rdev = radeon_get_rdev(bo->bdev);
218 r = radeon_fence_create(rdev, &fence);
219 if (unlikely(r)) {
220 return r;
221 }
222 old_start = old_mem->mm_node->start << PAGE_SHIFT;
223 new_start = new_mem->mm_node->start << PAGE_SHIFT;
224
225 switch (old_mem->mem_type) {
226 case TTM_PL_VRAM:
227 old_start += rdev->mc.vram_location;
228 break;
229 case TTM_PL_TT:
230 old_start += rdev->mc.gtt_location;
231 break;
232 default:
233 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
234 return -EINVAL;
235 }
236 switch (new_mem->mem_type) {
237 case TTM_PL_VRAM:
238 new_start += rdev->mc.vram_location;
239 break;
240 case TTM_PL_TT:
241 new_start += rdev->mc.gtt_location;
242 break;
243 default:
244 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
245 return -EINVAL;
246 }
247 if (!rdev->cp.ready) {
248 DRM_ERROR("Trying to move memory with CP turned off.\n");
249 return -EINVAL;
250 }
251 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
252 /* FIXME: handle copy error */
253 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
254 evict, no_wait, new_mem);
255 radeon_fence_unref(&fence);
256 return r;
257}
258
259static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
260 bool evict, bool interruptible, bool no_wait,
261 struct ttm_mem_reg *new_mem)
262{
263 struct radeon_device *rdev;
264 struct ttm_mem_reg *old_mem = &bo->mem;
265 struct ttm_mem_reg tmp_mem;
266 uint32_t proposed_placement;
267 int r;
268
269 rdev = radeon_get_rdev(bo->bdev);
270 tmp_mem = *new_mem;
271 tmp_mem.mm_node = NULL;
272 proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
273 r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
274 interruptible, no_wait);
275 if (unlikely(r)) {
276 return r;
277 }
278 r = ttm_tt_bind(bo->ttm, &tmp_mem);
279 if (unlikely(r)) {
280 goto out_cleanup;
281 }
282 r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
283 if (unlikely(r)) {
284 goto out_cleanup;
285 }
286 r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
287out_cleanup:
288 if (tmp_mem.mm_node) {
289 spin_lock(&rdev->mman.bdev.lru_lock);
290 drm_mm_put_block(tmp_mem.mm_node);
291 spin_unlock(&rdev->mman.bdev.lru_lock);
292 return r;
293 }
294 return r;
295}
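/*
 * Sketch of the VRAM -> system path above: presumably the blit engine can
 * only reach system pages through the GART, so the buffer is first blitted
 * into a temporary GTT placement and then moved to system memory with
 * ttm_bo_move_ttm(); radeon_move_ram_vram() below is the mirror image.
 */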
296
297static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
298 bool evict, bool interruptible, bool no_wait,
299 struct ttm_mem_reg *new_mem)
300{
301 struct radeon_device *rdev;
302 struct ttm_mem_reg *old_mem = &bo->mem;
303 struct ttm_mem_reg tmp_mem;
304 uint32_t proposed_flags;
305 int r;
306
307 rdev = radeon_get_rdev(bo->bdev);
308 tmp_mem = *new_mem;
309 tmp_mem.mm_node = NULL;
310 proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
311 r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
312 interruptible, no_wait);
313 if (unlikely(r)) {
314 return r;
315 }
316 r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
317 if (unlikely(r)) {
318 goto out_cleanup;
319 }
320 r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
321 if (unlikely(r)) {
322 goto out_cleanup;
323 }
324out_cleanup:
325 if (tmp_mem.mm_node) {
326 spin_lock(&rdev->mman.bdev.lru_lock);
327 drm_mm_put_block(tmp_mem.mm_node);
328 spin_unlock(&rdev->mman.bdev.lru_lock);
329 return r;
330 }
331 return r;
332}
333
334static int radeon_bo_move(struct ttm_buffer_object *bo,
335 bool evict, bool interruptible, bool no_wait,
336 struct ttm_mem_reg *new_mem)
337{
338 struct radeon_device *rdev;
339 struct ttm_mem_reg *old_mem = &bo->mem;
340 int r;
341
342 rdev = radeon_get_rdev(bo->bdev);
343 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
344 radeon_move_null(bo, new_mem);
345 return 0;
346 }
347 if ((old_mem->mem_type == TTM_PL_TT &&
348 new_mem->mem_type == TTM_PL_SYSTEM) ||
349 (old_mem->mem_type == TTM_PL_SYSTEM &&
350 new_mem->mem_type == TTM_PL_TT)) {
351		/* bind is enough */
352 radeon_move_null(bo, new_mem);
353 return 0;
354 }
355 if (!rdev->cp.ready) {
356 /* use memcpy */
357		DRM_ERROR("CP is not ready, using memcpy.\n");
358 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
359 }
360
361 if (old_mem->mem_type == TTM_PL_VRAM &&
362 new_mem->mem_type == TTM_PL_SYSTEM) {
363 return radeon_move_vram_ram(bo, evict, interruptible,
364 no_wait, new_mem);
365 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
366 new_mem->mem_type == TTM_PL_VRAM) {
367 return radeon_move_ram_vram(bo, evict, interruptible,
368 no_wait, new_mem);
369 } else {
370 r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
371 if (unlikely(r)) {
372 return r;
373 }
374 }
375 return r;
376}
377
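/*
 * Placement priority lists handed to TTM via radeon_bo_driver below: TTM
 * walks these when picking a memory type for a buffer, the second list
 * presumably giving the fallback order when the preferred pools are busy.
 */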
378const uint32_t radeon_mem_prios[] = {
379 TTM_PL_VRAM,
380 TTM_PL_TT,
381 TTM_PL_SYSTEM,
382};
383
384const uint32_t radeon_busy_prios[] = {
385 TTM_PL_TT,
386 TTM_PL_VRAM,
387 TTM_PL_SYSTEM,
388};
389
390static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
391 bool lazy, bool interruptible)
392{
393 return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
394}
395
396static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
397{
398 return 0;
399}
400
401static void radeon_sync_obj_unref(void **sync_obj)
402{
403 radeon_fence_unref((struct radeon_fence **)sync_obj);
404}
405
406static void *radeon_sync_obj_ref(void *sync_obj)
407{
408 return radeon_fence_ref((struct radeon_fence *)sync_obj);
409}
410
411static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
412{
413 return radeon_fence_signaled((struct radeon_fence *)sync_obj);
414}
415
416static struct ttm_bo_driver radeon_bo_driver = {
417 .mem_type_prio = radeon_mem_prios,
418 .mem_busy_prio = radeon_busy_prios,
419 .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
420 .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
421 .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
422 .invalidate_caches = &radeon_invalidate_caches,
423 .init_mem_type = &radeon_init_mem_type,
424 .evict_flags = &radeon_evict_flags,
425 .move = &radeon_bo_move,
426 .verify_access = &radeon_verify_access,
427 .sync_obj_signaled = &radeon_sync_obj_signaled,
428 .sync_obj_wait = &radeon_sync_obj_wait,
429 .sync_obj_flush = &radeon_sync_obj_flush,
430 .sync_obj_unref = &radeon_sync_obj_unref,
431 .sync_obj_ref = &radeon_sync_obj_ref,
432};
433
434int radeon_ttm_init(struct radeon_device *rdev)
435{
436 int r;
437
438 r = radeon_ttm_global_init(rdev);
439 if (r) {
440 return r;
441 }
442	/* No other users of the address space, so set it to 0 */
443 r = ttm_bo_device_init(&rdev->mman.bdev,
444 rdev->mman.mem_global_ref.object,
445 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
446 if (r) {
447 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
448 return r;
449 }
450 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
451 ((rdev->mc.aper_size) >> PAGE_SHIFT));
452 if (r) {
453 DRM_ERROR("Failed initializing VRAM heap.\n");
454 return r;
455 }
456 r = radeon_object_create(rdev, NULL, 256 * 1024, true,
457 RADEON_GEM_DOMAIN_VRAM, false,
458 &rdev->stollen_vga_memory);
459 if (r) {
460 return r;
461 }
462 r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
463 if (r) {
464 radeon_object_unref(&rdev->stollen_vga_memory);
465 return r;
466 }
467 DRM_INFO("radeon: %uM of VRAM memory ready\n",
468 rdev->mc.vram_size / (1024 * 1024));
469 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
470 ((rdev->mc.gtt_size) >> PAGE_SHIFT));
471 if (r) {
472 DRM_ERROR("Failed initializing GTT heap.\n");
473 return r;
474 }
475 DRM_INFO("radeon: %uM of GTT memory ready.\n",
476 rdev->mc.gtt_size / (1024 * 1024));
477 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
478 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
479 }
480 return 0;
481}
482
483void radeon_ttm_fini(struct radeon_device *rdev)
484{
485 if (rdev->stollen_vga_memory) {
486 radeon_object_unpin(rdev->stollen_vga_memory);
487 radeon_object_unref(&rdev->stollen_vga_memory);
488 }
489 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
490 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
491 ttm_bo_device_release(&rdev->mman.bdev);
492 radeon_gart_fini(rdev);
493 radeon_ttm_global_fini(rdev);
494 DRM_INFO("radeon: ttm finalized\n");
495}
496
497static struct vm_operations_struct radeon_ttm_vm_ops;
498static struct vm_operations_struct *ttm_vm_ops = NULL;
499
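/*
 * radeon wraps TTM's vm_ops: on the first mmap the TTM-provided fault
 * handler is saved in ttm_vm_ops and radeon_ttm_fault is installed in its
 * place.  For now it only checks vm_private_data and forwards to TTM.
 */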
500static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
501{
502 struct ttm_buffer_object *bo;
503 int r;
504
505 bo = (struct ttm_buffer_object *)vma->vm_private_data;
506 if (bo == NULL) {
507 return VM_FAULT_NOPAGE;
508 }
509 r = ttm_vm_ops->fault(vma, vmf);
510 return r;
511}
512
513int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
514{
515 struct drm_file *file_priv;
516 struct radeon_device *rdev;
517 int r;
518
519 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
520 return drm_mmap(filp, vma);
521 }
522
523 file_priv = (struct drm_file *)filp->private_data;
524 rdev = file_priv->minor->dev->dev_private;
525 if (rdev == NULL) {
526 return -EINVAL;
527 }
528 r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
529 if (unlikely(r != 0)) {
530 return r;
531 }
532 if (unlikely(ttm_vm_ops == NULL)) {
533 ttm_vm_ops = vma->vm_ops;
534 radeon_ttm_vm_ops = *ttm_vm_ops;
535 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
536 }
537 vma->vm_ops = &radeon_ttm_vm_ops;
538 return 0;
539}
540
541
542/*
543 * TTM backend functions.
544 */
545struct radeon_ttm_backend {
546 struct ttm_backend backend;
547 struct radeon_device *rdev;
548 unsigned long num_pages;
549 struct page **pages;
550 struct page *dummy_read_page;
551 bool populated;
552 bool bound;
553 unsigned offset;
554};
555
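/*
 * TTM backend glue: populate() hands the backend the list of system pages
 * for a ttm, bind()/unbind() program or clear the corresponding GART
 * entries at gtt->offset, clear() drops the page list again and destroy()
 * frees the backend (unbinding first if needed).
 */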
556static int radeon_ttm_backend_populate(struct ttm_backend *backend,
557 unsigned long num_pages,
558 struct page **pages,
559 struct page *dummy_read_page)
560{
561 struct radeon_ttm_backend *gtt;
562
563 gtt = container_of(backend, struct radeon_ttm_backend, backend);
564 gtt->pages = pages;
565 gtt->num_pages = num_pages;
566 gtt->dummy_read_page = dummy_read_page;
567 gtt->populated = true;
568 return 0;
569}
570
571static void radeon_ttm_backend_clear(struct ttm_backend *backend)
572{
573 struct radeon_ttm_backend *gtt;
574
575 gtt = container_of(backend, struct radeon_ttm_backend, backend);
576 gtt->pages = NULL;
577 gtt->num_pages = 0;
578 gtt->dummy_read_page = NULL;
579 gtt->populated = false;
580 gtt->bound = false;
581}
582
583
584static int radeon_ttm_backend_bind(struct ttm_backend *backend,
585 struct ttm_mem_reg *bo_mem)
586{
587 struct radeon_ttm_backend *gtt;
588 int r;
589
590 gtt = container_of(backend, struct radeon_ttm_backend, backend);
591 gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
592 if (!gtt->num_pages) {
593 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
594 }
595 r = radeon_gart_bind(gtt->rdev, gtt->offset,
596 gtt->num_pages, gtt->pages);
597 if (r) {
598 DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
599 gtt->num_pages, gtt->offset);
600 return r;
601 }
602 gtt->bound = true;
603 return 0;
604}
605
606static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
607{
608 struct radeon_ttm_backend *gtt;
609
610 gtt = container_of(backend, struct radeon_ttm_backend, backend);
611 radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
612 gtt->bound = false;
613 return 0;
614}
615
616static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
617{
618 struct radeon_ttm_backend *gtt;
619
620 gtt = container_of(backend, struct radeon_ttm_backend, backend);
621 if (gtt->bound) {
622 radeon_ttm_backend_unbind(backend);
623 }
624 kfree(gtt);
625}
626
627static struct ttm_backend_func radeon_backend_func = {
628 .populate = &radeon_ttm_backend_populate,
629 .clear = &radeon_ttm_backend_clear,
630 .bind = &radeon_ttm_backend_bind,
631 .unbind = &radeon_ttm_backend_unbind,
632 .destroy = &radeon_ttm_backend_destroy,
633};
634
635struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
636{
637 struct radeon_ttm_backend *gtt;
638
639 gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
640 if (gtt == NULL) {
641 return NULL;
642 }
643 gtt->backend.bdev = &rdev->mman.bdev;
644 gtt->backend.flags = 0;
645 gtt->backend.func = &radeon_backend_func;
646 gtt->rdev = rdev;
647 gtt->pages = NULL;
648 gtt->num_pages = 0;
649 gtt->dummy_read_page = NULL;
650 gtt->populated = false;
651 gtt->bound = false;
652 return &gtt->backend;
653}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
new file mode 100644
index 000000000000..cc074b5a8f74
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -0,0 +1,411 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include <drm/drmP.h>
30#include "radeon_reg.h"
31#include "radeon.h"
32
33/* rs400,rs480 depend on: */
34void r100_hdp_reset(struct radeon_device *rdev);
35void r100_mc_disable_clients(struct radeon_device *rdev);
36int r300_mc_wait_for_idle(struct radeon_device *rdev);
37void r420_pipes_init(struct radeon_device *rdev);
38
39/* This file gathers functions specific to:
40 * rs400,rs480
41 *
42 * Some of these functions might be used by newer ASICs.
43 */
44void rs400_gpu_init(struct radeon_device *rdev);
45int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
46
47
48/*
49 * GART functions.
50 */
51void rs400_gart_adjust_size(struct radeon_device *rdev)
52{
53 /* Check gart size */
54 switch (rdev->mc.gtt_size/(1024*1024)) {
55 case 32:
56 case 64:
57 case 128:
58 case 256:
59 case 512:
60 case 1024:
61 case 2048:
62 break;
63 default:
64 DRM_ERROR("Unable to use IGP GART size %uM\n",
65 rdev->mc.gtt_size >> 20);
66		DRM_ERROR("Valid GART sizes for IGP are 32M, 64M, 128M, 256M, 512M, 1G, 2G\n");
67 DRM_ERROR("Forcing to 32M GART size\n");
68 rdev->mc.gtt_size = 32 * 1024 * 1024;
69 return;
70 }
71 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
72		/* FIXME: RS400 & RS480 seem to have an issue with the GART size
73		 * when 4G of system memory is present (needs more testing) */
74 rdev->mc.gtt_size = 32 * 1024 * 1024;
75 DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
76 }
77}
78
79void rs400_gart_tlb_flush(struct radeon_device *rdev)
80{
81 uint32_t tmp;
82 unsigned int timeout = rdev->usec_timeout;
83
84 WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
85 do {
86 tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
87 if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
88 break;
89 DRM_UDELAY(1);
90 timeout--;
91 } while (timeout > 0);
92 WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
93}
94
95int rs400_gart_enable(struct radeon_device *rdev)
96{
97 uint32_t size_reg;
98 uint32_t tmp;
99 int r;
100
101 /* Initialize common gart structure */
102 r = radeon_gart_init(rdev);
103 if (r) {
104 return r;
105 }
106 if (rs400_debugfs_pcie_gart_info_init(rdev)) {
107 DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
108 }
109
110 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
111 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
112 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
113 /* Check gart size */
114 switch(rdev->mc.gtt_size / (1024 * 1024)) {
115 case 32:
116 size_reg = RS480_VA_SIZE_32MB;
117 break;
118 case 64:
119 size_reg = RS480_VA_SIZE_64MB;
120 break;
121 case 128:
122 size_reg = RS480_VA_SIZE_128MB;
123 break;
124 case 256:
125 size_reg = RS480_VA_SIZE_256MB;
126 break;
127 case 512:
128 size_reg = RS480_VA_SIZE_512MB;
129 break;
130 case 1024:
131 size_reg = RS480_VA_SIZE_1GB;
132 break;
133 case 2048:
134 size_reg = RS480_VA_SIZE_2GB;
135 break;
136 default:
137 return -EINVAL;
138 }
139 if (rdev->gart.table.ram.ptr == NULL) {
140 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
141 r = radeon_gart_table_ram_alloc(rdev);
142 if (r) {
143 return r;
144 }
145 }
146 /* It should be fine to program it to max value */
147 if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
148 WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
149 WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
150 } else {
151 WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
152 WREG32(RS480_AGP_BASE_2, 0);
153 }
154 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
155 tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
156 tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
157 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
158 WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
159 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
160 WREG32(RADEON_BUS_CNTL, tmp);
161 } else {
162 WREG32(RADEON_MC_AGP_LOCATION, tmp);
163 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
164 WREG32(RADEON_BUS_CNTL, tmp);
165 }
166	/* Table should be in the 32-bit address space, so ignore the bits above. */
167 tmp = rdev->gart.table_addr & 0xfffff000;
168 WREG32_MC(RS480_GART_BASE, tmp);
169 /* TODO: more tweaking here */
170 WREG32_MC(RS480_GART_FEATURE_ID,
171 (RS480_TLB_ENABLE |
172 RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
173 /* Disable snooping */
174 WREG32_MC(RS480_AGP_MODE_CNTL,
175 (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
176 /* Disable AGP mode */
177 /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
178 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
179 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
180 WREG32_MC(RS480_MC_MISC_CNTL,
181 (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
182 } else {
183 WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
184 }
185 /* Enable gart */
186 WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
187 rs400_gart_tlb_flush(rdev);
188 rdev->gart.ready = true;
189 return 0;
190}
191
192void rs400_gart_disable(struct radeon_device *rdev)
193{
194 uint32_t tmp;
195
196 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
197 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
198 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
199 WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
200}
201
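/*
 * RS400/RS480 GART entries are 32-bit little-endian words, one per GPU
 * page: the page address with the low flag bits (0xC here) ORed in --
 * presumably the valid/coherency bits required by the IGP GART.
 */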
202int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
203{
204	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
205 return -EINVAL;
206 }
207 rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC);
208 return 0;
209}
210
211
212/*
213 * MC functions.
214 */
215int rs400_mc_init(struct radeon_device *rdev)
216{
217 uint32_t tmp;
218 int r;
219
220 if (r100_debugfs_rbbm_init(rdev)) {
221 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
222 }
223
224 rs400_gpu_init(rdev);
225 rs400_gart_disable(rdev);
226 rdev->mc.gtt_location = rdev->mc.vram_size;
227 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
228 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
229 rdev->mc.vram_location = 0xFFFFFFFFUL;
230 r = radeon_mc_setup(rdev);
231 if (r) {
232 return r;
233 }
234
235 r100_mc_disable_clients(rdev);
236 if (r300_mc_wait_for_idle(rdev)) {
237 printk(KERN_WARNING "Failed to wait MC idle while "
238 "programming pipes. Bad things might happen.\n");
239 }
240
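	/* MC_FB_LOCATION packs the VRAM range as two 16-bit fields: the top
	 * (last byte address >> 16) and the start (>> 16).  E.g. 256MB of
	 * VRAM at address 0 gives start 0x0000 and top 0x0FFF. */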
241 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
242 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
243 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
244 WREG32(RADEON_MC_FB_LOCATION, tmp);
245 tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
246 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
247 (void)RREG32(RADEON_HOST_PATH_CNTL);
248 WREG32(RADEON_HOST_PATH_CNTL, tmp);
249 (void)RREG32(RADEON_HOST_PATH_CNTL);
250 return 0;
251}
252
253void rs400_mc_fini(struct radeon_device *rdev)
254{
255 rs400_gart_disable(rdev);
256 radeon_gart_table_ram_free(rdev);
257 radeon_gart_fini(rdev);
258}
259
260
261/*
262 * Global GPU functions
263 */
264void rs400_errata(struct radeon_device *rdev)
265{
266 rdev->pll_errata = 0;
267}
268
269void rs400_gpu_init(struct radeon_device *rdev)
270{
271 /* FIXME: HDP same place on rs400 ? */
272 r100_hdp_reset(rdev);
273 /* FIXME: is this correct ? */
274 r420_pipes_init(rdev);
275 if (r300_mc_wait_for_idle(rdev)) {
276 printk(KERN_WARNING "Failed to wait MC idle while "
277 "programming pipes. Bad things might happen.\n");
278 }
279}
280
281
282/*
283 * VRAM info.
284 */
285void rs400_vram_info(struct radeon_device *rdev)
286{
287 uint32_t tom;
288
289 rs400_gart_adjust_size(rdev);
290	/* DDR for all cards after R300 & IGP */
291 rdev->mc.vram_is_ddr = true;
292 rdev->mc.vram_width = 128;
293
294 /* read NB_TOM to get the amount of ram stolen for the GPU */
295 tom = RREG32(RADEON_NB_TOM);
296 rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
297 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
298
299	/* Could the aperture size report 0? */
300 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
301 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
302}
303
304
305/*
306 * Indirect registers accessor
307 */
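/*
 * The northbridge MC registers are not memory-mapped directly; they are
 * reached through an index/data pair: write the register number to
 * RS480_NB_MC_INDEX (with the write-enable bit for stores), then access
 * RS480_NB_MC_DATA, and finally reset the index.
 */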
308uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
309{
310 uint32_t r;
311
312 WREG32(RS480_NB_MC_INDEX, reg & 0xff);
313 r = RREG32(RS480_NB_MC_DATA);
314 WREG32(RS480_NB_MC_INDEX, 0xff);
315 return r;
316}
317
318void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
319{
320 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
321 WREG32(RS480_NB_MC_DATA, (v));
322 WREG32(RS480_NB_MC_INDEX, 0xff);
323}
324
325
326/*
327 * Debugfs info
328 */
329#if defined(CONFIG_DEBUG_FS)
330static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
331{
332 struct drm_info_node *node = (struct drm_info_node *) m->private;
333 struct drm_device *dev = node->minor->dev;
334 struct radeon_device *rdev = dev->dev_private;
335 uint32_t tmp;
336
337 tmp = RREG32(RADEON_HOST_PATH_CNTL);
338 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
339 tmp = RREG32(RADEON_BUS_CNTL);
340 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
341 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
342 seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
343 if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
344 tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
345 seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
346 tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
347 seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
348 tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
349 seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
350 tmp = RREG32_MC(0x100);
351 seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
352 tmp = RREG32(0x134);
353 seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
354 } else {
355 tmp = RREG32(RADEON_AGP_BASE);
356 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
357 tmp = RREG32(RS480_AGP_BASE_2);
358 seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
359 tmp = RREG32(RADEON_MC_AGP_LOCATION);
360 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
361 }
362 tmp = RREG32_MC(RS480_GART_BASE);
363 seq_printf(m, "GART_BASE 0x%08x\n", tmp);
364 tmp = RREG32_MC(RS480_GART_FEATURE_ID);
365 seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
366 tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
367 seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
368 tmp = RREG32_MC(RS480_MC_MISC_CNTL);
369 seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
370 tmp = RREG32_MC(0x5F);
371 seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
372 tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
373 seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
374 tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
375 seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
376 tmp = RREG32_MC(0x3B);
377 seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
378 tmp = RREG32_MC(0x3C);
379 seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
380 tmp = RREG32_MC(0x30);
381 seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
382 tmp = RREG32_MC(0x31);
383 seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
384 tmp = RREG32_MC(0x32);
385 seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
386 tmp = RREG32_MC(0x33);
387 seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
388 tmp = RREG32_MC(0x34);
389 seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
390 tmp = RREG32_MC(0x35);
391 seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
392 tmp = RREG32_MC(0x36);
393 seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
394 tmp = RREG32_MC(0x37);
395 seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
396 return 0;
397}
398
399static struct drm_info_list rs400_gart_info_list[] = {
400 {"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
401};
402#endif
403
404int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
405{
406#if defined(CONFIG_DEBUG_FS)
407 return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
408#else
409 return 0;
410#endif
411}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
new file mode 100644
index 000000000000..ab0c967553e6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs600 depends on: */
33void r100_hdp_reset(struct radeon_device *rdev);
34int r100_gui_wait_for_idle(struct radeon_device *rdev);
35int r300_mc_wait_for_idle(struct radeon_device *rdev);
36void r420_pipes_init(struct radeon_device *rdev);
37
38/* This file gathers functions specific to:
39 * rs600
40 *
41 * Some of these functions might be used by newer ASICs.
42 */
43void rs600_gpu_init(struct radeon_device *rdev);
44int rs600_mc_wait_for_idle(struct radeon_device *rdev);
45void rs600_disable_vga(struct radeon_device *rdev);
46
47
48/*
49 * GART.
50 */
51void rs600_gart_tlb_flush(struct radeon_device *rdev)
52{
53 uint32_t tmp;
54
55 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
56 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
57 WREG32_MC(RS600_MC_PT0_CNTL, tmp);
58
59 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
60 tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
61 WREG32_MC(RS600_MC_PT0_CNTL, tmp);
62
63 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
64 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
65 WREG32_MC(RS600_MC_PT0_CNTL, tmp);
66 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
67}
68
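/*
 * RS600 uses a real page-table GART: the table lives in VRAM with 8 bytes
 * per GPU page, and context 0 is set up as a single flat table covering
 * the whole GTT aperture; the remaining contexts are left disabled.
 */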
69int rs600_gart_enable(struct radeon_device *rdev)
70{
71 uint32_t tmp;
72 int i;
73 int r;
74
75 /* Initialize common gart structure */
76 r = radeon_gart_init(rdev);
77 if (r) {
78 return r;
79 }
80 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
81 r = radeon_gart_table_vram_alloc(rdev);
82 if (r) {
83 return r;
84 }
85 /* FIXME: setup default page */
86 WREG32_MC(RS600_MC_PT0_CNTL,
87 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
88 RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
89 for (i = 0; i < 19; i++) {
90 WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
91 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
92 RS600_SYSTEM_ACCESS_MODE_IN_SYS |
93 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
94 RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
95 RS600_ENABLE_FRAGMENT_PROCESSING |
96 RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
97 }
98
99 /* System context map to GART space */
100 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
101 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
102 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
103
104 /* enable first context */
105 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
106 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
107 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
108 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
109 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
110 /* disable all other contexts */
111 for (i = 1; i < 8; i++) {
112 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
113 }
114
115 /* setup the page table */
116 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
117 rdev->gart.table_addr);
118 WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
119
120 /* enable page tables */
121 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
122 WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
123 tmp = RREG32_MC(RS600_MC_CNTL1);
124 WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
125 rs600_gart_tlb_flush(rdev);
126 rdev->gart.ready = true;
127 return 0;
128}
129
130void rs600_gart_disable(struct radeon_device *rdev)
131{
132 uint32_t tmp;
133
134 /* FIXME: disable out of gart access */
135 WREG32_MC(RS600_MC_PT0_CNTL, 0);
136 tmp = RREG32_MC(RS600_MC_CNTL1);
137 tmp &= ~RS600_ENABLE_PAGE_TABLES;
138 WREG32_MC(RS600_MC_CNTL1, tmp);
139 radeon_object_kunmap(rdev->gart.table.vram.robj);
140 radeon_object_unpin(rdev->gart.table.vram.robj);
141}
142
143#define R600_PTE_VALID (1 << 0)
144#define R600_PTE_SYSTEM (1 << 1)
145#define R600_PTE_SNOOPED (1 << 2)
146#define R600_PTE_READABLE (1 << 5)
147#define R600_PTE_WRITEABLE (1 << 6)
148
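/*
 * Each RS600 GART entry is a 64-bit word: the 4KB-aligned page address
 * with the flag bits above ORed in (valid, system memory, snooped,
 * readable, writeable).  E.g. a page at 0x12345000 becomes the PTE
 * 0x0000000012345067.
 */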
149int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
150{
151 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
152
153	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
154 return -EINVAL;
155 }
156 addr = addr & 0xFFFFFFFFFFFFF000ULL;
157 addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
158 addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
159 writeq(addr, ((void __iomem *)ptr) + (i * 8));
160 return 0;
161}
162
163
164/*
165 * MC.
166 */
167void rs600_mc_disable_clients(struct radeon_device *rdev)
168{
169 unsigned tmp;
170
171 if (r100_gui_wait_for_idle(rdev)) {
172 printk(KERN_WARNING "Failed to wait GUI idle while "
173 "programming pipes. Bad things might happen.\n");
174 }
175
176 tmp = RREG32(AVIVO_D1VGA_CONTROL);
177 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
178 tmp = RREG32(AVIVO_D2VGA_CONTROL);
179 WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
180
181 tmp = RREG32(AVIVO_D1CRTC_CONTROL);
182 WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
183 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
184 WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
185
186	/* make sure all previous writes got through */
187 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
188
189 mdelay(1);
190}
191
192int rs600_mc_init(struct radeon_device *rdev)
193{
194 uint32_t tmp;
195 int r;
196
197 if (r100_debugfs_rbbm_init(rdev)) {
198 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
199 }
200
201 rs600_gpu_init(rdev);
202 rs600_gart_disable(rdev);
203
204 /* Setup GPU memory space */
205 rdev->mc.vram_location = 0xFFFFFFFFUL;
206 rdev->mc.gtt_location = 0xFFFFFFFFUL;
207 r = radeon_mc_setup(rdev);
208 if (r) {
209 return r;
210 }
211
212 /* Program GPU memory space */
213 /* Enable bus master */
214 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
215 WREG32(RADEON_BUS_CNTL, tmp);
216	/* FIXME: What does AGP mean for such a chipset? */
217 WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
218	/* FIXME: are these AGP regs in the indirect MC range? */
219 WREG32_MC(RS600_MC_AGP_BASE, 0);
220 WREG32_MC(RS600_MC_AGP_BASE_2, 0);
221 rs600_mc_disable_clients(rdev);
222 if (rs600_mc_wait_for_idle(rdev)) {
223 printk(KERN_WARNING "Failed to wait MC idle while "
224 "programming pipes. Bad things might happen.\n");
225 }
226 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
227 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
228 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
229 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
230 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
231 return 0;
232}
233
234void rs600_mc_fini(struct radeon_device *rdev)
235{
236 rs600_gart_disable(rdev);
237 radeon_gart_table_vram_free(rdev);
238 radeon_gart_fini(rdev);
239}
240
241
242/*
243 * Global GPU functions
244 */
245void rs600_disable_vga(struct radeon_device *rdev)
246{
247 unsigned tmp;
248
249 WREG32(0x330, 0);
250 WREG32(0x338, 0);
251 tmp = RREG32(0x300);
252 tmp &= ~(3 << 16);
253 WREG32(0x300, tmp);
254 WREG32(0x308, (1 << 8));
255 WREG32(0x310, rdev->mc.vram_location);
256 WREG32(0x594, 0);
257}
258
259int rs600_mc_wait_for_idle(struct radeon_device *rdev)
260{
261 unsigned i;
262 uint32_t tmp;
263
264 for (i = 0; i < rdev->usec_timeout; i++) {
265 /* read MC_STATUS */
266 tmp = RREG32_MC(RS600_MC_STATUS);
267 if (tmp & RS600_MC_STATUS_IDLE) {
268 return 0;
269 }
270 DRM_UDELAY(1);
271 }
272 return -1;
273}
274
275void rs600_errata(struct radeon_device *rdev)
276{
277 rdev->pll_errata = 0;
278}
279
280void rs600_gpu_init(struct radeon_device *rdev)
281{
282 /* FIXME: HDP same place on rs600 ? */
283 r100_hdp_reset(rdev);
284 rs600_disable_vga(rdev);
285 /* FIXME: is this correct ? */
286 r420_pipes_init(rdev);
287 if (rs600_mc_wait_for_idle(rdev)) {
288 printk(KERN_WARNING "Failed to wait MC idle while "
289 "programming pipes. Bad things might happen.\n");
290 }
291}
292
293
294/*
295 * VRAM info.
296 */
297void rs600_vram_info(struct radeon_device *rdev)
298{
299	/* FIXME: to do, or are these values sane? */
300 rdev->mc.vram_is_ddr = true;
301 rdev->mc.vram_width = 128;
302}
303
304
305/*
306 * Indirect registers accessor
307 */
308uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
309{
310 uint32_t r;
311
312 WREG32(RS600_MC_INDEX,
313 ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
314 r = RREG32(RS600_MC_DATA);
315 return r;
316}
317
318void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
319{
320 WREG32(RS600_MC_INDEX,
321 RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
322 ((reg) & RS600_MC_ADDR_MASK));
323 WREG32(RS600_MC_DATA, v);
324}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
new file mode 100644
index 000000000000..79ba85042b5f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs690,rs740 depend on: */
33void r100_hdp_reset(struct radeon_device *rdev);
34int r300_mc_wait_for_idle(struct radeon_device *rdev);
35void r420_pipes_init(struct radeon_device *rdev);
36void rs400_gart_disable(struct radeon_device *rdev);
37int rs400_gart_enable(struct radeon_device *rdev);
38void rs400_gart_adjust_size(struct radeon_device *rdev);
39void rs600_mc_disable_clients(struct radeon_device *rdev);
40void rs600_disable_vga(struct radeon_device *rdev);
41
42/* This file gathers functions specific to:
43 * rs690,rs740
44 *
45 * Some of these functions might be used by newer ASICs.
46 */
47void rs690_gpu_init(struct radeon_device *rdev);
48int rs690_mc_wait_for_idle(struct radeon_device *rdev);
49
50
51/*
52 * MC functions.
53 */
54int rs690_mc_init(struct radeon_device *rdev)
55{
56 uint32_t tmp;
57 int r;
58
59 if (r100_debugfs_rbbm_init(rdev)) {
60 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
61 }
62
63 rs690_gpu_init(rdev);
64 rs400_gart_disable(rdev);
65
66 /* Setup GPU memory space */
67 rdev->mc.gtt_location = rdev->mc.vram_size;
68 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
69 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
70 rdev->mc.vram_location = 0xFFFFFFFFUL;
71 r = radeon_mc_setup(rdev);
72 if (r) {
73 return r;
74 }
75
76 /* Program GPU memory space */
77 rs600_mc_disable_clients(rdev);
78 if (rs690_mc_wait_for_idle(rdev)) {
79 printk(KERN_WARNING "Failed to wait MC idle while "
80 "programming pipes. Bad things might happen.\n");
81 }
82 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
83 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
84 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
85 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
86 /* FIXME: Does this reg exist on RS480,RS740 ? */
87 WREG32(0x310, rdev->mc.vram_location);
88 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
89 return 0;
90}
91
92void rs690_mc_fini(struct radeon_device *rdev)
93{
94 rs400_gart_disable(rdev);
95 radeon_gart_table_ram_free(rdev);
96 radeon_gart_fini(rdev);
97}
98
99
100/*
101 * Global GPU functions
102 */
103int rs690_mc_wait_for_idle(struct radeon_device *rdev)
104{
105 unsigned i;
106 uint32_t tmp;
107
108 for (i = 0; i < rdev->usec_timeout; i++) {
109 /* read MC_STATUS */
110 tmp = RREG32_MC(RS690_MC_STATUS);
111 if (tmp & RS690_MC_STATUS_IDLE) {
112 return 0;
113 }
114 DRM_UDELAY(1);
115 }
116 return -1;
117}
118
119void rs690_errata(struct radeon_device *rdev)
120{
121 rdev->pll_errata = 0;
122}
123
124void rs690_gpu_init(struct radeon_device *rdev)
125{
126 /* FIXME: HDP same place on rs690 ? */
127 r100_hdp_reset(rdev);
128 rs600_disable_vga(rdev);
129 /* FIXME: is this correct ? */
130 r420_pipes_init(rdev);
131 if (rs690_mc_wait_for_idle(rdev)) {
132 printk(KERN_WARNING "Failed to wait MC idle while "
133 "programming pipes. Bad things might happen.\n");
134 }
135}
136
137
138/*
139 * VRAM info.
140 */
141void rs690_vram_info(struct radeon_device *rdev)
142{
143 uint32_t tmp;
144
145 rs400_gart_adjust_size(rdev);
146	/* DDR for all cards after R300 & IGP */
147 rdev->mc.vram_is_ddr = true;
148 /* FIXME: is this correct for RS690/RS740 ? */
149 tmp = RREG32(RADEON_MEM_CNTL);
150 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
151 rdev->mc.vram_width = 128;
152 } else {
153 rdev->mc.vram_width = 64;
154 }
155 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
156
157 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
158 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
159}
160
161
162/*
163 * Indirect registers accessor
164 */
165uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
166{
167 uint32_t r;
168
169 WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
170 r = RREG32(RS690_MC_DATA);
171 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
172 return r;
173}
174
175void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
176{
177 WREG32(RS690_MC_INDEX,
178 RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
179 WREG32(RS690_MC_DATA, v);
180 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
181}
diff --git a/drivers/gpu/drm/radeon/rs780.c b/drivers/gpu/drm/radeon/rs780.c
new file mode 100644
index 000000000000..0affcff81825
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs780 depends on: */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This file gathers functions specific to:
36 * rs780
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int rs780_mc_wait_for_idle(struct radeon_device *rdev);
41void rs780_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
47int rs780_mc_init(struct radeon_device *rdev)
48{
49 rs780_gpu_init(rdev);
50 /* FIXME: implement */
51
52 rs600_mc_disable_clients(rdev);
53 if (rs780_mc_wait_for_idle(rdev)) {
54 printk(KERN_WARNING "Failed to wait MC idle while "
55 "programming pipes. Bad things might happen.\n");
56 }
57 return 0;
58}
59
60void rs780_mc_fini(struct radeon_device *rdev)
61{
62 /* FIXME: implement */
63}
64
65
66/*
67 * Global GPU functions
68 */
69void rs780_errata(struct radeon_device *rdev)
70{
71 rdev->pll_errata = 0;
72}
73
74int rs780_mc_wait_for_idle(struct radeon_device *rdev)
75{
76 /* FIXME: implement */
77 return 0;
78}
79
80void rs780_gpu_init(struct radeon_device *rdev)
81{
82 /* FIXME: implement */
83}
84
85
86/*
87 * VRAM info
88 */
89void rs780_vram_get_type(struct radeon_device *rdev)
90{
91 /* FIXME: implement */
92}
93
94void rs780_vram_info(struct radeon_device *rdev)
95{
96 rs780_vram_get_type(rdev);
97
98 /* FIXME: implement */
99	/* Could the aperture size report 0? */
100 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
101 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
102}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
new file mode 100644
index 000000000000..7eab95db58ac
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -0,0 +1,504 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "radeon_reg.h"
31#include "radeon.h"
32
33/* rv515 depends on: */
34void r100_hdp_reset(struct radeon_device *rdev);
35int r100_cp_reset(struct radeon_device *rdev);
36int r100_rb2d_reset(struct radeon_device *rdev);
37int r100_gui_wait_for_idle(struct radeon_device *rdev);
38int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
39int rv370_pcie_gart_enable(struct radeon_device *rdev);
40void rv370_pcie_gart_disable(struct radeon_device *rdev);
41void r420_pipes_init(struct radeon_device *rdev);
42void rs600_mc_disable_clients(struct radeon_device *rdev);
43void rs600_disable_vga(struct radeon_device *rdev);
44
45/* This file gathers functions specific to:
46 * rv515
47 *
48 * Some of these functions might be used by newer ASICs.
49 */
50int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
51int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
52void rv515_gpu_init(struct radeon_device *rdev);
53int rv515_mc_wait_for_idle(struct radeon_device *rdev);
54
55
56/*
57 * MC
58 */
59int rv515_mc_init(struct radeon_device *rdev)
60{
61 uint32_t tmp;
62 int r;
63
64 if (r100_debugfs_rbbm_init(rdev)) {
65 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
66 }
67 if (rv515_debugfs_pipes_info_init(rdev)) {
68 DRM_ERROR("Failed to register debugfs file for pipes !\n");
69 }
70 if (rv515_debugfs_ga_info_init(rdev)) {
71		DRM_ERROR("Failed to register debugfs file for GA !\n");
72 }
73
74 rv515_gpu_init(rdev);
75 rv370_pcie_gart_disable(rdev);
76
77 /* Setup GPU memory space */
78 rdev->mc.vram_location = 0xFFFFFFFFUL;
79 rdev->mc.gtt_location = 0xFFFFFFFFUL;
80 if (rdev->flags & RADEON_IS_AGP) {
81 r = radeon_agp_init(rdev);
82 if (r) {
83 printk(KERN_WARNING "[drm] Disabling AGP\n");
84 rdev->flags &= ~RADEON_IS_AGP;
85 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
86 } else {
87 rdev->mc.gtt_location = rdev->mc.agp_base;
88 }
89 }
90 r = radeon_mc_setup(rdev);
91 if (r) {
92 return r;
93 }
94
95 /* Program GPU memory space */
96 rs600_mc_disable_clients(rdev);
97 if (rv515_mc_wait_for_idle(rdev)) {
98 printk(KERN_WARNING "Failed to wait MC idle while "
99 "programming pipes. Bad things might happen.\n");
100 }
101 /* Write VRAM size in case we are limiting it */
102 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
103 tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
104 WREG32(0x134, tmp);
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
106 tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16);
107 tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
108 WREG32_MC(RV515_MC_FB_LOCATION, tmp);
109 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
110 WREG32(0x310, rdev->mc.vram_location);
111 if (rdev->flags & RADEON_IS_AGP) {
112 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
113 tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16);
114 tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16);
115 WREG32_MC(RV515_MC_AGP_LOCATION, tmp);
116 WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base);
117 WREG32_MC(RV515_MC_AGP_BASE_2, 0);
118 } else {
119 WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF);
120 WREG32_MC(RV515_MC_AGP_BASE, 0);
121 WREG32_MC(RV515_MC_AGP_BASE_2, 0);
122 }
123 return 0;
124}
125
126void rv515_mc_fini(struct radeon_device *rdev)
127{
128 rv370_pcie_gart_disable(rdev);
129 radeon_gart_table_vram_free(rdev);
130 radeon_gart_fini(rdev);
131}
132
133
134/*
135 * Global GPU functions
136 */
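/*
 * rv515_ring_start pushes the initial 3D pipeline state through the CP
 * ring: lock enough ring space, emit PACKET0 register writes (tiling/pipe
 * config, idle waits, cache flushes, multisample positions, rounding
 * modes), then commit and unlock.
 */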
137void rv515_ring_start(struct radeon_device *rdev)
138{
139 unsigned gb_tile_config;
140 int r;
141
142	/* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
143 gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
144 switch (rdev->num_gb_pipes) {
145 case 2:
146 gb_tile_config |= R300_PIPE_COUNT_R300;
147 break;
148 case 3:
149 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
150 break;
151 case 4:
152 gb_tile_config |= R300_PIPE_COUNT_R420;
153 break;
154 case 1:
155 default:
156 gb_tile_config |= R300_PIPE_COUNT_RV350;
157 break;
158 }
159
160 r = radeon_ring_lock(rdev, 64);
161 if (r) {
162 return;
163 }
164 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
165 radeon_ring_write(rdev,
166 RADEON_ISYNC_ANY2D_IDLE3D |
167 RADEON_ISYNC_ANY3D_IDLE2D |
168 RADEON_ISYNC_WAIT_IDLEGUI |
169 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
170 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
171 radeon_ring_write(rdev, gb_tile_config);
172 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
173 radeon_ring_write(rdev,
174 RADEON_WAIT_2D_IDLECLEAN |
175 RADEON_WAIT_3D_IDLECLEAN);
176 radeon_ring_write(rdev, PACKET0(0x170C, 0));
177 radeon_ring_write(rdev, 1 << 31);
178 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
179 radeon_ring_write(rdev, 0);
180 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
181 radeon_ring_write(rdev, 0);
182 radeon_ring_write(rdev, PACKET0(0x42C8, 0));
183 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
184 radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0));
185 radeon_ring_write(rdev, 0);
186 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
187 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
188 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
189 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
190 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
191 radeon_ring_write(rdev,
192 RADEON_WAIT_2D_IDLECLEAN |
193 RADEON_WAIT_3D_IDLECLEAN);
194 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
195 radeon_ring_write(rdev, 0);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
198 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
199 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
200 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
201 radeon_ring_write(rdev,
202 ((6 << R300_MS_X0_SHIFT) |
203 (6 << R300_MS_Y0_SHIFT) |
204 (6 << R300_MS_X1_SHIFT) |
205 (6 << R300_MS_Y1_SHIFT) |
206 (6 << R300_MS_X2_SHIFT) |
207 (6 << R300_MS_Y2_SHIFT) |
208 (6 << R300_MSBD0_Y_SHIFT) |
209 (6 << R300_MSBD0_X_SHIFT)));
210 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
211 radeon_ring_write(rdev,
212 ((6 << R300_MS_X3_SHIFT) |
213 (6 << R300_MS_Y3_SHIFT) |
214 (6 << R300_MS_X4_SHIFT) |
215 (6 << R300_MS_Y4_SHIFT) |
216 (6 << R300_MS_X5_SHIFT) |
217 (6 << R300_MS_Y5_SHIFT) |
218 (6 << R300_MSBD1_SHIFT)));
219 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
220 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
221 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
222 radeon_ring_write(rdev,
223 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
224 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
225 radeon_ring_write(rdev,
226 R300_GEOMETRY_ROUND_NEAREST |
227 R300_COLOR_ROUND_NEAREST);
228 radeon_ring_unlock_commit(rdev);
229}
230
231void rv515_errata(struct radeon_device *rdev)
232{
233 rdev->pll_errata = 0;
234}
235
236int rv515_mc_wait_for_idle(struct radeon_device *rdev)
237{
238 unsigned i;
239 uint32_t tmp;
240
241 for (i = 0; i < rdev->usec_timeout; i++) {
242 /* read MC_STATUS */
243 tmp = RREG32_MC(RV515_MC_STATUS);
244 if (tmp & RV515_MC_STATUS_IDLE) {
245 return 0;
246 }
247 DRM_UDELAY(1);
248 }
249 return -1;
250}
251
252void rv515_gpu_init(struct radeon_device *rdev)
253{
254 unsigned pipe_select_current, gb_pipe_select, tmp;
255
256 r100_hdp_reset(rdev);
257 r100_rb2d_reset(rdev);
258
259 if (r100_gui_wait_for_idle(rdev)) {
260 printk(KERN_WARNING "Failed to wait GUI idle while "
261 "reseting GPU. Bad things might happen.\n");
262 }
263
264 rs600_disable_vga(rdev);
265
266 r420_pipes_init(rdev);
267 gb_pipe_select = RREG32(0x402C);
268 tmp = RREG32(0x170C);
269 pipe_select_current = (tmp >> 2) & 3;
270 tmp = (1 << pipe_select_current) |
271 (((gb_pipe_select >> 8) & 0xF) << 4);
272 WREG32_PLL(0x000D, tmp);
273 if (r100_gui_wait_for_idle(rdev)) {
274 printk(KERN_WARNING "Failed to wait GUI idle while "
275 "reseting GPU. Bad things might happen.\n");
276 }
277 if (rv515_mc_wait_for_idle(rdev)) {
278 printk(KERN_WARNING "Failed to wait MC idle while "
279 "programming pipes. Bad things might happen.\n");
280 }
281}
282
283int rv515_ga_reset(struct radeon_device *rdev)
284{
285 uint32_t tmp;
286 bool reinit_cp;
287 int i;
288
289 reinit_cp = rdev->cp.ready;
290 rdev->cp.ready = false;
291 for (i = 0; i < rdev->usec_timeout; i++) {
292 WREG32(RADEON_CP_CSQ_MODE, 0);
293 WREG32(RADEON_CP_CSQ_CNTL, 0);
294 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
295 (void)RREG32(RADEON_RBBM_SOFT_RESET);
296 udelay(200);
297 WREG32(RADEON_RBBM_SOFT_RESET, 0);
298 /* Wait to prevent race in RBBM_STATUS */
299 mdelay(1);
300 tmp = RREG32(RADEON_RBBM_STATUS);
301 if (tmp & ((1 << 20) | (1 << 26))) {
302 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
303			/* GA still busy, soft reset it */
304 WREG32(0x429C, 0x200);
305 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
306 WREG32(0x43E0, 0);
307 WREG32(0x43E4, 0);
308 WREG32(0x24AC, 0);
309 }
310 /* Wait to prevent race in RBBM_STATUS */
311 mdelay(1);
312 tmp = RREG32(RADEON_RBBM_STATUS);
313 if (!(tmp & ((1 << 20) | (1 << 26)))) {
314 break;
315 }
316 }
317 for (i = 0; i < rdev->usec_timeout; i++) {
318 tmp = RREG32(RADEON_RBBM_STATUS);
319 if (!(tmp & ((1 << 20) | (1 << 26)))) {
320			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
321 tmp);
322 DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
323 DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
324 DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
325 if (reinit_cp) {
326 return r100_cp_init(rdev, rdev->cp.ring_size);
327 }
328 return 0;
329 }
330 DRM_UDELAY(1);
331 }
332 tmp = RREG32(RADEON_RBBM_STATUS);
333 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
334 return -1;
335}
336
337int rv515_gpu_reset(struct radeon_device *rdev)
338{
339 uint32_t status;
340
341	/* reset order likely matters */
342 status = RREG32(RADEON_RBBM_STATUS);
343 /* reset HDP */
344 r100_hdp_reset(rdev);
345 /* reset rb2d */
346 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
347 r100_rb2d_reset(rdev);
348 }
349 /* reset GA */
350 if (status & ((1 << 20) | (1 << 26))) {
351 rv515_ga_reset(rdev);
352 }
353 /* reset CP */
354 status = RREG32(RADEON_RBBM_STATUS);
355 if (status & (1 << 16)) {
356 r100_cp_reset(rdev);
357 }
358 /* Check if GPU is idle */
359 status = RREG32(RADEON_RBBM_STATUS);
360 if (status & (1 << 31)) {
361 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
362 return -1;
363 }
364	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
365 return 0;
366}
367
368
369/*
370 * VRAM info
371 */
372static void rv515_vram_get_type(struct radeon_device *rdev)
373{
374 uint32_t tmp;
375
376 rdev->mc.vram_width = 128;
377 rdev->mc.vram_is_ddr = true;
378 tmp = RREG32_MC(RV515_MC_CNTL);
379 tmp &= RV515_MEM_NUM_CHANNELS_MASK;
380 switch (tmp) {
381 case 0:
382 rdev->mc.vram_width = 64;
383 break;
384 case 1:
385 rdev->mc.vram_width = 128;
386 break;
387 default:
388 rdev->mc.vram_width = 128;
389 break;
390 }
391}
392
393void rv515_vram_info(struct radeon_device *rdev)
394{
395 rv515_vram_get_type(rdev);
396 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
397
398 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
399 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
400}
401
402
403/*
404 * Indirect registers accessor
405 */
406uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
407{
408 uint32_t r;
409
410 WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
411 r = RREG32(R520_MC_IND_DATA);
412 WREG32(R520_MC_IND_INDEX, 0);
413 return r;
414}
415
416void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
417{
418 WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
419 WREG32(R520_MC_IND_DATA, (v));
420 WREG32(R520_MC_IND_INDEX, 0);
421}
422
423uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
424{
425 uint32_t r;
426
427 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
428 (void)RREG32(RADEON_PCIE_INDEX);
429 r = RREG32(RADEON_PCIE_DATA);
430 return r;
431}
432
433void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
434{
435 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
436 (void)RREG32(RADEON_PCIE_INDEX);
437 WREG32(RADEON_PCIE_DATA, (v));
438 (void)RREG32(RADEON_PCIE_DATA);
439}
440
441
442/*
443 * Debugfs info
444 */
445#if defined(CONFIG_DEBUG_FS)
446static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
447{
448 struct drm_info_node *node = (struct drm_info_node *) m->private;
449 struct drm_device *dev = node->minor->dev;
450 struct radeon_device *rdev = dev->dev_private;
451 uint32_t tmp;
452
453 tmp = RREG32(R400_GB_PIPE_SELECT);
454 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
455 tmp = RREG32(R500_SU_REG_DEST);
456 seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
457 tmp = RREG32(R300_GB_TILE_CONFIG);
458 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
459 tmp = RREG32(R300_DST_PIPE_CONFIG);
460 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
461 return 0;
462}
463
464static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
465{
466 struct drm_info_node *node = (struct drm_info_node *) m->private;
467 struct drm_device *dev = node->minor->dev;
468 struct radeon_device *rdev = dev->dev_private;
469 uint32_t tmp;
470
471 tmp = RREG32(0x2140);
472 seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
473 radeon_gpu_reset(rdev);
474 tmp = RREG32(0x425C);
475 seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
476 return 0;
477}
478
479static struct drm_info_list rv515_pipes_info_list[] = {
480 {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
481};
482
483static struct drm_info_list rv515_ga_info_list[] = {
484 {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
485};
486#endif
487
488int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
489{
490#if defined(CONFIG_DEBUG_FS)
491 return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
492#else
493 return 0;
494#endif
495}
496
497int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
498{
499#if defined(CONFIG_DEBUG_FS)
500 return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
501#else
502 return 0;
503#endif
504}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
new file mode 100644
index 000000000000..da50cc51ede3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rv770,rv730,rv710 depend on: */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This file gathers functions specific to:
36 * rv770,rv730,rv710
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int rv770_mc_wait_for_idle(struct radeon_device *rdev);
41void rv770_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
47int rv770_mc_init(struct radeon_device *rdev)
48{
49 uint32_t tmp;
50
51 rv770_gpu_init(rdev);
52
53 /* Set up the GART before changing the MC location so we can ask it
54 * to discard unmapped MC requests.
55 */
56 /* FIXME: disable out of gart access */
57 tmp = rdev->mc.gtt_location / 4096;
58 tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
59 WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
60 tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
61 tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
62 WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
63
64 rs600_mc_disable_clients(rdev);
65 if (rv770_mc_wait_for_idle(rdev)) {
66 printk(KERN_WARNING "Failed to wait for MC idle while "
67 "programming pipes. Bad things might happen.\n");
68 }
69
70 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
71 tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R700_MC_VM_FB_LOCATION, tmp);
74 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
75 tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
76 WREG32(R700_MC_VM_AGP_TOP, tmp);
77 tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
78 WREG32(R700_MC_VM_AGP_BOT, tmp);
79 return 0;
80}
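/*
 * The shifts used above suggest the granularity of the MC aperture
 * registers: FB_BASE/FB_TOP appear to hold bits 31:24 of the byte
 * address (16MB units) and AGP_BOT/AGP_TOP bits 31:22 (4MB units).
 * Worked example under that assumption, for 512MB of VRAM at offset 0:
 *
 *	FB_BASE = 0x00000000 >> 24 = 0x00
 *	FB_TOP  = 0x1fffffff >> 24 = 0x1f
 */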
81
82void rv770_mc_fini(struct radeon_device *rdev)
83{
84 /* FIXME: implement */
85}
86
87
88/*
89 * Global GPU functions
90 */
91void rv770_errata(struct radeon_device *rdev)
92{
93 rdev->pll_errata = 0;
94}
95
96int rv770_mc_wait_for_idle(struct radeon_device *rdev)
97{
98 /* FIXME: implement */
99 return 0;
100}
101
102void rv770_gpu_init(struct radeon_device *rdev)
103{
104 /* FIXME: implement */
105}
106
107
108/*
109 * VRAM info
110 */
111void rv770_vram_get_type(struct radeon_device *rdev)
112{
113 /* FIXME: implement */
114}
115
116void rv770_vram_info(struct radeon_device *rdev)
117{
118 rv770_vram_get_type(rdev);
119
120 /* FIXME: implement */
121 /* Could the aperture size be reported as 0? */
122 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
123 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
124}
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
new file mode 100644
index 000000000000..b0a9de7a57c2
--- /dev/null
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the drm device driver. This driver provides support for the TTM memory manager.
3
4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
7
8obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644
index 000000000000..e8f6d2229d8c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -0,0 +1,150 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 * Keith Packard.
30 */
31
32#include "ttm/ttm_module.h"
33#include "ttm/ttm_bo_driver.h"
34#ifdef TTM_HAS_AGP
35#include "ttm/ttm_placement.h"
36#include <linux/agp_backend.h>
37#include <linux/module.h>
38#include <linux/io.h>
39#include <asm/agp.h>
40
41struct ttm_agp_backend {
42 struct ttm_backend backend;
43 struct agp_memory *mem;
44 struct agp_bridge_data *bridge;
45};
46
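/*
 * populate() converts TTM's page array into an agp_memory descriptor by
 * recording each page's bus address; slots the caller left unpopulated
 * fall back to dummy_read_page so the GART never points at an unmapped
 * page.  bind() below then commits that descriptor into the aperture.
 */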
47static int ttm_agp_populate(struct ttm_backend *backend,
48 unsigned long num_pages, struct page **pages,
49 struct page *dummy_read_page)
50{
51 struct ttm_agp_backend *agp_be =
52 container_of(backend, struct ttm_agp_backend, backend);
53 struct page **cur_page, **last_page = pages + num_pages;
54 struct agp_memory *mem;
55
56 mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
57 if (unlikely(mem == NULL))
58 return -ENOMEM;
59
60 mem->page_count = 0;
61 for (cur_page = pages; cur_page < last_page; ++cur_page) {
62 struct page *page = *cur_page;
63 if (!page)
64 page = dummy_read_page;
65
66 mem->memory[mem->page_count++] =
67 phys_to_gart(page_to_phys(page));
68 }
69 agp_be->mem = mem;
70 return 0;
71}
72
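/*
 * bind() inserts the previously populated agp_memory at the page offset
 * given by bo_mem->mm_node->start, selecting AGP_USER_CACHED_MEMORY
 * when the placement asks for cached pages and AGP_USER_MEMORY
 * otherwise.
 */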
73static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
74{
75 struct ttm_agp_backend *agp_be =
76 container_of(backend, struct ttm_agp_backend, backend);
77 struct agp_memory *mem = agp_be->mem;
78 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
79 int ret;
80
81 mem->is_flushed = 1;
82 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
83
84 ret = agp_bind_memory(mem, bo_mem->mm_node->start);
85 if (ret)
86 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
87
88 return ret;
89}
90
91static int ttm_agp_unbind(struct ttm_backend *backend)
92{
93 struct ttm_agp_backend *agp_be =
94 container_of(backend, struct ttm_agp_backend, backend);
95
96 if (agp_be->mem->is_bound)
97 return agp_unbind_memory(agp_be->mem);
98 else
99 return 0;
100}
101
102static void ttm_agp_clear(struct ttm_backend *backend)
103{
104 struct ttm_agp_backend *agp_be =
105 container_of(backend, struct ttm_agp_backend, backend);
106 struct agp_memory *mem = agp_be->mem;
107
108 if (mem) {
109 ttm_agp_unbind(backend);
110 agp_free_memory(mem);
111 }
112 agp_be->mem = NULL;
113}
114
115static void ttm_agp_destroy(struct ttm_backend *backend)
116{
117 struct ttm_agp_backend *agp_be =
118 container_of(backend, struct ttm_agp_backend, backend);
119
120 if (agp_be->mem)
121 ttm_agp_clear(backend);
122 kfree(agp_be);
123}
124
125static struct ttm_backend_func ttm_agp_func = {
126 .populate = ttm_agp_populate,
127 .clear = ttm_agp_clear,
128 .bind = ttm_agp_bind,
129 .unbind = ttm_agp_unbind,
130 .destroy = ttm_agp_destroy,
131};
132
133struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
134 struct agp_bridge_data *bridge)
135{
136 struct ttm_agp_backend *agp_be;
137
138 agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
139 if (!agp_be)
140 return NULL;
141
142 agp_be->mem = NULL;
143 agp_be->bridge = bridge;
144 agp_be->backend.func = &ttm_agp_func;
145 agp_be->backend.bdev = bdev;
146 return &agp_be->backend;
147}
148EXPORT_SYMBOL(ttm_agp_backend_init);
149
150#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644
index 000000000000..1587aeca7bea
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -0,0 +1,1698 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h>
40
41#define TTM_ASSERT_LOCKED(param)
42#define TTM_DEBUG(fmt, arg...)
43#define TTM_BO_HASH_ORDER 13
44
45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
47static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48
49static inline uint32_t ttm_bo_type_flags(unsigned type)
50{
51 return 1 << (type);
52}
53
54static void ttm_bo_release_list(struct kref *list_kref)
55{
56 struct ttm_buffer_object *bo =
57 container_of(list_kref, struct ttm_buffer_object, list_kref);
58 struct ttm_bo_device *bdev = bo->bdev;
59
60 BUG_ON(atomic_read(&bo->list_kref.refcount));
61 BUG_ON(atomic_read(&bo->kref.refcount));
62 BUG_ON(atomic_read(&bo->cpu_writers));
63 BUG_ON(bo->sync_obj != NULL);
64 BUG_ON(bo->mem.mm_node != NULL);
65 BUG_ON(!list_empty(&bo->lru));
66 BUG_ON(!list_empty(&bo->ddestroy));
67
68 if (bo->ttm)
69 ttm_tt_destroy(bo->ttm);
70 if (bo->destroy)
71 bo->destroy(bo);
72 else {
73 ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
74 kfree(bo);
75 }
76}
77
78int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
79{
80
81 if (interruptible) {
82 int ret = 0;
83
84 ret = wait_event_interruptible(bo->event_queue,
85 atomic_read(&bo->reserved) == 0);
86 if (unlikely(ret != 0))
87 return -ERESTART;
88 } else {
89 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
90 }
91 return 0;
92}
93
94static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
95{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_mem_type_manager *man;
98
99 BUG_ON(!atomic_read(&bo->reserved));
100
101 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
102
103 BUG_ON(!list_empty(&bo->lru));
104
105 man = &bdev->man[bo->mem.mem_type];
106 list_add_tail(&bo->lru, &man->lru);
107 kref_get(&bo->list_kref);
108
109 if (bo->ttm != NULL) {
110 list_add_tail(&bo->swap, &bdev->swap_lru);
111 kref_get(&bo->list_kref);
112 }
113 }
114}
115
116/**
117 * Call with the lru_lock held.
118 */
119
120static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
121{
122 int put_count = 0;
123
124 if (!list_empty(&bo->swap)) {
125 list_del_init(&bo->swap);
126 ++put_count;
127 }
128 if (!list_empty(&bo->lru)) {
129 list_del_init(&bo->lru);
130 ++put_count;
131 }
132
133 /*
134 * TODO: Add a driver hook to delete from
135 * driver-specific LRU's here.
136 */
137
138 return put_count;
139}
140
141int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
142 bool interruptible,
143 bool no_wait, bool use_sequence, uint32_t sequence)
144{
145 struct ttm_bo_device *bdev = bo->bdev;
146 int ret;
147
148 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
149 if (use_sequence && bo->seq_valid &&
150 (sequence - bo->val_seq < (1 << 31))) {
151 return -EAGAIN;
152 }
153
154 if (no_wait)
155 return -EBUSY;
156
157 spin_unlock(&bdev->lru_lock);
158 ret = ttm_bo_wait_unreserved(bo, interruptible);
159 spin_lock(&bdev->lru_lock);
160
161 if (unlikely(ret))
162 return ret;
163 }
164
165 if (use_sequence) {
166 bo->val_seq = sequence;
167 bo->seq_valid = true;
168 } else {
169 bo->seq_valid = false;
170 }
171
172 return 0;
173}
174EXPORT_SYMBOL(ttm_bo_reserve);
175
176static void ttm_bo_ref_bug(struct kref *list_kref)
177{
178 BUG();
179}
180
181int ttm_bo_reserve(struct ttm_buffer_object *bo,
182 bool interruptible,
183 bool no_wait, bool use_sequence, uint32_t sequence)
184{
185 struct ttm_bo_device *bdev = bo->bdev;
186 int put_count = 0;
187 int ret;
188
189 spin_lock(&bdev->lru_lock);
190 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
191 sequence);
192 if (likely(ret == 0))
193 put_count = ttm_bo_del_from_lru(bo);
194 spin_unlock(&bdev->lru_lock);
195
196 while (put_count--)
197 kref_put(&bo->list_kref, ttm_bo_ref_bug);
198
199 return ret;
200}
201
202void ttm_bo_unreserve(struct ttm_buffer_object *bo)
203{
204 struct ttm_bo_device *bdev = bo->bdev;
205
206 spin_lock(&bdev->lru_lock);
207 ttm_bo_add_to_lru(bo);
208 atomic_set(&bo->reserved, 0);
209 wake_up_all(&bo->event_queue);
210 spin_unlock(&bdev->lru_lock);
211}
212EXPORT_SYMBOL(ttm_bo_unreserve);
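/*
 * Reservation acts as the per-buffer lock: ttm_bo_reserve() also takes
 * the buffer off the LRU/swap lists so concurrent eviction and swapout
 * leave it alone, and ttm_bo_unreserve() puts it back.  A typical
 * caller, sketched here purely as an illustration:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_buffer_object_validate(bo, TTM_PL_FLAG_VRAM, true, false);
 *	ttm_bo_unreserve(bo);
 */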
213
214/*
215 * Call bo->mutex locked.
216 */
217
218static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
219{
220 struct ttm_bo_device *bdev = bo->bdev;
221 int ret = 0;
222 uint32_t page_flags = 0;
223
224 TTM_ASSERT_LOCKED(&bo->mutex);
225 bo->ttm = NULL;
226
227 switch (bo->type) {
228 case ttm_bo_type_device:
229 if (zero_alloc)
230 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
231 case ttm_bo_type_kernel:
232 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
233 page_flags, bdev->dummy_read_page);
234 if (unlikely(bo->ttm == NULL))
235 ret = -ENOMEM;
236 break;
237 case ttm_bo_type_user:
238 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
239 page_flags | TTM_PAGE_FLAG_USER,
240 bdev->dummy_read_page);
241 if (unlikely(bo->ttm == NULL)) {
242 ret = -ENOMEM;
243 break;
244 }
245 ret = ttm_tt_set_user(bo->ttm, current,
246 bo->buffer_start, bo->num_pages);
247 if (unlikely(ret != 0))
248 ttm_tt_destroy(bo->ttm);
249 break;
250 default:
251 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
252 ret = -EINVAL;
253 break;
254 }
255
256 return ret;
257}
258
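/*
 * Move a buffer to the memory region described by @mem.  The move is
 * carried out by ttm_bo_move_ttm() when both ends are non-fixed (TT or
 * system) memory, by the driver's move() hook when one is provided, and
 * otherwise by the generic ttm_bo_move_memcpy() fallback.
 */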
259static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
260 struct ttm_mem_reg *mem,
261 bool evict, bool interruptible, bool no_wait)
262{
263 struct ttm_bo_device *bdev = bo->bdev;
264 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
265 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
266 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
267 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
268 int ret = 0;
269
270 if (old_is_pci || new_is_pci ||
271 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
272 ttm_bo_unmap_virtual(bo);
273
274 /*
275 * Create and bind a ttm if required.
276 */
277
278 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
279 ret = ttm_bo_add_ttm(bo, false);
280 if (ret)
281 goto out_err;
282
283 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
284 if (ret)
285 return ret;
286
287 if (mem->mem_type != TTM_PL_SYSTEM) {
288 ret = ttm_tt_bind(bo->ttm, mem);
289 if (ret)
290 goto out_err;
291 }
292
293 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
294
295 struct ttm_mem_reg *old_mem = &bo->mem;
296 uint32_t save_flags = old_mem->placement;
297
298 *old_mem = *mem;
299 mem->mm_node = NULL;
300 ttm_flag_masked(&save_flags, mem->placement,
301 TTM_PL_MASK_MEMTYPE);
302 goto moved;
303 }
304
305 }
306
307 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
308 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
309 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
310 else if (bdev->driver->move)
311 ret = bdev->driver->move(bo, evict, interruptible,
312 no_wait, mem);
313 else
314 ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
315
316 if (ret)
317 goto out_err;
318
319moved:
320 if (bo->evicted) {
321 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
322 if (ret)
323 printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
324 bo->evicted = false;
325 }
326
327 if (bo->mem.mm_node) {
328 spin_lock(&bo->lock);
329 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
330 bdev->man[bo->mem.mem_type].gpu_offset;
331 bo->cur_placement = bo->mem.placement;
332 spin_unlock(&bo->lock);
333 }
334
335 return 0;
336
337out_err:
338 new_man = &bdev->man[bo->mem.mem_type];
339 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
340 ttm_tt_unbind(bo->ttm);
341 ttm_tt_destroy(bo->ttm);
342 bo->ttm = NULL;
343 }
344
345 return ret;
346}
347
348/**
349 * If bo idle, remove from delayed- and lru lists, and unref.
350 * If not idle, and already on delayed list, do nothing.
351 * If not idle, and not on delayed list, put on delayed list,
352 * up the list_kref and schedule a delayed list check.
353 */
354
355static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
356{
357 struct ttm_bo_device *bdev = bo->bdev;
358 struct ttm_bo_driver *driver = bdev->driver;
359 int ret;
360
361 spin_lock(&bo->lock);
362 (void) ttm_bo_wait(bo, false, false, !remove_all);
363
364 if (!bo->sync_obj) {
365 int put_count;
366
367 spin_unlock(&bo->lock);
368
369 spin_lock(&bdev->lru_lock);
370 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
371 BUG_ON(ret);
372 if (bo->ttm)
373 ttm_tt_unbind(bo->ttm);
374
375 if (!list_empty(&bo->ddestroy)) {
376 list_del_init(&bo->ddestroy);
377 kref_put(&bo->list_kref, ttm_bo_ref_bug);
378 }
379 if (bo->mem.mm_node) {
380 drm_mm_put_block(bo->mem.mm_node);
381 bo->mem.mm_node = NULL;
382 }
383 put_count = ttm_bo_del_from_lru(bo);
384 spin_unlock(&bdev->lru_lock);
385
386 atomic_set(&bo->reserved, 0);
387
388 while (put_count--)
389 kref_put(&bo->list_kref, ttm_bo_release_list);
390
391 return 0;
392 }
393
394 spin_lock(&bdev->lru_lock);
395 if (list_empty(&bo->ddestroy)) {
396 void *sync_obj = bo->sync_obj;
397 void *sync_obj_arg = bo->sync_obj_arg;
398
399 kref_get(&bo->list_kref);
400 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
401 spin_unlock(&bdev->lru_lock);
402 spin_unlock(&bo->lock);
403
404 if (sync_obj)
405 driver->sync_obj_flush(sync_obj, sync_obj_arg);
406 schedule_delayed_work(&bdev->wq,
407 ((HZ / 100) < 1) ? 1 : HZ / 100);
408 ret = 0;
409
410 } else {
411 spin_unlock(&bdev->lru_lock);
412 spin_unlock(&bo->lock);
413 ret = -EBUSY;
414 }
415
416 return ret;
417}
418
419/**
420 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
421 * encountered buffers.
422 */
423
424static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
425{
426 struct ttm_buffer_object *entry, *nentry;
427 struct list_head *list, *next;
428 int ret;
429
430 spin_lock(&bdev->lru_lock);
431 list_for_each_safe(list, next, &bdev->ddestroy) {
432 entry = list_entry(list, struct ttm_buffer_object, ddestroy);
433 nentry = NULL;
434
435 /*
436 * Protect the next list entry from destruction while we
437 * unlock the lru_lock.
438 */
439
440 if (next != &bdev->ddestroy) {
441 nentry = list_entry(next, struct ttm_buffer_object,
442 ddestroy);
443 kref_get(&nentry->list_kref);
444 }
445 kref_get(&entry->list_kref);
446
447 spin_unlock(&bdev->lru_lock);
448 ret = ttm_bo_cleanup_refs(entry, remove_all);
449 kref_put(&entry->list_kref, ttm_bo_release_list);
450
451 spin_lock(&bdev->lru_lock);
452 if (nentry) {
453 bool next_onlist = !list_empty(next);
454 spin_unlock(&bdev->lru_lock);
455 kref_put(&nentry->list_kref, ttm_bo_release_list);
456 spin_lock(&bdev->lru_lock);
457 /*
458 * Someone might have raced us and removed the
459 * next entry from the list. We don't bother restarting
460 * list traversal.
461 */
462
463 if (!next_onlist)
464 break;
465 }
466 if (ret)
467 break;
468 }
469 ret = !list_empty(&bdev->ddestroy);
470 spin_unlock(&bdev->lru_lock);
471
472 return ret;
473}
474
475static void ttm_bo_delayed_workqueue(struct work_struct *work)
476{
477 struct ttm_bo_device *bdev =
478 container_of(work, struct ttm_bo_device, wq.work);
479
480 if (ttm_bo_delayed_delete(bdev, false)) {
481 schedule_delayed_work(&bdev->wq,
482 ((HZ / 100) < 1) ? 1 : HZ / 100);
483 }
484}
485
486static void ttm_bo_release(struct kref *kref)
487{
488 struct ttm_buffer_object *bo =
489 container_of(kref, struct ttm_buffer_object, kref);
490 struct ttm_bo_device *bdev = bo->bdev;
491
492 if (likely(bo->vm_node != NULL)) {
493 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
494 drm_mm_put_block(bo->vm_node);
495 bo->vm_node = NULL;
496 }
497 write_unlock(&bdev->vm_lock);
498 ttm_bo_cleanup_refs(bo, false);
499 kref_put(&bo->list_kref, ttm_bo_release_list);
500 write_lock(&bdev->vm_lock);
501}
502
503void ttm_bo_unref(struct ttm_buffer_object **p_bo)
504{
505 struct ttm_buffer_object *bo = *p_bo;
506 struct ttm_bo_device *bdev = bo->bdev;
507
508 *p_bo = NULL;
509 write_lock(&bdev->vm_lock);
510 kref_put(&bo->kref, ttm_bo_release);
511 write_unlock(&bdev->vm_lock);
512}
513EXPORT_SYMBOL(ttm_bo_unref);
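/*
 * Two reference counts are in play here: bo->kref counts user
 * references and, on its final put, triggers ttm_bo_release(), while
 * bo->list_kref counts list membership (LRU, swap and delayed-destroy
 * lists) and frees the object through ttm_bo_release_list() once the
 * buffer is off every list.
 */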
514
515static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
516 bool interruptible, bool no_wait)
517{
518 int ret = 0;
519 struct ttm_bo_device *bdev = bo->bdev;
520 struct ttm_mem_reg evict_mem;
521 uint32_t proposed_placement;
522
523 if (bo->mem.mem_type != mem_type)
524 goto out;
525
526 spin_lock(&bo->lock);
527 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
528 spin_unlock(&bo->lock);
529
530 if (ret && ret != -ERESTART) {
531 printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
532 "buffer eviction.\n");
533 goto out;
534 }
535
536 BUG_ON(!atomic_read(&bo->reserved));
537
538 evict_mem = bo->mem;
539 evict_mem.mm_node = NULL;
540
541 proposed_placement = bdev->driver->evict_flags(bo);
542
543 ret = ttm_bo_mem_space(bo, proposed_placement,
544 &evict_mem, interruptible, no_wait);
545 if (unlikely(ret != 0 && ret != -ERESTART))
546 ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
547 &evict_mem, interruptible, no_wait);
548
549 if (ret) {
550 if (ret != -ERESTART)
551 printk(KERN_ERR TTM_PFX
552 "Failed to find memory space for "
553 "buffer 0x%p eviction.\n", bo);
554 goto out;
555 }
556
557 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
558 no_wait);
559 if (ret) {
560 if (ret != -ERESTART)
561 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
562 goto out;
563 }
564
565 spin_lock(&bdev->lru_lock);
566 if (evict_mem.mm_node) {
567 drm_mm_put_block(evict_mem.mm_node);
568 evict_mem.mm_node = NULL;
569 }
570 spin_unlock(&bdev->lru_lock);
571 bo->evicted = true;
572out:
573 return ret;
574}
575
576/**
577 * Repeatedly evict memory from the LRU for @mem_type until we create enough
578 * space, or we've evicted everything and there isn't enough space.
579 */
580static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
581 struct ttm_mem_reg *mem,
582 uint32_t mem_type,
583 bool interruptible, bool no_wait)
584{
585 struct drm_mm_node *node;
586 struct ttm_buffer_object *entry;
587 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
588 struct list_head *lru;
589 unsigned long num_pages = mem->num_pages;
590 int put_count = 0;
591 int ret;
592
593retry_pre_get:
594 ret = drm_mm_pre_get(&man->manager);
595 if (unlikely(ret != 0))
596 return ret;
597
598 spin_lock(&bdev->lru_lock);
599 do {
600 node = drm_mm_search_free(&man->manager, num_pages,
601 mem->page_alignment, 1);
602 if (node)
603 break;
604
605 lru = &man->lru;
606 if (list_empty(lru))
607 break;
608
609 entry = list_first_entry(lru, struct ttm_buffer_object, lru);
610 kref_get(&entry->list_kref);
611
612 ret =
613 ttm_bo_reserve_locked(entry, interruptible, no_wait,
614 false, 0);
615
616 if (likely(ret == 0))
617 put_count = ttm_bo_del_from_lru(entry);
618
619 spin_unlock(&bdev->lru_lock);
620
621 if (unlikely(ret != 0))
622 return ret;
623
624 while (put_count--)
625 kref_put(&entry->list_kref, ttm_bo_ref_bug);
626
627 ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
628
629 ttm_bo_unreserve(entry);
630
631 kref_put(&entry->list_kref, ttm_bo_release_list);
632 if (ret)
633 return ret;
634
635 spin_lock(&bdev->lru_lock);
636 } while (1);
637
638 if (!node) {
639 spin_unlock(&bdev->lru_lock);
640 return -ENOMEM;
641 }
642
643 node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
644 if (unlikely(!node)) {
645 spin_unlock(&bdev->lru_lock);
646 goto retry_pre_get;
647 }
648
649 spin_unlock(&bdev->lru_lock);
650 mem->mm_node = node;
651 mem->mem_type = mem_type;
652 return 0;
653}
654
655static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
656 bool disallow_fixed,
657 uint32_t mem_type,
658 uint32_t mask, uint32_t *res_mask)
659{
660 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
661
662 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
663 return false;
664
665 if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
666 return false;
667
668 if ((mask & man->available_caching) == 0)
669 return false;
670 if (mask & man->default_caching)
671 cur_flags |= man->default_caching;
672 else if (mask & TTM_PL_FLAG_CACHED)
673 cur_flags |= TTM_PL_FLAG_CACHED;
674 else if (mask & TTM_PL_FLAG_WC)
675 cur_flags |= TTM_PL_FLAG_WC;
676 else
677 cur_flags |= TTM_PL_FLAG_UNCACHED;
678
679 *res_mask = cur_flags;
680 return true;
681}
682
683/**
684 * Creates space for memory region @mem according to its type.
685 *
686 * This function first searches for free space in compatible memory types in
687 * the priority order defined by the driver. If free space isn't found, then
688 * ttm_bo_mem_force_space is attempted in priority order to evict and find
689 * space.
690 */
691int ttm_bo_mem_space(struct ttm_buffer_object *bo,
692 uint32_t proposed_placement,
693 struct ttm_mem_reg *mem,
694 bool interruptible, bool no_wait)
695{
696 struct ttm_bo_device *bdev = bo->bdev;
697 struct ttm_mem_type_manager *man;
698
699 uint32_t num_prios = bdev->driver->num_mem_type_prio;
700 const uint32_t *prios = bdev->driver->mem_type_prio;
701 uint32_t i;
702 uint32_t mem_type = TTM_PL_SYSTEM;
703 uint32_t cur_flags = 0;
704 bool type_found = false;
705 bool type_ok = false;
706 bool has_eagain = false;
707 struct drm_mm_node *node = NULL;
708 int ret;
709
710 mem->mm_node = NULL;
711 for (i = 0; i < num_prios; ++i) {
712 mem_type = prios[i];
713 man = &bdev->man[mem_type];
714
715 type_ok = ttm_bo_mt_compatible(man,
716 bo->type == ttm_bo_type_user,
717 mem_type, proposed_placement,
718 &cur_flags);
719
720 if (!type_ok)
721 continue;
722
723 if (mem_type == TTM_PL_SYSTEM)
724 break;
725
726 if (man->has_type && man->use_type) {
727 type_found = true;
728 do {
729 ret = drm_mm_pre_get(&man->manager);
730 if (unlikely(ret))
731 return ret;
732
733 spin_lock(&bdev->lru_lock);
734 node = drm_mm_search_free(&man->manager,
735 mem->num_pages,
736 mem->page_alignment,
737 1);
738 if (unlikely(!node)) {
739 spin_unlock(&bdev->lru_lock);
740 break;
741 }
742 node =
743 drm_mm_get_block_atomic(node,
744 mem->num_pages,
745 mem->page_alignment);
746 spin_unlock(&bdev->lru_lock);
747 } while (!node);
748 }
749 if (node)
750 break;
751 }
752
753 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
754 mem->mm_node = node;
755 mem->mem_type = mem_type;
756 mem->placement = cur_flags;
757 return 0;
758 }
759
760 if (!type_found)
761 return -EINVAL;
762
763 num_prios = bdev->driver->num_mem_busy_prio;
764 prios = bdev->driver->mem_busy_prio;
765
766 for (i = 0; i < num_prios; ++i) {
767 mem_type = prios[i];
768 man = &bdev->man[mem_type];
769
770 if (!man->has_type)
771 continue;
772
773 if (!ttm_bo_mt_compatible(man,
774 bo->type == ttm_bo_type_user,
775 mem_type,
776 proposed_placement, &cur_flags))
777 continue;
778
779 ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
780 interruptible, no_wait);
781
782 if (ret == 0 && mem->mm_node) {
783 mem->placement = cur_flags;
784 return 0;
785 }
786
787 if (ret == -ERESTART)
788 has_eagain = true;
789 }
790
791 ret = (has_eagain) ? -ERESTART : -ENOMEM;
792 return ret;
793}
794EXPORT_SYMBOL(ttm_bo_mem_space);
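/*
 * The placement priorities consulted above come from the driver's
 * ttm_bo_driver.  A minimal sketch of what a driver might install,
 * using example-local names:
 *
 *	static uint32_t prios[] = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
 *
 *	driver.mem_type_prio = prios;
 *	driver.num_mem_type_prio = ARRAY_SIZE(prios);
 *	driver.mem_busy_prio = prios;
 *	driver.num_mem_busy_prio = ARRAY_SIZE(prios);
 */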
795
796int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
797{
798 int ret = 0;
799
800 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
801 return -EBUSY;
802
803 ret = wait_event_interruptible(bo->event_queue,
804 atomic_read(&bo->cpu_writers) == 0);
805
806 if (ret == -ERESTARTSYS)
807 ret = -ERESTART;
808
809 return ret;
810}
811
812int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
813 uint32_t proposed_placement,
814 bool interruptible, bool no_wait)
815{
816 struct ttm_bo_device *bdev = bo->bdev;
817 int ret = 0;
818 struct ttm_mem_reg mem;
819
820 BUG_ON(!atomic_read(&bo->reserved));
821
822 /*
823 * FIXME: It's possible to pipeline buffer moves.
824 * Have the driver move function wait for idle when necessary,
825 * instead of doing it here.
826 */
827
828 spin_lock(&bo->lock);
829 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
830 spin_unlock(&bo->lock);
831
832 if (ret)
833 return ret;
834
835 mem.num_pages = bo->num_pages;
836 mem.size = mem.num_pages << PAGE_SHIFT;
837 mem.page_alignment = bo->mem.page_alignment;
838
839 /*
840 * Determine where to move the buffer.
841 */
842
843 ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
844 interruptible, no_wait);
845 if (ret)
846 goto out_unlock;
847
848 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
849
850out_unlock:
851 if (ret && mem.mm_node) {
852 spin_lock(&bdev->lru_lock);
853 drm_mm_put_block(mem.mm_node);
854 spin_unlock(&bdev->lru_lock);
855 }
856 return ret;
857}
858
859static int ttm_bo_mem_compat(uint32_t proposed_placement,
860 struct ttm_mem_reg *mem)
861{
862 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
863 return 0;
864 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
865 return 0;
866
867 return 1;
868}
869
870int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
871 uint32_t proposed_placement,
872 bool interruptible, bool no_wait)
873{
874 int ret;
875
876 BUG_ON(!atomic_read(&bo->reserved));
877 bo->proposed_placement = proposed_placement;
878
879 TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
880 (unsigned long)proposed_placement,
881 (unsigned long)bo->mem.placement);
882
883 /*
884 * Check whether we need to move buffer.
885 */
886
887 if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
888 ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
889 interruptible, no_wait);
890 if (ret) {
891 if (ret != -ERESTART)
892 printk(KERN_ERR TTM_PFX
893 "Failed moving buffer. "
894 "Proposed placement 0x%08x\n",
895 bo->proposed_placement);
896 if (ret == -ENOMEM)
897 printk(KERN_ERR TTM_PFX
898 "Out of aperture space or "
899 "DRM memory quota.\n");
900 return ret;
901 }
902 }
903
904 /*
905 * We might need to add a TTM.
906 */
907
908 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
909 ret = ttm_bo_add_ttm(bo, true);
910 if (ret)
911 return ret;
912 }
913 /*
914 * Validation has succeeded, move the access and other
915 * non-mapping-related flag bits from the proposed flags to
916 * the active flags
917 */
918
919 ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
920 ~TTM_PL_MASK_MEMTYPE);
921
922 return 0;
923}
924EXPORT_SYMBOL(ttm_buffer_object_validate);
925
926int
927ttm_bo_check_placement(struct ttm_buffer_object *bo,
928 uint32_t set_flags, uint32_t clr_flags)
929{
930 uint32_t new_mask = set_flags | clr_flags;
931
932 if ((bo->type == ttm_bo_type_user) &&
933 (clr_flags & TTM_PL_FLAG_CACHED)) {
934 printk(KERN_ERR TTM_PFX
935 "User buffers require cache-coherent memory.\n");
936 return -EINVAL;
937 }
938
939 if (!capable(CAP_SYS_ADMIN)) {
940 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
941 printk(KERN_ERR TTM_PFX "Need to be root to modify"
942 " NO_EVICT status.\n");
943 return -EINVAL;
944 }
945
946 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
947 (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
948 printk(KERN_ERR TTM_PFX
949 "Incompatible memory specification"
950 " for NO_EVICT buffer.\n");
951 return -EINVAL;
952 }
953 }
954 return 0;
955}
956
957int ttm_buffer_object_init(struct ttm_bo_device *bdev,
958 struct ttm_buffer_object *bo,
959 unsigned long size,
960 enum ttm_bo_type type,
961 uint32_t flags,
962 uint32_t page_alignment,
963 unsigned long buffer_start,
964 bool interruptible,
965 struct file *persistant_swap_storage,
966 size_t acc_size,
967 void (*destroy) (struct ttm_buffer_object *))
968{
969 int ret = 0;
970 unsigned long num_pages;
971
972 size += buffer_start & ~PAGE_MASK;
973 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
974 if (num_pages == 0) {
975 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
976 return -EINVAL;
977 }
978 bo->destroy = destroy;
979
980 spin_lock_init(&bo->lock);
981 kref_init(&bo->kref);
982 kref_init(&bo->list_kref);
983 atomic_set(&bo->cpu_writers, 0);
984 atomic_set(&bo->reserved, 1);
985 init_waitqueue_head(&bo->event_queue);
986 INIT_LIST_HEAD(&bo->lru);
987 INIT_LIST_HEAD(&bo->ddestroy);
988 INIT_LIST_HEAD(&bo->swap);
989 bo->bdev = bdev;
990 bo->type = type;
991 bo->num_pages = num_pages;
992 bo->mem.mem_type = TTM_PL_SYSTEM;
993 bo->mem.num_pages = bo->num_pages;
994 bo->mem.mm_node = NULL;
995 bo->mem.page_alignment = page_alignment;
996 bo->buffer_start = buffer_start & PAGE_MASK;
997 bo->priv_flags = 0;
998 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
999 bo->seq_valid = false;
1000 bo->persistant_swap_storage = persistant_swap_storage;
1001 bo->acc_size = acc_size;
1002
1003 ret = ttm_bo_check_placement(bo, flags, 0ULL);
1004 if (unlikely(ret != 0))
1005 goto out_err;
1006
1007 /*
1008 * If no caching attributes are set, accept any form of caching.
1009 */
1010
1011 if ((flags & TTM_PL_MASK_CACHING) == 0)
1012 flags |= TTM_PL_MASK_CACHING;
1013
1014 /*
1015 * For ttm_bo_type_device buffers, allocate
1016 * address space from the device.
1017 */
1018
1019 if (bo->type == ttm_bo_type_device) {
1020 ret = ttm_bo_setup_vm(bo);
1021 if (ret)
1022 goto out_err;
1023 }
1024
1025 ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
1026 if (ret)
1027 goto out_err;
1028
1029 ttm_bo_unreserve(bo);
1030 return 0;
1031
1032out_err:
1033 ttm_bo_unreserve(bo);
1034 ttm_bo_unref(&bo);
1035
1036 return ret;
1037}
1038EXPORT_SYMBOL(ttm_buffer_object_init);
1039
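/*
 * ttm_bo_size() estimates the kernel memory footprint of one buffer
 * object: the rounded object/tt/backend structures accounted in
 * bdev->ttm_bo_size plus two page-aligned page arrays.  The result is
 * charged against the memory accounting global as acc_size.
 */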
1040static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
1041 unsigned long num_pages)
1042{
1043 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1044 PAGE_MASK;
1045
1046 return bdev->ttm_bo_size + 2 * page_array_size;
1047}
1048
1049int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1050 unsigned long size,
1051 enum ttm_bo_type type,
1052 uint32_t flags,
1053 uint32_t page_alignment,
1054 unsigned long buffer_start,
1055 bool interruptible,
1056 struct file *persistant_swap_storage,
1057 struct ttm_buffer_object **p_bo)
1058{
1059 struct ttm_buffer_object *bo;
1060 int ret;
1061 struct ttm_mem_global *mem_glob = bdev->mem_glob;
1062
1063 size_t acc_size =
1064 ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1065 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
1066 if (unlikely(ret != 0))
1067 return ret;
1068
1069 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1070
1071 if (unlikely(bo == NULL)) {
1072 ttm_mem_global_free(mem_glob, acc_size, false);
1073 return -ENOMEM;
1074 }
1075
1076 ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1077 page_alignment, buffer_start,
1078 interruptible,
1079 persistant_swap_storage, acc_size, NULL);
1080 if (likely(ret == 0))
1081 *p_bo = bo;
1082
1083 return ret;
1084}
1085
1086static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
1087 uint32_t mem_type, bool allow_errors)
1088{
1089 int ret;
1090
1091 spin_lock(&bo->lock);
1092 ret = ttm_bo_wait(bo, false, false, false);
1093 spin_unlock(&bo->lock);
1094
1095 if (ret && allow_errors)
1096 goto out;
1097
1098 if (bo->mem.mem_type == mem_type)
1099 ret = ttm_bo_evict(bo, mem_type, false, false);
1100
1101 if (ret) {
1102 if (allow_errors) {
1103 goto out;
1104 } else {
1105 ret = 0;
1106 printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
1107 }
1108 }
1109
1110out:
1111 return ret;
1112}
1113
1114static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1115 struct list_head *head,
1116 unsigned mem_type, bool allow_errors)
1117{
1118 struct ttm_buffer_object *entry;
1119 int ret;
1120 int put_count;
1121
1122 /*
1123 * Can't use standard list traversal since we're unlocking.
1124 */
1125
1126 spin_lock(&bdev->lru_lock);
1127
1128 while (!list_empty(head)) {
1129 entry = list_first_entry(head, struct ttm_buffer_object, lru);
1130 kref_get(&entry->list_kref);
1131 ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
1132 put_count = ttm_bo_del_from_lru(entry);
1133 spin_unlock(&bdev->lru_lock);
1134 while (put_count--)
1135 kref_put(&entry->list_kref, ttm_bo_ref_bug);
1136 BUG_ON(ret);
1137 ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
1138 ttm_bo_unreserve(entry);
1139 kref_put(&entry->list_kref, ttm_bo_release_list);
1140 spin_lock(&bdev->lru_lock);
1141 }
1142
1143 spin_unlock(&bdev->lru_lock);
1144
1145 return 0;
1146}
1147
1148int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1149{
1150 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1151 int ret = -EINVAL;
1152
1153 if (mem_type >= TTM_NUM_MEM_TYPES) {
1154 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1155 return ret;
1156 }
1157
1158 if (!man->has_type) {
1159 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1160 "memory manager type %u\n", mem_type);
1161 return ret;
1162 }
1163
1164 man->use_type = false;
1165 man->has_type = false;
1166
1167 ret = 0;
1168 if (mem_type > 0) {
1169 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
1170
1171 spin_lock(&bdev->lru_lock);
1172 if (drm_mm_clean(&man->manager))
1173 drm_mm_takedown(&man->manager);
1174 else
1175 ret = -EBUSY;
1176
1177 spin_unlock(&bdev->lru_lock);
1178 }
1179
1180 return ret;
1181}
1182EXPORT_SYMBOL(ttm_bo_clean_mm);
1183
1184int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1185{
1186 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1187
1188 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1189 printk(KERN_ERR TTM_PFX
1190 "Illegal memory manager memory type %u.\n",
1191 mem_type);
1192 return -EINVAL;
1193 }
1194
1195 if (!man->has_type) {
1196 printk(KERN_ERR TTM_PFX
1197 "Memory type %u has not been initialized.\n",
1198 mem_type);
1199 return 0;
1200 }
1201
1202 return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
1203}
1204EXPORT_SYMBOL(ttm_bo_evict_mm);
1205
1206int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1207 unsigned long p_offset, unsigned long p_size)
1208{
1209 int ret = -EINVAL;
1210 struct ttm_mem_type_manager *man;
1211
1212 if (type >= TTM_NUM_MEM_TYPES) {
1213 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1214 return ret;
1215 }
1216
1217 man = &bdev->man[type];
1218 if (man->has_type) {
1219 printk(KERN_ERR TTM_PFX
1220 "Memory manager already initialized for type %d\n",
1221 type);
1222 return ret;
1223 }
1224
1225 ret = bdev->driver->init_mem_type(bdev, type, man);
1226 if (ret)
1227 return ret;
1228
1229 ret = 0;
1230 if (type != TTM_PL_SYSTEM) {
1231 if (!p_size) {
1232 printk(KERN_ERR TTM_PFX
1233 "Zero size memory manager type %d\n",
1234 type);
1235 return ret;
1236 }
1237 ret = drm_mm_init(&man->manager, p_offset, p_size);
1238 if (ret)
1239 return ret;
1240 }
1241 man->has_type = true;
1242 man->use_type = true;
1243 man->size = p_size;
1244
1245 INIT_LIST_HEAD(&man->lru);
1246
1247 return 0;
1248}
1249EXPORT_SYMBOL(ttm_bo_init_mm);
1250
1251int ttm_bo_device_release(struct ttm_bo_device *bdev)
1252{
1253 int ret = 0;
1254 unsigned i = TTM_NUM_MEM_TYPES;
1255 struct ttm_mem_type_manager *man;
1256
1257 while (i--) {
1258 man = &bdev->man[i];
1259 if (man->has_type) {
1260 man->use_type = false;
1261 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1262 ret = -EBUSY;
1263 printk(KERN_ERR TTM_PFX
1264 "DRM memory manager type %d "
1265 "is not clean.\n", i);
1266 }
1267 man->has_type = false;
1268 }
1269 }
1270
1271 if (!cancel_delayed_work(&bdev->wq))
1272 flush_scheduled_work();
1273
1274 while (ttm_bo_delayed_delete(bdev, true))
1275 ;
1276
1277 spin_lock(&bdev->lru_lock);
1278 if (list_empty(&bdev->ddestroy))
1279 TTM_DEBUG("Delayed destroy list was clean\n");
1280
1281 if (list_empty(&bdev->man[0].lru))
1282 TTM_DEBUG("Swap list was clean\n");
1283 spin_unlock(&bdev->lru_lock);
1284
1285 ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
1286 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1287 write_lock(&bdev->vm_lock);
1288 drm_mm_takedown(&bdev->addr_space_mm);
1289 write_unlock(&bdev->vm_lock);
1290
1291 __free_page(bdev->dummy_read_page);
1292 return ret;
1293}
1294EXPORT_SYMBOL(ttm_bo_device_release);
1295
1296/*
1297 * This function is intended to be called on drm driver load.
1298 * If you decide to call it from firstopen, you must protect the call
1299 * from a potentially racing ttm_bo_driver_finish in lastclose.
1300 * (This may happen on X server restart).
1301 */
1302
1303int ttm_bo_device_init(struct ttm_bo_device *bdev,
1304 struct ttm_mem_global *mem_glob,
1305 struct ttm_bo_driver *driver, uint64_t file_page_offset)
1306{
1307 int ret = -EINVAL;
1308
1309 bdev->dummy_read_page = NULL;
1310 rwlock_init(&bdev->vm_lock);
1311 spin_lock_init(&bdev->lru_lock);
1312
1313 bdev->driver = driver;
1314 bdev->mem_glob = mem_glob;
1315
1316 memset(bdev->man, 0, sizeof(bdev->man));
1317
1318 bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1319 if (unlikely(bdev->dummy_read_page == NULL)) {
1320 ret = -ENOMEM;
1321 goto out_err0;
1322 }
1323
1324 /*
1325 * Initialize the system memory buffer type.
1326 * Other types need to be driver / IOCTL initialized.
1327 */
1328 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
1329 if (unlikely(ret != 0))
1330 goto out_err1;
1331
1332 bdev->addr_space_rb = RB_ROOT;
1333 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1334 if (unlikely(ret != 0))
1335 goto out_err2;
1336
1337 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1338 bdev->nice_mode = true;
1339 INIT_LIST_HEAD(&bdev->ddestroy);
1340 INIT_LIST_HEAD(&bdev->swap_lru);
1341 bdev->dev_mapping = NULL;
1342 ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
1343 ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
1344 if (unlikely(ret != 0)) {
1345 printk(KERN_ERR TTM_PFX
1346 "Could not register buffer object swapout.\n");
1347 goto out_err2;
1348 }
1349
1350 bdev->ttm_bo_extra_size =
1351 ttm_round_pot(sizeof(struct ttm_tt)) +
1352 ttm_round_pot(sizeof(struct ttm_backend));
1353
1354 bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
1355 ttm_round_pot(sizeof(struct ttm_buffer_object));
1356
1357 return 0;
1358out_err2:
1359 ttm_bo_clean_mm(bdev, 0);
1360out_err1:
1361 __free_page(bdev->dummy_read_page);
1362out_err0:
1363 return ret;
1364}
1365EXPORT_SYMBOL(ttm_bo_device_init);
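/*
 * A typical driver bring-up sequence, sketched only as an illustration
 * (error handling omitted; everything except the ttm_* calls is an
 * example placeholder):
 *
 *	ttm_bo_device_init(&bdev, mem_glob, &my_driver, file_page_offset);
 *	ttm_bo_init_mm(&bdev, TTM_PL_VRAM, 0, vram_size >> PAGE_SHIFT);
 *	ttm_bo_init_mm(&bdev, TTM_PL_TT, 0, gtt_size >> PAGE_SHIFT);
 */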
1366
1367/*
1368 * buffer object vm functions.
1369 */
1370
1371bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1372{
1373 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1374
1375 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1376 if (mem->mem_type == TTM_PL_SYSTEM)
1377 return false;
1378
1379 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1380 return false;
1381
1382 if (mem->placement & TTM_PL_FLAG_CACHED)
1383 return false;
1384 }
1385 return true;
1386}
1387
1388int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1389 struct ttm_mem_reg *mem,
1390 unsigned long *bus_base,
1391 unsigned long *bus_offset, unsigned long *bus_size)
1392{
1393 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1394
1395 *bus_size = 0;
1396 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1397 return -EINVAL;
1398
1399 if (ttm_mem_reg_is_pci(bdev, mem)) {
1400 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1401 *bus_size = mem->num_pages << PAGE_SHIFT;
1402 *bus_base = man->io_offset;
1403 }
1404
1405 return 0;
1406}
1407
1408void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1409{
1410 struct ttm_bo_device *bdev = bo->bdev;
1411 loff_t offset = (loff_t) bo->addr_space_offset;
1412 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1413
1414 if (!bdev->dev_mapping)
1415 return;
1416
1417 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1418}
1419
1420static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1421{
1422 struct ttm_bo_device *bdev = bo->bdev;
1423 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1424 struct rb_node *parent = NULL;
1425 struct ttm_buffer_object *cur_bo;
1426 unsigned long offset = bo->vm_node->start;
1427 unsigned long cur_offset;
1428
1429 while (*cur) {
1430 parent = *cur;
1431 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1432 cur_offset = cur_bo->vm_node->start;
1433 if (offset < cur_offset)
1434 cur = &parent->rb_left;
1435 else if (offset > cur_offset)
1436 cur = &parent->rb_right;
1437 else
1438 BUG();
1439 }
1440
1441 rb_link_node(&bo->vm_rb, parent, cur);
1442 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1443}
1444
1445/**
1446 * ttm_bo_setup_vm:
1447 *
1448 * @bo: the buffer to allocate address space for
1449 *
1450 * Allocate address space in the drm device so that applications
1451 * can mmap the buffer and access the contents. This only
1452 * applies to ttm_bo_type_device objects as others are not
1453 * placed in the drm device address space.
1454 */
1455
1456static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1457{
1458 struct ttm_bo_device *bdev = bo->bdev;
1459 int ret;
1460
1461retry_pre_get:
1462 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1463 if (unlikely(ret != 0))
1464 return ret;
1465
1466 write_lock(&bdev->vm_lock);
1467 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1468 bo->mem.num_pages, 0, 0);
1469
1470 if (unlikely(bo->vm_node == NULL)) {
1471 ret = -ENOMEM;
1472 goto out_unlock;
1473 }
1474
1475 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1476 bo->mem.num_pages, 0);
1477
1478 if (unlikely(bo->vm_node == NULL)) {
1479 write_unlock(&bdev->vm_lock);
1480 goto retry_pre_get;
1481 }
1482
1483 ttm_bo_vm_insert_rb(bo);
1484 write_unlock(&bdev->vm_lock);
1485 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1486
1487 return 0;
1488out_unlock:
1489 write_unlock(&bdev->vm_lock);
1490 return ret;
1491}
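/*
 * The drm_mm_pre_get()/drm_mm_get_block_atomic() pairing above (also
 * used in ttm_bo_mem_force_space()) preallocates free-list nodes
 * outside the lock so the block can be carved out atomically while the
 * lock is held; if a racing thread exhausts the preallocated nodes, the
 * atomic get fails and the caller simply retries from pre_get.
 */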
1492
1493int ttm_bo_wait(struct ttm_buffer_object *bo,
1494 bool lazy, bool interruptible, bool no_wait)
1495{
1496 struct ttm_bo_driver *driver = bo->bdev->driver;
1497 void *sync_obj;
1498 void *sync_obj_arg;
1499 int ret = 0;
1500
1501 if (likely(bo->sync_obj == NULL))
1502 return 0;
1503
1504 while (bo->sync_obj) {
1505
1506 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1507 void *tmp_obj = bo->sync_obj;
1508 bo->sync_obj = NULL;
1509 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1510 spin_unlock(&bo->lock);
1511 driver->sync_obj_unref(&tmp_obj);
1512 spin_lock(&bo->lock);
1513 continue;
1514 }
1515
1516 if (no_wait)
1517 return -EBUSY;
1518
1519 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1520 sync_obj_arg = bo->sync_obj_arg;
1521 spin_unlock(&bo->lock);
1522 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1523 lazy, interruptible);
1524 if (unlikely(ret != 0)) {
1525 driver->sync_obj_unref(&sync_obj);
1526 spin_lock(&bo->lock);
1527 return ret;
1528 }
1529 spin_lock(&bo->lock);
1530 if (likely(bo->sync_obj == sync_obj &&
1531 bo->sync_obj_arg == sync_obj_arg)) {
1532 void *tmp_obj = bo->sync_obj;
1533 bo->sync_obj = NULL;
1534 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1535 &bo->priv_flags);
1536 spin_unlock(&bo->lock);
1537 driver->sync_obj_unref(&sync_obj);
1538 driver->sync_obj_unref(&tmp_obj);
1539 spin_lock(&bo->lock);
1540 }
1541 }
1542 return 0;
1543}
1544EXPORT_SYMBOL(ttm_bo_wait);
1545
1546void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1547{
1548 atomic_set(&bo->reserved, 0);
1549 wake_up_all(&bo->event_queue);
1550}
1551
1552int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1553 bool no_wait)
1554{
1555 int ret;
1556
1557 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1558 if (no_wait)
1559 return -EBUSY;
1560 else if (interruptible) {
1561 ret = wait_event_interruptible
1562 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1563 if (unlikely(ret != 0))
1564 return -ERESTART;
1565 } else {
1566 wait_event(bo->event_queue,
1567 atomic_read(&bo->reserved) == 0);
1568 }
1569 }
1570 return 0;
1571}
1572
1573int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1574{
1575 int ret = 0;
1576
1577 /*
1578 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
1579 * makes sure the lru lists are updated.
1580 */
1581
1582 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1583 if (unlikely(ret != 0))
1584 return ret;
1585 spin_lock(&bo->lock);
1586 ret = ttm_bo_wait(bo, false, true, no_wait);
1587 spin_unlock(&bo->lock);
1588 if (likely(ret == 0))
1589 atomic_inc(&bo->cpu_writers);
1590 ttm_bo_unreserve(bo);
1591 return ret;
1592}
1593
1594void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1595{
1596 if (atomic_dec_and_test(&bo->cpu_writers))
1597 wake_up_all(&bo->event_queue);
1598}
1599
1600/**
1601 * A buffer object shrink method that tries to swap out the first
1602 * buffer object on the ttm_bo_device::swap_lru list.
1603 */
1604
1605static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1606{
1607 struct ttm_bo_device *bdev =
1608 container_of(shrink, struct ttm_bo_device, shrink);
1609 struct ttm_buffer_object *bo;
1610 int ret = -EBUSY;
1611 int put_count;
1612 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1613
1614 spin_lock(&bdev->lru_lock);
1615 while (ret == -EBUSY) {
1616 if (unlikely(list_empty(&bdev->swap_lru))) {
1617 spin_unlock(&bdev->lru_lock);
1618 return -EBUSY;
1619 }
1620
1621 bo = list_first_entry(&bdev->swap_lru,
1622 struct ttm_buffer_object, swap);
1623 kref_get(&bo->list_kref);
1624
1625 /**
1626 * Reserve buffer. Since we unlock while sleeping, we need
1627 * to re-check that nobody removed us from the swap-list while
1628 * we slept.
1629 */
1630
1631 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1632 if (unlikely(ret == -EBUSY)) {
1633 spin_unlock(&bdev->lru_lock);
1634 ttm_bo_wait_unreserved(bo, false);
1635 kref_put(&bo->list_kref, ttm_bo_release_list);
1636 spin_lock(&bdev->lru_lock);
1637 }
1638 }
1639
1640 BUG_ON(ret != 0);
1641 put_count = ttm_bo_del_from_lru(bo);
1642 spin_unlock(&bdev->lru_lock);
1643
1644 while (put_count--)
1645 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1646
1647 /**
1648 * Wait for GPU, then move to system cached.
1649 */
1650
1651 spin_lock(&bo->lock);
1652 ret = ttm_bo_wait(bo, false, false, false);
1653 spin_unlock(&bo->lock);
1654
1655 if (unlikely(ret != 0))
1656 goto out;
1657
1658 if ((bo->mem.placement & swap_placement) != swap_placement) {
1659 struct ttm_mem_reg evict_mem;
1660
1661 evict_mem = bo->mem;
1662 evict_mem.mm_node = NULL;
1663 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1664 evict_mem.mem_type = TTM_PL_SYSTEM;
1665
1666 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1667 false, false);
1668 if (unlikely(ret != 0))
1669 goto out;
1670 }
1671
1672 ttm_bo_unmap_virtual(bo);
1673
1674 /**
1675 * Swap out. Buffer will be swapped in again as soon as
1676 * anyone tries to access a ttm page.
1677 */
1678
1679 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1680out:
1681
1682 /**
1683 *
1684 * Unreserve without putting on LRU to avoid swapping out an
1685 * already swapped buffer.
1686 */
1687
1688 atomic_set(&bo->reserved, 0);
1689 wake_up_all(&bo->event_queue);
1690 kref_put(&bo->list_kref, ttm_bo_release_list);
1691 return ret;
1692}
1693
1694void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1695{
1696 while (ttm_bo_swapout(&bdev->shrink) == 0)
1697 ;
1698}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644
index 000000000000..517c84559633
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -0,0 +1,561 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_placement.h"
33#include <linux/io.h>
34#include <linux/highmem.h>
35#include <linux/wait.h>
36#include <linux/vmalloc.h>
37#include <linux/version.h>
38#include <linux/module.h>
39
40void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
41{
42 struct ttm_mem_reg *old_mem = &bo->mem;
43
44 if (old_mem->mm_node) {
45 spin_lock(&bo->bdev->lru_lock);
46 drm_mm_put_block(old_mem->mm_node);
47 spin_unlock(&bo->bdev->lru_lock);
48 }
49 old_mem->mm_node = NULL;
50}
51
52int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53 bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
54{
55 struct ttm_tt *ttm = bo->ttm;
56 struct ttm_mem_reg *old_mem = &bo->mem;
57 uint32_t save_flags = old_mem->placement;
58 int ret;
59
60 if (old_mem->mem_type != TTM_PL_SYSTEM) {
61 ttm_tt_unbind(ttm);
62 ttm_bo_free_old_node(bo);
63 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
64 TTM_PL_MASK_MEM);
65 old_mem->mem_type = TTM_PL_SYSTEM;
66 save_flags = old_mem->placement;
67 }
68
69 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
70 if (unlikely(ret != 0))
71 return ret;
72
73 if (new_mem->mem_type != TTM_PL_SYSTEM) {
74 ret = ttm_tt_bind(ttm, new_mem);
75 if (unlikely(ret != 0))
76 return ret;
77 }
78
79 *old_mem = *new_mem;
80 new_mem->mm_node = NULL;
81 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
82 return 0;
83}
84EXPORT_SYMBOL(ttm_bo_move_ttm);
85
86int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
87 void **virtual)
88{
89 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
90 unsigned long bus_offset;
91 unsigned long bus_size;
92 unsigned long bus_base;
93 int ret;
94 void *addr;
95
96 *virtual = NULL;
97 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
98 if (ret || bus_size == 0)
99 return ret;
100
101 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
102 addr = (void *)(((u8 *) man->io_addr) + bus_offset);
103 else {
104 if (mem->placement & TTM_PL_FLAG_WC)
105 addr = ioremap_wc(bus_base + bus_offset, bus_size);
106 else
107 addr = ioremap_nocache(bus_base + bus_offset, bus_size);
108 if (!addr)
109 return -ENOMEM;
110 }
111 *virtual = addr;
112 return 0;
113}
114
115void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
116 void *virtual)
117{
118 struct ttm_mem_type_manager *man;
119
120 man = &bdev->man[mem->mem_type];
121
122 if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
123 iounmap(virtual);
124}
125
126static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
127{
128 uint32_t *dstP =
129 (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
130 uint32_t *srcP =
131 (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
132
133 int i;
134 for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
135 iowrite32(ioread32(srcP++), dstP++);
136 return 0;
137}
138
139static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
140 unsigned long page)
141{
142 struct page *d = ttm_tt_get_page(ttm, page);
143 void *dst;
144
145 if (!d)
146 return -ENOMEM;
147
148 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
149 dst = kmap(d);
150 if (!dst)
151 return -ENOMEM;
152
153 memcpy_fromio(dst, src, PAGE_SIZE);
154 kunmap(d);
155 return 0;
156}
157
158static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
159 unsigned long page)
160{
161 struct page *s = ttm_tt_get_page(ttm, page);
162 void *src;
163
164 if (!s)
165 return -ENOMEM;
166
167 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
168 src = kmap(s);
169 if (!src)
170 return -ENOMEM;
171
172 memcpy_toio(dst, src, PAGE_SIZE);
173 kunmap(s);
174 return 0;
175}
176
177int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
178 bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
179{
180 struct ttm_bo_device *bdev = bo->bdev;
181 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
182 struct ttm_tt *ttm = bo->ttm;
183 struct ttm_mem_reg *old_mem = &bo->mem;
184 struct ttm_mem_reg old_copy = *old_mem;
185 void *old_iomap;
186 void *new_iomap;
187 int ret;
188 uint32_t save_flags = old_mem->placement;
189 unsigned long i;
190 unsigned long page;
191 unsigned long add = 0;
192 int dir;
193
194 ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
195 if (ret)
196 return ret;
197 ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
198 if (ret)
199 goto out;
200
201 if (old_iomap == NULL && new_iomap == NULL)
202 goto out2;
203 if (old_iomap == NULL && ttm == NULL)
204 goto out2;
205
206 add = 0;
207 dir = 1;
208
209 if ((old_mem->mem_type == new_mem->mem_type) &&
210 (new_mem->mm_node->start <
211 old_mem->mm_node->start + old_mem->mm_node->size)) {
212 dir = -1;
213 add = new_mem->num_pages - 1;
214 }
215
216 for (i = 0; i < new_mem->num_pages; ++i) {
217 page = i * dir + add;
218 if (old_iomap == NULL)
219 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
220 else if (new_iomap == NULL)
221 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
222 else
223 ret = ttm_copy_io_page(new_iomap, old_iomap, page);
224 if (ret)
225 goto out1;
226 }
227 mb();
228out2:
229 ttm_bo_free_old_node(bo);
230
231 *old_mem = *new_mem;
232 new_mem->mm_node = NULL;
233 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
234
235 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
236 ttm_tt_unbind(ttm);
237 ttm_tt_destroy(ttm);
238 bo->ttm = NULL;
239 }
240
241out1:
242 ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
243out:
244 ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
245 return ret;
246}
247EXPORT_SYMBOL(ttm_bo_move_memcpy);
248
249static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
250{
251 kfree(bo);
252}
253
254/**
255 * ttm_buffer_object_transfer
256 *
257 * @bo: A pointer to a struct ttm_buffer_object.
258 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
259 * holding the data of @bo with the old placement.
260 *
261 * This is a utility function that may be called after an accelerated move
262 * has been scheduled. A new buffer object is created as a placeholder for
263 * the old data while it's being copied. When that buffer object is idle,
264 * it can be destroyed, releasing the space of the old placement.
265 * Returns:
 266 * 0 on success, a negative error code on failure.
267 */
268
269static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
270 struct ttm_buffer_object **new_obj)
271{
272 struct ttm_buffer_object *fbo;
273 struct ttm_bo_device *bdev = bo->bdev;
274 struct ttm_bo_driver *driver = bdev->driver;
275
276 fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
277 if (!fbo)
278 return -ENOMEM;
279
280 *fbo = *bo;
281
282 /**
283 * Fix up members that we shouldn't copy directly:
284 * TODO: Explicit member copy would probably be better here.
285 */
286
287 spin_lock_init(&fbo->lock);
288 init_waitqueue_head(&fbo->event_queue);
289 INIT_LIST_HEAD(&fbo->ddestroy);
290 INIT_LIST_HEAD(&fbo->lru);
291 INIT_LIST_HEAD(&fbo->swap);
292 fbo->vm_node = NULL;
293
294 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
295 if (fbo->mem.mm_node)
296 fbo->mem.mm_node->private = (void *)fbo;
297 kref_init(&fbo->list_kref);
298 kref_init(&fbo->kref);
299 fbo->destroy = &ttm_transfered_destroy;
300
301 *new_obj = fbo;
302 return 0;
303}
304
305pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
306{
307#if defined(__i386__) || defined(__x86_64__)
308 if (caching_flags & TTM_PL_FLAG_WC)
309 tmp = pgprot_writecombine(tmp);
310 else if (boot_cpu_data.x86 > 3)
311 tmp = pgprot_noncached(tmp);
312
313#elif defined(__powerpc__)
314 if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
315 pgprot_val(tmp) |= _PAGE_NO_CACHE;
316 if (caching_flags & TTM_PL_FLAG_UNCACHED)
317 pgprot_val(tmp) |= _PAGE_GUARDED;
318 }
319#endif
320#if defined(__ia64__)
321 if (caching_flags & TTM_PL_FLAG_WC)
322 tmp = pgprot_writecombine(tmp);
323 else
324 tmp = pgprot_noncached(tmp);
325#endif
326#if defined(__sparc__)
327 if (!(caching_flags & TTM_PL_FLAG_CACHED))
328 tmp = pgprot_noncached(tmp);
329#endif
330 return tmp;
331}
332
333static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
334 unsigned long bus_base,
335 unsigned long bus_offset,
336 unsigned long bus_size,
337 struct ttm_bo_kmap_obj *map)
338{
339 struct ttm_bo_device *bdev = bo->bdev;
340 struct ttm_mem_reg *mem = &bo->mem;
341 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
342
343 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
344 map->bo_kmap_type = ttm_bo_map_premapped;
345 map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
346 } else {
347 map->bo_kmap_type = ttm_bo_map_iomap;
348 if (mem->placement & TTM_PL_FLAG_WC)
349 map->virtual = ioremap_wc(bus_base + bus_offset,
350 bus_size);
351 else
352 map->virtual = ioremap_nocache(bus_base + bus_offset,
353 bus_size);
354 }
355 return (!map->virtual) ? -ENOMEM : 0;
356}
357
358static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
359 unsigned long start_page,
360 unsigned long num_pages,
361 struct ttm_bo_kmap_obj *map)
362{
363 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
364 struct ttm_tt *ttm = bo->ttm;
365 struct page *d;
366 int i;
367
368 BUG_ON(!ttm);
369 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
370 /*
371 * We're mapping a single page, and the desired
372 * page protection is consistent with the bo.
373 */
374
375 map->bo_kmap_type = ttm_bo_map_kmap;
376 map->page = ttm_tt_get_page(ttm, start_page);
377 map->virtual = kmap(map->page);
378 } else {
379 /*
 380	 * Populate the part we're mapping.
381 */
382 for (i = start_page; i < start_page + num_pages; ++i) {
383 d = ttm_tt_get_page(ttm, i);
384 if (!d)
385 return -ENOMEM;
386 }
387
388 /*
389 * We need to use vmap to get the desired page protection
 390	 * or to make the buffer object look contiguous.
391 */
392 prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
393 PAGE_KERNEL :
394 ttm_io_prot(mem->placement, PAGE_KERNEL);
395 map->bo_kmap_type = ttm_bo_map_vmap;
396 map->virtual = vmap(ttm->pages + start_page, num_pages,
397 0, prot);
398 }
399 return (!map->virtual) ? -ENOMEM : 0;
400}
401
402int ttm_bo_kmap(struct ttm_buffer_object *bo,
403 unsigned long start_page, unsigned long num_pages,
404 struct ttm_bo_kmap_obj *map)
405{
406 int ret;
407 unsigned long bus_base;
408 unsigned long bus_offset;
409 unsigned long bus_size;
410
411 BUG_ON(!list_empty(&bo->swap));
412 map->virtual = NULL;
413 if (num_pages > bo->num_pages)
414 return -EINVAL;
415 if (start_page > bo->num_pages)
416 return -EINVAL;
417#if 0
418 if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
419 return -EPERM;
420#endif
421 ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
422 &bus_offset, &bus_size);
423 if (ret)
424 return ret;
425 if (bus_size == 0) {
426 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
427 } else {
428 bus_offset += start_page << PAGE_SHIFT;
429 bus_size = num_pages << PAGE_SHIFT;
430 return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
431 }
432}
433EXPORT_SYMBOL(ttm_bo_kmap);
434
435void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
436{
437 if (!map->virtual)
438 return;
439 switch (map->bo_kmap_type) {
440 case ttm_bo_map_iomap:
441 iounmap(map->virtual);
442 break;
443 case ttm_bo_map_vmap:
444 vunmap(map->virtual);
445 break;
446 case ttm_bo_map_kmap:
447 kunmap(map->page);
448 break;
449 case ttm_bo_map_premapped:
450 break;
451 default:
452 BUG();
453 }
454 map->virtual = NULL;
455 map->page = NULL;
456}
457EXPORT_SYMBOL(ttm_bo_kunmap);
458
459int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
460 unsigned long dst_offset,
461 unsigned long *pfn, pgprot_t *prot)
462{
463 struct ttm_mem_reg *mem = &bo->mem;
464 struct ttm_bo_device *bdev = bo->bdev;
465 unsigned long bus_offset;
466 unsigned long bus_size;
467 unsigned long bus_base;
468 int ret;
469 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
470 &bus_size);
471 if (ret)
472 return -EINVAL;
473 if (bus_size != 0)
474 *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
475 else
476 if (!bo->ttm)
477 return -EINVAL;
478 else
479 *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
480 dst_offset >>
481 PAGE_SHIFT));
482 *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
483 PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
484
485 return 0;
486}
487
488int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
489 void *sync_obj,
490 void *sync_obj_arg,
491 bool evict, bool no_wait,
492 struct ttm_mem_reg *new_mem)
493{
494 struct ttm_bo_device *bdev = bo->bdev;
495 struct ttm_bo_driver *driver = bdev->driver;
496 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
497 struct ttm_mem_reg *old_mem = &bo->mem;
498 int ret;
499 uint32_t save_flags = old_mem->placement;
500 struct ttm_buffer_object *ghost_obj;
501 void *tmp_obj = NULL;
502
503 spin_lock(&bo->lock);
504 if (bo->sync_obj) {
505 tmp_obj = bo->sync_obj;
506 bo->sync_obj = NULL;
507 }
508 bo->sync_obj = driver->sync_obj_ref(sync_obj);
509 bo->sync_obj_arg = sync_obj_arg;
510 if (evict) {
511 ret = ttm_bo_wait(bo, false, false, false);
512 spin_unlock(&bo->lock);
513 driver->sync_obj_unref(&bo->sync_obj);
514
515 if (ret)
516 return ret;
517
518 ttm_bo_free_old_node(bo);
519 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
520 (bo->ttm != NULL)) {
521 ttm_tt_unbind(bo->ttm);
522 ttm_tt_destroy(bo->ttm);
523 bo->ttm = NULL;
524 }
525 } else {
526 /**
527 * This should help pipeline ordinary buffer moves.
528 *
529 * Hang old buffer memory on a new buffer object,
530 * and leave it to be released when the GPU
531 * operation has completed.
532 */
533
534 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
535 spin_unlock(&bo->lock);
536
537 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
538 if (ret)
539 return ret;
540
541 /**
542 * If we're not moving to fixed memory, the TTM object
 543	 * needs to stay alive. Otherwise hang it on the ghost
544 * bo to be unbound and destroyed.
545 */
546
547 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
548 ghost_obj->ttm = NULL;
549 else
550 bo->ttm = NULL;
551
552 ttm_bo_unreserve(ghost_obj);
553 ttm_bo_unref(&ghost_obj);
554 }
555
556 *old_mem = *new_mem;
557 new_mem->mm_node = NULL;
558 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
559 return 0;
560}
561EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
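
The kmap helpers above are the usual way a driver touches buffer contents with the CPU. A minimal sketch of that usage follows; it assumes the buffer is already reserved and populated, and that ttm_bo_kmap()/ttm_kmap_obj_virtual() are picked up from ttm/ttm_bo_api.h. The function name is purely illustrative.

/* Sketch only: CPU-clear the first page of a reserved, populated buffer. */
static int example_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (unlikely(ret != 0))
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
	else
		memset(virtual, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}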
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
new file mode 100644
index 000000000000..27b146c54fbc
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -0,0 +1,454 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include <ttm/ttm_module.h>
32#include <ttm/ttm_bo_driver.h>
33#include <ttm/ttm_placement.h>
34#include <linux/mm.h>
35#include <linux/version.h>
36#include <linux/rbtree.h>
37#include <linux/module.h>
38#include <linux/uaccess.h>
39
40#define TTM_BO_VM_NUM_PREFAULT 16
41
42static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
43 unsigned long page_start,
44 unsigned long num_pages)
45{
46 struct rb_node *cur = bdev->addr_space_rb.rb_node;
47 unsigned long cur_offset;
48 struct ttm_buffer_object *bo;
49 struct ttm_buffer_object *best_bo = NULL;
50
51 while (likely(cur != NULL)) {
52 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
53 cur_offset = bo->vm_node->start;
54 if (page_start >= cur_offset) {
55 cur = cur->rb_right;
56 best_bo = bo;
57 if (page_start == cur_offset)
58 break;
59 } else
60 cur = cur->rb_left;
61 }
62
63 if (unlikely(best_bo == NULL))
64 return NULL;
65
66 if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
67 (page_start + num_pages)))
68 return NULL;
69
70 return best_bo;
71}
72
73static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
74{
75 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
76 vma->vm_private_data;
77 struct ttm_bo_device *bdev = bo->bdev;
78 unsigned long bus_base;
79 unsigned long bus_offset;
80 unsigned long bus_size;
81 unsigned long page_offset;
82 unsigned long page_last;
83 unsigned long pfn;
84 struct ttm_tt *ttm = NULL;
85 struct page *page;
86 int ret;
87 int i;
88 bool is_iomem;
89 unsigned long address = (unsigned long)vmf->virtual_address;
90 int retval = VM_FAULT_NOPAGE;
91
92 /*
93 * Work around locking order reversal in fault / nopfn
94 * between mmap_sem and bo_reserve: Perform a trylock operation
95 * for reserve, and if it fails, retry the fault after scheduling.
96 */
97
98 ret = ttm_bo_reserve(bo, true, true, false, 0);
99 if (unlikely(ret != 0)) {
100 if (ret == -EBUSY)
101 set_need_resched();
102 return VM_FAULT_NOPAGE;
103 }
104
105 /*
106 * Wait for buffer data in transit, due to a pipelined
107 * move.
108 */
109
110 spin_lock(&bo->lock);
111 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
112 ret = ttm_bo_wait(bo, false, true, false);
113 spin_unlock(&bo->lock);
114 if (unlikely(ret != 0)) {
115 retval = (ret != -ERESTART) ?
116 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
117 goto out_unlock;
118 }
119 } else
120 spin_unlock(&bo->lock);
121
122
123 ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
124 &bus_size);
125 if (unlikely(ret != 0)) {
126 retval = VM_FAULT_SIGBUS;
127 goto out_unlock;
128 }
129
130 is_iomem = (bus_size != 0);
131
132 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
133 bo->vm_node->start - vma->vm_pgoff;
134 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
135 bo->vm_node->start - vma->vm_pgoff;
136
137 if (unlikely(page_offset >= bo->num_pages)) {
138 retval = VM_FAULT_SIGBUS;
139 goto out_unlock;
140 }
141
142 /*
143 * Strictly, we're not allowed to modify vma->vm_page_prot here,
144 * since the mmap_sem is only held in read mode. However, we
145 * modify only the caching bits of vma->vm_page_prot and
146 * consider those bits protected by
147 * the bo->mutex, as we should be the only writers.
148 * There shouldn't really be any readers of these bits except
149 * within vm_insert_mixed()? fork?
150 *
151 * TODO: Add a list of vmas to the bo, and change the
152 * vma->vm_page_prot when the object changes caching policy, with
153 * the correct locks held.
154 */
155
156 if (is_iomem) {
157 vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
158 vma->vm_page_prot);
159 } else {
160 ttm = bo->ttm;
161 vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
162 vm_get_page_prot(vma->vm_flags) :
163 ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
164 }
165
166 /*
167 * Speculatively prefault a number of pages. Only error on
168 * first page.
169 */
170
171 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
172
173 if (is_iomem)
174 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
175 page_offset;
176 else {
177 page = ttm_tt_get_page(ttm, page_offset);
178 if (unlikely(!page && i == 0)) {
179 retval = VM_FAULT_OOM;
180 goto out_unlock;
181 } else if (unlikely(!page)) {
182 break;
183 }
184 pfn = page_to_pfn(page);
185 }
186
187 ret = vm_insert_mixed(vma, address, pfn);
188 /*
 189	 * Somebody else beat us to this PTE, we prefaulted into an
 190	 * already-populated PTE, or the prefault itself failed.
191 */
192
193 if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
194 break;
195 else if (unlikely(ret != 0)) {
196 retval =
197 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
198 goto out_unlock;
199
200 }
201
202 address += PAGE_SIZE;
203 if (unlikely(++page_offset >= page_last))
204 break;
205 }
206
207out_unlock:
208 ttm_bo_unreserve(bo);
209 return retval;
210}
211
212static void ttm_bo_vm_open(struct vm_area_struct *vma)
213{
214 struct ttm_buffer_object *bo =
215 (struct ttm_buffer_object *)vma->vm_private_data;
216
217 (void)ttm_bo_reference(bo);
218}
219
220static void ttm_bo_vm_close(struct vm_area_struct *vma)
221{
222 struct ttm_buffer_object *bo =
223 (struct ttm_buffer_object *)vma->vm_private_data;
224
225 ttm_bo_unref(&bo);
226 vma->vm_private_data = NULL;
227}
228
229static struct vm_operations_struct ttm_bo_vm_ops = {
230 .fault = ttm_bo_vm_fault,
231 .open = ttm_bo_vm_open,
232 .close = ttm_bo_vm_close
233};
234
235int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
236 struct ttm_bo_device *bdev)
237{
238 struct ttm_bo_driver *driver;
239 struct ttm_buffer_object *bo;
240 int ret;
241
242 read_lock(&bdev->vm_lock);
243 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
244 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
245 if (likely(bo != NULL))
246 ttm_bo_reference(bo);
247 read_unlock(&bdev->vm_lock);
248
249 if (unlikely(bo == NULL)) {
250 printk(KERN_ERR TTM_PFX
251 "Could not find buffer object to map.\n");
252 return -EINVAL;
253 }
254
255 driver = bo->bdev->driver;
256 if (unlikely(!driver->verify_access)) {
257 ret = -EPERM;
258 goto out_unref;
259 }
260 ret = driver->verify_access(bo, filp);
261 if (unlikely(ret != 0))
262 goto out_unref;
263
264 vma->vm_ops = &ttm_bo_vm_ops;
265
266 /*
267 * Note: We're transferring the bo reference to
268 * vma->vm_private_data here.
269 */
270
271 vma->vm_private_data = bo;
272 vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
273 return 0;
274out_unref:
275 ttm_bo_unref(&bo);
276 return ret;
277}
278EXPORT_SYMBOL(ttm_bo_mmap);
279
280int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
281{
282 if (vma->vm_pgoff != 0)
283 return -EACCES;
284
285 vma->vm_ops = &ttm_bo_vm_ops;
286 vma->vm_private_data = ttm_bo_reference(bo);
287 vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
288 return 0;
289}
290EXPORT_SYMBOL(ttm_fbdev_mmap);
291
292
293ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
294 const char __user *wbuf, char __user *rbuf, size_t count,
295 loff_t *f_pos, bool write)
296{
297 struct ttm_buffer_object *bo;
298 struct ttm_bo_driver *driver;
299 struct ttm_bo_kmap_obj map;
300 unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
301 unsigned long kmap_offset;
302 unsigned long kmap_end;
303 unsigned long kmap_num;
304 size_t io_size;
305 unsigned int page_offset;
306 char *virtual;
307 int ret;
308 bool no_wait = false;
309 bool dummy;
310
311 read_lock(&bdev->vm_lock);
312 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
313 if (likely(bo != NULL))
314 ttm_bo_reference(bo);
315 read_unlock(&bdev->vm_lock);
316
317 if (unlikely(bo == NULL))
318 return -EFAULT;
319
320 driver = bo->bdev->driver;
 321	if (unlikely(!driver->verify_access)) {
322 ret = -EPERM;
323 goto out_unref;
324 }
325
326 ret = driver->verify_access(bo, filp);
327 if (unlikely(ret != 0))
328 goto out_unref;
329
330 kmap_offset = dev_offset - bo->vm_node->start;
 331	if (unlikely(kmap_offset >= bo->num_pages)) {
332 ret = -EFBIG;
333 goto out_unref;
334 }
335
336 page_offset = *f_pos & ~PAGE_MASK;
337 io_size = bo->num_pages - kmap_offset;
338 io_size = (io_size << PAGE_SHIFT) - page_offset;
339 if (count < io_size)
340 io_size = count;
341
342 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
343 kmap_num = kmap_end - kmap_offset + 1;
344
345 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
346
347 switch (ret) {
348 case 0:
349 break;
350 case -ERESTART:
351 ret = -EINTR;
352 goto out_unref;
353 case -EBUSY:
354 ret = -EAGAIN;
355 goto out_unref;
356 default:
357 goto out_unref;
358 }
359
360 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
361 if (unlikely(ret != 0)) {
362 ttm_bo_unreserve(bo);
363 goto out_unref;
364 }
365
366 virtual = ttm_kmap_obj_virtual(&map, &dummy);
367 virtual += page_offset;
368
369 if (write)
370 ret = copy_from_user(virtual, wbuf, io_size);
371 else
372 ret = copy_to_user(rbuf, virtual, io_size);
373
374 ttm_bo_kunmap(&map);
375 ttm_bo_unreserve(bo);
376 ttm_bo_unref(&bo);
377
378 if (unlikely(ret != 0))
379 return -EFBIG;
380
381 *f_pos += io_size;
382
383 return io_size;
384out_unref:
385 ttm_bo_unref(&bo);
386 return ret;
387}
388
389ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
390 char __user *rbuf, size_t count, loff_t *f_pos,
391 bool write)
392{
393 struct ttm_bo_kmap_obj map;
394 unsigned long kmap_offset;
395 unsigned long kmap_end;
396 unsigned long kmap_num;
397 size_t io_size;
398 unsigned int page_offset;
399 char *virtual;
400 int ret;
401 bool no_wait = false;
402 bool dummy;
403
404 kmap_offset = (*f_pos >> PAGE_SHIFT);
 405	if (unlikely(kmap_offset >= bo->num_pages))
406 return -EFBIG;
407
408 page_offset = *f_pos & ~PAGE_MASK;
409 io_size = bo->num_pages - kmap_offset;
410 io_size = (io_size << PAGE_SHIFT) - page_offset;
411 if (count < io_size)
412 io_size = count;
413
414 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
415 kmap_num = kmap_end - kmap_offset + 1;
416
417 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
418
419 switch (ret) {
420 case 0:
421 break;
422 case -ERESTART:
423 return -EINTR;
424 case -EBUSY:
425 return -EAGAIN;
426 default:
427 return ret;
428 }
429
430 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
431 if (unlikely(ret != 0)) {
432 ttm_bo_unreserve(bo);
433 return ret;
434 }
435
436 virtual = ttm_kmap_obj_virtual(&map, &dummy);
437 virtual += page_offset;
438
439 if (write)
440 ret = copy_from_user(virtual, wbuf, io_size);
441 else
442 ret = copy_to_user(rbuf, virtual, io_size);
443
444 ttm_bo_kunmap(&map);
445 ttm_bo_unreserve(bo);
446 ttm_bo_unref(&bo);
447
448 if (unlikely(ret != 0))
449 return ret;
450
451 *f_pos += io_size;
452
453 return io_size;
454}
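
ttm_bo_mmap() above is meant to back a DRM driver's file_operations.mmap entry point. A rough sketch of such a wrapper is shown below; how the driver recovers its ttm_bo_device from the file is driver-specific, so the module-level "example_bdev" pointer here is only a stand-in for that lookup.

/* Sketch only: forward a driver's mmap file operation to ttm_bo_mmap(). */
static struct ttm_bo_device *example_bdev;	/* set up at driver load time */

static int example_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	if (unlikely(example_bdev == NULL))
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, example_bdev);
}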
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
new file mode 100644
index 000000000000..0b14eb1972b8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -0,0 +1,114 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include <linux/mutex.h>
33#include <linux/slab.h>
34#include <linux/module.h>
35
36struct ttm_global_item {
37 struct mutex mutex;
38 void *object;
39 int refcount;
40};
41
42static struct ttm_global_item glob[TTM_GLOBAL_NUM];
43
44void ttm_global_init(void)
45{
46 int i;
47
48 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
49 struct ttm_global_item *item = &glob[i];
50 mutex_init(&item->mutex);
51 item->object = NULL;
52 item->refcount = 0;
53 }
54}
55
56void ttm_global_release(void)
57{
58 int i;
59 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
60 struct ttm_global_item *item = &glob[i];
61 BUG_ON(item->object != NULL);
62 BUG_ON(item->refcount != 0);
63 }
64}
65
66int ttm_global_item_ref(struct ttm_global_reference *ref)
67{
68 int ret;
69 struct ttm_global_item *item = &glob[ref->global_type];
70 void *object;
71
72 mutex_lock(&item->mutex);
73 if (item->refcount == 0) {
74 item->object = kmalloc(ref->size, GFP_KERNEL);
75 if (unlikely(item->object == NULL)) {
76 ret = -ENOMEM;
77 goto out_err;
78 }
79
80 ref->object = item->object;
81 ret = ref->init(ref);
82 if (unlikely(ret != 0))
83 goto out_err;
84
85 ++item->refcount;
86 }
87 ref->object = item->object;
88 object = item->object;
89 mutex_unlock(&item->mutex);
90 return 0;
91out_err:
 92	kfree(item->object);
 93	item->object = NULL;
 94	mutex_unlock(&item->mutex);
95 return ret;
96}
97EXPORT_SYMBOL(ttm_global_item_ref);
98
99void ttm_global_item_unref(struct ttm_global_reference *ref)
100{
101 struct ttm_global_item *item = &glob[ref->global_type];
102
103 mutex_lock(&item->mutex);
104 BUG_ON(item->refcount == 0);
105 BUG_ON(ref->object != item->object);
106 if (--item->refcount == 0) {
107 ref->release(ref);
108 kfree(item->object);
109 item->object = NULL;
110 }
111 mutex_unlock(&item->mutex);
112}
113EXPORT_SYMBOL(ttm_global_item_unref);
114
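
A driver shares these per-type singletons by filling in a struct ttm_global_reference and calling ttm_global_item_ref()/ttm_global_item_unref(). The sketch below shows how that typically looks for the memory-accounting object; the TTM_GLOBAL_TTM_MEM type and the reference layout are assumed to come from ttm/ttm_module.h, and the "example_" names are illustrative only.

/* Sketch only: reference-count the shared ttm_mem_global object. */
static int example_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void example_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static struct ttm_global_reference example_mem_global_ref = {
	.global_type = TTM_GLOBAL_TTM_MEM,
	.size = sizeof(struct ttm_mem_global),
	.init = &example_mem_global_init,
	.release = &example_mem_global_release,
};

/* ttm_global_item_ref(&example_mem_global_ref) allocates and initializes the
 * shared object on first use; ttm_global_item_unref() drops it again and
 * frees the object when the last user goes away.
 */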
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
new file mode 100644
index 000000000000..87323d4ff68d
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -0,0 +1,234 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "ttm/ttm_memory.h"
29#include <linux/spinlock.h>
30#include <linux/sched.h>
31#include <linux/wait.h>
32#include <linux/mm.h>
33#include <linux/module.h>
34
35#define TTM_PFX "[TTM] "
36#define TTM_MEMORY_ALLOC_RETRIES 4
37
38/**
39 * At this point we only support a single shrink callback.
40 * Extend this if needed, perhaps using a linked list of callbacks.
41 * Note that this function is reentrant:
42 * many threads may try to swap out at any given time.
43 */
44
45static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
46 uint64_t extra)
47{
48 int ret;
49 struct ttm_mem_shrink *shrink;
50 uint64_t target;
51 uint64_t total_target;
52
53 spin_lock(&glob->lock);
54 if (glob->shrink == NULL)
55 goto out;
56
57 if (from_workqueue) {
58 target = glob->swap_limit;
59 total_target = glob->total_memory_swap_limit;
60 } else if (capable(CAP_SYS_ADMIN)) {
61 total_target = glob->emer_total_memory;
62 target = glob->emer_memory;
63 } else {
64 total_target = glob->max_total_memory;
65 target = glob->max_memory;
66 }
67
68 total_target = (extra >= total_target) ? 0 : total_target - extra;
69 target = (extra >= target) ? 0 : target - extra;
70
71 while (glob->used_memory > target ||
72 glob->used_total_memory > total_target) {
73 shrink = glob->shrink;
74 spin_unlock(&glob->lock);
75 ret = shrink->do_shrink(shrink);
76 spin_lock(&glob->lock);
77 if (unlikely(ret != 0))
78 goto out;
79 }
80out:
81 spin_unlock(&glob->lock);
82}
83
84static void ttm_shrink_work(struct work_struct *work)
85{
86 struct ttm_mem_global *glob =
87 container_of(work, struct ttm_mem_global, work);
88
89 ttm_shrink(glob, true, 0ULL);
90}
91
92int ttm_mem_global_init(struct ttm_mem_global *glob)
93{
94 struct sysinfo si;
95 uint64_t mem;
96
97 spin_lock_init(&glob->lock);
98 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
99 INIT_WORK(&glob->work, ttm_shrink_work);
100 init_waitqueue_head(&glob->queue);
101
102 si_meminfo(&si);
103
104 mem = si.totalram - si.totalhigh;
105 mem *= si.mem_unit;
106
107 glob->max_memory = mem >> 1;
108 glob->emer_memory = (mem >> 1) + (mem >> 2);
109 glob->swap_limit = glob->max_memory - (mem >> 3);
110 glob->used_memory = 0;
111 glob->used_total_memory = 0;
112 glob->shrink = NULL;
113
114 mem = si.totalram;
115 mem *= si.mem_unit;
116
117 glob->max_total_memory = mem >> 1;
118 glob->emer_total_memory = (mem >> 1) + (mem >> 2);
119
120 glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
121
122 printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
123 glob->max_total_memory >> 20);
124 printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
125 glob->max_memory >> 20);
126
127 return 0;
128}
129EXPORT_SYMBOL(ttm_mem_global_init);
130
131void ttm_mem_global_release(struct ttm_mem_global *glob)
132{
133 printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
134 (unsigned long long)glob->used_total_memory);
135 flush_workqueue(glob->swap_queue);
136 destroy_workqueue(glob->swap_queue);
137 glob->swap_queue = NULL;
138}
139EXPORT_SYMBOL(ttm_mem_global_release);
140
141static inline void ttm_check_swapping(struct ttm_mem_global *glob)
142{
143 bool needs_swapping;
144
145 spin_lock(&glob->lock);
146 needs_swapping = (glob->used_memory > glob->swap_limit ||
147 glob->used_total_memory >
148 glob->total_memory_swap_limit);
149 spin_unlock(&glob->lock);
150
151 if (unlikely(needs_swapping))
152 (void)queue_work(glob->swap_queue, &glob->work);
153
154}
155
156void ttm_mem_global_free(struct ttm_mem_global *glob,
157 uint64_t amount, bool himem)
158{
159 spin_lock(&glob->lock);
160 glob->used_total_memory -= amount;
161 if (!himem)
162 glob->used_memory -= amount;
163 wake_up_all(&glob->queue);
164 spin_unlock(&glob->lock);
165}
166
167static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
168 uint64_t amount, bool himem, bool reserve)
169{
170 uint64_t limit;
171 uint64_t lomem_limit;
172 int ret = -ENOMEM;
173
174 spin_lock(&glob->lock);
175
176 if (capable(CAP_SYS_ADMIN)) {
177 limit = glob->emer_total_memory;
178 lomem_limit = glob->emer_memory;
179 } else {
180 limit = glob->max_total_memory;
181 lomem_limit = glob->max_memory;
182 }
183
184 if (unlikely(glob->used_total_memory + amount > limit))
185 goto out_unlock;
186 if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
187 goto out_unlock;
188
189 if (reserve) {
190 glob->used_total_memory += amount;
191 if (!himem)
192 glob->used_memory += amount;
193 }
194 ret = 0;
195out_unlock:
196 spin_unlock(&glob->lock);
197 ttm_check_swapping(glob);
198
199 return ret;
200}
201
202int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
203 bool no_wait, bool interruptible, bool himem)
204{
205 int count = TTM_MEMORY_ALLOC_RETRIES;
206
207 while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
208 != 0)) {
209 if (no_wait)
210 return -ENOMEM;
211 if (unlikely(count-- == 0))
212 return -ENOMEM;
213 ttm_shrink(glob, false, memory + (memory >> 2) + 16);
214 }
215
216 return 0;
217}
218
219size_t ttm_round_pot(size_t size)
220{
221 if ((size & (size - 1)) == 0)
222 return size;
223 else if (size > PAGE_SIZE)
224 return PAGE_ALIGN(size);
225 else {
226 size_t tmp_size = 4;
227
228 while (tmp_size < size)
229 tmp_size <<= 1;
230
231 return tmp_size;
232 }
233 return 0;
234}
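
ttm_round_pot() rounds allocation sizes for accounting: exact powers of two are returned unchanged, sizes larger than a page are page-aligned, and everything else is rounded up to the next power of two (starting at 4 bytes). To make the rule concrete, here is a small stand-alone user-space sketch that mirrors it, assuming a 4 KiB page size for the example values.

#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_PAGE_SIZE 4096UL
#define EXAMPLE_PAGE_ALIGN(x) (((x) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))

/* Same rounding rule as ttm_round_pot() above. */
static size_t example_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > EXAMPLE_PAGE_SIZE)
		return EXAMPLE_PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;
		return tmp_size;
	}
}

int main(void)
{
	printf("%zu %zu %zu\n",
	       example_round_pot(100),	/* 128  */
	       example_round_pot(4096),	/* 4096 */
	       example_round_pot(5000));	/* 8192 */
	return 0;
}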
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
new file mode 100644
index 000000000000..59ce8191d584
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -0,0 +1,50 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 * Jerome Glisse
30 */
31#include <linux/module.h>
32#include <ttm/ttm_module.h>
33
34static int __init ttm_init(void)
35{
36 ttm_global_init();
37 return 0;
38}
39
40static void __exit ttm_exit(void)
41{
42 ttm_global_release();
43}
44
45module_init(ttm_init);
46module_exit(ttm_exit);
47
48MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
49MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
50MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644
index 000000000000..c27ab3a877ad
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -0,0 +1,635 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include <linux/version.h>
32#include <linux/vmalloc.h>
33#include <linux/sched.h>
34#include <linux/highmem.h>
35#include <linux/pagemap.h>
36#include <linux/file.h>
37#include <linux/swap.h>
38#include "ttm/ttm_module.h"
39#include "ttm/ttm_bo_driver.h"
40#include "ttm/ttm_placement.h"
41
42static int ttm_tt_swapin(struct ttm_tt *ttm);
43
44#if defined(CONFIG_X86)
45static void ttm_tt_clflush_page(struct page *page)
46{
47 uint8_t *page_virtual;
48 unsigned int i;
49
50 if (unlikely(page == NULL))
51 return;
52
53 page_virtual = kmap_atomic(page, KM_USER0);
54
55 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
56 clflush(page_virtual + i);
57
58 kunmap_atomic(page_virtual, KM_USER0);
59}
60
61static void ttm_tt_cache_flush_clflush(struct page *pages[],
62 unsigned long num_pages)
63{
64 unsigned long i;
65
66 mb();
67 for (i = 0; i < num_pages; ++i)
68 ttm_tt_clflush_page(*pages++);
69 mb();
70}
71#else
72static void ttm_tt_ipi_handler(void *null)
73{
74 ;
75}
76#endif
77
78void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
79{
80
81#if defined(CONFIG_X86)
82 if (cpu_has_clflush) {
83 ttm_tt_cache_flush_clflush(pages, num_pages);
84 return;
85 }
86#else
87 if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
88 printk(KERN_ERR TTM_PFX
89 "Timed out waiting for drm cache flush.\n");
90#endif
91}
92
93/**
94 * Allocates storage for pointers to the pages that back the ttm.
95 *
96 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
97 */
98static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
99{
100 unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
101 ttm->pages = NULL;
102
103 if (size <= PAGE_SIZE)
104 ttm->pages = kzalloc(size, GFP_KERNEL);
105
106 if (!ttm->pages) {
107 ttm->pages = vmalloc_user(size);
108 if (ttm->pages)
109 ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
110 }
111}
112
113static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
114{
115 if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
116 vfree(ttm->pages);
117 ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
118 } else {
119 kfree(ttm->pages);
120 }
121 ttm->pages = NULL;
122}
123
124static struct page *ttm_tt_alloc_page(unsigned page_flags)
125{
126 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
127 return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
128
129 return alloc_page(GFP_HIGHUSER);
130}
131
132static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
133{
134 int write;
135 int dirty;
136 struct page *page;
137 int i;
138 struct ttm_backend *be = ttm->be;
139
140 BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
141 write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
142 dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
143
144 if (be)
145 be->func->clear(be);
146
147 for (i = 0; i < ttm->num_pages; ++i) {
148 page = ttm->pages[i];
149 if (page == NULL)
150 continue;
151
152 if (page == ttm->dummy_read_page) {
153 BUG_ON(write);
154 continue;
155 }
156
157 if (write && dirty && !PageReserved(page))
158 set_page_dirty_lock(page);
159
160 ttm->pages[i] = NULL;
161 ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
162 put_page(page);
163 }
164 ttm->state = tt_unpopulated;
165 ttm->first_himem_page = ttm->num_pages;
166 ttm->last_lomem_page = -1;
167}
168
169static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
170{
171 struct page *p;
172 struct ttm_bo_device *bdev = ttm->bdev;
173 struct ttm_mem_global *mem_glob = bdev->mem_glob;
174 int ret;
175
176 while (NULL == (p = ttm->pages[index])) {
177 p = ttm_tt_alloc_page(ttm->page_flags);
178
179 if (!p)
180 return NULL;
181
182 if (PageHighMem(p)) {
183 ret =
184 ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
185 false, false, true);
186 if (unlikely(ret != 0))
187 goto out_err;
188 ttm->pages[--ttm->first_himem_page] = p;
189 } else {
190 ret =
191 ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
192 false, false, false);
193 if (unlikely(ret != 0))
194 goto out_err;
195 ttm->pages[++ttm->last_lomem_page] = p;
196 }
197 }
198 return p;
199out_err:
200 put_page(p);
201 return NULL;
202}
203
204struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
205{
206 int ret;
207
208 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
209 ret = ttm_tt_swapin(ttm);
210 if (unlikely(ret != 0))
211 return NULL;
212 }
213 return __ttm_tt_get_page(ttm, index);
214}
215
216int ttm_tt_populate(struct ttm_tt *ttm)
217{
218 struct page *page;
219 unsigned long i;
220 struct ttm_backend *be;
221 int ret;
222
223 if (ttm->state != tt_unpopulated)
224 return 0;
225
226 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
227 ret = ttm_tt_swapin(ttm);
228 if (unlikely(ret != 0))
229 return ret;
230 }
231
232 be = ttm->be;
233
234 for (i = 0; i < ttm->num_pages; ++i) {
235 page = __ttm_tt_get_page(ttm, i);
236 if (!page)
237 return -ENOMEM;
238 }
239
240 be->func->populate(be, ttm->num_pages, ttm->pages,
241 ttm->dummy_read_page);
242 ttm->state = tt_unbound;
243 return 0;
244}
245
246#ifdef CONFIG_X86
247static inline int ttm_tt_set_page_caching(struct page *p,
248 enum ttm_caching_state c_state)
249{
250 if (PageHighMem(p))
251 return 0;
252
253 switch (c_state) {
254 case tt_cached:
255 return set_pages_wb(p, 1);
256 case tt_wc:
257 return set_memory_wc((unsigned long) page_address(p), 1);
258 default:
259 return set_pages_uc(p, 1);
260 }
261}
262#else /* CONFIG_X86 */
263static inline int ttm_tt_set_page_caching(struct page *p,
264 enum ttm_caching_state c_state)
265{
266 return 0;
267}
268#endif /* CONFIG_X86 */
269
270/*
271 * Change caching policy for the linear kernel map
 272 * for a range of pages in a ttm.
273 */
274
275static int ttm_tt_set_caching(struct ttm_tt *ttm,
276 enum ttm_caching_state c_state)
277{
278 int i, j;
279 struct page *cur_page;
280 int ret;
281
282 if (ttm->caching_state == c_state)
283 return 0;
284
285 if (c_state != tt_cached) {
286 ret = ttm_tt_populate(ttm);
287 if (unlikely(ret != 0))
288 return ret;
289 }
290
291 if (ttm->caching_state == tt_cached)
292 ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
293
294 for (i = 0; i < ttm->num_pages; ++i) {
295 cur_page = ttm->pages[i];
296 if (likely(cur_page != NULL)) {
297 ret = ttm_tt_set_page_caching(cur_page, c_state);
298 if (unlikely(ret != 0))
299 goto out_err;
300 }
301 }
302
303 ttm->caching_state = c_state;
304
305 return 0;
306
307out_err:
308 for (j = 0; j < i; ++j) {
309 cur_page = ttm->pages[j];
310 if (likely(cur_page != NULL)) {
311 (void)ttm_tt_set_page_caching(cur_page,
312 ttm->caching_state);
313 }
314 }
315
316 return ret;
317}
318
319int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
320{
321 enum ttm_caching_state state;
322
323 if (placement & TTM_PL_FLAG_WC)
324 state = tt_wc;
325 else if (placement & TTM_PL_FLAG_UNCACHED)
326 state = tt_uncached;
327 else
328 state = tt_cached;
329
330 return ttm_tt_set_caching(ttm, state);
331}
332
333static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
334{
335 int i;
336 struct page *cur_page;
337 struct ttm_backend *be = ttm->be;
338
339 if (be)
340 be->func->clear(be);
341 (void)ttm_tt_set_caching(ttm, tt_cached);
342 for (i = 0; i < ttm->num_pages; ++i) {
343 cur_page = ttm->pages[i];
344 ttm->pages[i] = NULL;
345 if (cur_page) {
346 if (page_count(cur_page) != 1)
347 printk(KERN_ERR TTM_PFX
348 "Erroneous page count. "
349 "Leaking pages.\n");
350 ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
351 PageHighMem(cur_page));
352 __free_page(cur_page);
353 }
354 }
355 ttm->state = tt_unpopulated;
356 ttm->first_himem_page = ttm->num_pages;
357 ttm->last_lomem_page = -1;
358}
359
360void ttm_tt_destroy(struct ttm_tt *ttm)
361{
362 struct ttm_backend *be;
363
364 if (unlikely(ttm == NULL))
365 return;
366
367 be = ttm->be;
368 if (likely(be != NULL)) {
369 be->func->destroy(be);
370 ttm->be = NULL;
371 }
372
373 if (likely(ttm->pages != NULL)) {
374 if (ttm->page_flags & TTM_PAGE_FLAG_USER)
375 ttm_tt_free_user_pages(ttm);
376 else
377 ttm_tt_free_alloced_pages(ttm);
378
379 ttm_tt_free_page_directory(ttm);
380 }
381
382 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
383 ttm->swap_storage)
384 fput(ttm->swap_storage);
385
386 kfree(ttm);
387}
388
389int ttm_tt_set_user(struct ttm_tt *ttm,
390 struct task_struct *tsk,
391 unsigned long start, unsigned long num_pages)
392{
393 struct mm_struct *mm = tsk->mm;
394 int ret;
395 int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
396 struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
397
398 BUG_ON(num_pages != ttm->num_pages);
399 BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
400
401 /**
402 * Account user pages as lowmem pages for now.
403 */
404
405 ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
406 false, false, false);
407 if (unlikely(ret != 0))
408 return ret;
409
410 down_read(&mm->mmap_sem);
411 ret = get_user_pages(tsk, mm, start, num_pages,
412 write, 0, ttm->pages, NULL);
413 up_read(&mm->mmap_sem);
414
415 if (ret != num_pages && write) {
416 ttm_tt_free_user_pages(ttm);
417 ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
418 return -ENOMEM;
419 }
420
421 ttm->tsk = tsk;
422 ttm->start = start;
423 ttm->state = tt_unbound;
424
425 return 0;
426}
427
428struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
429 uint32_t page_flags, struct page *dummy_read_page)
430{
431 struct ttm_bo_driver *bo_driver = bdev->driver;
432 struct ttm_tt *ttm;
433
434 if (!bo_driver)
435 return NULL;
436
437 ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
438 if (!ttm)
439 return NULL;
440
441 ttm->bdev = bdev;
442
443 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
444 ttm->first_himem_page = ttm->num_pages;
445 ttm->last_lomem_page = -1;
446 ttm->caching_state = tt_cached;
447 ttm->page_flags = page_flags;
448
449 ttm->dummy_read_page = dummy_read_page;
450
451 ttm_tt_alloc_page_directory(ttm);
452 if (!ttm->pages) {
453 ttm_tt_destroy(ttm);
454 printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
455 return NULL;
456 }
457 ttm->be = bo_driver->create_ttm_backend_entry(bdev);
458 if (!ttm->be) {
459 ttm_tt_destroy(ttm);
460 printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
461 return NULL;
462 }
463 ttm->state = tt_unpopulated;
464 return ttm;
465}
466
467void ttm_tt_unbind(struct ttm_tt *ttm)
468{
469 int ret;
470 struct ttm_backend *be = ttm->be;
471
472 if (ttm->state == tt_bound) {
473 ret = be->func->unbind(be);
474 BUG_ON(ret);
475 ttm->state = tt_unbound;
476 }
477}
478
479int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
480{
481 int ret = 0;
482 struct ttm_backend *be;
483
484 if (!ttm)
485 return -EINVAL;
486
487 if (ttm->state == tt_bound)
488 return 0;
489
490 be = ttm->be;
491
492 ret = ttm_tt_populate(ttm);
493 if (ret)
494 return ret;
495
496 ret = be->func->bind(be, bo_mem);
497 if (ret) {
498 printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
499 return ret;
500 }
501
502 ttm->state = tt_bound;
503
504 if (ttm->page_flags & TTM_PAGE_FLAG_USER)
505 ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
506 return 0;
507}
508EXPORT_SYMBOL(ttm_tt_bind);
509
510static int ttm_tt_swapin(struct ttm_tt *ttm)
511{
512 struct address_space *swap_space;
513 struct file *swap_storage;
514 struct page *from_page;
515 struct page *to_page;
516 void *from_virtual;
517 void *to_virtual;
518 int i;
519 int ret;
520
521 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
522 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
523 ttm->num_pages);
524 if (unlikely(ret != 0))
525 return ret;
526
527 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
528 return 0;
529 }
530
531 swap_storage = ttm->swap_storage;
532 BUG_ON(swap_storage == NULL);
533
534 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
535
536 for (i = 0; i < ttm->num_pages; ++i) {
537 from_page = read_mapping_page(swap_space, i, NULL);
538 if (IS_ERR(from_page))
539 goto out_err;
540 to_page = __ttm_tt_get_page(ttm, i);
541 if (unlikely(to_page == NULL))
542 goto out_err;
543
544 preempt_disable();
545 from_virtual = kmap_atomic(from_page, KM_USER0);
546 to_virtual = kmap_atomic(to_page, KM_USER1);
547 memcpy(to_virtual, from_virtual, PAGE_SIZE);
548 kunmap_atomic(to_virtual, KM_USER1);
549 kunmap_atomic(from_virtual, KM_USER0);
550 preempt_enable();
551 page_cache_release(from_page);
552 }
553
554 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
555 fput(swap_storage);
556 ttm->swap_storage = NULL;
557 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
558
559 return 0;
560out_err:
561 ttm_tt_free_alloced_pages(ttm);
562 return -ENOMEM;
563}
564
565int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
566{
567 struct address_space *swap_space;
568 struct file *swap_storage;
569 struct page *from_page;
570 struct page *to_page;
571 void *from_virtual;
572 void *to_virtual;
573 int i;
574
575 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
576 BUG_ON(ttm->caching_state != tt_cached);
577
578 /*
579 * For user buffers, just unpin the pages, as there should be
580 * vma references.
581 */
582
583 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
584 ttm_tt_free_user_pages(ttm);
585 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
586 ttm->swap_storage = NULL;
587 return 0;
588 }
589
590 if (!persistant_swap_storage) {
591 swap_storage = shmem_file_setup("ttm swap",
592 ttm->num_pages << PAGE_SHIFT,
593 0);
594 if (unlikely(IS_ERR(swap_storage))) {
595 printk(KERN_ERR "Failed allocating swap storage.\n");
596 return -ENOMEM;
597 }
598 } else
599 swap_storage = persistant_swap_storage;
600
601 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
602
603 for (i = 0; i < ttm->num_pages; ++i) {
604 from_page = ttm->pages[i];
605 if (unlikely(from_page == NULL))
606 continue;
607 to_page = read_mapping_page(swap_space, i, NULL);
 608	if (unlikely(IS_ERR(to_page)))
609 goto out_err;
610
611 preempt_disable();
612 from_virtual = kmap_atomic(from_page, KM_USER0);
613 to_virtual = kmap_atomic(to_page, KM_USER1);
614 memcpy(to_virtual, from_virtual, PAGE_SIZE);
615 kunmap_atomic(to_virtual, KM_USER1);
616 kunmap_atomic(from_virtual, KM_USER0);
617 preempt_enable();
618 set_page_dirty(to_page);
619 mark_page_accessed(to_page);
620 page_cache_release(to_page);
621 }
622
623 ttm_tt_free_alloced_pages(ttm);
624 ttm->swap_storage = swap_storage;
625 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
626 if (persistant_swap_storage)
627 ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
628
629 return 0;
630out_err:
631 if (!persistant_swap_storage)
632 fput(swap_storage);
633
634 return -ENOMEM;
635}
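
The functions above are normally driven by the buffer-object layer rather than called directly, but the intended order is: create the ttm, adjust the linear-map caching for the target placement, populate and bind the pages through the backend, then unbind and destroy. A rough sketch of that cycle follows, with error paths trimmed; the device, memory region, and dummy read page are assumed to come from the caller, and the function name is illustrative.

/* Sketch only: one create/bind/unbind/destroy cycle of a ttm_tt. */
static int example_tt_bind_cycle(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *bo_mem,
				 struct page *dummy_page,
				 unsigned long size)
{
	struct ttm_tt *ttm;
	int ret;

	ttm = ttm_tt_create(bdev, size, 0, dummy_page);
	if (!ttm)
		return -ENOMEM;

	/* Match the linear kernel map caching to the new placement ... */
	ret = ttm_tt_set_placement_caching(ttm, bo_mem->placement);
	if (ret)
		goto out;

	/* ... then populate the pages and bind them through the backend. */
	ret = ttm_tt_bind(ttm, bo_mem);
	if (ret)
		goto out;

	ttm_tt_unbind(ttm);
out:
	ttm_tt_destroy(ttm);
	return ret;
}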
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0dcf9ca0b0ac..d0fcf36c2ab2 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -115,5 +115,7 @@ source "drivers/staging/line6/Kconfig"
 
 source "drivers/staging/serqt_usb/Kconfig"
 
+source "drivers/gpu/drm/radeon/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f8634ab53b8f..45c18672b093 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -254,8 +254,8 @@
 {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
@@ -273,8 +273,8 @@
 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
+{0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index fe3e3a4b4aed..41862e9a4c20 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -496,6 +496,16 @@ typedef struct {
496#define DRM_RADEON_SETPARAM 0x19 496#define DRM_RADEON_SETPARAM 0x19
497#define DRM_RADEON_SURF_ALLOC 0x1a 497#define DRM_RADEON_SURF_ALLOC 0x1a
498#define DRM_RADEON_SURF_FREE 0x1b 498#define DRM_RADEON_SURF_FREE 0x1b
499/* KMS ioctl */
500#define DRM_RADEON_GEM_INFO 0x1c
501#define DRM_RADEON_GEM_CREATE 0x1d
502#define DRM_RADEON_GEM_MMAP 0x1e
503#define DRM_RADEON_GEM_PREAD 0x21
504#define DRM_RADEON_GEM_PWRITE 0x22
505#define DRM_RADEON_GEM_SET_DOMAIN 0x23
506#define DRM_RADEON_GEM_WAIT_IDLE 0x24
507#define DRM_RADEON_CS 0x26
508#define DRM_RADEON_INFO 0x27
499 509
500#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) 510#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
501#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) 511#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -524,6 +534,17 @@ typedef struct {
524#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t) 534#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
525#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t) 535#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
526#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t) 536#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
537/* KMS */
538#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
539#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
540#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
541#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
542#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
543#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
544#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
545#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
546#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
547
527 548
528typedef struct drm_radeon_init { 549typedef struct drm_radeon_init {
529 enum { 550 enum {
@@ -682,6 +703,7 @@ typedef struct drm_radeon_indirect {
682#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */ 703#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
683#define RADEON_PARAM_FB_LOCATION 14 /* FB location */ 704#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
684#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */ 705#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
706#define RADEON_PARAM_DEVICE_ID 16
685 707
686typedef struct drm_radeon_getparam { 708typedef struct drm_radeon_getparam {
687 int param; 709 int param;
@@ -751,4 +773,112 @@ typedef struct drm_radeon_surface_free {
751#define DRM_RADEON_VBLANK_CRTC1 1 773#define DRM_RADEON_VBLANK_CRTC1 1
752#define DRM_RADEON_VBLANK_CRTC2 2 774#define DRM_RADEON_VBLANK_CRTC2 2
753 775
776/*
777 * Kernel modesetting world below.
778 */
779#define RADEON_GEM_DOMAIN_CPU 0x1
780#define RADEON_GEM_DOMAIN_GTT 0x2
781#define RADEON_GEM_DOMAIN_VRAM 0x4
782
783struct drm_radeon_gem_info {
784 uint64_t gart_size;
785 uint64_t vram_size;
786 uint64_t vram_visible;
787};
788
789#define RADEON_GEM_NO_BACKING_STORE 1
790
791struct drm_radeon_gem_create {
792 uint64_t size;
793 uint64_t alignment;
794 uint32_t handle;
795 uint32_t initial_domain;
796 uint32_t flags;
797};
798
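As a rough illustration of how user space might drive the new GEM_CREATE ioctl, a minimal sketch follows; the include paths and the already-open DRM file descriptor are assumptions, not part of this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "radeon_drm.h"		/* path depends on the libdrm installation */

/* fd is assumed to be an open handle to a radeon DRM node, e.g. /dev/dri/card0. */
static int radeon_gem_alloc(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_radeon_gem_create req;

	memset(&req, 0, sizeof(req));
	req.size = size;				/* in bytes */
	req.alignment = 4096;				/* requested placement alignment */
	req.initial_domain = RADEON_GEM_DOMAIN_VRAM;	/* or GTT / CPU */
	req.flags = 0;					/* or RADEON_GEM_NO_BACKING_STORE */

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req) != 0)
		return -1;

	*handle = req.handle;				/* filled in by the kernel */
	return 0;
}
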
799struct drm_radeon_gem_mmap {
800 uint32_t handle;
801 uint32_t pad;
802 uint64_t offset;
803 uint64_t size;
804 uint64_t addr_ptr;
805};
806
807struct drm_radeon_gem_set_domain {
808 uint32_t handle;
809 uint32_t read_domains;
810 uint32_t write_domain;
811};
812
813struct drm_radeon_gem_wait_idle {
814 uint32_t handle;
815 uint32_t pad;
816};
817
818struct drm_radeon_gem_busy {
819 uint32_t handle;
820 uint32_t busy;
821};
822
823struct drm_radeon_gem_pread {
824 /** Handle for the object being read. */
825 uint32_t handle;
826 uint32_t pad;
827 /** Offset into the object to read from */
828 uint64_t offset;
829 /** Length of data to read */
830 uint64_t size;
831 /** Pointer to write the data into. */
832 /* void *, but pointers are not 32/64 compatible */
833 uint64_t data_ptr;
834};
835
836struct drm_radeon_gem_pwrite {
837 /** Handle for the object being written to. */
838 uint32_t handle;
839 uint32_t pad;
840 /** Offset into the object to write to */
841 uint64_t offset;
842 /** Length of data to write */
843 uint64_t size;
844 /** Pointer to read the data from. */
845 /* void *, but pointers are not 32/64 compatible */
846 uint64_t data_ptr;
847};
848
849#define RADEON_CHUNK_ID_RELOCS 0x01
850#define RADEON_CHUNK_ID_IB 0x02
851
852struct drm_radeon_cs_chunk {
853 uint32_t chunk_id;
854 uint32_t length_dw;
855 uint64_t chunk_data;
856};
857
858struct drm_radeon_cs_reloc {
859 uint32_t handle;
860 uint32_t read_domains;
861 uint32_t write_domain;
862 uint32_t flags;
863};
864
865struct drm_radeon_cs {
866 uint32_t num_chunks;
867 uint32_t cs_id;
868 /* this points to uint64_t * which point to cs chunks */
869 uint64_t chunks;
870 /* updates to the limits after this CS ioctl */
871 uint64_t gart_limit;
872 uint64_t vram_limit;
873};
874
875#define RADEON_INFO_DEVICE_ID 0x00
876#define RADEON_INFO_NUM_GB_PIPES 0x01
877
878struct drm_radeon_info {
879 uint32_t request;
880 uint32_t pad;
881 uint64_t value;
882};
883
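The chunks field of struct drm_radeon_cs is a user pointer to an array of user pointers, each of which refers to a struct drm_radeon_cs_chunk. A condensed sketch of a two-chunk submission (indirect buffer plus relocations) might look as follows; the ib/reloc arrays and their lengths are illustrative, and the includes match the GEM_CREATE sketch above:

/* fd: open DRM node; ib/ib_len_dw and relocs/reloc_len_dw: caller-built data. */
static int radeon_submit_cs(int fd, uint32_t *ib, uint32_t ib_len_dw,
			    uint32_t *relocs, uint32_t reloc_len_dw)
{
	struct drm_radeon_cs_chunk chunks[2];
	uint64_t chunk_ptrs[2];
	struct drm_radeon_cs cs;

	chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
	chunks[0].length_dw = ib_len_dw;
	chunks[0].chunk_data = (uint64_t)(uintptr_t)ib;

	chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
	chunks[1].length_dw = reloc_len_dw;
	chunks[1].chunk_data = (uint64_t)(uintptr_t)relocs;

	/* cs.chunks points at an array of user pointers to the chunks. */
	chunk_ptrs[0] = (uint64_t)(uintptr_t)&chunks[0];
	chunk_ptrs[1] = (uint64_t)(uintptr_t)&chunks[1];

	memset(&cs, 0, sizeof(cs));
	cs.num_chunks = 2;
	cs.chunks = (uint64_t)(uintptr_t)chunk_ptrs;

	/* On success the kernel may update gart_limit / vram_limit. */
	return ioctl(fd, DRM_IOCTL_RADEON_CS, &cs);
}
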
754#endif 884#endif
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
new file mode 100644
index 000000000000..cd22ab4b495c
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -0,0 +1,618 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_BO_API_H_
32#define _TTM_BO_API_H_
33
34#include "drm_hashtab.h"
35#include <linux/kref.h>
36#include <linux/list.h>
37#include <linux/wait.h>
38#include <linux/mutex.h>
39#include <linux/mm.h>
40#include <linux/rbtree.h>
41#include <linux/bitmap.h>
42
43struct ttm_bo_device;
44
45struct drm_mm_node;
46
47/**
48 * struct ttm_mem_reg
49 *
50 * @mm_node: Memory manager node.
51 * @size: Requested size of memory region.
52 * @num_pages: Actual size of memory region in pages.
53 * @page_alignment: Page alignment.
54 * @placement: Placement flags.
55 *
56 * Structure indicating the placement and space resources used by a
57 * buffer object.
58 */
59
60struct ttm_mem_reg {
61 struct drm_mm_node *mm_node;
62 unsigned long size;
63 unsigned long num_pages;
64 uint32_t page_alignment;
65 uint32_t mem_type;
66 uint32_t placement;
67};
68
69/**
70 * enum ttm_bo_type
71 *
72 * @ttm_bo_type_device: These are 'normal' buffers that can
73 * be mmapped by user space. Each of these bos occupy a slot in the
74 * device address space, that can be used for normal vm operations.
75 *
76 * @ttm_bo_type_user: These are user-space memory areas that are made
77 * available to the GPU by mapping the buffer pages into the GPU aperture
78 * space. These buffers cannot be mmaped from the device address space.
79 *
80 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
81 * but they cannot be accessed from user-space. For kernel-only use.
82 */
83
84enum ttm_bo_type {
85 ttm_bo_type_device,
86 ttm_bo_type_user,
87 ttm_bo_type_kernel
88};
89
90struct ttm_tt;
91
92/**
93 * struct ttm_buffer_object
94 *
95 * @bdev: Pointer to the buffer object device structure.
96 * @buffer_start: The virtual user-space start address of ttm_bo_type_user
97 * buffers.
98 * @type: The bo type.
99 * @destroy: Destruction function. If NULL, kfree is used.
100 * @num_pages: Actual number of pages.
101 * @addr_space_offset: Address space offset.
102 * @acc_size: Accounted size for this object.
103 * @kref: Reference count of this buffer object. When this refcount reaches
104 * zero, the object is put on the delayed delete list.
105 * @list_kref: List reference count of this buffer object. This member is
106 * used to avoid destruction while the buffer object is still on a list.
107 * The lru lists may keep one refcount each, the delayed delete list keeps one,
108 * and kref != 0 keeps one refcount. When this refcount reaches zero,
109 * the object is destroyed.
110 * @event_queue: Queue for processes waiting on buffer object status change.
111 * @lock: spinlock protecting mostly synchronization members.
112 * @proposed_placement: Proposed placement for the buffer. Changed only by the
113 * creator prior to validation as opposed to bo->mem.proposed_flags which is
114 * changed by the implementation prior to a buffer move if it wants to outsmart
115 * the buffer creator / user. This latter happens, for example, at eviction.
116 * @mem: structure describing current placement.
117 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
118 * pinned in physical memory. If this behaviour is not desired, this member
119 * holds a pointer to a persistant shmem object.
120 * @ttm: TTM structure holding system pages.
121 * @evicted: Whether the object was evicted without user-space knowing.
122 * @cpu_writes: For synchronization. Number of cpu writers.
123 * @lru: List head for the lru list.
124 * @ddestroy: List head for the delayed destroy list.
125 * @swap: List head for swap LRU list.
126 * @val_seq: Sequence of the validation holding the @reserved lock.
127 * Used to avoid starvation when many processes compete to validate the
128 * buffer. This member is protected by the bo_device::lru_lock.
129 * @seq_valid: The value of @val_seq is valid. This value is protected by
130 * the bo_device::lru_lock.
131 * @reserved: Deadlock-free lock used for synchronization state transitions.
132 * @sync_obj_arg: Opaque argument to synchronization object function.
133 * @sync_obj: Pointer to a synchronization object.
134 * @priv_flags: Flags describing buffer object internal state.
135 * @vm_rb: Rb node for the vm rb tree.
136 * @vm_node: Address space manager node.
137 * @offset: The current GPU offset, which can have different meanings
138 * depending on the memory type. For SYSTEM type memory, it should be 0.
139 * @cur_placement: Hint of current placement.
140 *
141 * Base class for TTM buffer object, that deals with data placement and CPU
142 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
143 * the driver can usually use the placement offset @offset directly as the
144 * GPU virtual address. For drivers implementing multiple
145 * GPU memory manager contexts, the driver should manage the address space
146 * in these contexts separately and use these objects to get the correct
147 * placement and caching for these GPU maps. This makes it possible to use
148 * these objects for even quite elaborate memory management schemes.
149 * The destroy member and the API visibility of this object make it possible
150 * to derive driver-specific types.
151 */
152
153struct ttm_buffer_object {
154 /**
155 * Members constant at init.
156 */
157
158 struct ttm_bo_device *bdev;
159 unsigned long buffer_start;
160 enum ttm_bo_type type;
161 void (*destroy) (struct ttm_buffer_object *);
162 unsigned long num_pages;
163 uint64_t addr_space_offset;
164 size_t acc_size;
165
166 /**
167 * Members not needing protection.
168 */
169
170 struct kref kref;
171 struct kref list_kref;
172 wait_queue_head_t event_queue;
173 spinlock_t lock;
174
175 /**
176 * Members protected by the bo::reserved lock.
177 */
178
179 uint32_t proposed_placement;
180 struct ttm_mem_reg mem;
181 struct file *persistant_swap_storage;
182 struct ttm_tt *ttm;
183 bool evicted;
184
185 /**
186 * Members protected by the bo::reserved lock only when written to.
187 */
188
189 atomic_t cpu_writers;
190
191 /**
192 * Members protected by the bdev::lru_lock.
193 */
194
195 struct list_head lru;
196 struct list_head ddestroy;
197 struct list_head swap;
198 uint32_t val_seq;
199 bool seq_valid;
200
201 /**
202 * Members protected by the bdev::lru_lock
203 * only when written to.
204 */
205
206 atomic_t reserved;
207
208
209 /**
210 * Members protected by the bo::lock
211 */
212
213 void *sync_obj_arg;
214 void *sync_obj;
215 unsigned long priv_flags;
216
217 /**
218 * Members protected by the bdev::vm_lock
219 */
220
221 struct rb_node vm_rb;
222 struct drm_mm_node *vm_node;
223
224
225 /**
226 * Special members that are protected by the reserve lock
227 * and the bo::lock when written to. Can be read with
228 * either of these locks held.
229 */
230
231 unsigned long offset;
232 uint32_t cur_placement;
233};
234
235/**
236 * struct ttm_bo_kmap_obj
237 *
238 * @virtual: The current kernel virtual address.
239 * @page: The page when kmap'ing a single page.
240 * @bo_kmap_type: Type of bo_kmap.
241 *
242 * Object describing a kernel mapping. Since a TTM bo may be located
243 * in various memory types with various caching policies, the
244 * mapping can either be an ioremap, a vmap, a kmap or part of a
245 * premapped region.
246 */
247
248struct ttm_bo_kmap_obj {
249 void *virtual;
250 struct page *page;
251 enum {
252 ttm_bo_map_iomap,
253 ttm_bo_map_vmap,
254 ttm_bo_map_kmap,
255 ttm_bo_map_premapped,
256 } bo_kmap_type;
257};
258
259/**
260 * ttm_bo_reference - reference a struct ttm_buffer_object
261 *
262 * @bo: The buffer object.
263 *
264 * Returns a refcounted pointer to a buffer object.
265 */
266
267static inline struct ttm_buffer_object *
268ttm_bo_reference(struct ttm_buffer_object *bo)
269{
270 kref_get(&bo->kref);
271 return bo;
272}
273
274/**
275 * ttm_bo_wait - wait for buffer idle.
276 *
277 * @bo: The buffer object.
278 * @interruptible: Use interruptible wait.
279 * @no_wait: Return immediately if buffer is busy.
280 *
281 * This function must be called with the bo::mutex held, and makes
282 * sure any previous rendering to the buffer is completed.
283 * Note: It might be necessary to block validations before the
284 * wait by reserving the buffer.
285 * Returns -EBUSY if no_wait is true and the buffer is busy.
286 * Returns -ERESTART if interrupted by a signal.
287 */
288extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
289 bool interruptible, bool no_wait);
290/**
291 * ttm_buffer_object_validate
292 *
293 * @bo: The buffer object.
294 * @proposed_placement: Proposed_placement for the buffer object.
295 * @interruptible: Sleep interruptible if sleeping.
296 * @no_wait: Return immediately if the buffer is busy.
297 *
298 * Changes placement and caching policy of the buffer object
299 * according to bo::proposed_flags.
300 * Returns
301 * -EINVAL on invalid proposed_flags.
302 * -ENOMEM on out-of-memory condition.
303 * -EBUSY if no_wait is true and buffer busy.
304 * -ERESTART if interrupted by a signal.
305 */
306extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
307 uint32_t proposed_placement,
308 bool interruptible, bool no_wait);
309/**
310 * ttm_bo_unref
311 *
312 * @bo: The buffer object.
313 *
314 * Unreference and clear a pointer to a buffer object.
315 */
316extern void ttm_bo_unref(struct ttm_buffer_object **bo);
317
318/**
319 * ttm_bo_synccpu_write_grab
320 *
321 * @bo: The buffer object:
322 * @no_wait: Return immediately if buffer is busy.
323 *
324 * Synchronizes a buffer object for CPU RW access. This means
325 * blocking command submission that affects the buffer and
326 * waiting for buffer idle. This lock is recursive.
327 * Returns
328 * -EBUSY if the buffer is busy and no_wait is true.
329 * -ERESTART if interrupted by a signal.
330 */
331
332extern int
333ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
334/**
335 * ttm_bo_synccpu_write_release:
336 *
337 * @bo : The buffer object.
338 *
339 * Releases a synccpu lock.
340 */
341extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
342
343/**
344 * ttm_buffer_object_init
345 *
346 * @bdev: Pointer to a ttm_bo_device struct.
347 * @bo: Pointer to a ttm_buffer_object to be initialized.
348 * @size: Requested size of buffer object.
349 * @type: Requested type of buffer object.
350 * @flags: Initial placement flags.
351 * @page_alignment: Data alignment in pages.
352 * @buffer_start: Virtual address of user space data backing a
353 * user buffer object.
354 * @interruptible: If needing to sleep to wait for GPU resources,
355 * sleep interruptible.
356 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
357 * pinned in physical memory. If this behaviour is not desired, this member
358 * holds a pointer to a persistant shmem object. Typically, this would
359 * point to the shmem object backing a GEM object if TTM is used to back a
360 * GEM user interface.
361 * @acc_size: Accounted size for this object.
362 * @destroy: Destroy function. Use NULL for kfree().
363 *
364 * This function initializes a pre-allocated struct ttm_buffer_object.
365 * As this object may be part of a larger structure, this function,
366 * together with the @destroy function,
367 * enables driver-specific objects derived from a ttm_buffer_object.
368 * On successful return, the object kref and list_kref are set to 1.
369 * Returns
370 * -ENOMEM: Out of memory.
371 * -EINVAL: Invalid placement flags.
372 * -ERESTART: Interrupted by signal while sleeping waiting for resources.
373 */
374
375extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
376 struct ttm_buffer_object *bo,
377 unsigned long size,
378 enum ttm_bo_type type,
379 uint32_t flags,
380 uint32_t page_alignment,
381 unsigned long buffer_start,
382				  bool interruptible,
383 struct file *persistant_swap_storage,
384 size_t acc_size,
385 void (*destroy) (struct ttm_buffer_object *));
386/**
387 * ttm_buffer_object_create
388 *
389 * @bdev: Pointer to a ttm_bo_device struct.
390 * @bo: Pointer to a ttm_buffer_object to be initialized.
391 * @size: Requested size of buffer object.
392 * @type: Requested type of buffer object.
393 * @flags: Initial placement flags.
394 * @page_alignment: Data alignment in pages.
395 * @buffer_start: Virtual address of user space data backing a
396 * user buffer object.
397 * @interruptible: If needing to sleep while waiting for GPU resources,
398 * sleep interruptible.
399 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
400 * pinned in physical memory. If this behaviour is not desired, this member
401 * holds a pointer to a persistant shmem object. Typically, this would
402 * point to the shmem object backing a GEM object if TTM is used to back a
403 * GEM user interface.
404 * @p_bo: On successful completion *p_bo points to the created object.
405 *
406 * This function allocates a ttm_buffer_object, and then calls
407 * ttm_buffer_object_init on that object.
408 * The destroy function is set to kfree().
409 * Returns
410 * -ENOMEM: Out of memory.
411 * -EINVAL: Invalid placement flags.
412 * -ERESTART: Interrupted by signal while waiting for resources.
413 */
414
415extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
416 unsigned long size,
417 enum ttm_bo_type type,
418 uint32_t flags,
419 uint32_t page_alignment,
420 unsigned long buffer_start,
421 bool interruptible,
422 struct file *persistant_swap_storage,
423 struct ttm_buffer_object **p_bo);
424
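A sketch of how a driver might use this helper to create a small kernel-only buffer object; TTM_PL_FLAG_VRAM is assumed to come from ttm_placement.h, bdev is the driver's ttm_bo_device, and error handling is reduced to the minimum:

	struct ttm_buffer_object *bo = NULL;
	int ret;

	ret = ttm_buffer_object_create(bdev,			/* device set up at load time */
				       4 * PAGE_SIZE,		/* size in bytes */
				       ttm_bo_type_kernel,	/* not mappable by user space */
				       TTM_PL_FLAG_VRAM,	/* initial placement flags */
				       0,			/* page_alignment: default */
				       0,			/* buffer_start: unused for kernel bos */
				       false,			/* interruptible */
				       NULL,			/* persistant_swap_storage */
				       &bo);
	if (ret)
		return ret;
	/* bo->kref and bo->list_kref are now 1; drop with ttm_bo_unref(&bo). */
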
425/**
426 * ttm_bo_check_placement
427 *
428 * @bo: the buffer object.
429 * @set_flags: placement flags to set.
430 * @clr_flags: placement flags to clear.
431 *
432 * Performs minimal validity checking on an intended change of
433 * placement flags.
434 * Returns
435 * -EINVAL: Intended change is invalid or not allowed.
436 */
437
438extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
439 uint32_t set_flags, uint32_t clr_flags);
440
441/**
442 * ttm_bo_init_mm
443 *
444 * @bdev: Pointer to a ttm_bo_device struct.
445 * @mem_type: The memory type.
446 * @p_offset: offset for managed area in pages.
447 * @p_size: size of the managed area in pages.
448 *
449 * Initialize a manager for a given memory type.
450 * Note: if part of driver firstopen, it must be protected from a
451 * potentially racing lastclose.
452 * Returns:
453 * -EINVAL: invalid size or memory type.
454 * -ENOMEM: Not enough memory.
455 * May also return driver-specified errors.
456 */
457
458extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
459 unsigned long p_offset, unsigned long p_size);
460/**
461 * ttm_bo_clean_mm
462 *
463 * @bdev: Pointer to a ttm_bo_device struct.
464 * @mem_type: The memory type.
465 *
466 * Take down a manager for a given memory type after first walking
467 * the LRU list to evict any buffers left alive.
468 *
469 * Normally, this function is part of lastclose() or unload(), and at that
470 * point there shouldn't be any buffers left created by user-space, since
471 * they should've been removed by the file descriptor release() method.
472 * However, before this function is run, make sure to signal all sync objects,
473 * and verify that the delayed delete queue is empty. The driver must also
474 * make sure that there are no NO_EVICT buffers present in this memory type
475 * when the call is made.
476 *
477 * If this function is part of a VT switch, the caller must make sure that
478 * there are no applications currently validating buffers before this
479 * function is called. The caller can do that by first taking the
480 * struct ttm_bo_device::ttm_lock in write mode.
481 *
482 * Returns:
483 * -EINVAL: invalid or uninitialized memory type.
484 * -EBUSY: There are still buffers left in this memory type.
485 */
486
487extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
488
489/**
490 * ttm_bo_evict_mm
491 *
492 * @bdev: Pointer to a ttm_bo_device struct.
493 * @mem_type: The memory type.
494 *
495 * Evicts all buffers on the lru list of the memory type.
496 * This is normally part of a VT switch or an
497 * out-of-memory-space-due-to-fragmentation handler.
498 * The caller must make sure that there are no other processes
499 * currently validating buffers, and can do that by taking the
500 * struct ttm_bo_device::ttm_lock in write mode.
501 *
502 * Returns:
503 * -EINVAL: Invalid or uninitialized memory type.
504 * -ERESTART: The call was interrupted by a signal while waiting to
505 * evict a buffer.
506 */
507
508extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
509
510/**
511 * ttm_kmap_obj_virtual
512 *
513 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
514 * @is_iomem: Pointer to an integer that on return indicates 1 if the
515 * virtual map is io memory, 0 if normal memory.
516 *
517 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
518 * If *is_iomem is 1 on return, the virtual address points to an io memory area
519 * that should strictly be accessed by the iowriteXX() and similar functions.
520 */
521
522static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
523 bool *is_iomem)
524{
525 *is_iomem = (map->bo_kmap_type == ttm_bo_map_iomap ||
526 map->bo_kmap_type == ttm_bo_map_premapped);
527 return map->virtual;
528}
529
530/**
531 * ttm_bo_kmap
532 *
533 * @bo: The buffer object.
534 * @start_page: The first page to map.
535 * @num_pages: Number of pages to map.
536 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
537 *
538 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
539 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
540 * used to obtain a virtual address to the data.
541 *
542 * Returns
543 * -ENOMEM: Out of memory.
544 * -EINVAL: Invalid range.
545 */
546
547extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
548 unsigned long num_pages, struct ttm_bo_kmap_obj *map);
549
550/**
551 * ttm_bo_kunmap
552 *
553 * @map: Object describing the map to unmap.
554 *
555 * Unmaps a kernel map set up by ttm_bo_kmap.
556 */
557
558extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
559
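Putting ttm_bo_kmap, ttm_kmap_obj_virtual and ttm_bo_kunmap together, CPU access from the kernel roughly follows the pattern below; data and len are illustrative, and whether the io accessors are needed depends on the is_iomem result:

	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virtual, data, len);	/* io memory */
	else
		memcpy(virtual, data, len);				/* ordinary pages */

	ttm_bo_kunmap(&map);
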
560#if 0
561#endif
562
563/**
564 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
565 *
566 * @vma: vma as input from the fbdev mmap method.
567 * @bo: The bo backing the address space. The address space will
568 * have the same size as the bo, and start at offset 0.
569 *
570 * This function is intended to be called by the fbdev mmap method
571 * if the fbdev address space is to be backed by a bo.
572 */
573
574extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
575 struct ttm_buffer_object *bo);
576
577/**
578 * ttm_bo_mmap - mmap out of the ttm device address space.
579 *
580 * @filp: filp as input from the mmap method.
581 * @vma: vma as input from the mmap method.
582 * @bdev: Pointer to the ttm_bo_device with the address space manager.
583 *
584 * This function is intended to be called by the device mmap method,
585 * if the device address space is to be backed by the bo manager.
586 */
587
588extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
589 struct ttm_bo_device *bdev);
590
591/**
592 * ttm_bo_io
593 *
594 * @bdev: Pointer to the struct ttm_bo_device.
595 * @filp: Pointer to the struct file attempting to read / write.
596 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
597 * @rbuf: User-space pointer to address of buffer to read into.
598 * Null on write.
599 * @count: Number of bytes to read / write.
600 * @f_pos: Pointer to current file position.
601 * @write: 1 for write, 0 for read.
602 *
603 * This function implements read / write into ttm buffer objects, and is
604 * intended to
605 * be called from the fops::read and fops::write method.
606 * Returns:
607 * See man (2) write, man(2) read. In particular,
608 * the function may return -EINTR if
609 * interrupted by a signal.
610 */
611
612extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
613 const char __user *wbuf, char __user *rbuf,
614 size_t count, loff_t *f_pos, bool write);
615
616extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
617
618#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
new file mode 100644
index 000000000000..62ed733c52a2
--- /dev/null
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -0,0 +1,867 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30#ifndef _TTM_BO_DRIVER_H_
31#define _TTM_BO_DRIVER_H_
32
33#include "ttm/ttm_bo_api.h"
34#include "ttm/ttm_memory.h"
35#include "drm_mm.h"
36#include "linux/workqueue.h"
37#include "linux/fs.h"
38#include "linux/spinlock.h"
39
40struct ttm_backend;
41
42struct ttm_backend_func {
43 /**
44 * struct ttm_backend_func member populate
45 *
46 * @backend: Pointer to a struct ttm_backend.
47 * @num_pages: Number of pages to populate.
48 * @pages: Array of pointers to ttm pages.
49 * @dummy_read_page: Page to be used instead of NULL pages in the
50 * array @pages.
51 *
52 * Populate the backend with ttm pages. Depending on the backend,
53 * it may or may not copy the @pages array.
54 */
55 int (*populate) (struct ttm_backend *backend,
56 unsigned long num_pages, struct page **pages,
57 struct page *dummy_read_page);
58 /**
59 * struct ttm_backend_func member clear
60 *
61 * @backend: Pointer to a struct ttm_backend.
62 *
63 * This is an "unpopulate" function. Release all resources
64 * allocated with populate.
65 */
66 void (*clear) (struct ttm_backend *backend);
67
68 /**
69 * struct ttm_backend_func member bind
70 *
71 * @backend: Pointer to a struct ttm_backend.
72 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
73 * memory type and location for binding.
74 *
75 * Bind the backend pages into the aperture in the location
76 * indicated by @bo_mem. This function should be able to handle
77 * differences between aperture- and system page sizes.
78 */
79 int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
80
81 /**
82 * struct ttm_backend_func member unbind
83 *
84 * @backend: Pointer to a struct ttm_backend.
85 *
86 * Unbind previously bound backend pages. This function should be
87 * able to handle differences between aperture- and system page sizes.
88 */
89 int (*unbind) (struct ttm_backend *backend);
90
91 /**
92 * struct ttm_backend_func member destroy
93 *
94 * @backend: Pointer to a struct ttm_backend.
95 *
96 * Destroy the backend.
97 */
98 void (*destroy) (struct ttm_backend *backend);
99};
100
101/**
102 * struct ttm_backend
103 *
104 * @bdev: Pointer to a struct ttm_bo_device.
105 * @flags: For driver use.
106 * @func: Pointer to a struct ttm_backend_func that describes
107 * the backend methods.
108 *
109 */
110
111struct ttm_backend {
112 struct ttm_bo_device *bdev;
113 uint32_t flags;
114 struct ttm_backend_func *func;
115};
116
117#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
118#define TTM_PAGE_FLAG_USER (1 << 1)
119#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
120#define TTM_PAGE_FLAG_WRITE (1 << 3)
121#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
122#define TTM_PAGE_FLAG_PERSISTANT_SWAP (1 << 5)
123#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
124
125enum ttm_caching_state {
126 tt_uncached,
127 tt_wc,
128 tt_cached
129};
130
131/**
132 * struct ttm_tt
133 *
134 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
135 * pointer.
136 * @pages: Array of pages backing the data.
137 * @first_himem_page: Himem pages are put last in the page array, which
138 * enables us to run caching attribute changes on only the first part
139 * of the page array containing lomem pages. This is the index of the
140 * first himem page.
141 * @last_lomem_page: Index of the last lomem page in the page array.
142 * @num_pages: Number of pages in the page array.
143 * @bdev: Pointer to the current struct ttm_bo_device.
144 * @be: Pointer to the ttm backend.
145 * @tsk: The task for user ttm.
146 * @start: virtual address for user ttm.
147 * @swap_storage: Pointer to shmem struct file for swap storage.
148 * @caching_state: The current caching state of the pages.
149 * @state: The current binding state of the pages.
150 *
151 * This is a structure holding the pages, caching- and aperture binding
152 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
153 * memory.
154 */
155
156struct ttm_tt {
157 struct page *dummy_read_page;
158 struct page **pages;
159 long first_himem_page;
160 long last_lomem_page;
161 uint32_t page_flags;
162 unsigned long num_pages;
163 struct ttm_bo_device *bdev;
164 struct ttm_backend *be;
165 struct task_struct *tsk;
166 unsigned long start;
167 struct file *swap_storage;
168 enum ttm_caching_state caching_state;
169 enum {
170 tt_bound,
171 tt_unbound,
172 tt_unpopulated,
173 } state;
174};
175
176#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
177#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
178#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
179 before kernel access. */
180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
181
182/**
183 * struct ttm_mem_type_manager
184 *
185 * @has_type: The memory type has been initialized.
186 * @use_type: The memory type is enabled.
187 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
188 * managed by this memory type.
189 * @gpu_offset: If used, the GPU offset of the first managed page of
190 * fixed memory or the first managed location in an aperture.
191 * @io_offset: The io_offset of the first managed page of IO memory or
192 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
193 * memory, this should be set to NULL.
194 * @io_size: The size of a managed IO region (fixed memory or aperture).
195 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
196 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
197 * @io_addr should be set to NULL.
198 * @size: Size of the managed region.
199 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
200 * as defined in ttm_placement_common.h
201 * @default_caching: The default caching policy used for a buffer object
202 * placed in this memory type if the user doesn't provide one.
203 * @manager: The range manager used for this memory type. FIXME: If the aperture
204 * has a page size different from the underlying system, the granularity
205 * of this manager should take care of this. But the range allocating code
206 * in ttm_bo.c needs to be modified for this.
207 * @lru: The lru list for this memory type.
208 *
209 * This structure is used to identify and manage memory types for a device.
210 * It's set up by the ttm_bo_driver::init_mem_type method.
211 */
212
213struct ttm_mem_type_manager {
214
215 /*
216 * No protection. Constant from start.
217 */
218
219 bool has_type;
220 bool use_type;
221 uint32_t flags;
222 unsigned long gpu_offset;
223 unsigned long io_offset;
224 unsigned long io_size;
225 void *io_addr;
226 uint64_t size;
227 uint32_t available_caching;
228 uint32_t default_caching;
229
230 /*
231 * Protected by the bdev->lru_lock.
232 * TODO: Consider one lru_lock per ttm_mem_type_manager.
233 * Plays ill with list removal, though.
234 */
235
236 struct drm_mm manager;
237 struct list_head lru;
238};
239
240/**
241 * struct ttm_bo_driver
242 *
243 * @mem_type_prio: Priority array of memory types to place a buffer object in
244 * if it fits without evicting buffers from any of these memory types.
245 * @mem_busy_prio: Priority array of memory types to place a buffer object in
246 * if it needs to evict buffers to make room.
247 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
248 * @num_mem_busy_prio: Number of elements in the @mem_busy_prio array.
249 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
250 * @invalidate_caches: Callback to invalidate read caches when a buffer object
251 * has been evicted.
252 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
253 * structure.
254 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
255 * @move: Callback for a driver to hook in accelerated functions to
256 * move a buffer.
257 * If set to NULL, a potentially slow memcpy() move is used.
258 * @sync_obj_signaled: See ttm_fence_api.h
259 * @sync_obj_wait: See ttm_fence_api.h
260 * @sync_obj_flush: See ttm_fence_api.h
261 * @sync_obj_unref: See ttm_fence_api.h
262 * @sync_obj_ref: See ttm_fence_api.h
263 */
264
265struct ttm_bo_driver {
266 const uint32_t *mem_type_prio;
267 const uint32_t *mem_busy_prio;
268 uint32_t num_mem_type_prio;
269 uint32_t num_mem_busy_prio;
270
271 /**
272 * struct ttm_bo_driver member create_ttm_backend_entry
273 *
274 * @bdev: The buffer object device.
275 *
276 * Create a driver specific struct ttm_backend.
277 */
278
279 struct ttm_backend *(*create_ttm_backend_entry)
280 (struct ttm_bo_device *bdev);
281
282 /**
283 * struct ttm_bo_driver member invalidate_caches
284 *
285 * @bdev: the buffer object device.
286 * @flags: new placement of the rebound buffer object.
287 *
288 * A previously evicted buffer has been rebound in a
289 * potentially new location. Tell the driver that it might
290 * consider invalidating read (texture) caches on the next command
291 * submission as a consequence.
292 */
293
294 int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
295 int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
296 struct ttm_mem_type_manager *man);
297 /**
298 * struct ttm_bo_driver member evict_flags:
299 *
300 * @bo: the buffer object to be evicted
301 *
302 * Return the bo flags for a buffer which is not mapped to the hardware.
303 * These will be placed in proposed_flags so that when the move is
304 * finished, they'll end up in bo->mem.flags
305 */
306
307 uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
308 /**
309 * struct ttm_bo_driver member move:
310 *
311 * @bo: the buffer to move
312 * @evict: whether this motion is evicting the buffer from
313 * the graphics address space
314 * @interruptible: Use interruptible sleeps if possible when sleeping.
315 * @no_wait: whether this should give up and return -EBUSY
316 * if this move would require sleeping
317 * @new_mem: the new memory region receiving the buffer
318 *
319 * Move a buffer between two memory regions.
320 */
321 int (*move) (struct ttm_buffer_object *bo,
322 bool evict, bool interruptible,
323 bool no_wait, struct ttm_mem_reg *new_mem);
324
325 /**
326 * struct ttm_bo_driver_member verify_access
327 *
328 * @bo: Pointer to a buffer object.
329 * @filp: Pointer to a struct file trying to access the object.
330 *
331 * Called from the map / write / read methods to verify that the
332 * caller is permitted to access the buffer object.
333 * This member may be set to NULL, which will refuse this kind of
334 * access for all buffer objects.
335 * This function should return 0 if access is granted, -EPERM otherwise.
336 */
337 int (*verify_access) (struct ttm_buffer_object *bo,
338 struct file *filp);
339
340 /**
341 * In case a driver writer dislikes the TTM fence objects,
342 * the driver writer can replace those with sync objects of
343 * his / her own. If it turns out that no driver writer is
344 * using these. I suggest we remove these hooks and plug in
345 * fences directly. The bo driver needs the following functionality:
346 * See the corresponding functions in the fence object API
347 * documentation.
348 */
349
350 bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
351 int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
352 bool lazy, bool interruptible);
353 int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
354 void (*sync_obj_unref) (void **sync_obj);
355 void *(*sync_obj_ref) (void *sync_obj);
356};
357
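To make the priority arrays concrete: a driver with dedicated memory might express "place in VRAM first, fall back to GTT, evict into GTT before VRAM" roughly as below. The TTM_PL_* memory-type indices are assumed to come from ttm_placement.h, and the remaining callbacks are driver specific:

static uint32_t my_mem_prio[] = {
	TTM_PL_VRAM,		/* preferred placement when space is free */
	TTM_PL_TT,
	TTM_PL_SYSTEM
};

static uint32_t my_busy_prio[] = {
	TTM_PL_TT,		/* when eviction is required, try GTT first */
	TTM_PL_SYSTEM,
	TTM_PL_VRAM
};

static struct ttm_bo_driver my_bo_driver = {
	.mem_type_prio		= my_mem_prio,
	.mem_busy_prio		= my_busy_prio,
	.num_mem_type_prio	= ARRAY_SIZE(my_mem_prio),
	.num_mem_busy_prio	= ARRAY_SIZE(my_busy_prio),
	/* create_ttm_backend_entry, init_mem_type, evict_flags, move,
	 * verify_access and the sync_obj hooks are filled in by the driver;
	 * a NULL move falls back to the memcpy path. */
};
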
358#define TTM_NUM_MEM_TYPES 8
359
360#define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
361 idling before CPU mapping */
362#define TTM_BO_PRIV_FLAG_MAX 1
363/**
364 * struct ttm_bo_device - Buffer object driver device-specific data.
365 *
366 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
367 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
368 * @count: Current number of buffer object.
369 * @pages: Current number of pinned pages.
370 * @dummy_read_page: Pointer to a dummy page used for mapping requests
371 * of unpopulated pages.
372 * @shrink: A shrink callback object used for buffer object swap.
373 * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
374 * used by a buffer object. This is excluding page arrays and backing pages.
375 * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
376 * @man: An array of mem_type_managers.
377 * @addr_space_mm: Range manager for the device address space.
378 * @lru_lock: Spinlock that protects the buffer+device lru lists and
379 * ddestroy lists.
380 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
381 * If a GPU lockup has been detected, this is forced to 0.
382 * @dev_mapping: A pointer to the struct address_space representing the
383 * device address space.
384 * @wq: Work queue structure for the delayed delete workqueue.
385 *
386 */
387
388struct ttm_bo_device {
389
390 /*
391 * Constant after bo device init / atomic.
392 */
393
394 struct ttm_mem_global *mem_glob;
395 struct ttm_bo_driver *driver;
396 struct page *dummy_read_page;
397 struct ttm_mem_shrink shrink;
398
399 size_t ttm_bo_extra_size;
400 size_t ttm_bo_size;
401
402 rwlock_t vm_lock;
403 /*
404 * Protected by the vm lock.
405 */
406 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
407 struct rb_root addr_space_rb;
408 struct drm_mm addr_space_mm;
409
410 /*
411 * Might want to change this to one lock per manager.
412 */
413 spinlock_t lru_lock;
414 /*
415 * Protected by the lru lock.
416 */
417 struct list_head ddestroy;
418 struct list_head swap_lru;
419
420 /*
421 * Protected by load / firstopen / lastclose /unload sync.
422 */
423
424 bool nice_mode;
425 struct address_space *dev_mapping;
426
427 /*
428 * Internal protection.
429 */
430
431 struct delayed_work wq;
432};
433
434/**
435 * ttm_flag_masked
436 *
437 * @old: Pointer to the result and original value.
438 * @new: New value of bits.
439 * @mask: Mask of bits to change.
440 *
441 * Convenience function to change a number of bits identified by a mask.
442 */
443
444static inline uint32_t
445ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
446{
447 *old ^= (*old ^ new) & mask;
448 return *old;
449}
450
451/**
452 * ttm_tt_create
453 *
454 * @bdev: pointer to a struct ttm_bo_device:
455 * @size: Size of the data needed backing.
456 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
457 * @dummy_read_page: See struct ttm_bo_device.
458 *
459 * Create a struct ttm_tt to back data with system memory pages.
460 * No pages are actually allocated.
461 * Returns:
462 * NULL: Out of memory.
463 */
464extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
465 unsigned long size,
466 uint32_t page_flags,
467 struct page *dummy_read_page);
468
469/**
470 * ttm_tt_set_user:
471 *
472 * @ttm: The struct ttm_tt to populate.
473 * @tsk: A struct task_struct for which @start is a valid user-space address.
474 * @start: A valid user-space address.
475 * @num_pages: Size in pages of the user memory area.
476 *
477 * Populate a struct ttm_tt with a user-space memory area after first pinning
478 * the pages backing it.
479 * Returns:
480 * !0: Error.
481 */
482
483extern int ttm_tt_set_user(struct ttm_tt *ttm,
484 struct task_struct *tsk,
485 unsigned long start, unsigned long num_pages);
486
487/**
488 * ttm_ttm_bind:
489 *
490 * @ttm: The struct ttm_tt containing backing pages.
491 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
492 *
493 * Bind the pages of @ttm to an aperture location identified by @bo_mem
494 */
495extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
496
497/**
498 * ttm_ttm_destroy:
499 *
500 * @ttm: The struct ttm_tt.
501 *
502 * Unbind, unpopulate and destroy a struct ttm_tt.
503 */
504extern void ttm_tt_destroy(struct ttm_tt *ttm);
505
506/**
507 * ttm_ttm_unbind:
508 *
509 * @ttm: The struct ttm_tt.
510 *
511 * Unbind a struct ttm_tt.
512 */
513extern void ttm_tt_unbind(struct ttm_tt *ttm);
514
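Condensed, the expected life cycle of a ttm_tt looks roughly like this; bo and bdev are assumed to be a buffer object and its device, with the caching and binding details left to the helpers:

	struct ttm_tt *ttm;
	int ret;

	ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 0,
			    bdev->dummy_read_page);
	if (!ttm)
		return -ENOMEM;

	ret = ttm_tt_bind(ttm, &bo->mem);	/* populates and binds the pages */
	if (ret) {
		ttm_tt_destroy(ttm);
		return ret;
	}

	/* ... use the bound pages ... */

	ttm_tt_destroy(ttm);			/* unbinds, unpopulates and frees */
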
515/**
516 * ttm_ttm_destroy:
517 *
518 * @ttm: The struct ttm_tt.
519 * @index: Index of the desired page.
520 *
521 * Return a pointer to the struct page backing @ttm at page
522 * index @index. If the page is unpopulated, one will be allocated to
523 * populate that index.
524 *
525 * Returns:
526 * NULL on OOM.
527 */
528extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
529
530/**
531 * ttm_tt_cache_flush:
532 *
533 * @pages: An array of pointers to struct page:s to flush.
534 * @num_pages: Number of pages to flush.
535 *
536 * Flush the data of the indicated pages from the cpu caches.
537 * This is used when changing caching attributes of the pages from
538 * cache-coherent.
539 */
540extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);
541
542/**
543 * ttm_tt_set_placement_caching:
544 *
545 * @ttm A struct ttm_tt the backing pages of which will change caching policy.
546 * @placement: Flag indicating the desired caching policy.
547 *
548 * This function will change caching policy of any default kernel mappings of
549 * the pages backing @ttm. If changing from cached to uncached or
550 * write-combined,
551 * all CPU caches will first be flushed to make sure the data of the pages
552 * hit RAM. This function may be very costly as it involves global TLB
553 * and cache flushes and potential page splitting / combining.
554 */
555extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
556extern int ttm_tt_swapout(struct ttm_tt *ttm,
557 struct file *persistant_swap_storage);
558
559/*
560 * ttm_bo.c
561 */
562
563/**
564 * ttm_mem_reg_is_pci
565 *
566 * @bdev: Pointer to a struct ttm_bo_device.
567 * @mem: A valid struct ttm_mem_reg.
568 *
569 * Returns true if the memory described by @mem is PCI memory,
570 * false otherwise.
571 */
572extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
573 struct ttm_mem_reg *mem);
574
575/**
576 * ttm_bo_mem_space
577 *
578 * @bo: Pointer to a struct ttm_buffer_object, the data of which
579 * we want to allocate space for.
580 * @proposed_placement: Proposed new placement for the buffer object.
581 * @mem: A struct ttm_mem_reg.
582 * @interruptible: Sleep interruptible when sleeping.
583 * @no_wait: Don't sleep waiting for space to become available.
584 *
585 * Allocate memory space for the buffer object pointed to by @bo, using
586 * the placement flags in @mem, potentially evicting other idle buffer objects.
587 * This function may sleep while waiting for space to become available.
588 * Returns:
589 * -EBUSY: No space available (only if no_wait == 1).
590 * -ENOMEM: Could not allocate memory for the buffer object, either due to
591 * fragmentation or concurrent allocators.
592 * -ERESTART: An interruptible sleep was interrupted by a signal.
593 */
594extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
595 uint32_t proposed_placement,
596 struct ttm_mem_reg *mem,
597 bool interruptible, bool no_wait);
598/**
599 * ttm_bo_wait_for_cpu
600 *
601 * @bo: Pointer to a struct ttm_buffer_object.
602 * @no_wait: Don't sleep while waiting.
603 *
604 * Wait until a buffer object is no longer sync'ed for CPU access.
605 * Returns:
606 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
607 * -ERESTART: An interruptible sleep was interrupted by a signal.
608 */
609
610extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
611
612/**
613 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
614 *
615 * @bo Pointer to a struct ttm_buffer_object.
616 * @bus_base On return the base of the PCI region
617 * @bus_offset On return the byte offset into the PCI region
618 * @bus_size On return the byte size of the buffer object or zero if
619 * the buffer object memory is not accessible through a PCI region.
620 *
621 * Returns:
622 * -EINVAL if the buffer object is currently not mappable.
623 * 0 otherwise.
624 */
625
626extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
627 struct ttm_mem_reg *mem,
628 unsigned long *bus_base,
629 unsigned long *bus_offset,
630 unsigned long *bus_size);
631
632extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
633
634/**
635 * ttm_bo_device_init
636 *
637 * @bdev: A pointer to a struct ttm_bo_device to initialize.
638 * @mem_global: A pointer to an initialized struct ttm_mem_global.
639 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
640 * @file_page_offset: Offset into the device address space that is available
641 * for buffer data. This ensures compatibility with other users of the
642 * address space.
643 *
644 * Initializes a struct ttm_bo_device:
645 * Returns:
646 * !0: Failure.
647 */
648extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
649 struct ttm_mem_global *mem_glob,
650 struct ttm_bo_driver *driver,
651 uint64_t file_page_offset);
652
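At driver load time the device structure and its memory-type managers are typically brought up together; a sketch, where the sizes, the global memory object and DRM_FILE_PAGE_OFFSET (the start of mmapable bo space) are placeholders and the TTM_PL_* indices are assumed from ttm_placement.h:

	ret = ttm_bo_device_init(&my_dev->bdev, mem_glob, &my_bo_driver,
				 DRM_FILE_PAGE_OFFSET);
	if (ret)
		return ret;

	/* One range manager per memory type the device exposes. */
	ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_VRAM, 0,
			     vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;
	ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_TT, 0,
			     gtt_size >> PAGE_SHIFT);
	if (ret)
		return ret;
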
653/**
654 * ttm_bo_reserve:
655 *
656 * @bo: A pointer to a struct ttm_buffer_object.
657 * @interruptible: Sleep interruptible if waiting.
658 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
659 * @use_sequence: If @bo is already reserved, Only sleep waiting for
660 * it to become unreserved if @sequence < (@bo)->sequence.
661 *
662 * Locks a buffer object for validation (or prevents other processes from
663 * locking it for validation) and removes it from lru lists, while taking
664 * a number of measures to prevent deadlocks.
665 *
666 * Deadlocks may occur when two processes try to reserve multiple buffers in
667 * different order, either by will or as a result of a buffer being evicted
668 * to make room for a buffer already reserved. (Buffers are reserved before
669 * they are evicted). The following algorithm prevents such deadlocks from
670 * occurring:
671 * 1) Buffers are reserved with the lru spinlock held. Upon successful
672 * reservation they are removed from the lru list. This stops a reserved buffer
673 * from being evicted. However the lru spinlock is released between the time
674 * a buffer is selected for eviction and the time it is reserved.
675 * Therefore a check is made when a buffer is reserved for eviction, that it
676 * is still the first buffer in the lru list, before it is removed from the
677 * list. @check_lru == 1 forces this check. If it fails, the function returns
678 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
679 * the procedure.
680 * 2) Processes attempting to reserve multiple buffers other than for eviction,
681 * (typically execbuf), should first obtain a unique 32-bit
682 * validation sequence number,
683 * and call this function with @use_sequence == 1 and @sequence == the unique
684 * sequence number. If upon call of this function, the buffer object is already
685 * reserved, the validation sequence is checked against the validation
686 * sequence of the process currently reserving the buffer,
687 * and if the current validation sequence is greater than that of the process
688 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
689 * waiting for the buffer to become unreserved, after which it retries
690 * reserving.
691 * The caller should, when receiving an -EAGAIN error
692 * release all its buffer reservations, wait for @bo to become unreserved, and
693 * then rerun the validation with the same validation sequence. This procedure
694 * will always guarantee that the process with the lowest validation sequence
695 * will eventually succeed, preventing both deadlocks and starvation.
696 *
697 * Returns:
698 * -EAGAIN: The reservation may cause a deadlock.
699 * Release all buffer reservations, wait for @bo to become unreserved and
700 * try again. (only if use_sequence == 1).
701 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
702 * a signal. Release all buffer reservations and return to user-space.
703 */
704extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
705 bool interruptible,
706 bool no_wait, bool use_sequence, uint32_t sequence);
707
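A compressed sketch of the execbuf-style loop the comment above describes; the validate-entry type, its ->bo and ->list members, and the my_unreserve_all() helper are hypothetical driver-side constructs, not part of this header:

static int my_reserve_all(struct list_head *head, uint32_t val_seq)
{
	struct my_validate_entry *entry;	/* hypothetical: has ->bo and ->list */
	int ret;

retry:
	list_for_each_entry(entry, head, list) {
		ret = ttm_bo_reserve(entry->bo, true, false, true, val_seq);
		if (ret == 0)
			continue;
		if (ret == -EAGAIN) {
			/* A process with an older sequence holds a buffer we
			 * need: back off completely, wait for it to become
			 * unreserved, then retry with the same sequence. */
			my_unreserve_all(head);
			ret = ttm_bo_wait_unreserved(entry->bo, true);
			if (ret)
				return ret;
			goto retry;
		}
		my_unreserve_all(head);		/* -ERESTART etc.: unwind */
		return ret;
	}
	return 0;
}
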
708/**
709 * ttm_bo_unreserve
710 *
711 * @bo: A pointer to a struct ttm_buffer_object.
712 *
713 * Unreserve a previous reservation of @bo.
714 */
715extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
716
717/**
718 * ttm_bo_wait_unreserved
719 *
720 * @bo: A pointer to a struct ttm_buffer_object.
721 *
722 * Wait for a struct ttm_buffer_object to become unreserved.
723 * This is typically used in the execbuf code to relax cpu usage when
724 * backing off from a potential deadlock condition.
725 */
726extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
727 bool interruptible);
728
729/**
730 * ttm_bo_block_reservation
731 *
732 * @bo: A pointer to a struct ttm_buffer_object.
733 * @interruptible: Use interruptible sleep when waiting.
734 * @no_wait: Don't sleep, but rather return -EBUSY.
735 *
736 * Block reservation for validation by simply reserving the buffer.
737 * This is intended for single buffer use only without eviction,
738 * and thus needs no deadlock protection.
739 *
740 * Returns:
741 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
742 * -ERESTART: If interruptible == 1 and the process received a signal
743 * while sleeping.
744 */
745extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
746 bool interruptible, bool no_wait);
747
748/**
749 * ttm_bo_unblock_reservation
750 *
751 * @bo: A pointer to a struct ttm_buffer_object.
752 *
753 * Unblocks reservation leaving lru lists untouched.
754 */
755extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
756
757/*
758 * ttm_bo_util.c
759 */
760
761/**
762 * ttm_bo_move_ttm
763 *
764 * @bo: A pointer to a struct ttm_buffer_object.
765 * @evict: 1: This is an eviction. Don't try to pipeline.
766 * @no_wait: Never sleep, but rather return with -EBUSY.
767 * @new_mem: struct ttm_mem_reg indicating where to move.
768 *
769 * Optimized move function for a buffer object with both old and
770 * new placement backed by a TTM. The function will, if successful,
771 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
772 * and update the (@bo)->mem placement flags. If unsuccessful, the old
773 * data remains untouched, and it's up to the caller to free the
774 * memory space indicated by @new_mem.
775 * Returns:
776 * !0: Failure.
777 */
778
779extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
780 bool evict, bool no_wait,
781 struct ttm_mem_reg *new_mem);
782
783/**
784 * ttm_bo_move_memcpy
785 *
786 * @bo: A pointer to a struct ttm_buffer_object.
787 * @evict: 1: This is an eviction. Don't try to pipeline.
788 * @no_wait: Never sleep, but rather return with -EBUSY.
789 * @new_mem: struct ttm_mem_reg indicating where to move.
790 *
791 * Fallback move function for a mappable buffer object in mappable memory.
792 * The function will, if successful,
793 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
794 * and update the (@bo)->mem placement flags. If unsuccessful, the old
795 * data remains untouched, and it's up to the caller to free the
796 * memory space indicated by @new_mem.
797 * Returns:
798 * !0: Failure.
799 */
800
801extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
802 bool evict,
803 bool no_wait, struct ttm_mem_reg *new_mem);
804
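
The two helpers above are typically called from a driver's move hook; the sketch below is illustrative only (the hook name and the placement test are assumptions, not part of this header):

	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
				 bool no_wait, struct ttm_mem_reg *new_mem)
	{
		struct ttm_mem_reg *old_mem = &bo->mem;

		if (old_mem->mem_type == TTM_PL_SYSTEM &&
		    new_mem->mem_type == TTM_PL_TT)
			/* Both ends backed by a TTM: cheap path. */
			return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);

		/* No accelerated copy available: fall back to memcpy. */
		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
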
805/**
806 * ttm_bo_free_old_node
807 *
808 * @bo: A pointer to a struct ttm_buffer_object.
809 *
810 * Utility function to free an old placement after a successful move.
811 */
812extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
813
814/**
815 * ttm_bo_move_accel_cleanup.
816 *
817 * @bo: A pointer to a struct ttm_buffer_object.
818 * @sync_obj: A sync object that signals when moving is complete.
819 * @sync_obj_arg: An argument to pass to the sync object idle / wait
820 * functions.
821 * @evict: This is an evict move. Don't return until the buffer is idle.
822 * @no_wait: Never sleep, but rather return with -EBUSY.
823 * @new_mem: struct ttm_mem_reg indicating where to move.
824 *
825 * Accelerated move function to be called when an accelerated move
826 * has been scheduled. The function will create a new temporary buffer object
827 * representing the old placement, and put the sync object on both buffer
828 * objects. After that, the newly created buffer object is unref'd to be
829 * destroyed when the move is complete. This will help pipeline
830 * buffer moves.
831 */
832
833extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
834 void *sync_obj,
835 void *sync_obj_arg,
836 bool evict, bool no_wait,
837 struct ttm_mem_reg *new_mem);
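
A hedged sketch of how a driver might use this after scheduling a hardware blit from inside its move hook; the fence object and the function that emits it are hypothetical, only ttm_bo_move_accel_cleanup() is from this header:

	void *fence;

	/* mydrv_copy_blit() is assumed to enqueue the copy on the GPU and
	 * return a driver fence that signals on completion. */
	fence = mydrv_copy_blit(bo, new_mem);

	/* Hand the fence to TTM; the old placement is torn down once the
	 * fence signals, so the move can be pipelined. */
	return ttm_bo_move_accel_cleanup(bo, fence, NULL,
					 evict, no_wait, new_mem);
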
838/**
839 * ttm_io_prot
840 *
841 * @c_state: Caching state.
842 * @tmp: Page protection flag for a normal, cached mapping.
843 *
844 * Utility function that returns the pgprot_t that should be used for
845 * setting up a PTE with the caching model indicated by @c_state.
846 */
847extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
848
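
For illustration only (the caching variable is an assumption), the helper is typically used when constructing a kernel mapping of the buffer:

	/* Start from a normal cached protection and let TTM adjust it to
	 * the object's caching state (e.g. write-combined or uncached). */
	pgprot_t prot = ttm_io_prot(caching_state, PAGE_KERNEL);
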
849#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
850#define TTM_HAS_AGP
851#include <linux/agp_backend.h>
852
853/**
854 * ttm_agp_backend_init
855 *
856 * @bdev: Pointer to a struct ttm_bo_device.
857 * @bridge: The agp bridge this device is sitting on.
858 *
859 * Create a TTM backend that uses the indicated AGP bridge as an aperture
860 * for TT memory. This function uses the linux agpgart interface to
861 * bind and unbind memory backing a ttm_tt.
862 */
863extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
864 struct agp_bridge_data *bridge);
865#endif
866
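A hedged initialization sketch: agp_backend_acquire()/agp_backend_release() are the standard agpgart entry points, everything driver-side (pdev, bdev, the error handling) is assumed context:

	struct agp_bridge_data *bridge;
	struct ttm_backend *backend;

	bridge = agp_backend_acquire(pdev);	/* pdev: the device's struct pci_dev */
	if (bridge == NULL)
		return -ENODEV;

	backend = ttm_agp_backend_init(bdev, bridge);
	if (backend == NULL) {
		agp_backend_release(bridge);
		return -ENOMEM;
	}
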
867#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
new file mode 100644
index 000000000000..d8b8f042c4f1
--- /dev/null
+++ b/include/drm/ttm/ttm_memory.h
@@ -0,0 +1,153 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef TTM_MEMORY_H
29#define TTM_MEMORY_H
30
31#include <linux/workqueue.h>
32#include <linux/spinlock.h>
33#include <linux/wait.h>
34#include <linux/errno.h>
35
36/**
37 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
38 *
39 * @do_shrink: The callback function.
40 *
41 * Arguments to the do_shrink functions are intended to be passed using
42 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
43 * and can be accessed using container_of().
44 */
45
46struct ttm_mem_shrink {
47 int (*do_shrink) (struct ttm_mem_shrink *);
48};
49
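A sketch of the inheritance pattern the comment describes; the derived type and its fields are hypothetical:

	struct mydrv_mem_shrink {
		struct ttm_mem_shrink shrink;	/* must be embedded */
		struct mydrv_device *dev;	/* driver-private state */
	};

	static int mydrv_do_shrink(struct ttm_mem_shrink *shrink)
	{
		struct mydrv_mem_shrink *ms =
			container_of(shrink, struct mydrv_mem_shrink, shrink);

		/* ... swap out buffers belonging to ms->dev ... */
		return 0;
	}
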
50/**
51 * struct ttm_mem_global - Global memory accounting structure.
52 *
53 * @shrink: A single callback to shrink TTM memory usage. Extend this
54 * to a linked list to be able to handle multiple callbacks when needed.
55 * @swap_queue: A workqueue to handle shrinking in low memory situations. We
56 * need a separate workqueue since it will spend a lot of time waiting
57 * for the GPU, and this would otherwise block other workqueue tasks.
58 * At this point we use only a single-threaded workqueue.
59 * @work: The workqueue callback for the shrink queue.
60 * @queue: Wait queue for processes suspended waiting for memory.
61 * @lock: Lock to protect the @shrink callback and the memory accounting members,
62 * that is, essentially the whole structure with some exceptions.
63 * @emer_memory: Lowmem memory limit available for root.
64 * @max_memory: Lowmem memory limit available for non-root.
65 * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
66 * @used_memory: Currently used lowmem memory.
67 * @used_total_memory: Currently used total (lowmem + highmem) memory.
68 * @total_memory_swap_limit: Total memory limit where the shrink workqueue
69 * kicks in.
70 * @max_total_memory: Total memory available to non-root processes.
71 * @emer_total_memory: Total memory available to root processes.
72 *
73 * Note that this structure is not per device. It should be global for all
74 * graphics devices.
75 */
76
77struct ttm_mem_global {
78 struct ttm_mem_shrink *shrink;
79 struct workqueue_struct *swap_queue;
80 struct work_struct work;
81 wait_queue_head_t queue;
82 spinlock_t lock;
83 uint64_t emer_memory;
84 uint64_t max_memory;
85 uint64_t swap_limit;
86 uint64_t used_memory;
87 uint64_t used_total_memory;
88 uint64_t total_memory_swap_limit;
89 uint64_t max_total_memory;
90 uint64_t emer_total_memory;
91};
92
93/**
94 * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
95 *
96 * @shrink: The object to initialize.
97 * @func: The callback function.
98 */
99
100static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
101 int (*func) (struct ttm_mem_shrink *))
102{
103 shrink->do_shrink = func;
104}
105
106/**
107 * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
108 *
109 * @glob: The struct ttm_mem_global object to register with.
110 * @shrink: An initialized struct ttm_mem_shrink object to register.
111 *
112 * Returns:
113 * -EBUSY: There's already a callback registered. (May change).
114 */
115
116static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
117 struct ttm_mem_shrink *shrink)
118{
119 spin_lock(&glob->lock);
120 if (glob->shrink != NULL) {
121 spin_unlock(&glob->lock);
122 return -EBUSY;
123 }
124 glob->shrink = shrink;
125 spin_unlock(&glob->lock);
126 return 0;
127}
128
129/**
130 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
131 *
132 * @glob: The struct ttm_mem_global object to unregister from.
133 * @shrink: A previously registered struct ttm_mem_shrink object.
134 *
135 */
136
137static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
138 struct ttm_mem_shrink *shrink)
139{
140 spin_lock(&glob->lock);
141 BUG_ON(glob->shrink != shrink);
142 glob->shrink = NULL;
143 spin_unlock(&glob->lock);
144}
145
146extern int ttm_mem_global_init(struct ttm_mem_global *glob);
147extern void ttm_mem_global_release(struct ttm_mem_global *glob);
148extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
149 bool no_wait, bool interruptible, bool himem);
150extern void ttm_mem_global_free(struct ttm_mem_global *glob,
151 uint64_t amount, bool himem);
152extern size_t ttm_round_pot(size_t size);
153#endif
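
As a hedged illustration of the accounting API declared above (the surrounding allocation code and error handling are assumed):

	/* Account for a page-sized, lowmem-backed allocation; block
	 * (interruptibly) if we are above the swap limit. */
	ret = ttm_mem_global_alloc(glob, PAGE_SIZE, false, true, false);
	if (unlikely(ret != 0))
		return ret;

	/* ... allocate and use the memory ... */

	/* Return the accounted amount when the memory is released. */
	ttm_mem_global_free(glob, PAGE_SIZE, false);
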
diff --git a/include/drm/ttm/ttm_module.h b/include/drm/ttm/ttm_module.h
new file mode 100644
index 000000000000..889a4c7958ae
--- /dev/null
+++ b/include/drm/ttm/ttm_module.h
@@ -0,0 +1,58 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_MODULE_H_
32#define _TTM_MODULE_H_
33
34#include <linux/kernel.h>
35
36#define TTM_PFX "[TTM]"
37
38enum ttm_global_types {
39 TTM_GLOBAL_TTM_MEM = 0,
40 TTM_GLOBAL_TTM_BO,
41 TTM_GLOBAL_TTM_OBJECT,
42 TTM_GLOBAL_NUM
43};
44
45struct ttm_global_reference {
46 enum ttm_global_types global_type;
47 size_t size;
48 void *object;
49 int (*init) (struct ttm_global_reference *);
50 void (*release) (struct ttm_global_reference *);
51};
52
53extern void ttm_global_init(void);
54extern void ttm_global_release(void);
55extern int ttm_global_item_ref(struct ttm_global_reference *ref);
56extern void ttm_global_item_unref(struct ttm_global_reference *ref);
57
58#endif /* _TTM_MODULE_H_ */
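
A sketch of how a driver is expected to obtain the global memory accounting object through this interface; the two wrapper callbacks and the variable names are illustrative, the referenced init/release functions are declared in ttm_memory.h above:

	static int mydrv_ttm_mem_global_init(struct ttm_global_reference *ref)
	{
		return ttm_mem_global_init(ref->object);
	}

	static void mydrv_ttm_mem_global_release(struct ttm_global_reference *ref)
	{
		ttm_mem_global_release(ref->object);
	}

	struct ttm_global_reference mem_global_ref = {
		.global_type = TTM_GLOBAL_TTM_MEM,
		.size = sizeof(struct ttm_mem_global),
		.init = &mydrv_ttm_mem_global_init,
		.release = &mydrv_ttm_mem_global_release,
	};

	r = ttm_global_item_ref(&mem_global_ref);	/* paired with ttm_global_item_unref() */
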
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
new file mode 100644
index 000000000000..c84ff153a564
--- /dev/null
+++ b/include/drm/ttm/ttm_placement.h
@@ -0,0 +1,92 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_PLACEMENT_H_
32#define _TTM_PLACEMENT_H_
33/*
34 * Memory regions for data placement.
35 */
36
37#define TTM_PL_SYSTEM 0
38#define TTM_PL_TT 1
39#define TTM_PL_VRAM 2
40#define TTM_PL_PRIV0 3
41#define TTM_PL_PRIV1 4
42#define TTM_PL_PRIV2 5
43#define TTM_PL_PRIV3 6
44#define TTM_PL_PRIV4 7
45#define TTM_PL_PRIV5 8
46#define TTM_PL_SWAPPED 15
47
48#define TTM_PL_FLAG_SYSTEM (1 << TTM_PL_SYSTEM)
49#define TTM_PL_FLAG_TT (1 << TTM_PL_TT)
50#define TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
51#define TTM_PL_FLAG_PRIV0 (1 << TTM_PL_PRIV0)
52#define TTM_PL_FLAG_PRIV1 (1 << TTM_PL_PRIV1)
53#define TTM_PL_FLAG_PRIV2 (1 << TTM_PL_PRIV2)
54#define TTM_PL_FLAG_PRIV3 (1 << TTM_PL_PRIV3)
55#define TTM_PL_FLAG_PRIV4 (1 << TTM_PL_PRIV4)
56#define TTM_PL_FLAG_PRIV5 (1 << TTM_PL_PRIV5)
57#define TTM_PL_FLAG_SWAPPED (1 << TTM_PL_SWAPPED)
58#define TTM_PL_MASK_MEM 0x0000FFFF
59
60/*
61 * Other flags that affect data placement.
62 * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
63 * if available.
64 * TTM_PL_FLAG_SHARED means that another application may
65 * reference the buffer.
66 * TTM_PL_FLAG_NO_EVICT means that the buffer may never
67 * be evicted to make room for other buffers.
68 */
69
70#define TTM_PL_FLAG_CACHED (1 << 16)
71#define TTM_PL_FLAG_UNCACHED (1 << 17)
72#define TTM_PL_FLAG_WC (1 << 18)
73#define TTM_PL_FLAG_SHARED (1 << 20)
74#define TTM_PL_FLAG_NO_EVICT (1 << 21)
75
76#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
77 TTM_PL_FLAG_UNCACHED | \
78 TTM_PL_FLAG_WC)
79
80#define TTM_PL_MASK_MEMTYPE (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)
81
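For example, a scanout buffer that must stay resident in write-combined VRAM could be described by OR-ing a region flag with caching and eviction flags (a hedged illustration, not taken from a driver):

	uint32_t placement = TTM_PL_FLAG_VRAM |
			     TTM_PL_FLAG_WC |
			     TTM_PL_FLAG_NO_EVICT;

	/* TTM_PL_MASK_MEM extracts the memory-region bits,
	 * TTM_PL_MASK_CACHING the caching bits. */
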
82/*
83 * Access flags to be used for CPU and GPU mappings.
84 * The idea is that the TTM synchronization mechanism will
85 * allow concurrent READ access and exclusive write access.
86 * Currently GPU and CPU accesses are exclusive.
87 */
88
89#define TTM_ACCESS_READ (1 << 0)
90#define TTM_ACCESS_WRITE (1 << 1)
91
92#endif