path: root/drivers/gpu/drm/via
author    Dave Airlie <airlied@redhat.com>  2008-05-28 20:09:59 -0400
committer Dave Airlie <airlied@redhat.com>  2008-07-13 20:45:01 -0400
commit    c0e09200dc0813972442e550a5905a132768e56c (patch)
tree      d38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/gpu/drm/via
parent    bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel-based modesetting and the memory manager work, the everything-in-one-directory approach was getting very ugly and starting to be unmanageable. This restructures the drm along the lines of other kernel components. It creates a drivers/gpu/drm directory and moves the hw drivers into subdirectories, moves the includes into include/drm, and sets up the unifdef for the userspace headers we should be exporting.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/via')
-rw-r--r--  drivers/gpu/drm/via/Makefile        |    8
-rw-r--r--  drivers/gpu/drm/via/via_3d_reg.h    | 1650
-rw-r--r--  drivers/gpu/drm/via/via_dma.c       |  755
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c   |  816
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.h   |  140
-rw-r--r--  drivers/gpu/drm/via/via_drv.c       |  100
-rw-r--r--  drivers/gpu/drm/via/via_drv.h       |  153
-rw-r--r--  drivers/gpu/drm/via/via_irq.c       |  377
-rw-r--r--  drivers/gpu/drm/via/via_map.c       |  123
-rw-r--r--  drivers/gpu/drm/via/via_mm.c        |  194
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c  | 1116
-rw-r--r--  drivers/gpu/drm/via/via_verifier.h  |   62
-rw-r--r--  drivers/gpu/drm/via/via_video.c     |   93
13 files changed, 5587 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
new file mode 100644
index 000000000000..d59e258e2c13
--- /dev/null
+++ b/drivers/gpu/drm/via/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

ccflags-y := -Iinclude/drm
via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o

obj-$(CONFIG_DRM_VIA) += via.o
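
The ccflags-y := -Iinclude/drm line above is what lets the relocated driver
sources keep their short include forms after the headers move to include/drm.
A minimal sketch of the effect (the two includes show the usual shape of a
via driver source file of this era; they are illustrative, not lines from
this diff):

/* Sketch: header resolution after the reorganisation. Both headers now
 * live under include/drm/ and are found through the -Iinclude/drm path
 * that ccflags-y adds for every object listed in via-y.
 */
#include "drmP.h"	/* DRM core internals, now include/drm/drmP.h */
#include "via_drm.h"	/* userspace-visible via ioctl ABI */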
diff --git a/drivers/gpu/drm/via/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
new file mode 100644
index 000000000000..462375d543b9
--- /dev/null
+++ b/drivers/gpu/drm/via/via_3d_reg.h
@@ -0,0 +1,1650 @@
/*
 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef VIA_3D_REG_H
#define VIA_3D_REG_H
#define HC_REG_BASE 0x0400

#define HC_REG_TRANS_SPACE 0x0040

#define HC_ParaN_MASK 0xffffffff
#define HC_Para_MASK 0x00ffffff
#define HC_SubA_MASK 0xff000000
#define HC_SubA_SHIFT 24
/* Transmission Setting
 */
#define HC_REG_TRANS_SET 0x003c
#define HC_ParaSubType_MASK 0xff000000
#define HC_ParaType_MASK 0x00ff0000
#define HC_ParaOS_MASK 0x0000ff00
#define HC_ParaAdr_MASK 0x000000ff
#define HC_ParaSubType_SHIFT 24
#define HC_ParaType_SHIFT 16
#define HC_ParaOS_SHIFT 8
#define HC_ParaAdr_SHIFT 0

#define HC_ParaType_CmdVdata 0x0000
#define HC_ParaType_NotTex 0x0001
#define HC_ParaType_Tex 0x0002
#define HC_ParaType_Palette 0x0003
#define HC_ParaType_PreCR 0x0010
#define HC_ParaType_Auto 0x00fe

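The *_MASK/*_SHIFT pairs above describe how a HC_REG_TRANS_SET word is
packed. A minimal C sketch of composing one (the helper name is
hypothetical; only the macros come from this header):

#include <stdint.h>
#include "via_3d_reg.h"

/* Pack parameter type, offset and start address into a
 * transmission-setting word.
 */
static uint32_t hc_trans_set(uint32_t type, uint32_t os, uint32_t adr)
{
	return ((type << HC_ParaType_SHIFT) & HC_ParaType_MASK) |
	       ((os << HC_ParaOS_SHIFT) & HC_ParaOS_MASK) |
	       ((adr << HC_ParaAdr_SHIFT) & HC_ParaAdr_MASK);
}

/* e.g. hc_trans_set(HC_ParaType_NotTex, 0, 0) selects a non-texture
 * parameter transfer starting at sub-address 0.
 */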
/* Transmission Space
 */
#define HC_REG_Hpara0 0x0040
#define HC_REG_HpataAF 0x02fc

/* Read
 */
#define HC_REG_HREngSt 0x0000
#define HC_REG_HRFIFOempty 0x0004
#define HC_REG_HRFIFOfull 0x0008
#define HC_REG_HRErr 0x000c
#define HC_REG_FIFOstatus 0x0010
/* HC_REG_HREngSt 0x0000
 */
#define HC_HDASZC_MASK 0x00010000
#define HC_HSGEMI_MASK 0x0000f000
#define HC_HLGEMISt_MASK 0x00000f00
#define HC_HCRSt_MASK 0x00000080
#define HC_HSE0St_MASK 0x00000040
#define HC_HSE1St_MASK 0x00000020
#define HC_HPESt_MASK 0x00000010
#define HC_HXESt_MASK 0x00000008
#define HC_HBESt_MASK 0x00000004
#define HC_HE2St_MASK 0x00000002
#define HC_HE3St_MASK 0x00000001
/* HC_REG_HRFIFOempty 0x0004
 */
#define HC_HRZDempty_MASK 0x00000010
#define HC_HRTXAempty_MASK 0x00000008
#define HC_HRTXDempty_MASK 0x00000004
#define HC_HWZDempty_MASK 0x00000002
#define HC_HWCDempty_MASK 0x00000001
/* HC_REG_HRFIFOfull 0x0008
 */
#define HC_HRZDfull_MASK 0x00000010
#define HC_HRTXAfull_MASK 0x00000008
#define HC_HRTXDfull_MASK 0x00000004
#define HC_HWZDfull_MASK 0x00000002
#define HC_HWCDfull_MASK 0x00000001
/* HC_REG_HRErr 0x000c
 */
#define HC_HAGPCMErr_MASK 0x80000000
#define HC_HAGPCMErrC_MASK 0x70000000
/* HC_REG_FIFOstatus 0x0010
 */
#define HC_HRFIFOATall_MASK 0x80000000
#define HC_HRFIFOATbusy_MASK 0x40000000
#define HC_HRATFGMDo_MASK 0x00000100
#define HC_HRATFGMDi_MASK 0x00000080
#define HC_HRATFRZD_MASK 0x00000040
#define HC_HRATFRTXA_MASK 0x00000020
#define HC_HRATFRTXD_MASK 0x00000010
#define HC_HRATFWZD_MASK 0x00000008
#define HC_HRATFWCD_MASK 0x00000004
#define HC_HRATTXTAG_MASK 0x00000002
#define HC_HRATTXCH_MASK 0x00000001

/* AGP Command Setting
 */
#define HC_SubA_HAGPBstL 0x0060
#define HC_SubA_HAGPBendL 0x0061
#define HC_SubA_HAGPCMNT 0x0062
#define HC_SubA_HAGPBpL 0x0063
#define HC_SubA_HAGPBpH 0x0064
/* HC_SubA_HAGPCMNT 0x0062
 */
#define HC_HAGPCMNT_MASK 0x00800000
#define HC_HCmdErrClr_MASK 0x00400000
#define HC_HAGPBendH_MASK 0x0000ff00
#define HC_HAGPBstH_MASK 0x000000ff
#define HC_HAGPBendH_SHIFT 8
#define HC_HAGPBstH_SHIFT 0
/* HC_SubA_HAGPBpL 0x0063
 */
#define HC_HAGPBpL_MASK 0x00fffffc
#define HC_HAGPBpID_MASK 0x00000003
#define HC_HAGPBpID_PAUSE 0x00000000
#define HC_HAGPBpID_JUMP 0x00000001
#define HC_HAGPBpID_STOP 0x00000002
/* HC_SubA_HAGPBpH 0x0064
 */
#define HC_HAGPBpH_MASK 0x00ffffff

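The BstL/BstH naming reflects a low/high split: the AGP command-buffer
start address is carried as 24 low bits in the HC_SubA_HAGPBstL register
and 8 high bits packed into HC_SubA_HAGPCMNT. A hedged sketch of the split
(helper name hypothetical; the actual register write path is driver code,
not shown here):

#include <stdint.h>
#include "via_3d_reg.h"

/* Split a 32-bit AGP command-buffer start address into the low and
 * high fields the hardware expects.
 */
static void hc_agp_start_split(uint32_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = addr & HC_Para_MASK;		/* bits 23..0 -> HAGPBstL */
	*hi = (addr >> 24) & HC_HAGPBstH_MASK;	/* bits 31..24 -> HAGPBstH */
}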
/* Miscellaneous Settings
 */
#define HC_SubA_HClipTB 0x0070
#define HC_SubA_HClipLR 0x0071
#define HC_SubA_HFPClipTL 0x0072
#define HC_SubA_HFPClipBL 0x0073
#define HC_SubA_HFPClipLL 0x0074
#define HC_SubA_HFPClipRL 0x0075
#define HC_SubA_HFPClipTBH 0x0076
#define HC_SubA_HFPClipLRH 0x0077
#define HC_SubA_HLP 0x0078
#define HC_SubA_HLPRF 0x0079
#define HC_SubA_HSolidCL 0x007a
#define HC_SubA_HPixGC 0x007b
#define HC_SubA_HSPXYOS 0x007c
#define HC_SubA_HVertexCNT 0x007d

#define HC_HClipT_MASK 0x00fff000
#define HC_HClipT_SHIFT 12
#define HC_HClipB_MASK 0x00000fff
#define HC_HClipB_SHIFT 0
#define HC_HClipL_MASK 0x00fff000
#define HC_HClipL_SHIFT 12
#define HC_HClipR_MASK 0x00000fff
#define HC_HClipR_SHIFT 0
#define HC_HFPClipBH_MASK 0x0000ff00
#define HC_HFPClipBH_SHIFT 8
#define HC_HFPClipTH_MASK 0x000000ff
#define HC_HFPClipTH_SHIFT 0
#define HC_HFPClipRH_MASK 0x0000ff00
#define HC_HFPClipRH_SHIFT 8
#define HC_HFPClipLH_MASK 0x000000ff
#define HC_HFPClipLH_SHIFT 0
#define HC_HSolidCH_MASK 0x000000ff
#define HC_HPixGC_MASK 0x00800000
#define HC_HSPXOS_MASK 0x00fff000
#define HC_HSPXOS_SHIFT 12
#define HC_HSPYOS_MASK 0x00000fff

/* Command
 * Command A
 */
#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
#define HC_HE3Fire_MASK 0x00100000
#define HC_HPMType_MASK 0x000f0000
#define HC_HEFlag_MASK 0x0000e000
#define HC_HShading_MASK 0x00001c00
#define HC_HPMValidN_MASK 0x00000200
#define HC_HPLEND_MASK 0x00000100
#define HC_HVCycle_MASK 0x000000ff
#define HC_HVCycle_Style_MASK 0x000000c0
#define HC_HVCycle_ChgA_MASK 0x00000030
#define HC_HVCycle_ChgB_MASK 0x0000000c
#define HC_HVCycle_ChgC_MASK 0x00000003
#define HC_HPMType_Point 0x00000000
#define HC_HPMType_Line 0x00010000
#define HC_HPMType_Tri 0x00020000
#define HC_HPMType_TriWF 0x00040000
#define HC_HEFlag_NoAA 0x00000000
#define HC_HEFlag_ab 0x00008000
#define HC_HEFlag_bc 0x00004000
#define HC_HEFlag_ca 0x00002000
#define HC_HShading_Solid 0x00000000
#define HC_HShading_FlatA 0x00000400
#define HC_HShading_FlatB 0x00000800
#define HC_HShading_FlatC 0x00000c00
#define HC_HShading_Gouraud 0x00001000
#define HC_HVCycle_Full 0x00000000
#define HC_HVCycle_AFP 0x00000040
#define HC_HVCycle_One 0x000000c0
#define HC_HVCycle_NewA 0x00000000
#define HC_HVCycle_AA 0x00000010
#define HC_HVCycle_AB 0x00000020
#define HC_HVCycle_AC 0x00000030
#define HC_HVCycle_NewB 0x00000000
#define HC_HVCycle_BA 0x00000004
#define HC_HVCycle_BB 0x00000008
#define HC_HVCycle_BC 0x0000000c
#define HC_HVCycle_NewC 0x00000000
#define HC_HVCycle_CA 0x00000001
#define HC_HVCycle_CB 0x00000002
#define HC_HVCycle_CC 0x00000003

/* Command B
 */
#define HC_HLPrst_MASK 0x00010000
#define HC_HLLastP_MASK 0x00008000
#define HC_HVPMSK_MASK 0x00007f80
#define HC_HBFace_MASK 0x00000040
#define HC_H2nd1VT_MASK 0x0000003f
#define HC_HVPMSK_X 0x00004000
#define HC_HVPMSK_Y 0x00002000
#define HC_HVPMSK_Z 0x00001000
#define HC_HVPMSK_W 0x00000800
#define HC_HVPMSK_Cd 0x00000400
#define HC_HVPMSK_Cs 0x00000200
#define HC_HVPMSK_S 0x00000100
#define HC_HVPMSK_T 0x00000080

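Command A packs the primitive type, antialiasing edge flags, shading mode
and vertex-cycle layout into a single word. A minimal sketch for a
non-antialiased Gouraud-shaded triangle with a full vertex cycle (the
helper is hypothetical; firing and command submission are separate steps
handled by the driver):

#include <stdint.h>
#include "via_3d_reg.h"

/* Command A word for a Gouraud triangle, no edge antialiasing. */
static uint32_t hc_cmd_a_gouraud_tri(void)
{
	return HC_HPMType_Tri | HC_HEFlag_NoAA |
	       HC_HShading_Gouraud | HC_HVCycle_Full;
}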
/* Enable Setting
 */
#define HC_SubA_HEnable 0x0000
#define HC_HenTXEnvMap_MASK 0x00200000
#define HC_HenVertexCNT_MASK 0x00100000
#define HC_HenCPUDAZ_MASK 0x00080000
#define HC_HenDASZWC_MASK 0x00040000
#define HC_HenFBCull_MASK 0x00020000
#define HC_HenCW_MASK 0x00010000
#define HC_HenAA_MASK 0x00008000
#define HC_HenST_MASK 0x00004000
#define HC_HenZT_MASK 0x00002000
#define HC_HenZW_MASK 0x00001000
#define HC_HenAT_MASK 0x00000800
#define HC_HenAW_MASK 0x00000400
#define HC_HenSP_MASK 0x00000200
#define HC_HenLP_MASK 0x00000100
#define HC_HenTXCH_MASK 0x00000080
#define HC_HenTXMP_MASK 0x00000040
#define HC_HenTXPP_MASK 0x00000020
#define HC_HenTXTR_MASK 0x00000010
#define HC_HenCS_MASK 0x00000008
#define HC_HenFOG_MASK 0x00000004
#define HC_HenABL_MASK 0x00000002
#define HC_HenDT_MASK 0x00000001

/* Z Setting
 */
#define HC_SubA_HZWBBasL 0x0010
#define HC_SubA_HZWBBasH 0x0011
#define HC_SubA_HZWBType 0x0012
#define HC_SubA_HZBiasL 0x0013
#define HC_SubA_HZWBend 0x0014
#define HC_SubA_HZWTMD 0x0015
#define HC_SubA_HZWCDL 0x0016
#define HC_SubA_HZWCTAGnum 0x0017
#define HC_SubA_HZCYNum 0x0018
#define HC_SubA_HZWCFire 0x0019
/* HC_SubA_HZWBType
 */
#define HC_HZWBType_MASK 0x00800000
#define HC_HZBiasedWB_MASK 0x00400000
#define HC_HZONEasFF_MASK 0x00200000
#define HC_HZOONEasFF_MASK 0x00100000
#define HC_HZWBFM_MASK 0x00030000
#define HC_HZWBLoc_MASK 0x0000c000
#define HC_HZWBPit_MASK 0x00003fff
#define HC_HZWBFM_16 0x00000000
#define HC_HZWBFM_32 0x00020000
#define HC_HZWBFM_24 0x00030000
#define HC_HZWBLoc_Local 0x00000000
#define HC_HZWBLoc_SyS 0x00004000
/* HC_SubA_HZWBend
 */
#define HC_HZWBend_MASK 0x00ffe000
#define HC_HZBiasH_MASK 0x000000ff
#define HC_HZWBend_SHIFT 10
/* HC_SubA_HZWTMD
 */
#define HC_HZWTMD_MASK 0x00070000
#define HC_HEBEBias_MASK 0x00007f00
#define HC_HZNF_MASK 0x000000ff
#define HC_HZWTMD_NeverPass 0x00000000
#define HC_HZWTMD_LT 0x00010000
#define HC_HZWTMD_EQ 0x00020000
#define HC_HZWTMD_LE 0x00030000
#define HC_HZWTMD_GT 0x00040000
#define HC_HZWTMD_NE 0x00050000
#define HC_HZWTMD_GE 0x00060000
#define HC_HZWTMD_AllPass 0x00070000
#define HC_HEBEBias_SHIFT 8
/* HC_SubA_HZWCDL 0x0016
 */
#define HC_HZWCDL_MASK 0x00ffffff
/* HC_SubA_HZWCTAGnum 0x0017
 */
#define HC_HZWCTAGnum_MASK 0x00ff0000
#define HC_HZWCTAGnum_SHIFT 16
#define HC_HZWCDH_MASK 0x000000ff
#define HC_HZWCDH_SHIFT 0
/* HC_SubA_HZCYNum 0x0018
 */
#define HC_HZCYNum_MASK 0x00030000
#define HC_HZCYNum_SHIFT 16
#define HC_HZWCQWnum_MASK 0x00003fff
#define HC_HZWCQWnum_SHIFT 0
/* HC_SubA_HZWCFire 0x0019
 */
#define HC_ZWCFire_MASK 0x00010000
#define HC_HZWCQWnumLast_MASK 0x00003fff
#define HC_HZWCQWnumLast_SHIFT 0

/* Stencil Setting
 */
#define HC_SubA_HSTREF 0x0023
#define HC_SubA_HSTMD 0x0024
/* HC_SubA_HSBFM
 */
#define HC_HSBFM_MASK 0x00030000
#define HC_HSBLoc_MASK 0x0000c000
#define HC_HSBPit_MASK 0x00003fff
/* HC_SubA_HSTREF
 */
#define HC_HSTREF_MASK 0x00ff0000
#define HC_HSTOPMSK_MASK 0x0000ff00
#define HC_HSTBMSK_MASK 0x000000ff
#define HC_HSTREF_SHIFT 16
#define HC_HSTOPMSK_SHIFT 8
/* HC_SubA_HSTMD
 */
#define HC_HSTMD_MASK 0x00070000
#define HC_HSTOPSF_MASK 0x000001c0
#define HC_HSTOPSPZF_MASK 0x00000038
#define HC_HSTOPSPZP_MASK 0x00000007
#define HC_HSTMD_NeverPass 0x00000000
#define HC_HSTMD_LT 0x00010000
#define HC_HSTMD_EQ 0x00020000
#define HC_HSTMD_LE 0x00030000
#define HC_HSTMD_GT 0x00040000
#define HC_HSTMD_NE 0x00050000
#define HC_HSTMD_GE 0x00060000
#define HC_HSTMD_AllPass 0x00070000
#define HC_HSTOPSF_KEEP 0x00000000
#define HC_HSTOPSF_ZERO 0x00000040
#define HC_HSTOPSF_REPLACE 0x00000080
#define HC_HSTOPSF_INCRSAT 0x000000c0
#define HC_HSTOPSF_DECRSAT 0x00000100
#define HC_HSTOPSF_INVERT 0x00000140
#define HC_HSTOPSF_INCR 0x00000180
#define HC_HSTOPSF_DECR 0x000001c0
#define HC_HSTOPSPZF_KEEP 0x00000000
#define HC_HSTOPSPZF_ZERO 0x00000008
#define HC_HSTOPSPZF_REPLACE 0x00000010
#define HC_HSTOPSPZF_INCRSAT 0x00000018
#define HC_HSTOPSPZF_DECRSAT 0x00000020
#define HC_HSTOPSPZF_INVERT 0x00000028
#define HC_HSTOPSPZF_INCR 0x00000030
#define HC_HSTOPSPZF_DECR 0x00000038
#define HC_HSTOPSPZP_KEEP 0x00000000
#define HC_HSTOPSPZP_ZERO 0x00000001
#define HC_HSTOPSPZP_REPLACE 0x00000002
#define HC_HSTOPSPZP_INCRSAT 0x00000003
#define HC_HSTOPSPZP_DECRSAT 0x00000004
#define HC_HSTOPSPZP_INVERT 0x00000005
#define HC_HSTOPSPZP_INCR 0x00000006
#define HC_HSTOPSPZP_DECR 0x00000007

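HC_SubA_HSTMD packs the stencil compare function together with the three
operations (stencil fail, stencil pass/Z fail, stencil pass/Z pass),
mirroring the familiar OpenGL triple. A sketch of one combination
(hypothetical helper; the values are chosen purely for illustration):

#include <stdint.h>
#include "via_3d_reg.h"

/* Stencil mode: pass when ref <= stored value; keep on stencil fail,
 * keep on Z fail, saturating increment when both tests pass.
 */
static uint32_t hc_stencil_mode(void)
{
	return HC_HSTMD_LE | HC_HSTOPSF_KEEP |
	       HC_HSTOPSPZF_KEEP | HC_HSTOPSPZP_INCRSAT;
}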
/* Alpha Setting
 */
#define HC_SubA_HABBasL 0x0030
#define HC_SubA_HABBasH 0x0031
#define HC_SubA_HABFM 0x0032
#define HC_SubA_HATMD 0x0033
#define HC_SubA_HABLCsat 0x0034
#define HC_SubA_HABLCop 0x0035
#define HC_SubA_HABLAsat 0x0036
#define HC_SubA_HABLAop 0x0037
#define HC_SubA_HABLRCa 0x0038
#define HC_SubA_HABLRFCa 0x0039
#define HC_SubA_HABLRCbias 0x003a
#define HC_SubA_HABLRCb 0x003b
#define HC_SubA_HABLRFCb 0x003c
#define HC_SubA_HABLRAa 0x003d
#define HC_SubA_HABLRAb 0x003e
/* HC_SubA_HABFM
 */
#define HC_HABFM_MASK 0x00030000
#define HC_HABLoc_MASK 0x0000c000
#define HC_HABPit_MASK 0x000007ff
/* HC_SubA_HATMD
 */
#define HC_HATMD_MASK 0x00000700
#define HC_HATREF_MASK 0x000000ff
#define HC_HATMD_NeverPass 0x00000000
#define HC_HATMD_LT 0x00000100
#define HC_HATMD_EQ 0x00000200
#define HC_HATMD_LE 0x00000300
#define HC_HATMD_GT 0x00000400
#define HC_HATMD_NE 0x00000500
#define HC_HATMD_GE 0x00000600
#define HC_HATMD_AllPass 0x00000700
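HC_SubA_HATMD carries both the alpha-test compare mode and the 8-bit
reference value. A sketch (hypothetical helper; 0x80 is an arbitrary
reference value):

#include <stdint.h>
#include "via_3d_reg.h"

/* Alpha test: pass fragments whose alpha is >= 0x80. */
static uint32_t hc_alpha_test_ge(void)
{
	return HC_HATMD_GE | (0x80 & HC_HATREF_MASK);
}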
/* HC_SubA_HABLCsat
 */
#define HC_HABLCsat_MASK 0x00010000
#define HC_HABLCa_MASK 0x0000fc00
#define HC_HABLCa_C_MASK 0x0000c000
#define HC_HABLCa_OPC_MASK 0x00003c00
#define HC_HABLFCa_MASK 0x000003f0
#define HC_HABLFCa_C_MASK 0x00000300
#define HC_HABLFCa_OPC_MASK 0x000000f0
#define HC_HABLCbias_MASK 0x0000000f
#define HC_HABLCbias_C_MASK 0x00000008
#define HC_HABLCbias_OPC_MASK 0x00000007
/*-- Define the input color.
 */
#define HC_XC_Csrc 0x00000000
#define HC_XC_Cdst 0x00000001
#define HC_XC_Asrc 0x00000002
#define HC_XC_Adst 0x00000003
#define HC_XC_Fog 0x00000004
#define HC_XC_HABLRC 0x00000005
#define HC_XC_minSrcDst 0x00000006
#define HC_XC_maxSrcDst 0x00000007
#define HC_XC_mimAsrcInvAdst 0x00000008
#define HC_XC_OPC 0x00000000
#define HC_XC_InvOPC 0x00000010
#define HC_XC_OPCp5 0x00000020
/*-- Define the input Alpha
 */
#define HC_XA_OPA 0x00000000
#define HC_XA_InvOPA 0x00000010
#define HC_XA_OPAp5 0x00000020
#define HC_XA_0 0x00000000
#define HC_XA_Asrc 0x00000001
#define HC_XA_Adst 0x00000002
#define HC_XA_Fog 0x00000003
#define HC_XA_minAsrcFog 0x00000004
#define HC_XA_minAsrcAdst 0x00000005
#define HC_XA_maxAsrcFog 0x00000006
#define HC_XA_maxAsrcAdst 0x00000007
#define HC_XA_HABLRA 0x00000008
#define HC_XA_minAsrcInvAdst 0x00000008
#define HC_XA_HABLFRA 0x00000009
/*--
 */
#define HC_HABLCa_OPC (HC_XC_OPC << 10)
#define HC_HABLCa_InvOPC (HC_XC_InvOPC << 10)
#define HC_HABLCa_OPCp5 (HC_XC_OPCp5 << 10)
#define HC_HABLCa_Csrc (HC_XC_Csrc << 10)
#define HC_HABLCa_Cdst (HC_XC_Cdst << 10)
#define HC_HABLCa_Asrc (HC_XC_Asrc << 10)
#define HC_HABLCa_Adst (HC_XC_Adst << 10)
#define HC_HABLCa_Fog (HC_XC_Fog << 10)
#define HC_HABLCa_HABLRCa (HC_XC_HABLRC << 10)
#define HC_HABLCa_minSrcDst (HC_XC_minSrcDst << 10)
#define HC_HABLCa_maxSrcDst (HC_XC_maxSrcDst << 10)
#define HC_HABLFCa_OPC (HC_XC_OPC << 4)
#define HC_HABLFCa_InvOPC (HC_XC_InvOPC << 4)
#define HC_HABLFCa_OPCp5 (HC_XC_OPCp5 << 4)
#define HC_HABLFCa_Csrc (HC_XC_Csrc << 4)
#define HC_HABLFCa_Cdst (HC_XC_Cdst << 4)
#define HC_HABLFCa_Asrc (HC_XC_Asrc << 4)
#define HC_HABLFCa_Adst (HC_XC_Adst << 4)
#define HC_HABLFCa_Fog (HC_XC_Fog << 4)
#define HC_HABLFCa_HABLRCa (HC_XC_HABLRC << 4)
#define HC_HABLFCa_minSrcDst (HC_XC_minSrcDst << 4)
#define HC_HABLFCa_maxSrcDst (HC_XC_maxSrcDst << 4)
#define HC_HABLFCa_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 4)
#define HC_HABLCbias_HABLRCbias 0x00000000
#define HC_HABLCbias_Asrc 0x00000001
#define HC_HABLCbias_Adst 0x00000002
#define HC_HABLCbias_Fog 0x00000003
#define HC_HABLCbias_Cin 0x00000004
/* HC_SubA_HABLCop 0x0035
 */
#define HC_HABLdot_MASK 0x00010000
#define HC_HABLCop_MASK 0x00004000
#define HC_HABLCb_MASK 0x00003f00
#define HC_HABLCb_C_MASK 0x00003000
#define HC_HABLCb_OPC_MASK 0x00000f00
#define HC_HABLFCb_MASK 0x000000fc
#define HC_HABLFCb_C_MASK 0x000000c0
#define HC_HABLFCb_OPC_MASK 0x0000003c
#define HC_HABLCshift_MASK 0x00000003
#define HC_HABLCb_OPC (HC_XC_OPC << 8)
#define HC_HABLCb_InvOPC (HC_XC_InvOPC << 8)
#define HC_HABLCb_OPCp5 (HC_XC_OPCp5 << 8)
#define HC_HABLCb_Csrc (HC_XC_Csrc << 8)
#define HC_HABLCb_Cdst (HC_XC_Cdst << 8)
#define HC_HABLCb_Asrc (HC_XC_Asrc << 8)
#define HC_HABLCb_Adst (HC_XC_Adst << 8)
#define HC_HABLCb_Fog (HC_XC_Fog << 8)
#define HC_HABLCb_HABLRCa (HC_XC_HABLRC << 8)
#define HC_HABLCb_minSrcDst (HC_XC_minSrcDst << 8)
#define HC_HABLCb_maxSrcDst (HC_XC_maxSrcDst << 8)
#define HC_HABLFCb_OPC (HC_XC_OPC << 2)
#define HC_HABLFCb_InvOPC (HC_XC_InvOPC << 2)
#define HC_HABLFCb_OPCp5 (HC_XC_OPCp5 << 2)
#define HC_HABLFCb_Csrc (HC_XC_Csrc << 2)
#define HC_HABLFCb_Cdst (HC_XC_Cdst << 2)
#define HC_HABLFCb_Asrc (HC_XC_Asrc << 2)
#define HC_HABLFCb_Adst (HC_XC_Adst << 2)
#define HC_HABLFCb_Fog (HC_XC_Fog << 2)
#define HC_HABLFCb_HABLRCb (HC_XC_HABLRC << 2)
#define HC_HABLFCb_minSrcDst (HC_XC_minSrcDst << 2)
#define HC_HABLFCb_maxSrcDst (HC_XC_maxSrcDst << 2)
#define HC_HABLFCb_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 2)
/* HC_SubA_HABLAsat 0x0036
 */
#define HC_HABLAsat_MASK 0x00010000
#define HC_HABLAa_MASK 0x0000fc00
#define HC_HABLAa_A_MASK 0x0000c000
#define HC_HABLAa_OPA_MASK 0x00003c00
#define HC_HABLFAa_MASK 0x000003f0
#define HC_HABLFAa_A_MASK 0x00000300
#define HC_HABLFAa_OPA_MASK 0x000000f0
#define HC_HABLAbias_MASK 0x0000000f
#define HC_HABLAbias_A_MASK 0x00000008
#define HC_HABLAbias_OPA_MASK 0x00000007
#define HC_HABLAa_OPA (HC_XA_OPA << 10)
#define HC_HABLAa_InvOPA (HC_XA_InvOPA << 10)
#define HC_HABLAa_OPAp5 (HC_XA_OPAp5 << 10)
#define HC_HABLAa_0 (HC_XA_0 << 10)
#define HC_HABLAa_Asrc (HC_XA_Asrc << 10)
#define HC_HABLAa_Adst (HC_XA_Adst << 10)
#define HC_HABLAa_Fog (HC_XA_Fog << 10)
#define HC_HABLAa_minAsrcFog (HC_XA_minAsrcFog << 10)
#define HC_HABLAa_minAsrcAdst (HC_XA_minAsrcAdst << 10)
#define HC_HABLAa_maxAsrcFog (HC_XA_maxAsrcFog << 10)
#define HC_HABLAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 10)
#define HC_HABLAa_HABLRA (HC_XA_HABLRA << 10)
#define HC_HABLFAa_OPA (HC_XA_OPA << 4)
#define HC_HABLFAa_InvOPA (HC_XA_InvOPA << 4)
#define HC_HABLFAa_OPAp5 (HC_XA_OPAp5 << 4)
#define HC_HABLFAa_0 (HC_XA_0 << 4)
#define HC_HABLFAa_Asrc (HC_XA_Asrc << 4)
#define HC_HABLFAa_Adst (HC_XA_Adst << 4)
#define HC_HABLFAa_Fog (HC_XA_Fog << 4)
#define HC_HABLFAa_minAsrcFog (HC_XA_minAsrcFog << 4)
#define HC_HABLFAa_minAsrcAdst (HC_XA_minAsrcAdst << 4)
#define HC_HABLFAa_maxAsrcFog (HC_XA_maxAsrcFog << 4)
#define HC_HABLFAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 4)
#define HC_HABLFAa_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 4)
#define HC_HABLFAa_HABLFRA (HC_XA_HABLFRA << 4)
#define HC_HABLAbias_HABLRAbias 0x00000000
#define HC_HABLAbias_Asrc 0x00000001
#define HC_HABLAbias_Adst 0x00000002
#define HC_HABLAbias_Fog 0x00000003
#define HC_HABLAbias_Aaa 0x00000004
/* HC_SubA_HABLAop 0x0037
 */
#define HC_HABLAop_MASK 0x00004000
#define HC_HABLAb_MASK 0x00003f00
#define HC_HABLAb_OPA_MASK 0x00000f00
#define HC_HABLFAb_MASK 0x000000fc
#define HC_HABLFAb_OPA_MASK 0x0000003c
#define HC_HABLAshift_MASK 0x00000003
#define HC_HABLAb_OPA (HC_XA_OPA << 8)
#define HC_HABLAb_InvOPA (HC_XA_InvOPA << 8)
#define HC_HABLAb_OPAp5 (HC_XA_OPAp5 << 8)
#define HC_HABLAb_0 (HC_XA_0 << 8)
#define HC_HABLAb_Asrc (HC_XA_Asrc << 8)
#define HC_HABLAb_Adst (HC_XA_Adst << 8)
#define HC_HABLAb_Fog (HC_XA_Fog << 8)
#define HC_HABLAb_minAsrcFog (HC_XA_minAsrcFog << 8)
#define HC_HABLAb_minAsrcAdst (HC_XA_minAsrcAdst << 8)
#define HC_HABLAb_maxAsrcFog (HC_XA_maxAsrcFog << 8)
#define HC_HABLAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 8)
#define HC_HABLAb_HABLRA (HC_XA_HABLRA << 8)
#define HC_HABLFAb_OPA (HC_XA_OPA << 2)
#define HC_HABLFAb_InvOPA (HC_XA_InvOPA << 2)
#define HC_HABLFAb_OPAp5 (HC_XA_OPAp5 << 2)
#define HC_HABLFAb_0 (HC_XA_0 << 2)
#define HC_HABLFAb_Asrc (HC_XA_Asrc << 2)
#define HC_HABLFAb_Adst (HC_XA_Adst << 2)
#define HC_HABLFAb_Fog (HC_XA_Fog << 2)
#define HC_HABLFAb_minAsrcFog (HC_XA_minAsrcFog << 2)
#define HC_HABLFAb_minAsrcAdst (HC_XA_minAsrcAdst << 2)
#define HC_HABLFAb_maxAsrcFog (HC_XA_maxAsrcFog << 2)
#define HC_HABLFAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 2)
#define HC_HABLFAb_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 2)
#define HC_HABLFAb_HABLFRA (HC_XA_HABLFRA << 2)
/* HC_SubA_HABLRAa 0x003d
 */
#define HC_HABLRAa_MASK 0x00ff0000
#define HC_HABLRFAa_MASK 0x0000ff00
#define HC_HABLRAbias_MASK 0x000000ff
#define HC_HABLRAa_SHIFT 16
#define HC_HABLRFAa_SHIFT 8
/* HC_SubA_HABLRAb 0x003e
 */
#define HC_HABLRAb_MASK 0x0000ff00
#define HC_HABLRFAb_MASK 0x000000ff
#define HC_HABLRAb_SHIFT 8

/* Destination Setting
 */
#define HC_SubA_HDBBasL 0x0040
#define HC_SubA_HDBBasH 0x0041
#define HC_SubA_HDBFM 0x0042
#define HC_SubA_HFBBMSKL 0x0043
#define HC_SubA_HROP 0x0044
/* HC_SubA_HDBFM 0x0042
 */
#define HC_HDBFM_MASK 0x001f0000
#define HC_HDBLoc_MASK 0x0000c000
#define HC_HDBPit_MASK 0x00003fff
#define HC_HDBFM_RGB555 0x00000000
#define HC_HDBFM_RGB565 0x00010000
#define HC_HDBFM_ARGB4444 0x00020000
#define HC_HDBFM_ARGB1555 0x00030000
#define HC_HDBFM_BGR555 0x00040000
#define HC_HDBFM_BGR565 0x00050000
#define HC_HDBFM_ABGR4444 0x00060000
#define HC_HDBFM_ABGR1555 0x00070000
#define HC_HDBFM_ARGB0888 0x00080000
#define HC_HDBFM_ARGB8888 0x00090000
#define HC_HDBFM_ABGR0888 0x000a0000
#define HC_HDBFM_ABGR8888 0x000b0000
#define HC_HDBLoc_Local 0x00000000
#define HC_HDBLoc_Sys 0x00004000
/* HC_SubA_HROP 0x0044
 */
#define HC_HROP_MASK 0x00000f00
#define HC_HFBBMSKH_MASK 0x000000ff
#define HC_HROP_BLACK 0x00000000
#define HC_HROP_DPon 0x00000100
#define HC_HROP_DPna 0x00000200
#define HC_HROP_Pn 0x00000300
#define HC_HROP_PDna 0x00000400
#define HC_HROP_Dn 0x00000500
#define HC_HROP_DPx 0x00000600
#define HC_HROP_DPan 0x00000700
#define HC_HROP_DPa 0x00000800
#define HC_HROP_DPxn 0x00000900
#define HC_HROP_D 0x00000a00
#define HC_HROP_DPno 0x00000b00
#define HC_HROP_P 0x00000c00
#define HC_HROP_PDno 0x00000d00
#define HC_HROP_DPo 0x00000e00
#define HC_HROP_WHITE 0x00000f00

/* Fog Setting
 */
#define HC_SubA_HFogLF 0x0050
#define HC_SubA_HFogCL 0x0051
#define HC_SubA_HFogCH 0x0052
#define HC_SubA_HFogStL 0x0053
#define HC_SubA_HFogStH 0x0054
#define HC_SubA_HFogOOdMF 0x0055
#define HC_SubA_HFogOOdEF 0x0056
#define HC_SubA_HFogEndL 0x0057
#define HC_SubA_HFogDenst 0x0058
/* HC_SubA_FogLF 0x0050
 */
#define HC_FogLF_MASK 0x00000010
#define HC_FogEq_MASK 0x00000008
#define HC_FogMD_MASK 0x00000007
#define HC_FogMD_LocalFog 0x00000000
#define HC_FogMD_LinearFog 0x00000002
#define HC_FogMD_ExponentialFog 0x00000004
#define HC_FogMD_Exponential2Fog 0x00000005
/* #define HC_FogMD_FogTable 0x00000003 */

/* HC_SubA_HFogDenst 0x0058
 */
#define HC_FogDenst_MASK 0x001fff00
#define HC_FogEndL_MASK 0x000000ff

/* Texture subtype definitions
 */
#define HC_SubType_Tex0 0x00000000
#define HC_SubType_Tex1 0x00000001
#define HC_SubType_TexGeneral 0x000000fe

/* Attribute of texture n
 */
#define HC_SubA_HTXnL0BasL 0x0000
#define HC_SubA_HTXnL1BasL 0x0001
#define HC_SubA_HTXnL2BasL 0x0002
#define HC_SubA_HTXnL3BasL 0x0003
#define HC_SubA_HTXnL4BasL 0x0004
#define HC_SubA_HTXnL5BasL 0x0005
#define HC_SubA_HTXnL6BasL 0x0006
#define HC_SubA_HTXnL7BasL 0x0007
#define HC_SubA_HTXnL8BasL 0x0008
#define HC_SubA_HTXnL9BasL 0x0009
#define HC_SubA_HTXnLaBasL 0x000a
#define HC_SubA_HTXnLbBasL 0x000b
#define HC_SubA_HTXnLcBasL 0x000c
#define HC_SubA_HTXnLdBasL 0x000d
#define HC_SubA_HTXnLeBasL 0x000e
#define HC_SubA_HTXnLfBasL 0x000f
#define HC_SubA_HTXnL10BasL 0x0010
#define HC_SubA_HTXnL11BasL 0x0011
#define HC_SubA_HTXnL012BasH 0x0020
#define HC_SubA_HTXnL345BasH 0x0021
#define HC_SubA_HTXnL678BasH 0x0022
#define HC_SubA_HTXnL9abBasH 0x0023
#define HC_SubA_HTXnLcdeBasH 0x0024
#define HC_SubA_HTXnLf1011BasH 0x0025
#define HC_SubA_HTXnL0Pit 0x002b
#define HC_SubA_HTXnL1Pit 0x002c
#define HC_SubA_HTXnL2Pit 0x002d
#define HC_SubA_HTXnL3Pit 0x002e
#define HC_SubA_HTXnL4Pit 0x002f
#define HC_SubA_HTXnL5Pit 0x0030
#define HC_SubA_HTXnL6Pit 0x0031
#define HC_SubA_HTXnL7Pit 0x0032
#define HC_SubA_HTXnL8Pit 0x0033
#define HC_SubA_HTXnL9Pit 0x0034
#define HC_SubA_HTXnLaPit 0x0035
#define HC_SubA_HTXnLbPit 0x0036
#define HC_SubA_HTXnLcPit 0x0037
#define HC_SubA_HTXnLdPit 0x0038
#define HC_SubA_HTXnLePit 0x0039
#define HC_SubA_HTXnLfPit 0x003a
#define HC_SubA_HTXnL10Pit 0x003b
#define HC_SubA_HTXnL11Pit 0x003c
#define HC_SubA_HTXnL0_5WE 0x004b
#define HC_SubA_HTXnL6_bWE 0x004c
#define HC_SubA_HTXnLc_11WE 0x004d
#define HC_SubA_HTXnL0_5HE 0x0051
#define HC_SubA_HTXnL6_bHE 0x0052
#define HC_SubA_HTXnLc_11HE 0x0053
#define HC_SubA_HTXnL0OS 0x0077
#define HC_SubA_HTXnTB 0x0078
#define HC_SubA_HTXnMPMD 0x0079
#define HC_SubA_HTXnCLODu 0x007a
#define HC_SubA_HTXnFM 0x007b
#define HC_SubA_HTXnTRCH 0x007c
#define HC_SubA_HTXnTRCL 0x007d
#define HC_SubA_HTXnTBC 0x007e
#define HC_SubA_HTXnTRAH 0x007f
#define HC_SubA_HTXnTBLCsat 0x0080
#define HC_SubA_HTXnTBLCop 0x0081
#define HC_SubA_HTXnTBLMPfog 0x0082
#define HC_SubA_HTXnTBLAsat 0x0083
#define HC_SubA_HTXnTBLRCa 0x0085
#define HC_SubA_HTXnTBLRCb 0x0086
#define HC_SubA_HTXnTBLRCc 0x0087
#define HC_SubA_HTXnTBLRCbias 0x0088
#define HC_SubA_HTXnTBLRAa 0x0089
#define HC_SubA_HTXnTBLRFog 0x008a
#define HC_SubA_HTXnBumpM00 0x0090
#define HC_SubA_HTXnBumpM01 0x0091
#define HC_SubA_HTXnBumpM10 0x0092
#define HC_SubA_HTXnBumpM11 0x0093
#define HC_SubA_HTXnLScale 0x0094
#define HC_SubA_HTXSMD 0x0000
/* HC_SubA_HTXnL012BasH 0x0020
 */
#define HC_HTXnL0BasH_MASK 0x000000ff
#define HC_HTXnL1BasH_MASK 0x0000ff00
#define HC_HTXnL2BasH_MASK 0x00ff0000
#define HC_HTXnL1BasH_SHIFT 8
#define HC_HTXnL2BasH_SHIFT 16
/* HC_SubA_HTXnL345BasH 0x0021
 */
#define HC_HTXnL3BasH_MASK 0x000000ff
#define HC_HTXnL4BasH_MASK 0x0000ff00
#define HC_HTXnL5BasH_MASK 0x00ff0000
#define HC_HTXnL4BasH_SHIFT 8
#define HC_HTXnL5BasH_SHIFT 16
/* HC_SubA_HTXnL678BasH 0x0022
 */
#define HC_HTXnL6BasH_MASK 0x000000ff
#define HC_HTXnL7BasH_MASK 0x0000ff00
#define HC_HTXnL8BasH_MASK 0x00ff0000
#define HC_HTXnL7BasH_SHIFT 8
#define HC_HTXnL8BasH_SHIFT 16
/* HC_SubA_HTXnL9abBasH 0x0023
 */
#define HC_HTXnL9BasH_MASK 0x000000ff
#define HC_HTXnLaBasH_MASK 0x0000ff00
#define HC_HTXnLbBasH_MASK 0x00ff0000
#define HC_HTXnLaBasH_SHIFT 8
#define HC_HTXnLbBasH_SHIFT 16
/* HC_SubA_HTXnLcdeBasH 0x0024
 */
#define HC_HTXnLcBasH_MASK 0x000000ff
#define HC_HTXnLdBasH_MASK 0x0000ff00
#define HC_HTXnLeBasH_MASK 0x00ff0000
#define HC_HTXnLdBasH_SHIFT 8
#define HC_HTXnLeBasH_SHIFT 16
/* HC_SubA_HTXnLf1011BasH 0x0025
 */
#define HC_HTXnLfBasH_MASK 0x000000ff
#define HC_HTXnL10BasH_MASK 0x0000ff00
#define HC_HTXnL11BasH_MASK 0x00ff0000
#define HC_HTXnL10BasH_SHIFT 8
#define HC_HTXnL11BasH_SHIFT 16
/* HC_SubA_HTXnL0Pit 0x002b
 */
#define HC_HTXnLnPit_MASK 0x00003fff
#define HC_HTXnEnPit_MASK 0x00080000
#define HC_HTXnLnPitE_MASK 0x00f00000
#define HC_HTXnLnPitE_SHIFT 20
/* HC_SubA_HTXnL0_5WE 0x004b
 */
#define HC_HTXnL0WE_MASK 0x0000000f
#define HC_HTXnL1WE_MASK 0x000000f0
#define HC_HTXnL2WE_MASK 0x00000f00
#define HC_HTXnL3WE_MASK 0x0000f000
#define HC_HTXnL4WE_MASK 0x000f0000
#define HC_HTXnL5WE_MASK 0x00f00000
#define HC_HTXnL1WE_SHIFT 4
#define HC_HTXnL2WE_SHIFT 8
#define HC_HTXnL3WE_SHIFT 12
#define HC_HTXnL4WE_SHIFT 16
#define HC_HTXnL5WE_SHIFT 20
/* HC_SubA_HTXnL6_bWE 0x004c
 */
#define HC_HTXnL6WE_MASK 0x0000000f
#define HC_HTXnL7WE_MASK 0x000000f0
#define HC_HTXnL8WE_MASK 0x00000f00
#define HC_HTXnL9WE_MASK 0x0000f000
#define HC_HTXnLaWE_MASK 0x000f0000
#define HC_HTXnLbWE_MASK 0x00f00000
#define HC_HTXnL7WE_SHIFT 4
#define HC_HTXnL8WE_SHIFT 8
#define HC_HTXnL9WE_SHIFT 12
#define HC_HTXnLaWE_SHIFT 16
#define HC_HTXnLbWE_SHIFT 20
/* HC_SubA_HTXnLc_11WE 0x004d
 */
#define HC_HTXnLcWE_MASK 0x0000000f
#define HC_HTXnLdWE_MASK 0x000000f0
#define HC_HTXnLeWE_MASK 0x00000f00
#define HC_HTXnLfWE_MASK 0x0000f000
#define HC_HTXnL10WE_MASK 0x000f0000
#define HC_HTXnL11WE_MASK 0x00f00000
#define HC_HTXnLdWE_SHIFT 4
#define HC_HTXnLeWE_SHIFT 8
#define HC_HTXnLfWE_SHIFT 12
#define HC_HTXnL10WE_SHIFT 16
#define HC_HTXnL11WE_SHIFT 20
/* HC_SubA_HTXnL0_5HE 0x0051
 */
#define HC_HTXnL0HE_MASK 0x0000000f
#define HC_HTXnL1HE_MASK 0x000000f0
#define HC_HTXnL2HE_MASK 0x00000f00
#define HC_HTXnL3HE_MASK 0x0000f000
#define HC_HTXnL4HE_MASK 0x000f0000
#define HC_HTXnL5HE_MASK 0x00f00000
#define HC_HTXnL1HE_SHIFT 4
#define HC_HTXnL2HE_SHIFT 8
#define HC_HTXnL3HE_SHIFT 12
#define HC_HTXnL4HE_SHIFT 16
#define HC_HTXnL5HE_SHIFT 20
/* HC_SubA_HTXnL6_bHE 0x0052
 */
#define HC_HTXnL6HE_MASK 0x0000000f
#define HC_HTXnL7HE_MASK 0x000000f0
#define HC_HTXnL8HE_MASK 0x00000f00
#define HC_HTXnL9HE_MASK 0x0000f000
#define HC_HTXnLaHE_MASK 0x000f0000
#define HC_HTXnLbHE_MASK 0x00f00000
#define HC_HTXnL7HE_SHIFT 4
#define HC_HTXnL8HE_SHIFT 8
#define HC_HTXnL9HE_SHIFT 12
#define HC_HTXnLaHE_SHIFT 16
#define HC_HTXnLbHE_SHIFT 20
/* HC_SubA_HTXnLc_11HE 0x0053
 */
#define HC_HTXnLcHE_MASK 0x0000000f
#define HC_HTXnLdHE_MASK 0x000000f0
#define HC_HTXnLeHE_MASK 0x00000f00
#define HC_HTXnLfHE_MASK 0x0000f000
#define HC_HTXnL10HE_MASK 0x000f0000
#define HC_HTXnL11HE_MASK 0x00f00000
#define HC_HTXnLdHE_SHIFT 4
#define HC_HTXnLeHE_SHIFT 8
#define HC_HTXnLfHE_SHIFT 12
#define HC_HTXnL10HE_SHIFT 16
#define HC_HTXnL11HE_SHIFT 20
/* HC_SubA_HTXnL0OS 0x0077
 */
#define HC_HTXnL0OS_MASK 0x003ff000
#define HC_HTXnLVmax_MASK 0x00000fc0
#define HC_HTXnLVmin_MASK 0x0000003f
#define HC_HTXnL0OS_SHIFT 12
#define HC_HTXnLVmax_SHIFT 6
/* HC_SubA_HTXnTB 0x0078
 */
#define HC_HTXnTB_MASK 0x00f00000
#define HC_HTXnFLSe_MASK 0x0000e000
#define HC_HTXnFLSs_MASK 0x00001c00
#define HC_HTXnFLTe_MASK 0x00000380
#define HC_HTXnFLTs_MASK 0x00000070
#define HC_HTXnFLDs_MASK 0x0000000f
#define HC_HTXnTB_NoTB 0x00000000
#define HC_HTXnTB_TBC_S 0x00100000
#define HC_HTXnTB_TBC_T 0x00200000
#define HC_HTXnTB_TB_S 0x00400000
#define HC_HTXnTB_TB_T 0x00800000
#define HC_HTXnFLSe_Nearest 0x00000000
#define HC_HTXnFLSe_Linear 0x00002000
#define HC_HTXnFLSe_NonLinear 0x00004000
#define HC_HTXnFLSe_Sharp 0x00008000
#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
#define HC_HTXnFLSs_Nearest 0x00000000
#define HC_HTXnFLSs_Linear 0x00000400
#define HC_HTXnFLSs_NonLinear 0x00000800
#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
#define HC_HTXnFLTe_Nearest 0x00000000
#define HC_HTXnFLTe_Linear 0x00000080
#define HC_HTXnFLTe_NonLinear 0x00000100
#define HC_HTXnFLTe_Sharp 0x00000180
#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
#define HC_HTXnFLTs_Nearest 0x00000000
#define HC_HTXnFLTs_Linear 0x00000010
#define HC_HTXnFLTs_NonLinear 0x00000020
#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
#define HC_HTXnFLDs_Tex0 0x00000000
#define HC_HTXnFLDs_Nearest 0x00000001
#define HC_HTXnFLDs_Linear 0x00000002
#define HC_HTXnFLDs_NonLinear 0x00000003
#define HC_HTXnFLDs_Dither 0x00000004
#define HC_HTXnFLDs_ConstLOD 0x00000005
#define HC_HTXnFLDs_Ani 0x00000006
#define HC_HTXnFLDs_AniDither 0x00000007
/* HC_SubA_HTXnMPMD 0x0079
 */
#define HC_HTXnMPMD_SMASK 0x00070000
#define HC_HTXnMPMD_TMASK 0x00380000
#define HC_HTXnLODDTf_MASK 0x00000007
#define HC_HTXnXY2ST_MASK 0x00000008
#define HC_HTXnMPMD_Tsingle 0x00000000
#define HC_HTXnMPMD_Tclamp 0x00080000
#define HC_HTXnMPMD_Trepeat 0x00100000
#define HC_HTXnMPMD_Tmirror 0x00180000
#define HC_HTXnMPMD_Twrap 0x00200000
#define HC_HTXnMPMD_Ssingle 0x00000000
#define HC_HTXnMPMD_Sclamp 0x00010000
#define HC_HTXnMPMD_Srepeat 0x00020000
#define HC_HTXnMPMD_Smirror 0x00030000
#define HC_HTXnMPMD_Swrap 0x00040000
/* HC_SubA_HTXnCLODu 0x007a
 */
#define HC_HTXnCLODu_MASK 0x000ffc00
#define HC_HTXnCLODd_MASK 0x000003ff
#define HC_HTXnCLODu_SHIFT 10
/* HC_SubA_HTXnFM 0x007b
 */
#define HC_HTXnFM_MASK 0x00ff0000
#define HC_HTXnLoc_MASK 0x00000003
#define HC_HTXnFM_INDEX 0x00000000
#define HC_HTXnFM_Intensity 0x00080000
#define HC_HTXnFM_Lum 0x00100000
#define HC_HTXnFM_Alpha 0x00180000
#define HC_HTXnFM_DX 0x00280000
#define HC_HTXnFM_ARGB16 0x00880000
#define HC_HTXnFM_ARGB32 0x00980000
#define HC_HTXnFM_ABGR16 0x00a80000
#define HC_HTXnFM_ABGR32 0x00b80000
#define HC_HTXnFM_RGBA16 0x00c80000
#define HC_HTXnFM_RGBA32 0x00d80000
#define HC_HTXnFM_BGRA16 0x00e80000
#define HC_HTXnFM_BGRA32 0x00f80000
#define HC_HTXnFM_BUMPMAP 0x00380000
#define HC_HTXnFM_Index1 (HC_HTXnFM_INDEX | 0x00000000)
#define HC_HTXnFM_Index2 (HC_HTXnFM_INDEX | 0x00010000)
#define HC_HTXnFM_Index4 (HC_HTXnFM_INDEX | 0x00020000)
#define HC_HTXnFM_Index8 (HC_HTXnFM_INDEX | 0x00030000)
#define HC_HTXnFM_T1 (HC_HTXnFM_Intensity | 0x00000000)
#define HC_HTXnFM_T2 (HC_HTXnFM_Intensity | 0x00010000)
#define HC_HTXnFM_T4 (HC_HTXnFM_Intensity | 0x00020000)
#define HC_HTXnFM_T8 (HC_HTXnFM_Intensity | 0x00030000)
#define HC_HTXnFM_L1 (HC_HTXnFM_Lum | 0x00000000)
#define HC_HTXnFM_L2 (HC_HTXnFM_Lum | 0x00010000)
#define HC_HTXnFM_L4 (HC_HTXnFM_Lum | 0x00020000)
#define HC_HTXnFM_L8 (HC_HTXnFM_Lum | 0x00030000)
#define HC_HTXnFM_AL44 (HC_HTXnFM_Lum | 0x00040000)
#define HC_HTXnFM_AL88 (HC_HTXnFM_Lum | 0x00050000)
#define HC_HTXnFM_A1 (HC_HTXnFM_Alpha | 0x00000000)
#define HC_HTXnFM_A2 (HC_HTXnFM_Alpha | 0x00010000)
#define HC_HTXnFM_A4 (HC_HTXnFM_Alpha | 0x00020000)
#define HC_HTXnFM_A8 (HC_HTXnFM_Alpha | 0x00030000)
#define HC_HTXnFM_DX1 (HC_HTXnFM_DX | 0x00010000)
#define HC_HTXnFM_DX23 (HC_HTXnFM_DX | 0x00020000)
#define HC_HTXnFM_DX45 (HC_HTXnFM_DX | 0x00030000)
#define HC_HTXnFM_RGB555 (HC_HTXnFM_ARGB16 | 0x00000000)
#define HC_HTXnFM_RGB565 (HC_HTXnFM_ARGB16 | 0x00010000)
#define HC_HTXnFM_ARGB1555 (HC_HTXnFM_ARGB16 | 0x00020000)
#define HC_HTXnFM_ARGB4444 (HC_HTXnFM_ARGB16 | 0x00030000)
#define HC_HTXnFM_ARGB0888 (HC_HTXnFM_ARGB32 | 0x00000000)
#define HC_HTXnFM_ARGB8888 (HC_HTXnFM_ARGB32 | 0x00010000)
#define HC_HTXnFM_BGR555 (HC_HTXnFM_ABGR16 | 0x00000000)
#define HC_HTXnFM_BGR565 (HC_HTXnFM_ABGR16 | 0x00010000)
#define HC_HTXnFM_ABGR1555 (HC_HTXnFM_ABGR16 | 0x00020000)
#define HC_HTXnFM_ABGR4444 (HC_HTXnFM_ABGR16 | 0x00030000)
#define HC_HTXnFM_ABGR0888 (HC_HTXnFM_ABGR32 | 0x00000000)
#define HC_HTXnFM_ABGR8888 (HC_HTXnFM_ABGR32 | 0x00010000)
#define HC_HTXnFM_RGBA5550 (HC_HTXnFM_RGBA16 | 0x00000000)
#define HC_HTXnFM_RGBA5551 (HC_HTXnFM_RGBA16 | 0x00020000)
#define HC_HTXnFM_RGBA4444 (HC_HTXnFM_RGBA16 | 0x00030000)
#define HC_HTXnFM_RGBA8880 (HC_HTXnFM_RGBA32 | 0x00000000)
#define HC_HTXnFM_RGBA8888 (HC_HTXnFM_RGBA32 | 0x00010000)
#define HC_HTXnFM_BGRA5550 (HC_HTXnFM_BGRA16 | 0x00000000)
#define HC_HTXnFM_BGRA5551 (HC_HTXnFM_BGRA16 | 0x00020000)
#define HC_HTXnFM_BGRA4444 (HC_HTXnFM_BGRA16 | 0x00030000)
#define HC_HTXnFM_BGRA8880 (HC_HTXnFM_BGRA32 | 0x00000000)
#define HC_HTXnFM_BGRA8888 (HC_HTXnFM_BGRA32 | 0x00010000)
#define HC_HTXnFM_VU88 (HC_HTXnFM_BUMPMAP | 0x00000000)
#define HC_HTXnFM_LVU655 (HC_HTXnFM_BUMPMAP | 0x00010000)
#define HC_HTXnFM_LVU888 (HC_HTXnFM_BUMPMAP | 0x00020000)
#define HC_HTXnLoc_Local 0x00000000
#define HC_HTXnLoc_Sys 0x00000002
#define HC_HTXnLoc_AGP 0x00000003
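
HC_SubA_HTXnFM combines a format family (ARGB16/ARGB32/...), a sub-format
within that family, and a fetch location. A sketch selecting 32-bit
ARGB8888 texels stored in AGP memory (the helper is hypothetical; both
macros it combines are defined above):

#include <stdint.h>
#include "via_3d_reg.h"

/* Texture format word: ARGB8888 texels fetched over AGP. */
static uint32_t hc_tex_fm_argb8888_agp(void)
{
	return HC_HTXnFM_ARGB8888 | HC_HTXnLoc_AGP;
}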
/* HC_SubA_HTXnTRAH 0x007f
 */
#define HC_HTXnTRAH_MASK 0x00ff0000
#define HC_HTXnTRAL_MASK 0x0000ff00
#define HC_HTXnTBA_MASK 0x000000ff
#define HC_HTXnTRAH_SHIFT 16
#define HC_HTXnTRAL_SHIFT 8
/* HC_SubA_HTXnTBLCsat 0x0080
 *-- Define the input texture.
 */
#define HC_XTC_TOPC 0x00000000
#define HC_XTC_InvTOPC 0x00000010
#define HC_XTC_TOPCp5 0x00000020
#define HC_XTC_Cbias 0x00000000
#define HC_XTC_InvCbias 0x00000010
#define HC_XTC_0 0x00000000
#define HC_XTC_Dif 0x00000001
#define HC_XTC_Spec 0x00000002
#define HC_XTC_Tex 0x00000003
#define HC_XTC_Cur 0x00000004
#define HC_XTC_Adif 0x00000005
#define HC_XTC_Fog 0x00000006
#define HC_XTC_Atex 0x00000007
#define HC_XTC_Acur 0x00000008
#define HC_XTC_HTXnTBLRC 0x00000009
#define HC_XTC_Ctexnext 0x0000000a
/*--
 */
#define HC_HTXnTBLCsat_MASK 0x00800000
#define HC_HTXnTBLCa_MASK 0x000fc000
#define HC_HTXnTBLCb_MASK 0x00001f80
#define HC_HTXnTBLCc_MASK 0x0000003f
#define HC_HTXnTBLCa_TOPC (HC_XTC_TOPC << 14)
#define HC_HTXnTBLCa_InvTOPC (HC_XTC_InvTOPC << 14)
#define HC_HTXnTBLCa_TOPCp5 (HC_XTC_TOPCp5 << 14)
#define HC_HTXnTBLCa_0 (HC_XTC_0 << 14)
#define HC_HTXnTBLCa_Dif (HC_XTC_Dif << 14)
#define HC_HTXnTBLCa_Spec (HC_XTC_Spec << 14)
#define HC_HTXnTBLCa_Tex (HC_XTC_Tex << 14)
#define HC_HTXnTBLCa_Cur (HC_XTC_Cur << 14)
#define HC_HTXnTBLCa_Adif (HC_XTC_Adif << 14)
#define HC_HTXnTBLCa_Fog (HC_XTC_Fog << 14)
#define HC_HTXnTBLCa_Atex (HC_XTC_Atex << 14)
#define HC_HTXnTBLCa_Acur (HC_XTC_Acur << 14)
#define HC_HTXnTBLCa_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14)
#define HC_HTXnTBLCa_Ctexnext (HC_XTC_Ctexnext << 14)
#define HC_HTXnTBLCb_TOPC (HC_XTC_TOPC << 7)
#define HC_HTXnTBLCb_InvTOPC (HC_XTC_InvTOPC << 7)
#define HC_HTXnTBLCb_TOPCp5 (HC_XTC_TOPCp5 << 7)
#define HC_HTXnTBLCb_0 (HC_XTC_0 << 7)
#define HC_HTXnTBLCb_Dif (HC_XTC_Dif << 7)
#define HC_HTXnTBLCb_Spec (HC_XTC_Spec << 7)
#define HC_HTXnTBLCb_Tex (HC_XTC_Tex << 7)
#define HC_HTXnTBLCb_Cur (HC_XTC_Cur << 7)
#define HC_HTXnTBLCb_Adif (HC_XTC_Adif << 7)
#define HC_HTXnTBLCb_Fog (HC_XTC_Fog << 7)
#define HC_HTXnTBLCb_Atex (HC_XTC_Atex << 7)
#define HC_HTXnTBLCb_Acur (HC_XTC_Acur << 7)
#define HC_HTXnTBLCb_HTXnTBLRC (HC_XTC_HTXnTBLRC << 7)
#define HC_HTXnTBLCb_Ctexnext (HC_XTC_Ctexnext << 7)
#define HC_HTXnTBLCc_TOPC (HC_XTC_TOPC << 0)
#define HC_HTXnTBLCc_InvTOPC (HC_XTC_InvTOPC << 0)
#define HC_HTXnTBLCc_TOPCp5 (HC_XTC_TOPCp5 << 0)
#define HC_HTXnTBLCc_0 (HC_XTC_0 << 0)
#define HC_HTXnTBLCc_Dif (HC_XTC_Dif << 0)
#define HC_HTXnTBLCc_Spec (HC_XTC_Spec << 0)
#define HC_HTXnTBLCc_Tex (HC_XTC_Tex << 0)
#define HC_HTXnTBLCc_Cur (HC_XTC_Cur << 0)
#define HC_HTXnTBLCc_Adif (HC_XTC_Adif << 0)
#define HC_HTXnTBLCc_Fog (HC_XTC_Fog << 0)
#define HC_HTXnTBLCc_Atex (HC_XTC_Atex << 0)
#define HC_HTXnTBLCc_Acur (HC_XTC_Acur << 0)
#define HC_HTXnTBLCc_HTXnTBLRC (HC_XTC_HTXnTBLRC << 0)
#define HC_HTXnTBLCc_Ctexnext (HC_XTC_Ctexnext << 0)
/* HC_SubA_HTXnTBLCop 0x0081
 */
#define HC_HTXnTBLdot_MASK 0x00c00000
#define HC_HTXnTBLCop_MASK 0x00380000
#define HC_HTXnTBLCbias_MASK 0x0007c000
#define HC_HTXnTBLCshift_MASK 0x00001800
#define HC_HTXnTBLAop_MASK 0x00000380
#define HC_HTXnTBLAbias_MASK 0x00000078
#define HC_HTXnTBLAshift_MASK 0x00000003
#define HC_HTXnTBLCop_Add 0x00000000
#define HC_HTXnTBLCop_Sub 0x00080000
#define HC_HTXnTBLCop_Min 0x00100000
#define HC_HTXnTBLCop_Max 0x00180000
#define HC_HTXnTBLCop_Mask 0x00200000
#define HC_HTXnTBLCbias_Cbias (HC_XTC_Cbias << 14)
#define HC_HTXnTBLCbias_InvCbias (HC_XTC_InvCbias << 14)
#define HC_HTXnTBLCbias_0 (HC_XTC_0 << 14)
#define HC_HTXnTBLCbias_Dif (HC_XTC_Dif << 14)
#define HC_HTXnTBLCbias_Spec (HC_XTC_Spec << 14)
#define HC_HTXnTBLCbias_Tex (HC_XTC_Tex << 14)
#define HC_HTXnTBLCbias_Cur (HC_XTC_Cur << 14)
#define HC_HTXnTBLCbias_Adif (HC_XTC_Adif << 14)
#define HC_HTXnTBLCbias_Fog (HC_XTC_Fog << 14)
#define HC_HTXnTBLCbias_Atex (HC_XTC_Atex << 14)
#define HC_HTXnTBLCbias_Acur (HC_XTC_Acur << 14)
#define HC_HTXnTBLCbias_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14)
#define HC_HTXnTBLCshift_1 0x00000000
#define HC_HTXnTBLCshift_2 0x00000800
#define HC_HTXnTBLCshift_No 0x00001000
#define HC_HTXnTBLCshift_DotP 0x00001800
/*=* John Sheng [2003.7.18] texture combine *=*/
#define HC_HTXnTBLDOT3 0x00080000
#define HC_HTXnTBLDOT4 0x000C0000

#define HC_HTXnTBLAop_Add 0x00000000
#define HC_HTXnTBLAop_Sub 0x00000080
#define HC_HTXnTBLAop_Min 0x00000100
#define HC_HTXnTBLAop_Max 0x00000180
#define HC_HTXnTBLAop_Mask 0x00000200
#define HC_HTXnTBLAbias_Inv 0x00000040
#define HC_HTXnTBLAbias_Adif 0x00000000
#define HC_HTXnTBLAbias_Fog 0x00000008
#define HC_HTXnTBLAbias_Acur 0x00000010
#define HC_HTXnTBLAbias_HTXnTBLRAbias 0x00000018
#define HC_HTXnTBLAbias_Atex 0x00000020
#define HC_HTXnTBLAshift_1 0x00000000
#define HC_HTXnTBLAshift_2 0x00000001
#define HC_HTXnTBLAshift_No 0x00000002
/* #define HC_HTXnTBLAshift_DotP 0x00000003 */
/* HC_SubA_HTXnTBLMPFog 0x0082
 */
#define HC_HTXnTBLMPfog_MASK 0x00e00000
#define HC_HTXnTBLMPfog_0 0x00000000
#define HC_HTXnTBLMPfog_Adif 0x00200000
#define HC_HTXnTBLMPfog_Fog 0x00400000
#define HC_HTXnTBLMPfog_Atex 0x00600000
#define HC_HTXnTBLMPfog_Acur 0x00800000
#define HC_HTXnTBLMPfog_GHTXnTBLRFog 0x00a00000
/* HC_SubA_HTXnTBLAsat 0x0083
 *-- Define the texture alpha input.
 */
#define HC_XTA_TOPA 0x00000000
#define HC_XTA_InvTOPA 0x00000008
#define HC_XTA_TOPAp5 0x00000010
#define HC_XTA_Adif 0x00000000
#define HC_XTA_Fog 0x00000001
#define HC_XTA_Acur 0x00000002
#define HC_XTA_HTXnTBLRA 0x00000003
#define HC_XTA_Atex 0x00000004
#define HC_XTA_Atexnext 0x00000005
/*--
 */
#define HC_HTXnTBLAsat_MASK 0x00800000
#define HC_HTXnTBLAMB_MASK 0x00700000
#define HC_HTXnTBLAa_MASK 0x0007c000
#define HC_HTXnTBLAb_MASK 0x00000f80
#define HC_HTXnTBLAc_MASK 0x0000001f
#define HC_HTXnTBLAMB_SHIFT 20
#define HC_HTXnTBLAa_TOPA (HC_XTA_TOPA << 14)
#define HC_HTXnTBLAa_InvTOPA (HC_XTA_InvTOPA << 14)
#define HC_HTXnTBLAa_TOPAp5 (HC_XTA_TOPAp5 << 14)
#define HC_HTXnTBLAa_Adif (HC_XTA_Adif << 14)
#define HC_HTXnTBLAa_Fog (HC_XTA_Fog << 14)
#define HC_HTXnTBLAa_Acur (HC_XTA_Acur << 14)
#define HC_HTXnTBLAa_HTXnTBLRA (HC_XTA_HTXnTBLRA << 14)
#define HC_HTXnTBLAa_Atex (HC_XTA_Atex << 14)
#define HC_HTXnTBLAa_Atexnext (HC_XTA_Atexnext << 14)
#define HC_HTXnTBLAb_TOPA (HC_XTA_TOPA << 7)
#define HC_HTXnTBLAb_InvTOPA (HC_XTA_InvTOPA << 7)
#define HC_HTXnTBLAb_TOPAp5 (HC_XTA_TOPAp5 << 7)
#define HC_HTXnTBLAb_Adif (HC_XTA_Adif << 7)
#define HC_HTXnTBLAb_Fog (HC_XTA_Fog << 7)
#define HC_HTXnTBLAb_Acur (HC_XTA_Acur << 7)
#define HC_HTXnTBLAb_HTXnTBLRA (HC_XTA_HTXnTBLRA << 7)
#define HC_HTXnTBLAb_Atex (HC_XTA_Atex << 7)
#define HC_HTXnTBLAb_Atexnext (HC_XTA_Atexnext << 7)
#define HC_HTXnTBLAc_TOPA (HC_XTA_TOPA << 0)
#define HC_HTXnTBLAc_InvTOPA (HC_XTA_InvTOPA << 0)
#define HC_HTXnTBLAc_TOPAp5 (HC_XTA_TOPAp5 << 0)
#define HC_HTXnTBLAc_Adif (HC_XTA_Adif << 0)
#define HC_HTXnTBLAc_Fog (HC_XTA_Fog << 0)
#define HC_HTXnTBLAc_Acur (HC_XTA_Acur << 0)
#define HC_HTXnTBLAc_HTXnTBLRA (HC_XTA_HTXnTBLRA << 0)
#define HC_HTXnTBLAc_Atex (HC_XTA_Atex << 0)
#define HC_HTXnTBLAc_Atexnext (HC_XTA_Atexnext << 0)
/* HC_SubA_HTXnTBLRAa 0x0089
 */
#define HC_HTXnTBLRAa_MASK 0x00ff0000
#define HC_HTXnTBLRAb_MASK 0x0000ff00
#define HC_HTXnTBLRAc_MASK 0x000000ff
#define HC_HTXnTBLRAa_SHIFT 16
#define HC_HTXnTBLRAb_SHIFT 8
#define HC_HTXnTBLRAc_SHIFT 0
/* HC_SubA_HTXnTBLRFog 0x008a
 */
#define HC_HTXnTBLRFog_MASK 0x0000ff00
#define HC_HTXnTBLRAbias_MASK 0x000000ff
#define HC_HTXnTBLRFog_SHIFT 8
#define HC_HTXnTBLRAbias_SHIFT 0
/* HC_SubA_HTXnLScale 0x0094
 */
#define HC_HTXnLScale_MASK 0x0007fc00
#define HC_HTXnLOff_MASK 0x000001ff
#define HC_HTXnLScale_SHIFT 10
/* HC_SubA_HTXSMD 0x0000
 */
#define HC_HTXSMD_MASK 0x00000080
#define HC_HTXTMD_MASK 0x00000040
#define HC_HTXNum_MASK 0x00000038
#define HC_HTXTRMD_MASK 0x00000006
#define HC_HTXCHCLR_MASK 0x00000001
#define HC_HTXNum_SHIFT 3

/* Texture Palette n
 */
#define HC_SubType_TexPalette0 0x00000000
#define HC_SubType_TexPalette1 0x00000001
#define HC_SubType_FogTable 0x00000010
#define HC_SubType_Stipple 0x00000014
/* HC_SubA_TexPalette0 0x0000
 */
#define HC_HTPnA_MASK 0xff000000
#define HC_HTPnR_MASK 0x00ff0000
#define HC_HTPnG_MASK 0x0000ff00
#define HC_HTPnB_MASK 0x000000ff
/* HC_SubA_FogTable 0x0010
 */
#define HC_HFPn3_MASK 0xff000000
#define HC_HFPn2_MASK 0x00ff0000
#define HC_HFPn1_MASK 0x0000ff00
#define HC_HFPn_MASK 0x000000ff
#define HC_HFPn3_SHIFT 24
#define HC_HFPn2_SHIFT 16
#define HC_HFPn1_SHIFT 8

/* Auto Testing & Security
 */
#define HC_SubA_HenFIFOAT 0x0000
#define HC_SubA_HFBDrawFirst 0x0004
#define HC_SubA_HFBBasL 0x0005
#define HC_SubA_HFBDst 0x0006
/* HC_SubA_HenFIFOAT 0x0000
 */
#define HC_HenFIFOAT_MASK 0x00000020
#define HC_HenGEMILock_MASK 0x00000010
#define HC_HenFBASwap_MASK 0x00000008
#define HC_HenOT_MASK 0x00000004
#define HC_HenCMDQ_MASK 0x00000002
#define HC_HenTXCTSU_MASK 0x00000001
/* HC_SubA_HFBDrawFirst 0x0004
 */
#define HC_HFBDrawFirst_MASK 0x00000800
#define HC_HFBQueue_MASK 0x00000400
#define HC_HFBLock_MASK 0x00000200
#define HC_HEOF_MASK 0x00000100
#define HC_HFBBasH_MASK 0x000000ff

/* GEMI Setting
 */
#define HC_SubA_HTArbRCM 0x0008
#define HC_SubA_HTArbRZ 0x000a
#define HC_SubA_HTArbWZ 0x000b
#define HC_SubA_HTArbRTX 0x000c
#define HC_SubA_HTArbRCW 0x000d
#define HC_SubA_HTArbE2 0x000e
#define HC_SubA_HArbRQCM 0x0010
#define HC_SubA_HArbWQCM 0x0011
#define HC_SubA_HGEMITout 0x0020
#define HC_SubA_HFthRTXD 0x0040
#define HC_SubA_HFthRTXA 0x0044
#define HC_SubA_HCMDQstL 0x0050
#define HC_SubA_HCMDQendL 0x0051
#define HC_SubA_HCMDQLen 0x0052
/* HC_SubA_HTArbRCM 0x0008
 */
#define HC_HTArbRCM_MASK 0x0000ffff
/* HC_SubA_HTArbRZ 0x000a
 */
#define HC_HTArbRZ_MASK 0x0000ffff
/* HC_SubA_HTArbWZ 0x000b
 */
#define HC_HTArbWZ_MASK 0x0000ffff
/* HC_SubA_HTArbRTX 0x000c
 */
#define HC_HTArbRTX_MASK 0x0000ffff
/* HC_SubA_HTArbRCW 0x000d
 */
#define HC_HTArbRCW_MASK 0x0000ffff
/* HC_SubA_HTArbE2 0x000e
 */
#define HC_HTArbE2_MASK 0x0000ffff
/* HC_SubA_HArbRQCM 0x0010
 */
#define HC_HTArbRQCM_MASK 0x0000ffff
/* HC_SubA_HArbWQCM 0x0011
 */
#define HC_HArbWQCM_MASK 0x0000ffff
/* HC_SubA_HGEMITout 0x0020
 */
#define HC_HGEMITout_MASK 0x000f0000
#define HC_HNPArbZC_MASK 0x0000ffff
#define HC_HGEMITout_SHIFT 16
/* HC_SubA_HFthRTXD 0x0040
 */
#define HC_HFthRTXD_MASK 0x00ff0000
#define HC_HFthRZD_MASK 0x0000ff00
#define HC_HFthWZD_MASK 0x000000ff
#define HC_HFthRTXD_SHIFT 16
#define HC_HFthRZD_SHIFT 8
/* HC_SubA_HFthRTXA 0x0044
 */
#define HC_HFthRTXA_MASK 0x000000ff

/******************************************************************************
** Define the Halcyon Internal register access constants. For simulator only.
******************************************************************************/
#define HC_SIMA_HAGPBstL 0x0000
#define HC_SIMA_HAGPBendL 0x0001
#define HC_SIMA_HAGPCMNT 0x0002
#define HC_SIMA_HAGPBpL 0x0003
#define HC_SIMA_HAGPBpH 0x0004
#define HC_SIMA_HClipTB 0x0005
#define HC_SIMA_HClipLR 0x0006
#define HC_SIMA_HFPClipTL 0x0007
#define HC_SIMA_HFPClipBL 0x0008
#define HC_SIMA_HFPClipLL 0x0009
#define HC_SIMA_HFPClipRL 0x000a
#define HC_SIMA_HFPClipTBH 0x000b
#define HC_SIMA_HFPClipLRH 0x000c
#define HC_SIMA_HLP 0x000d
#define HC_SIMA_HLPRF 0x000e
#define HC_SIMA_HSolidCL 0x000f
#define HC_SIMA_HPixGC 0x0010
#define HC_SIMA_HSPXYOS 0x0011
#define HC_SIMA_HCmdA 0x0012
#define HC_SIMA_HCmdB 0x0013
#define HC_SIMA_HEnable 0x0014
#define HC_SIMA_HZWBBasL 0x0015
#define HC_SIMA_HZWBBasH 0x0016
#define HC_SIMA_HZWBType 0x0017
#define HC_SIMA_HZBiasL 0x0018
#define HC_SIMA_HZWBend 0x0019
#define HC_SIMA_HZWTMD 0x001a
#define HC_SIMA_HZWCDL 0x001b
#define HC_SIMA_HZWCTAGnum 0x001c
#define HC_SIMA_HZCYNum 0x001d
#define HC_SIMA_HZWCFire 0x001e
/* #define HC_SIMA_HSBBasL 0x001d */
/* #define HC_SIMA_HSBBasH 0x001e */
/* #define HC_SIMA_HSBFM 0x001f */
#define HC_SIMA_HSTREF 0x0020
#define HC_SIMA_HSTMD 0x0021
#define HC_SIMA_HABBasL 0x0022
#define HC_SIMA_HABBasH 0x0023
#define HC_SIMA_HABFM 0x0024
#define HC_SIMA_HATMD 0x0025
#define HC_SIMA_HABLCsat 0x0026
#define HC_SIMA_HABLCop 0x0027
#define HC_SIMA_HABLAsat 0x0028
#define HC_SIMA_HABLAop 0x0029
#define HC_SIMA_HABLRCa 0x002a
#define HC_SIMA_HABLRFCa 0x002b
#define HC_SIMA_HABLRCbias 0x002c
#define HC_SIMA_HABLRCb 0x002d
#define HC_SIMA_HABLRFCb 0x002e
#define HC_SIMA_HABLRAa 0x002f
#define HC_SIMA_HABLRAb 0x0030
#define HC_SIMA_HDBBasL 0x0031
#define HC_SIMA_HDBBasH 0x0032
#define HC_SIMA_HDBFM 0x0033
#define HC_SIMA_HFBBMSKL 0x0034
#define HC_SIMA_HROP 0x0035
#define HC_SIMA_HFogLF 0x0036
#define HC_SIMA_HFogCL 0x0037
#define HC_SIMA_HFogCH 0x0038
#define HC_SIMA_HFogStL 0x0039
#define HC_SIMA_HFogStH 0x003a
#define HC_SIMA_HFogOOdMF 0x003b
#define HC_SIMA_HFogOOdEF 0x003c
#define HC_SIMA_HFogEndL 0x003d
#define HC_SIMA_HFogDenst 0x003e
/*---- start of texture 0 setting ----
 */
#define HC_SIMA_HTX0L0BasL 0x0040
#define HC_SIMA_HTX0L1BasL 0x0041
#define HC_SIMA_HTX0L2BasL 0x0042
#define HC_SIMA_HTX0L3BasL 0x0043
#define HC_SIMA_HTX0L4BasL 0x0044
#define HC_SIMA_HTX0L5BasL 0x0045
#define HC_SIMA_HTX0L6BasL 0x0046
#define HC_SIMA_HTX0L7BasL 0x0047
#define HC_SIMA_HTX0L8BasL 0x0048
#define HC_SIMA_HTX0L9BasL 0x0049
#define HC_SIMA_HTX0LaBasL 0x004a
#define HC_SIMA_HTX0LbBasL 0x004b
#define HC_SIMA_HTX0LcBasL 0x004c
#define HC_SIMA_HTX0LdBasL 0x004d
#define HC_SIMA_HTX0LeBasL 0x004e
#define HC_SIMA_HTX0LfBasL 0x004f
#define HC_SIMA_HTX0L10BasL 0x0050
#define HC_SIMA_HTX0L11BasL 0x0051
#define HC_SIMA_HTX0L012BasH 0x0052
#define HC_SIMA_HTX0L345BasH 0x0053
#define HC_SIMA_HTX0L678BasH 0x0054
#define HC_SIMA_HTX0L9abBasH 0x0055
#define HC_SIMA_HTX0LcdeBasH 0x0056
#define HC_SIMA_HTX0Lf1011BasH 0x0057
#define HC_SIMA_HTX0L0Pit 0x0058
#define HC_SIMA_HTX0L1Pit 0x0059
#define HC_SIMA_HTX0L2Pit 0x005a
#define HC_SIMA_HTX0L3Pit 0x005b
#define HC_SIMA_HTX0L4Pit 0x005c
#define HC_SIMA_HTX0L5Pit 0x005d
#define HC_SIMA_HTX0L6Pit 0x005e
#define HC_SIMA_HTX0L7Pit 0x005f
#define HC_SIMA_HTX0L8Pit 0x0060
#define HC_SIMA_HTX0L9Pit 0x0061
#define HC_SIMA_HTX0LaPit 0x0062
#define HC_SIMA_HTX0LbPit 0x0063
#define HC_SIMA_HTX0LcPit 0x0064
#define HC_SIMA_HTX0LdPit 0x0065
#define HC_SIMA_HTX0LePit 0x0066
#define HC_SIMA_HTX0LfPit 0x0067
#define HC_SIMA_HTX0L10Pit 0x0068
#define HC_SIMA_HTX0L11Pit 0x0069
#define HC_SIMA_HTX0L0_5WE 0x006a
#define HC_SIMA_HTX0L6_bWE 0x006b
#define HC_SIMA_HTX0Lc_11WE 0x006c
#define HC_SIMA_HTX0L0_5HE 0x006d
#define HC_SIMA_HTX0L6_bHE 0x006e
#define HC_SIMA_HTX0Lc_11HE 0x006f
#define HC_SIMA_HTX0L0OS 0x0070
#define HC_SIMA_HTX0TB 0x0071
#define HC_SIMA_HTX0MPMD 0x0072
#define HC_SIMA_HTX0CLODu 0x0073
#define HC_SIMA_HTX0FM 0x0074
#define HC_SIMA_HTX0TRCH 0x0075
#define HC_SIMA_HTX0TRCL 0x0076
#define HC_SIMA_HTX0TBC 0x0077
#define HC_SIMA_HTX0TRAH 0x0078
#define HC_SIMA_HTX0TBLCsat 0x0079
#define HC_SIMA_HTX0TBLCop 0x007a
#define HC_SIMA_HTX0TBLMPfog 0x007b
#define HC_SIMA_HTX0TBLAsat 0x007c
#define HC_SIMA_HTX0TBLRCa 0x007d
#define HC_SIMA_HTX0TBLRCb 0x007e
#define HC_SIMA_HTX0TBLRCc 0x007f
#define HC_SIMA_HTX0TBLRCbias 0x0080
#define HC_SIMA_HTX0TBLRAa 0x0081
#define HC_SIMA_HTX0TBLRFog 0x0082
#define HC_SIMA_HTX0BumpM00 0x0083
#define HC_SIMA_HTX0BumpM01 0x0084
#define HC_SIMA_HTX0BumpM10 0x0085
#define HC_SIMA_HTX0BumpM11 0x0086
#define HC_SIMA_HTX0LScale 0x0087
/*---- end of texture 0 setting ---- 0x008f
 */
#define HC_SIMA_TX0TX1_OFF 0x0050
/*---- start of texture 1 setting ----
 */
#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1Lc_11HE (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
1530#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
1531#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
1532#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
1533#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
1534#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
1535#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
1536#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
1537#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
1538#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
1539#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
1540#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
1541#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
1542#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
1543#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
1544#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
1545#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
1546#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
1547#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
1548#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
1549#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
1550#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
1551#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
1552#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
1553#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
1554/*---- end of texture 1 setting ---- 0xaf
1555 */
1556#define HC_SIMA_HTXSMD 0x00b0
1557#define HC_SIMA_HenFIFOAT 0x00b1
1558#define HC_SIMA_HFBDrawFirst 0x00b2
1559#define HC_SIMA_HFBBasL 0x00b3
1560#define HC_SIMA_HTArbRCM 0x00b4
1561#define HC_SIMA_HTArbRZ 0x00b5
1562#define HC_SIMA_HTArbWZ 0x00b6
1563#define HC_SIMA_HTArbRTX 0x00b7
1564#define HC_SIMA_HTArbRCW 0x00b8
1565#define HC_SIMA_HTArbE2 0x00b9
1566#define HC_SIMA_HGEMITout 0x00ba
1567#define HC_SIMA_HFthRTXD 0x00bb
1568#define HC_SIMA_HFthRTXA 0x00bc
1569/* Define the texture palette 0
1570 */
1571#define HC_SIMA_HTP0 0x0100
1572#define HC_SIMA_HTP1 0x0200
1573#define HC_SIMA_FOGTABLE 0x0300
1574#define HC_SIMA_STIPPLE 0x0400
1575#define HC_SIMA_HE3Fire 0x0440
1576#define HC_SIMA_TRANS_SET 0x0441
1577#define HC_SIMA_HREngSt 0x0442
1578#define HC_SIMA_HRFIFOempty 0x0443
1579#define HC_SIMA_HRFIFOfull 0x0444
1580#define HC_SIMA_HRErr 0x0445
1581#define HC_SIMA_FIFOstatus 0x0446
1582
1583/******************************************************************************
1584** Define the AGP command header.
1585******************************************************************************/
1586#define HC_ACMD_MASK 0xfe000000
1587#define HC_ACMD_SUB_MASK 0x0c000000
1588#define HC_ACMD_HCmdA 0xee000000
1589#define HC_ACMD_HCmdB 0xec000000
1590#define HC_ACMD_HCmdC 0xea000000
1591#define HC_ACMD_H1 0xf0000000
1592#define HC_ACMD_H2 0xf2000000
1593#define HC_ACMD_H3 0xf4000000
1594#define HC_ACMD_H4 0xf6000000
1595
1596#define HC_ACMD_H1IO_MASK 0x000001ff
1597#define HC_ACMD_H2IO1_MASK 0x001ff000
1598#define HC_ACMD_H2IO2_MASK 0x000001ff
1599#define HC_ACMD_H2IO1_SHIFT 12
1600#define HC_ACMD_H2IO2_SHIFT 0
1601#define HC_ACMD_H3IO_MASK 0x000001ff
1602#define HC_ACMD_H3COUNT_MASK 0x01fff000
1603#define HC_ACMD_H3COUNT_SHIFT 12
1604#define HC_ACMD_H4ID_MASK 0x000001ff
1605#define HC_ACMD_H4COUNT_MASK 0x01fffe00
1606#define HC_ACMD_H4COUNT_SHIFT 9
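As an illustration of how the H3 mask/shift pairs above combine into a command dword (a sketch, not part of the original header; the helper name is hypothetical and the field semantics are inferred from the mask names):

static inline uint32_t via_build_h3_header(uint32_t io_index, uint32_t count)
{
	/* H3 header: opcode in the top bits, dword count, target I/O index. */
	return HC_ACMD_H3 |
	       ((count << HC_ACMD_H3COUNT_SHIFT) & HC_ACMD_H3COUNT_MASK) |
	       (io_index & HC_ACMD_H3IO_MASK);
}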
1607
1608/********************************************************************************
1609** Define Header
1610********************************************************************************/
1611#define HC_HEADER2 0xF210F110
1612
1613/********************************************************************************
1614** Define Dummy Value
1615********************************************************************************/
1616#define HC_DUMMY 0xCCCCCCCC
1617/********************************************************************************
1618** Define for DMA use
1619********************************************************************************/
1620#define HALCYON_HEADER2 0XF210F110
1621#define HALCYON_FIRECMD 0XEE100000
1622#define HALCYON_FIREMASK 0XFFF00000
1623#define HALCYON_CMDB 0XEC000000
1624#define HALCYON_CMDBMASK 0XFFFE0000
1625#define HALCYON_SUB_ADDR0 0X00000000
1626#define HALCYON_HEADER1MASK 0XFFFFFC00
1627#define HALCYON_HEADER1 0XF0000000
1628#define HC_SubA_HAGPBstL 0x0060
1629#define HC_SubA_HAGPBendL 0x0061
1630#define HC_SubA_HAGPCMNT 0x0062
1631#define HC_SubA_HAGPBpL 0x0063
1632#define HC_SubA_HAGPBpH 0x0064
1633#define HC_HAGPCMNT_MASK 0x00800000
1634#define HC_HCmdErrClr_MASK 0x00400000
1635#define HC_HAGPBendH_MASK 0x0000ff00
1636#define HC_HAGPBstH_MASK 0x000000ff
1637#define HC_HAGPBendH_SHIFT 8
1638#define HC_HAGPBstH_SHIFT 0
1639#define HC_HAGPBpL_MASK 0x00fffffc
1640#define HC_HAGPBpID_MASK 0x00000003
1641#define HC_HAGPBpID_PAUSE 0x00000000
1642#define HC_HAGPBpID_JUMP 0x00000001
1643#define HC_HAGPBpID_STOP 0x00000002
1644#define HC_HAGPBpH_MASK 0x00ffffff
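A minimal sketch (hypothetical helper) of how the HEADER1 format above is used: the register offset, in dword units, is OR'ed into HALCYON_HEADER1 and followed by the data dword, mirroring what the SetReg2DAGP() macro in via_dma.c below emits:

static inline void via_emit_header1(uint32_t *vb, uint32_t reg, uint32_t data)
{
	vb[0] = HALCYON_HEADER1 | (reg >> 2);	/* command dword */
	vb[1] = data;				/* payload dword */
}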
1645
1646#define VIA_VIDEO_HEADER5 0xFE040000
1647#define VIA_VIDEO_HEADER6 0xFE050000
1648#define VIA_VIDEO_HEADER7 0xFE060000
1649#define VIA_VIDEOMASK 0xFFFF0000
1650#endif
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
new file mode 100644
index 000000000000..7a339dba6a69
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -0,0 +1,755 @@
1/* via_dma.c -- DMA support for the VIA Unichrome/Pro
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
7 * All Rights Reserved.
8 *
9 * Copyright 2004 The Unichrome project.
10 * All Rights Reserved.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sub license,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice (including the
20 * next paragraph) shall be included in all copies or substantial portions
21 * of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
28 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
29 * USE OR OTHER DEALINGS IN THE SOFTWARE.
30 *
31 * Authors:
32 * Tungsten Graphics,
33 * Erdi Chen,
34 * Thomas Hellstrom.
35 */
36
37#include "drmP.h"
38#include "drm.h"
39#include "via_drm.h"
40#include "via_drv.h"
41#include "via_3d_reg.h"
42
43#define CMDBUF_ALIGNMENT_SIZE (0x100)
44#define CMDBUF_ALIGNMENT_MASK (0x0ff)
45
46/* defines for VIA 3D registers */
47#define VIA_REG_STATUS 0x400
48#define VIA_REG_TRANSET 0x43C
49#define VIA_REG_TRANSPACE 0x440
50
51/* VIA_REG_STATUS(0x400): Engine Status */
52#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
53#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
54#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
55#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
56
57#define SetReg2DAGP(nReg, nData) { \
58 *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
59 *((uint32_t *)(vb) + 1) = (nData); \
60 vb = ((uint32_t *)vb) + 2; \
61 dev_priv->dma_low +=8; \
62}
63
64#define via_flush_write_combine() DRM_MEMORYBARRIER()
65
66#define VIA_OUT_RING_QW(w1,w2) \
67 *vb++ = (w1); \
68 *vb++ = (w2); \
69 dev_priv->dma_low += 8;
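Usage sketch (taken from via_pad_cache() later in this file): both macros emit one quadword and advance dev_priv->dma_low by 8 bytes:

	uint32_t *vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);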
70
71static void via_cmdbuf_start(drm_via_private_t * dev_priv);
72static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
73static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
74static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
75static int via_wait_idle(drm_via_private_t * dev_priv);
76static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
77
78/*
79 * Free space in command buffer.
80 */
81
82static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
83{
84 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
85 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
86
87 return ((hw_addr <= dev_priv->dma_low) ?
88 (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
89 (hw_addr - dev_priv->dma_low));
90}
91
92/*
93 * How much does the command regulator lag behind?
94 */
95
96static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
97{
98 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
99 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
100
101 return ((hw_addr <= dev_priv->dma_low) ?
102 (dev_priv->dma_low - hw_addr) :
103 (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
104}
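A worked example with illustrative numbers may help here:

/*
 * Example (illustrative numbers): with dma_high == 0x10000, dma_low ==
 * 0x8000 and the regulator read pointer at hw_addr == 0x2000,
 * via_cmdbuf_space() yields 0x10000 + 0x2000 - 0x8000 == 0xa000 free
 * bytes, while via_cmdbuf_lag() yields 0x8000 - 0x2000 == 0x6000 bytes
 * not yet consumed (no wrap has occurred, so dma_wrap is not involved).
 */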
105
106/*
107 * Check that the given size fits in the buffer, otherwise wait.
108 */
109
110static inline int
111via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
112{
113 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
114 uint32_t cur_addr, hw_addr, next_addr;
115 volatile uint32_t *hw_addr_ptr;
116 uint32_t count;
117 hw_addr_ptr = dev_priv->hw_addr_ptr;
118 cur_addr = dev_priv->dma_low;
119 next_addr = cur_addr + size + 512 * 1024;
120 count = 1000000;
121 do {
122 hw_addr = *hw_addr_ptr - agp_base;
123 if (count-- == 0) {
124 DRM_ERROR
125 ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
126 hw_addr, cur_addr, next_addr);
127 return -1;
128 }
129 if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
130 msleep(1);
131 } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
132 return 0;
133}
134
135/*
136 * Checks whether the buffer head has reached the end, rewinding the
137 * ring buffer when necessary.
138 *
139 * Returns virtual pointer to ring buffer.
140 */
141
142static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
143 unsigned int size)
144{
145 if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
146 dev_priv->dma_high) {
147 via_cmdbuf_rewind(dev_priv);
148 }
149 if (via_cmdbuf_wait(dev_priv, size) != 0) {
150 return NULL;
151 }
152
153 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
154}
155
156int via_dma_cleanup(struct drm_device * dev)
157{
158 if (dev->dev_private) {
159 drm_via_private_t *dev_priv =
160 (drm_via_private_t *) dev->dev_private;
161
162 if (dev_priv->ring.virtual_start) {
163 via_cmdbuf_reset(dev_priv);
164
165 drm_core_ioremapfree(&dev_priv->ring.map, dev);
166 dev_priv->ring.virtual_start = NULL;
167 }
168
169 }
170
171 return 0;
172}
173
174static int via_initialize(struct drm_device * dev,
175 drm_via_private_t * dev_priv,
176 drm_via_dma_init_t * init)
177{
178 if (!dev_priv || !dev_priv->mmio) {
179 DRM_ERROR("via_dma_init called before via_map_init\n");
180 return -EFAULT;
181 }
182
183 if (dev_priv->ring.virtual_start != NULL) {
184 DRM_ERROR("called again without calling cleanup\n");
185 return -EFAULT;
186 }
187
188 if (!dev->agp || !dev->agp->base) {
189 DRM_ERROR("called with no agp memory available\n");
190 return -EFAULT;
191 }
192
193 if (dev_priv->chipset == VIA_DX9_0) {
194 DRM_ERROR("AGP DMA is not supported on this chip\n");
195 return -EINVAL;
196 }
197
198 dev_priv->ring.map.offset = dev->agp->base + init->offset;
199 dev_priv->ring.map.size = init->size;
200 dev_priv->ring.map.type = 0;
201 dev_priv->ring.map.flags = 0;
202 dev_priv->ring.map.mtrr = 0;
203
204 drm_core_ioremap(&dev_priv->ring.map, dev);
205
206 if (dev_priv->ring.map.handle == NULL) {
207 via_dma_cleanup(dev);
208 DRM_ERROR("can not ioremap virtual address for"
209 " ring buffer\n");
210 return -ENOMEM;
211 }
212
213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
214
215 dev_priv->dma_ptr = dev_priv->ring.virtual_start;
216 dev_priv->dma_low = 0;
217 dev_priv->dma_high = init->size;
218 dev_priv->dma_wrap = init->size;
219 dev_priv->dma_offset = init->offset;
220 dev_priv->last_pause_ptr = NULL;
221 dev_priv->hw_addr_ptr =
222 (volatile uint32_t *)((char *)dev_priv->mmio->handle +
223 init->reg_pause_addr);
224
225 via_cmdbuf_start(dev_priv);
226
227 return 0;
228}
229
230static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
231{
232 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
233 drm_via_dma_init_t *init = data;
234 int retcode = 0;
235
236 switch (init->func) {
237 case VIA_INIT_DMA:
238 if (!DRM_SUSER(DRM_CURPROC))
239 retcode = -EPERM;
240 else
241 retcode = via_initialize(dev, dev_priv, init);
242 break;
243 case VIA_CLEANUP_DMA:
244 if (!DRM_SUSER(DRM_CURPROC))
245 retcode = -EPERM;
246 else
247 retcode = via_dma_cleanup(dev);
248 break;
249 case VIA_DMA_INITIALIZED:
250 retcode = (dev_priv->ring.virtual_start != NULL) ?
251 0 : -EFAULT;
252 break;
253 default:
254 retcode = -EINVAL;
255 break;
256 }
257
258 return retcode;
259}
260
261static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
262{
263 drm_via_private_t *dev_priv;
264 uint32_t *vb;
265 int ret;
266
267 dev_priv = (drm_via_private_t *) dev->dev_private;
268
269 if (dev_priv->ring.virtual_start == NULL) {
270 DRM_ERROR("called without initializing AGP ring buffer.\n");
271 return -EFAULT;
272 }
273
274 if (cmd->size > VIA_PCI_BUF_SIZE) {
275 return -ENOMEM;
276 }
277
278 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
279 return -EFAULT;
280
281 /*
282 * Running this function on AGP memory is dead slow. Therefore
283 * we run it on a temporary cacheable system memory buffer and
284 * copy it to AGP memory when ready.
285 */
286
287 if ((ret =
288 via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
289 cmd->size, dev, 1))) {
290 return ret;
291 }
292
293 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
294 if (vb == NULL) {
295 return -EAGAIN;
296 }
297
298 memcpy(vb, dev_priv->pci_buf, cmd->size);
299
300 dev_priv->dma_low += cmd->size;
301
302 /*
303 * Small submissions somehow stall the CPU (AGP cache effects?), so
304 * pad them to a greater size.
305 */
306
307 if (cmd->size < 0x100)
308 via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
309 via_cmdbuf_pause(dev_priv);
310
311 return 0;
312}
313
314int via_driver_dma_quiescent(struct drm_device * dev)
315{
316 drm_via_private_t *dev_priv = dev->dev_private;
317
318 if (!via_wait_idle(dev_priv)) {
319 return -EBUSY;
320 }
321 return 0;
322}
323
324static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
325{
326
327 LOCK_TEST_WITH_RETURN(dev, file_priv);
328
329 return via_driver_dma_quiescent(dev);
330}
331
332static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
333{
334 drm_via_cmdbuffer_t *cmdbuf = data;
335 int ret;
336
337 LOCK_TEST_WITH_RETURN(dev, file_priv);
338
339 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
340
341 ret = via_dispatch_cmdbuffer(dev, cmdbuf);
342 if (ret) {
343 return ret;
344 }
345
346 return 0;
347}
348
349static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
350 drm_via_cmdbuffer_t * cmd)
351{
352 drm_via_private_t *dev_priv = dev->dev_private;
353 int ret;
354
355 if (cmd->size > VIA_PCI_BUF_SIZE) {
356 return -ENOMEM;
357 }
358 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
359 return -EFAULT;
360
361 if ((ret =
362 via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
363 cmd->size, dev, 0))) {
364 return ret;
365 }
366
367 ret =
368 via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
369 cmd->size);
370 return ret;
371}
372
373static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
374{
375 drm_via_cmdbuffer_t *cmdbuf = data;
376 int ret;
377
378 LOCK_TEST_WITH_RETURN(dev, file_priv);
379
380 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
381
382 ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
383 if (ret) {
384 return ret;
385 }
386
387 return 0;
388}
389
390static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
391 uint32_t * vb, int qw_count)
392{
393 for (; qw_count > 0; --qw_count) {
394 VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
395 }
396 return vb;
397}
398
399/*
400 * This function is used internally by ring buffer management code.
401 *
402 * Returns virtual pointer to ring buffer.
403 */
404static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
405{
406 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
407}
408
409/*
410 * Hooks a segment of data into the tail of the ring-buffer by
411 * modifying the pause address stored in the buffer itself. If
412 * the regulator has already paused, restart it.
413 */
414static int via_hook_segment(drm_via_private_t * dev_priv,
415 uint32_t pause_addr_hi, uint32_t pause_addr_lo,
416 int no_pci_fire)
417{
418 int paused, count;
419 volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
420 uint32_t reader,ptr;
421 uint32_t diff;
422
423 paused = 0;
424 via_flush_write_combine();
425 (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
426
427 *paused_at = pause_addr_lo;
428 via_flush_write_combine();
429 (void) *paused_at;
430
431 reader = *(dev_priv->hw_addr_ptr);
432 ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
433 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
434
435 dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
436
437 /*
438 * If there is a possibility that the command reader will
439 * miss the new pause address and pause on the old one,
440 * we need to program the new start address
441 * using PCI.
442 */
443
444 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
445 count = 10000000;
446 while(diff == 0 && count--) {
447 paused = (VIA_READ(0x41c) & 0x80000000);
448 if (paused)
449 break;
450 reader = *(dev_priv->hw_addr_ptr);
451 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
452 }
453
454 paused = VIA_READ(0x41c) & 0x80000000;
455
456 if (paused && !no_pci_fire) {
457 reader = *(dev_priv->hw_addr_ptr);
458 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
459 diff &= (dev_priv->dma_high - 1);
460 if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
461 DRM_ERROR("Paused at incorrect address. "
462 "0x%08x, 0x%08x 0x%08x\n",
463 ptr, reader, dev_priv->dma_diff);
464 } else if (diff == 0) {
465 /*
466 * There is a concern that these writes may stall the PCI bus
467 * if the GPU is not idle. However, idling the GPU first
468 * doesn't make a difference.
469 */
470
471 VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
472 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
473 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
474 VIA_READ(VIA_REG_TRANSPACE);
475 }
476 }
477 return paused;
478}
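A sketch of the mechanism, as far as can be inferred from the code above:

/*
 * The ring tail always ends in a pause command; last_pause_ptr points
 * at its low address dword. Hooking a segment overwrites that dword so
 * the old pause command now points past the new data to the new pause,
 * then remembers the new trailing pause for next time. Only if the
 * regulator is found to have already paused on the stale address is it
 * restarted through the PCI register path.
 */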
479
480static int via_wait_idle(drm_via_private_t * dev_priv)
481{
482 int count = 10000000;
483
484 while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
485
486 while (count-- && (VIA_READ(VIA_REG_STATUS) &
487 (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
488 VIA_3D_ENG_BUSY))) ;
489 return count;
490}
491
492static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
493 uint32_t addr, uint32_t * cmd_addr_hi,
494 uint32_t * cmd_addr_lo, int skip_wait)
495{
496 uint32_t agp_base;
497 uint32_t cmd_addr, addr_lo, addr_hi;
498 uint32_t *vb;
499 uint32_t qw_pad_count;
500
501 if (!skip_wait)
502 via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
503
504 vb = via_get_dma(dev_priv);
505 VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
506 (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
507 agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
508 qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
509 ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
510
511 cmd_addr = (addr) ? addr :
512 agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
513 addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
514 (cmd_addr & HC_HAGPBpL_MASK));
515 addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
516
517 vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
518 VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
519 return vb;
520}
521
522static void via_cmdbuf_start(drm_via_private_t * dev_priv)
523{
524 uint32_t pause_addr_lo, pause_addr_hi;
525 uint32_t start_addr, start_addr_lo;
526 uint32_t end_addr, end_addr_lo;
527 uint32_t command;
528 uint32_t agp_base;
529 uint32_t ptr;
530 uint32_t reader;
531 int count;
532
533 dev_priv->dma_low = 0;
534
535 agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
536 start_addr = agp_base;
537 end_addr = agp_base + dev_priv->dma_high;
538
539 start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
540 end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
541 command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
542 ((end_addr & 0xff000000) >> 16));
543
544 dev_priv->last_pause_ptr =
545 via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
546 &pause_addr_hi, &pause_addr_lo, 1) - 1;
547
548 via_flush_write_combine();
549 (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
550
551 VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
552 VIA_WRITE(VIA_REG_TRANSPACE, command);
553 VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
554 VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
555
556 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
557 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
558 DRM_WRITEMEMORYBARRIER();
559 VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
560 VIA_READ(VIA_REG_TRANSPACE);
561
562 dev_priv->dma_diff = 0;
563
564 count = 10000000;
565 while (!(VIA_READ(0x41c) & 0x80000000) && count--);
566
567 reader = *(dev_priv->hw_addr_ptr);
568 ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
569 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
570
571 /*
572 * This is the difference between where we tell the
573 * command reader to pause and where it actually pauses.
574 * This differs between hw implementations, so we need to
575 * detect it.
576 */
577
578 dev_priv->dma_diff = ptr - reader;
579}
580
581static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
582{
583 uint32_t *vb;
584
585 via_cmdbuf_wait(dev_priv, qwords + 2);
586 vb = via_get_dma(dev_priv);
587 VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
588 via_align_buffer(dev_priv, vb, qwords);
589}
590
591static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
592{
593 uint32_t *vb = via_get_dma(dev_priv);
594 SetReg2DAGP(0x0C, (0 | (0 << 16)));
595 SetReg2DAGP(0x10, 0 | (0 << 16));
596 SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
597}
598
599static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
600{
601 uint32_t agp_base;
602 uint32_t pause_addr_lo, pause_addr_hi;
603 uint32_t jump_addr_lo, jump_addr_hi;
604 volatile uint32_t *last_pause_ptr;
605 uint32_t dma_low_save1, dma_low_save2;
606
607 agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
608 via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
609 &jump_addr_lo, 0);
610
611 dev_priv->dma_wrap = dev_priv->dma_low;
612
613 /*
614 * Wrap command buffer to the beginning.
615 */
616
617 dev_priv->dma_low = 0;
618 if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
619 DRM_ERROR("via_cmdbuf_jump failed\n");
620 }
621
622 via_dummy_bitblt(dev_priv);
623 via_dummy_bitblt(dev_priv);
624
625 last_pause_ptr =
626 via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
627 &pause_addr_lo, 0) - 1;
628 via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
629 &pause_addr_lo, 0);
630
631 *last_pause_ptr = pause_addr_lo;
632 dma_low_save1 = dev_priv->dma_low;
633
634 /*
635 * Now, set a trap that will pause the regulator if it tries to rerun the old
636 * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
637 * and reissues the jump command over PCI, while the regulator has already taken the jump
638 * and actually paused at the current buffer end).
639 * There appears to be no other way to detect this condition, since the hw_addr_pointer
640 * does not seem to get updated immediately when a jump occurs.
641 */
642
643 last_pause_ptr =
644 via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
645 &pause_addr_lo, 0) - 1;
646 via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
647 &pause_addr_lo, 0);
648 *last_pause_ptr = pause_addr_lo;
649
650 dma_low_save2 = dev_priv->dma_low;
651 dev_priv->dma_low = dma_low_save1;
652 via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
653 dev_priv->dma_low = dma_low_save2;
654 via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
655}
656
657
658static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
659{
660 via_cmdbuf_jump(dev_priv);
661}
662
663static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
664{
665 uint32_t pause_addr_lo, pause_addr_hi;
666
667 via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
668 via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
669}
670
671static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
672{
673 via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
674}
675
676static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
677{
678 via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
679 via_wait_idle(dev_priv);
680}
681
682/*
683 * User interface to the space and lag functions.
684 */
685
686static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
687{
688 drm_via_cmdbuf_size_t *d_siz = data;
689 int ret = 0;
690 uint32_t tmp_size, count;
691 drm_via_private_t *dev_priv;
692
693 DRM_DEBUG("\n");
694 LOCK_TEST_WITH_RETURN(dev, file_priv);
695
696 dev_priv = (drm_via_private_t *) dev->dev_private;
697
698 if (dev_priv->ring.virtual_start == NULL) {
699 DRM_ERROR("called without initializing AGP ring buffer.\n");
700 return -EFAULT;
701 }
702
703 count = 1000000;
704 tmp_size = d_siz->size;
705 switch (d_siz->func) {
706 case VIA_CMDBUF_SPACE:
707 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
708 && count--) {
709 if (!d_siz->wait) {
710 break;
711 }
712 }
713 if (!count) {
714 DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
715 ret = -EAGAIN;
716 }
717 break;
718 case VIA_CMDBUF_LAG:
719 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
720 && count--) {
721 if (!d_siz->wait) {
722 break;
723 }
724 }
725 if (!count) {
726 DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
727 ret = -EAGAIN;
728 }
729 break;
730 default:
731 ret = -EFAULT;
732 }
733 d_siz->size = tmp_size;
734
735 return ret;
736}
737
738struct drm_ioctl_desc via_ioctls[] = {
739 DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
740 DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
741 DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
742 DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
743 DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
744 DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
745 DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
746 DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
747 DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
748 DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
749 DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
750 DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
751 DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
752 DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
753};
754
755int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
new file mode 100644
index 000000000000..409e00afdd07
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -0,0 +1,816 @@
1/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Thomas Hellstrom.
26 * Partially based on code obtained from Digeo Inc.
27 */
28
29
30/*
31 * Unmaps the DMA mappings.
32 * FIXME: Is this a no-op on x86?
33 * FIXME: What happens if this one is called and a pending blit has previously done
34 * the same DMA mappings?
35 */
36
37#include "drmP.h"
38#include "via_drm.h"
39#include "via_drv.h"
40#include "via_dmablit.h"
41
42#include <linux/pagemap.h>
43
44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
46#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
47
48typedef struct _drm_via_descriptor {
49 uint32_t mem_addr;
50 uint32_t dev_addr;
51 uint32_t size;
52 uint32_t next;
53} drm_via_descriptor_t;
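An illustrative picture of the chain this descriptor forms (inferred from via_map_blit_for_device() below):

/*
 * Each descriptor maps one contiguous chunk of the blit; 'next' holds
 * the bus address of the descriptor the engine fetches afterwards, and
 * the first descriptor written carries the end-of-chain marker
 * (VIA_DMA_DPR_EC). Since the chain is built in reverse:
 *
 *   chain_start -> desc[N-1] -> desc[N-2] -> ... -> desc[0] (end)
 */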
54
55
56/*
57 * Unmap a DMA mapping.
58 */
59
60
61
62static void
63via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
64{
65 int num_desc = vsg->num_desc;
66 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
67 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
68 drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
69 descriptor_this_page;
70 dma_addr_t next = vsg->chain_start;
71
72 while(num_desc--) {
73 if (descriptor_this_page-- == 0) {
74 cur_descriptor_page--;
75 descriptor_this_page = vsg->descriptors_per_page - 1;
76 desc_ptr = vsg->desc_pages[cur_descriptor_page] +
77 descriptor_this_page;
78 }
79 dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
80 dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
81 next = (dma_addr_t) desc_ptr->next;
82 desc_ptr--;
83 }
84}
85
86/*
87 * If mode = 0, count how many descriptors are needed.
88 * If mode = 1, map the DMA pages for the device, build the descriptor chain and map it too.
89 * Descriptors are run in reverse order by the hardware, because we are not allowed to update
90 * the 'next' field without sync calls once the descriptor is already mapped.
91 */
92
93static void
94via_map_blit_for_device(struct pci_dev *pdev,
95 const drm_via_dmablit_t *xfer,
96 drm_via_sg_info_t *vsg,
97 int mode)
98{
99 unsigned cur_descriptor_page = 0;
100 unsigned num_descriptors_this_page = 0;
101 unsigned char *mem_addr = xfer->mem_addr;
102 unsigned char *cur_mem;
103 unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
104 uint32_t fb_addr = xfer->fb_addr;
105 uint32_t cur_fb;
106 unsigned long line_len;
107 unsigned remaining_len;
108 int num_desc = 0;
109 int cur_line;
110 dma_addr_t next = 0 | VIA_DMA_DPR_EC;
111 drm_via_descriptor_t *desc_ptr = NULL;
112
113 if (mode == 1)
114 desc_ptr = vsg->desc_pages[cur_descriptor_page];
115
116 for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
117
118 line_len = xfer->line_length;
119 cur_fb = fb_addr;
120 cur_mem = mem_addr;
121
122 while (line_len > 0) {
123
124 remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
125 line_len -= remaining_len;
126
127 if (mode == 1) {
128 desc_ptr->mem_addr =
129 dma_map_page(&pdev->dev,
130 vsg->pages[VIA_PFN(cur_mem) -
131 VIA_PFN(first_addr)],
132 VIA_PGOFF(cur_mem), remaining_len,
133 vsg->direction);
134 desc_ptr->dev_addr = cur_fb;
135
136 desc_ptr->size = remaining_len;
137 desc_ptr->next = (uint32_t) next;
138 next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
139 DMA_TO_DEVICE);
140 desc_ptr++;
141 if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
142 num_descriptors_this_page = 0;
143 desc_ptr = vsg->desc_pages[++cur_descriptor_page];
144 }
145 }
146
147 num_desc++;
148 cur_mem += remaining_len;
149 cur_fb += remaining_len;
150 }
151
152 mem_addr += xfer->mem_stride;
153 fb_addr += xfer->fb_stride;
154 }
155
156 if (mode == 1) {
157 vsg->chain_start = next;
158 vsg->state = dr_via_device_mapped;
159 }
160 vsg->num_desc = num_desc;
161}
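The two-pass protocol implied by the mode parameter is what via_build_sg_info() later in this file does; roughly:

	/* Pass 1: count descriptors only. */
	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	/* Allocate vsg->num_desc descriptors' worth of pages. */
	ret = via_alloc_desc_pages(vsg);
	/* Pass 2: map the pages and build the descriptor chain. */
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);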
162
163/*
164 * Function that frees up all resources for a blit. It is usable even if the
165 * blit info has only been partially built as long as the status enum is consistent
166 * with the actual status of the used resources.
167 */
168
169
170static void
171via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
172{
173 struct page *page;
174 int i;
175
176 switch(vsg->state) {
177 case dr_via_device_mapped:
178 via_unmap_blit_from_device(pdev, vsg);
179 case dr_via_desc_pages_alloc:
180 for (i=0; i<vsg->num_desc_pages; ++i) {
181 if (vsg->desc_pages[i] != NULL)
182 free_page((unsigned long)vsg->desc_pages[i]);
183 }
184 kfree(vsg->desc_pages);
185 case dr_via_pages_locked:
186 for (i=0; i<vsg->num_pages; ++i) {
187 if ( NULL != (page = vsg->pages[i])) {
188 if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
189 SetPageDirty(page);
190 page_cache_release(page);
191 }
192 }
193 case dr_via_pages_alloc:
194 vfree(vsg->pages);
195 default:
196 vsg->state = dr_via_sg_init;
197 }
198 if (vsg->bounce_buffer) {
199 vfree(vsg->bounce_buffer);
200 vsg->bounce_buffer = NULL;
201 }
202 vsg->free_on_sequence = 0;
203}
204
205/*
206 * Fire a blit engine.
207 */
208
209static void
210via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
211{
212 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
213
214 VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
215 VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
216 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
217 VIA_DMA_CSR_DE);
218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
221 DRM_WRITEMEMORYBARRIER();
222 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
223 VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
224}
225
226/*
227 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
228 * occur here if the calling user does not have access to the submitted address.
229 */
230
231static int
232via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
233{
234 int ret;
235 unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
236 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
237 first_pfn + 1;
238
239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
240 return -ENOMEM;
241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
242 down_read(&current->mm->mmap_sem);
243 ret = get_user_pages(current, current->mm,
244 (unsigned long)xfer->mem_addr,
245 vsg->num_pages,
246 (vsg->direction == DMA_FROM_DEVICE),
247 0, vsg->pages, NULL);
248
249 up_read(&current->mm->mmap_sem);
250 if (ret != vsg->num_pages) {
251 if (ret < 0)
252 return ret;
253 vsg->state = dr_via_pages_locked;
254 return -EINVAL;
255 }
256 vsg->state = dr_via_pages_locked;
257 DRM_DEBUG("DMA pages locked\n");
258 return 0;
259}
260
261/*
262 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
263 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
264 * quite large for some blits, and pages don't need to be contiguous.
265 */
266
267static int
268via_alloc_desc_pages(drm_via_sg_info_t *vsg)
269{
270 int i;
271
272 vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
273 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
274 vsg->descriptors_per_page;
275
276 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
277 return -ENOMEM;
278
279 vsg->state = dr_via_desc_pages_alloc;
280 for (i=0; i<vsg->num_desc_pages; ++i) {
281 if (NULL == (vsg->desc_pages[i] =
282 (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
283 return -ENOMEM;
284 }
285 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
286 vsg->num_desc);
287 return 0;
288}
289
290static void
291via_abort_dmablit(struct drm_device *dev, int engine)
292{
293 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
294
295 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
296}
297
298static void
299via_dmablit_engine_off(struct drm_device *dev, int engine)
300{
301 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
302
303 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
304}
305
306
307
308/*
309 * The dmablit part of the IRQ handler. Try to do only reasonably fast things here.
310 * The rest, like unmapping and freeing memory for completed blits, is done in a separate
311 * workqueue task. Basically, the task of the interrupt handler is to submit a new blit to the
312 * engine, while the workqueue task takes care of the processing associated with the old blit.
313 */
314
315void
316via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
317{
318 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
319 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
320 int cur;
321 int done_transfer;
322 unsigned long irqsave=0;
323 uint32_t status = 0;
324
325 DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
326 engine, from_irq, (unsigned long) blitq);
327
328 if (from_irq) {
329 spin_lock(&blitq->blit_lock);
330 } else {
331 spin_lock_irqsave(&blitq->blit_lock, irqsave);
332 }
333
334 done_transfer = blitq->is_active &&
335 (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
336 done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
337
338 cur = blitq->cur;
339 if (done_transfer) {
340
341 blitq->blits[cur]->aborted = blitq->aborting;
342 blitq->done_blit_handle++;
343 DRM_WAKEUP(blitq->blit_queue + cur);
344
345 cur++;
346 if (cur >= VIA_NUM_BLIT_SLOTS)
347 cur = 0;
348 blitq->cur = cur;
349
350 /*
351 * Clear transfer done flag.
352 */
353
354 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
355
356 blitq->is_active = 0;
357 blitq->aborting = 0;
358 schedule_work(&blitq->wq);
359
360 } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
361
362 /*
363 * Abort transfer after one second.
364 */
365
366 via_abort_dmablit(dev, engine);
367 blitq->aborting = 1;
368 blitq->end = jiffies + DRM_HZ;
369 }
370
371 if (!blitq->is_active) {
372 if (blitq->num_outstanding) {
373 via_fire_dmablit(dev, blitq->blits[cur], engine);
374 blitq->is_active = 1;
375 blitq->cur = cur;
376 blitq->num_outstanding--;
377 blitq->end = jiffies + DRM_HZ;
378 if (!timer_pending(&blitq->poll_timer))
379 mod_timer(&blitq->poll_timer, jiffies + 1);
380 } else {
381 if (timer_pending(&blitq->poll_timer)) {
382 del_timer(&blitq->poll_timer);
383 }
384 via_dmablit_engine_off(dev, engine);
385 }
386 }
387
388 if (from_irq) {
389 spin_unlock(&blitq->blit_lock);
390 } else {
391 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
392 }
393}
394
395
396
397/*
398 * Check whether this blit is still active, performing necessary locking.
399 */
400
401static int
402via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
403{
404 unsigned long irqsave;
405 uint32_t slot;
406 int active;
407
408 spin_lock_irqsave(&blitq->blit_lock, irqsave);
409
410 /*
411 * Allow for handle wraparounds.
412 */
413
414 active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
415 ((blitq->cur_blit_handle - handle) <= (1 << 23));
416
417 if (queue && active) {
418 slot = handle - blitq->done_blit_handle + blitq->cur -1;
419 if (slot >= VIA_NUM_BLIT_SLOTS) {
420 slot -= VIA_NUM_BLIT_SLOTS;
421 }
422 *queue = blitq->blit_queue + slot;
423 }
424
425 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
426
427 return active;
428}
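A worked example of the wrap-safe handle test above (illustrative numbers):

/*
 * With done_blit_handle == 10, cur_blit_handle == 12 and handle == 11:
 * (10 - 11) wraps to 0xffffffff > 1 << 23, and (12 - 11) == 1 <= 1 << 23,
 * so the blit is still active. With handle == 9, (10 - 9) == 1 fails
 * the first test: that blit has already completed.
 */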
429
430/*
431 * Sync. Wait for up to three seconds for the blit to be performed.
432 */
433
434static int
435via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
436{
437
438 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
439 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
440 wait_queue_head_t *queue;
441 int ret = 0;
442
443 if (via_dmablit_active(blitq, engine, handle, &queue)) {
444 DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
445 !via_dmablit_active(blitq, engine, handle, NULL));
446 }
447 DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
448 handle, engine, ret);
449
450 return ret;
451}
452
453
454/*
455 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
456 * a) Broken hardware (typically those that don't have any video capture facility).
457 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
458 * The timer and hardware IRQs can and do work in parallel. If the hardware has
459 * IRQs, they will shorten the latency somewhat.
460 */
461
462
463
464static void
465via_dmablit_timer(unsigned long data)
466{
467 drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
468 struct drm_device *dev = blitq->dev;
469 int engine = (int)
470 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
471
472 DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
473 (unsigned long) jiffies);
474
475 via_dmablit_handler(dev, engine, 0);
476
477 if (!timer_pending(&blitq->poll_timer)) {
478 mod_timer(&blitq->poll_timer, jiffies + 1);
479
480 /*
481 * Rerun handler to delete timer if engines are off, and
482 * to shorten abort latency. This is a little nasty.
483 */
484
485 via_dmablit_handler(dev, engine, 0);
486
487 }
488}
489
490
491
492
493/*
494 * Workqueue task that frees data and mappings associated with a blit.
495 * Also wakes up waiting processes. Each of these tasks handles one
496 * blit engine only, and is not necessarily run on every interrupt.
497 */
498
499
500static void
501via_dmablit_workqueue(struct work_struct *work)
502{
503 drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
504 struct drm_device *dev = blitq->dev;
505 unsigned long irqsave;
506 drm_via_sg_info_t *cur_sg;
507 int cur_released;
508
509
510 DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
511 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
512
513 spin_lock_irqsave(&blitq->blit_lock, irqsave);
514
515 while(blitq->serviced != blitq->cur) {
516
517 cur_released = blitq->serviced++;
518
519 DRM_DEBUG("Releasing blit slot %d\n", cur_released);
520
521 if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
522 blitq->serviced = 0;
523
524 cur_sg = blitq->blits[cur_released];
525 blitq->num_free++;
526
527 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
528
529 DRM_WAKEUP(&blitq->busy_queue);
530
531 via_free_sg_info(dev->pdev, cur_sg);
532 kfree(cur_sg);
533
534 spin_lock_irqsave(&blitq->blit_lock, irqsave);
535 }
536
537 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
538}
539
540
541/*
542 * Init all blit engines. Currently we use two, but some hardware has four.
543 */
544
545
546void
547via_init_dmablit(struct drm_device *dev)
548{
549 int i,j;
550 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
551 drm_via_blitq_t *blitq;
552
553 pci_set_master(dev->pdev);
554
555 for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
556 blitq = dev_priv->blit_queues + i;
557 blitq->dev = dev;
558 blitq->cur_blit_handle = 0;
559 blitq->done_blit_handle = 0;
560 blitq->head = 0;
561 blitq->cur = 0;
562 blitq->serviced = 0;
563 blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
564 blitq->num_outstanding = 0;
565 blitq->is_active = 0;
566 blitq->aborting = 0;
567 spin_lock_init(&blitq->blit_lock);
568 for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
569 DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
570 }
571 DRM_INIT_WAITQUEUE(&blitq->busy_queue);
572 INIT_WORK(&blitq->wq, via_dmablit_workqueue);
573 setup_timer(&blitq->poll_timer, via_dmablit_timer,
574 (unsigned long)blitq);
575 }
576}
577
578/*
579 * Build all info and do all mappings required for a blit.
580 */
581
582
583static int
584via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
585{
586 int draw = xfer->to_fb;
587 int ret = 0;
588
589 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
590 vsg->bounce_buffer = NULL;
591
592 vsg->state = dr_via_sg_init;
593
594 if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
595 DRM_ERROR("Zero size bitblt.\n");
596 return -EINVAL;
597 }
598
599 /*
600 * The check below is a driver limitation, not a hardware one. We
601 * don't want to lock unused pages, and don't want to incorporate the
602 * extra logic of avoiding them. Make sure there are none.
603 * (Not a big limitation anyway.)
604 */
605
606 if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
607 DRM_ERROR("Too large system memory stride. Stride: %d, "
608 "Length: %d\n", xfer->mem_stride, xfer->line_length);
609 return -EINVAL;
610 }
611
612 if ((xfer->mem_stride == xfer->line_length) &&
613 (xfer->fb_stride == xfer->line_length)) {
614 xfer->mem_stride *= xfer->num_lines;
615 xfer->line_length = xfer->mem_stride;
616 xfer->fb_stride = xfer->mem_stride;
617 xfer->num_lines = 1;
618 }
619
620 /*
621 * Don't lock an arbitrarily large number of pages, since that causes a
622 * DoS security hole.
623 */
624
625 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
626 DRM_ERROR("Too large PCI DMA bitblt.\n");
627 return -EINVAL;
628 }
629
630 /*
631 * We allow a negative fb stride to allow flipping of images during
632 * the transfer.
633 */
634
635 if (xfer->mem_stride < xfer->line_length ||
636 abs(xfer->fb_stride) < xfer->line_length) {
637 DRM_ERROR("Invalid frame-buffer / memory stride.\n");
638 return -EINVAL;
639 }
640
641 /*
642 * A hardware bug seems to be worked around if system memory addresses start on
643 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
644 * about this. Meanwhile, we impose the following restrictions:
645 */
646
647#ifdef VIA_BUGFREE
648 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
649 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
650 DRM_ERROR("Invalid DRM bitblt alignment.\n");
651 return -EINVAL;
652 }
653#else
654 if ((((unsigned long)xfer->mem_addr & 15) ||
655 ((unsigned long)xfer->fb_addr & 3)) ||
656 ((xfer->num_lines > 1) &&
657 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
658 DRM_ERROR("Invalid DRM bitblt alignment.\n");
659 return -EINVAL;
660 }
661#endif
662
663 if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
664 DRM_ERROR("Could not lock DMA pages.\n");
665 via_free_sg_info(dev->pdev, vsg);
666 return ret;
667 }
668
669 via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
670 if (0 != (ret = via_alloc_desc_pages(vsg))) {
671 DRM_ERROR("Could not allocate DMA descriptor pages.\n");
672 via_free_sg_info(dev->pdev, vsg);
673 return ret;
674 }
675 via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
676
677 return 0;
678}
679
680
681/*
682 * Reserve one free slot in the blit queue. Will wait for one second for one
683 * to become available. Otherwise -EBUSY is returned.
684 */
685
686static int
687via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
688{
689 int ret=0;
690 unsigned long irqsave;
691
692 DRM_DEBUG("Num free is %d\n", blitq->num_free);
693 spin_lock_irqsave(&blitq->blit_lock, irqsave);
694 while(blitq->num_free == 0) {
695 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
696
697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
698 if (ret) {
699 return (-EINTR == ret) ? -EAGAIN : ret;
700 }
701
702 spin_lock_irqsave(&blitq->blit_lock, irqsave);
703 }
704
705 blitq->num_free--;
706 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
707
708 return 0;
709}
710
711/*
712 * Hand back a free slot if we changed our mind.
713 */
714
715static void
716via_dmablit_release_slot(drm_via_blitq_t *blitq)
717{
718 unsigned long irqsave;
719
720 spin_lock_irqsave(&blitq->blit_lock, irqsave);
721 blitq->num_free++;
722 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
723 DRM_WAKEUP( &blitq->busy_queue );
724}
725
726/*
727 * Grab a free slot. Build blit info and queue a blit.
728 */
729
730
731static int
732via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
733{
734 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
735 drm_via_sg_info_t *vsg;
736 drm_via_blitq_t *blitq;
737 int ret;
738 int engine;
739 unsigned long irqsave;
740
741 if (dev_priv == NULL) {
742 DRM_ERROR("Called without initialization.\n");
743 return -EINVAL;
744 }
745
746 engine = (xfer->to_fb) ? 0 : 1;
747 blitq = dev_priv->blit_queues + engine;
748 if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
749 return ret;
750 }
751 if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
752 via_dmablit_release_slot(blitq);
753 return -ENOMEM;
754 }
755 if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
756 via_dmablit_release_slot(blitq);
757 kfree(vsg);
758 return ret;
759 }
760 spin_lock_irqsave(&blitq->blit_lock, irqsave);
761
762 blitq->blits[blitq->head++] = vsg;
763 if (blitq->head >= VIA_NUM_BLIT_SLOTS)
764 blitq->head = 0;
765 blitq->num_outstanding++;
766 xfer->sync.sync_handle = ++blitq->cur_blit_handle;
767
768 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
769 xfer->sync.engine = engine;
770
771 via_dmablit_handler(dev, engine, 0);
772
773 return 0;
774}
775
776/*
777 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
778 * that there is a high probability that this IOCTL will be interrupted by a signal. In that
779 * case it returns with -EAGAIN for the signal to be delivered.
780 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
781 */
782
783int
784via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
785{
786 drm_via_blitsync_t *sync = data;
787 int err;
788
789 if (sync->engine >= VIA_NUM_BLIT_ENGINES)
790 return -EINVAL;
791
792 err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
793
794 if (-EINTR == err)
795 err = -EAGAIN;
796
797 return err;
798}
799
800
801/*
802 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
803 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
804 * be reissued. See the above IOCTL code.
805 */
806
807int
808via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
809{
810 drm_via_dmablit_t *xfer = data;
811 int err;
812
813 err = via_dmablit(dev, xfer);
814
815 return err;
816}
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
new file mode 100644
index 000000000000..7408a547a036
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dmablit.h
@@ -0,0 +1,140 @@
1/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright 2005 Thomas Hellstrom.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Thomas Hellstrom.
27 * Register info from Digeo Inc.
28 */
29
30#ifndef _VIA_DMABLIT_H
31#define _VIA_DMABLIT_H
32
33#include <linux/dma-mapping.h>
34
35#define VIA_NUM_BLIT_ENGINES 2
36#define VIA_NUM_BLIT_SLOTS 8
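/* Engine 0 handles transfers to the frame buffer, engine 1 transfers from
 * it; see the (xfer->to_fb) engine selection in via_dmablit.c. */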
37
38struct _drm_via_descriptor;
39
40typedef struct _drm_via_sg_info {
41 struct page **pages;
42 unsigned long num_pages;
43 struct _drm_via_descriptor **desc_pages;
44 int num_desc_pages;
45 int num_desc;
46 enum dma_data_direction direction;
47 unsigned char *bounce_buffer;
48 dma_addr_t chain_start;
49 uint32_t free_on_sequence;
50 unsigned int descriptors_per_page;
51 int aborted;
52 enum {
53 dr_via_device_mapped,
54 dr_via_desc_pages_alloc,
55 dr_via_pages_locked,
56 dr_via_pages_alloc,
57 dr_via_sg_init
58 } state;
59} drm_via_sg_info_t;
60
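/*
 * Per-engine blit queue (editor's summary, inferred from via_dmablit.c):
 * new blits are inserted at "head", which wraps modulo VIA_NUM_BLIT_SLOTS,
 * while "cur" tracks the slot currently being serviced; num_free and
 * num_outstanding account for ring occupancy.
 */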
61typedef struct _drm_via_blitq {
62 struct drm_device *dev;
63 uint32_t cur_blit_handle;
64 uint32_t done_blit_handle;
65 unsigned serviced;
66 unsigned head;
67 unsigned cur;
68 unsigned num_free;
69 unsigned num_outstanding;
70 unsigned long end;
71 int aborting;
72 int is_active;
73 drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
74 spinlock_t blit_lock;
75 wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
76 wait_queue_head_t busy_queue;
77 struct work_struct wq;
78 struct timer_list poll_timer;
79} drm_via_blitq_t;
80
81
82/*
83 * PCI DMA Registers
84 * Channels 2 & 3 don't seem to be implemented in hardware.
85 */
86
87#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
88#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
89#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
90#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
91
92#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
93#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
94#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
95#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
96
97#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
98#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
99#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
100#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
101
102#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
103#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
104#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
105#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
106
107#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
108#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
109#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
110#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
111
112#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
113#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
114#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
115#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
116
117#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
118
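/*
 * The channel register banks are regularly spaced, so the offsets above
 * follow a simple pattern. The helper macros below are the editor's
 * illustration only; they are not used by the driver:
 */
#define VIA_PCI_DMA_MAR(ch)	(0xE40 + 0x10 * (ch))	/* DAR/BCR/DPR at +4/+8/+0xC */
#define VIA_PCI_DMA_MR(ch)	(0xE80 + 0x04 * (ch))
#define VIA_PCI_DMA_CSR(ch)	(0xE90 + 0x04 * (ch))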
119/* Define for DMA engine */
120/* DPR */
121#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
122#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
123#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
124
125/* MR */
126#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
127#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
128#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
129
130/* CSR */
131#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
132#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
133#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
134#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
135#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
136
137
138
139
140#endif
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
new file mode 100644
index 000000000000..80c01cdfa37d
--- /dev/null
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "via_drm.h"
27#include "via_drv.h"
28
29#include "drm_pciids.h"
30
31static int dri_library_name(struct drm_device *dev, char *buf)
32{
33 return snprintf(buf, PAGE_SIZE, "unichrome");
34}
35
36static struct pci_device_id pciidlist[] = {
37 viadrv_PCI_IDS
38};
39
40static struct drm_driver driver = {
41 .driver_features =
42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
43 DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
44 .load = via_driver_load,
45 .unload = via_driver_unload,
46 .context_dtor = via_final_context,
47 .vblank_wait = via_driver_vblank_wait,
48 .irq_preinstall = via_driver_irq_preinstall,
49 .irq_postinstall = via_driver_irq_postinstall,
50 .irq_uninstall = via_driver_irq_uninstall,
51 .irq_handler = via_driver_irq_handler,
52 .dma_quiescent = via_driver_dma_quiescent,
53 .dri_library_name = dri_library_name,
54 .reclaim_buffers = drm_core_reclaim_buffers,
55 .reclaim_buffers_locked = NULL,
56 .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
57 .lastclose = via_lastclose,
58 .get_map_ofs = drm_core_get_map_ofs,
59 .get_reg_ofs = drm_core_get_reg_ofs,
60 .ioctls = via_ioctls,
61 .fops = {
62 .owner = THIS_MODULE,
63 .open = drm_open,
64 .release = drm_release,
65 .ioctl = drm_ioctl,
66 .mmap = drm_mmap,
67 .poll = drm_poll,
68 .fasync = drm_fasync,
69 },
70 .pci_driver = {
71 .name = DRIVER_NAME,
72 .id_table = pciidlist,
73 },
74
75 .name = DRIVER_NAME,
76 .desc = DRIVER_DESC,
77 .date = DRIVER_DATE,
78 .major = DRIVER_MAJOR,
79 .minor = DRIVER_MINOR,
80 .patchlevel = DRIVER_PATCHLEVEL,
81};
82
83static int __init via_init(void)
84{
85 driver.num_ioctls = via_max_ioctl;
86 via_init_command_verifier();
87 return drm_init(&driver);
88}
89
90static void __exit via_exit(void)
91{
92 drm_exit(&driver);
93}
94
95module_init(via_init);
96module_exit(via_exit);
97
98MODULE_AUTHOR(DRIVER_AUTHOR);
99MODULE_DESCRIPTION(DRIVER_DESC);
100MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
new file mode 100644
index 000000000000..2daae81874cd
--- /dev/null
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _VIA_DRV_H_
25#define _VIA_DRV_H_
26
27#include "drm_sman.h"
28#define DRIVER_AUTHOR "Various"
29
30#define DRIVER_NAME "via"
31#define DRIVER_DESC "VIA Unichrome / Pro"
32#define DRIVER_DATE "20070202"
33
34#define DRIVER_MAJOR 2
35#define DRIVER_MINOR 11
36#define DRIVER_PATCHLEVEL 1
37
38#include "via_verifier.h"
39
40#include "via_dmablit.h"
41
42#define VIA_PCI_BUF_SIZE 60000
43#define VIA_FIRE_BUF_SIZE 1024
44#define VIA_NUM_IRQS 4
45
46typedef struct drm_via_ring_buffer {
47 drm_local_map_t map;
48 char *virtual_start;
49} drm_via_ring_buffer_t;
50
51typedef uint32_t maskarray_t[5];
52
53typedef struct drm_via_irq {
54 atomic_t irq_received;
55 uint32_t pending_mask;
56 uint32_t enable_mask;
57 wait_queue_head_t irq_queue;
58} drm_via_irq_t;
59
60typedef struct drm_via_private {
61 drm_via_sarea_t *sarea_priv;
62 drm_local_map_t *sarea;
63 drm_local_map_t *fb;
64 drm_local_map_t *mmio;
65 unsigned long agpAddr;
66 wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
67 char *dma_ptr;
68 unsigned int dma_low;
69 unsigned int dma_high;
70 unsigned int dma_offset;
71 uint32_t dma_wrap;
72 volatile uint32_t *last_pause_ptr;
73 volatile uint32_t *hw_addr_ptr;
74 drm_via_ring_buffer_t ring;
75 struct timeval last_vblank;
76 int last_vblank_valid;
77 unsigned usec_per_vblank;
78 drm_via_state_t hc_state;
79 char pci_buf[VIA_PCI_BUF_SIZE];
80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
81 uint32_t num_fire_offsets;
82 int chipset;
83 drm_via_irq_t via_irqs[VIA_NUM_IRQS];
84 unsigned num_irqs;
85 maskarray_t *irq_masks;
86 uint32_t irq_enable_mask;
87 uint32_t irq_pending_mask;
88 int *irq_map;
89 unsigned int idle_fault;
90 struct drm_sman sman;
91 int vram_initialized;
92 int agp_initialized;
93 unsigned long vram_offset;
94 unsigned long agp_offset;
95 drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
96 uint32_t dma_diff;
97} drm_via_private_t;
98
99enum via_family {
100 VIA_OTHER = 0, /* Baseline */
101 VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
102 VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
103};
104
105/* VIA MMIO register access */
106#define VIA_BASE ((dev_priv->mmio))
107
108#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
109#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
110#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
111#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
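/* Usage sketch (editor's illustration): with dev_priv in scope, starting a
 * PCI DMA transfer on channel 0 could look like
 *	VIA_WRITE(VIA_PCI_DMA_CSR0, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
 * i.e. enable the engine and kick off the transfer (cf. via_dmablit.c). */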
112
113extern struct drm_ioctl_desc via_ioctls[];
114extern int via_max_ioctl;
115
116extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
117extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
118extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
119extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
120extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
121extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
122extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
123extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
124extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
125
126extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
127extern int via_driver_unload(struct drm_device *dev);
128
129extern int via_init_context(struct drm_device * dev, int context);
130extern int via_final_context(struct drm_device * dev, int context);
131
132extern int via_do_cleanup_map(struct drm_device * dev);
133extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
134
135extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
136extern void via_driver_irq_preinstall(struct drm_device * dev);
137extern void via_driver_irq_postinstall(struct drm_device * dev);
138extern void via_driver_irq_uninstall(struct drm_device * dev);
139
140extern int via_dma_cleanup(struct drm_device * dev);
141extern void via_init_command_verifier(void);
142extern int via_driver_dma_quiescent(struct drm_device * dev);
143extern void via_init_futex(drm_via_private_t * dev_priv);
144extern void via_cleanup_futex(drm_via_private_t * dev_priv);
145extern void via_release_futex(drm_via_private_t * dev_priv, int context);
146
147extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
148extern void via_lastclose(struct drm_device *dev);
149
150extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
151extern void via_init_dmablit(struct drm_device *dev);
152
153#endif
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
new file mode 100644
index 000000000000..c6bb978a1106
--- /dev/null
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -0,0 +1,377 @@
1/* via_irq.c
2 *
3 * Copyright 2004 BEAM Ltd.
4 * Copyright 2002 Tungsten Graphics, Inc.
5 * Copyright 2005 Thomas Hellstrom.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Terry Barnaby <terry1@beam.ltd.uk>
30 * Keith Whitwell <keith@tungstengraphics.com>
31 * Thomas Hellstrom <unichrome@shipmail.org>
32 *
33 * This code provides standard DRM access to the Via Unichrome / Pro vertical blank
34 * interrupt, as well as an infrastructure for handling other interrupts of the chip.
35 * The refresh rate is also calculated for video playback sync purposes.
36 */
37
38#include "drmP.h"
39#include "drm.h"
40#include "via_drm.h"
41#include "via_drv.h"
42
43#define VIA_REG_INTERRUPT 0x200
44
45/* VIA_REG_INTERRUPT */
46#define VIA_IRQ_GLOBAL (1 << 31)
47#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
48#define VIA_IRQ_VBLANK_PENDING (1 << 3)
49#define VIA_IRQ_HQV0_ENABLE (1 << 11)
50#define VIA_IRQ_HQV1_ENABLE (1 << 25)
51#define VIA_IRQ_HQV0_PENDING (1 << 9)
52#define VIA_IRQ_HQV1_PENDING (1 << 10)
53#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
54#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
55#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
56#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
57#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
58#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
59#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
60#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
61
62
63/*
64 * Device-specific IRQs go here. This type might need to be extended with
65 * a register field if there are multiple IRQ control registers.
66 * Currently we activate the HQV interrupts of Unichrome Pro group A.
67 */
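/*
 * Editor's note, inferred from usage: each maskarray_t entry holds, in
 * order, { enable bit, pending bit, status register, status mask, value the
 * masked status must equal for completion }; see via_driver_irq_preinstall()
 * and via_driver_irq_wait() below.
 */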
68
69static maskarray_t via_pro_group_a_irqs[] = {
70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
71 0x00000000},
72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
73 0x00000000},
74 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
75 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
76 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
77 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
78};
79static int via_num_pro_group_a =
80 ARRAY_SIZE(via_pro_group_a_irqs);
81static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
82
83static maskarray_t via_unichrome_irqs[] = {
84 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
85 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
86 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
87 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
88};
89static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
90static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
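/* The irqmap arrays translate the drm_via_irq numbers from via_drm.h into
 * indices into the maskarray tables above; -1 marks an interrupt that is
 * not available on that chip generation (see via_driver_irq_wait()). */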
91
92static unsigned time_diff(struct timeval *now, struct timeval *then)
93{
94 return (now->tv_usec >= then->tv_usec) ?
95 now->tv_usec - then->tv_usec :
96 1000000 - (then->tv_usec - now->tv_usec);
97}
98
99irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
100{
101 struct drm_device *dev = (struct drm_device *) arg;
102 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
103 u32 status;
104 int handled = 0;
105 struct timeval cur_vblank;
106 drm_via_irq_t *cur_irq = dev_priv->via_irqs;
107 int i;
108
109 status = VIA_READ(VIA_REG_INTERRUPT);
110 if (status & VIA_IRQ_VBLANK_PENDING) {
111 atomic_inc(&dev->vbl_received);
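		/* Sample a timestamp every 16 vblanks; the >> 4 below
		 * averages the elapsed time over those 16 periods. */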
112 if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
113 do_gettimeofday(&cur_vblank);
114 if (dev_priv->last_vblank_valid) {
115 dev_priv->usec_per_vblank =
116 time_diff(&cur_vblank,
117 &dev_priv->last_vblank) >> 4;
118 }
119 dev_priv->last_vblank = cur_vblank;
120 dev_priv->last_vblank_valid = 1;
121 }
122 if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
123 DRM_DEBUG("US per vblank is: %u\n",
124 dev_priv->usec_per_vblank);
125 }
126 DRM_WAKEUP(&dev->vbl_queue);
127 drm_vbl_send_signals(dev);
128 handled = 1;
129 }
130
131 for (i = 0; i < dev_priv->num_irqs; ++i) {
132 if (status & cur_irq->pending_mask) {
133 atomic_inc(&cur_irq->irq_received);
134 DRM_WAKEUP(&cur_irq->irq_queue);
135 handled = 1;
136 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
137 via_dmablit_handler(dev, 0, 1);
138 } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
139 via_dmablit_handler(dev, 1, 1);
140 }
141 }
142 cur_irq++;
143 }
144
145	/* Acknowledge interrupts */
146 VIA_WRITE(VIA_REG_INTERRUPT, status);
147
148 if (handled)
149 return IRQ_HANDLED;
150 else
151 return IRQ_NONE;
152}
153
154static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
155{
156 u32 status;
157
158 if (dev_priv) {
159		/* Acknowledge interrupts */
160 status = VIA_READ(VIA_REG_INTERRUPT);
161 VIA_WRITE(VIA_REG_INTERRUPT, status |
162 dev_priv->irq_pending_mask);
163 }
164}
165
166int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
167{
168 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
169 unsigned int cur_vblank;
170 int ret = 0;
171
172 DRM_DEBUG("\n");
173 if (!dev_priv) {
174 DRM_ERROR("called with no initialization\n");
175 return -EINVAL;
176 }
177
178 viadrv_acknowledge_irqs(dev_priv);
179
180	/* Assume that the user has missed the current sequence number
181	 * by about a day, rather than wanting to wait years' worth of
182	 * vertical blanks: differences of up to 1 << 23 count as a miss.
183	 */
184
185 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
186 (((cur_vblank = atomic_read(&dev->vbl_received)) -
187 *sequence) <= (1 << 23)));
188
189 *sequence = cur_vblank;
190 return ret;
191}
192
193static int
194via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
195 unsigned int *sequence)
196{
197 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
198 unsigned int cur_irq_sequence;
199 drm_via_irq_t *cur_irq;
200 int ret = 0;
201 maskarray_t *masks;
202 int real_irq;
203
204 DRM_DEBUG("\n");
205
206 if (!dev_priv) {
207 DRM_ERROR("called with no initialization\n");
208 return -EINVAL;
209 }
210
211 if (irq >= drm_via_irq_num) {
212 DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
213 return -EINVAL;
214 }
215
216 real_irq = dev_priv->irq_map[irq];
217
218 if (real_irq < 0) {
219 DRM_ERROR("Video IRQ %d not available on this hardware.\n",
220 irq);
221 return -EINVAL;
222 }
223
224 masks = dev_priv->irq_masks;
225 cur_irq = dev_priv->via_irqs + real_irq;
226
227 if (masks[real_irq][2] && !force_sequence) {
228 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
229			    ((VIA_READ(masks[real_irq][2]) & masks[real_irq][3]) ==
230			     masks[real_irq][4]));
231 cur_irq_sequence = atomic_read(&cur_irq->irq_received);
232 } else {
233 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
234 (((cur_irq_sequence =
235 atomic_read(&cur_irq->irq_received)) -
236 *sequence) <= (1 << 23)));
237 }
238 *sequence = cur_irq_sequence;
239 return ret;
240}
241
242/*
243 * drm_dma.h hooks
244 */
245
246void via_driver_irq_preinstall(struct drm_device * dev)
247{
248 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
249 u32 status;
250 drm_via_irq_t *cur_irq;
251 int i;
252
253 DRM_DEBUG("dev_priv: %p\n", dev_priv);
254 if (dev_priv) {
255 cur_irq = dev_priv->via_irqs;
256
257 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
258 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
259
260 if (dev_priv->chipset == VIA_PRO_GROUP_A ||
261 dev_priv->chipset == VIA_DX9_0) {
262 dev_priv->irq_masks = via_pro_group_a_irqs;
263 dev_priv->num_irqs = via_num_pro_group_a;
264 dev_priv->irq_map = via_irqmap_pro_group_a;
265 } else {
266 dev_priv->irq_masks = via_unichrome_irqs;
267 dev_priv->num_irqs = via_num_unichrome;
268 dev_priv->irq_map = via_irqmap_unichrome;
269 }
270
271 for (i = 0; i < dev_priv->num_irqs; ++i) {
272 atomic_set(&cur_irq->irq_received, 0);
273 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
274 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
275 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
276 dev_priv->irq_enable_mask |= cur_irq->enable_mask;
277 dev_priv->irq_pending_mask |= cur_irq->pending_mask;
278 cur_irq++;
279
280 DRM_DEBUG("Initializing IRQ %d\n", i);
281 }
282
283 dev_priv->last_vblank_valid = 0;
284
285 /* Clear VSync interrupt regs */
286 status = VIA_READ(VIA_REG_INTERRUPT);
287 VIA_WRITE(VIA_REG_INTERRUPT, status &
288 ~(dev_priv->irq_enable_mask));
289
290 /* Clear bits if they're already high */
291 viadrv_acknowledge_irqs(dev_priv);
292 }
293}
294
295void via_driver_irq_postinstall(struct drm_device * dev)
296{
297 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
298 u32 status;
299
300 DRM_DEBUG("\n");
301 if (dev_priv) {
302 status = VIA_READ(VIA_REG_INTERRUPT);
303 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
304 | dev_priv->irq_enable_mask);
305
306		/* Some magic, oh for some data sheets! */
307
308 VIA_WRITE8(0x83d4, 0x11);
309 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
310
311 }
312}
313
314void via_driver_irq_uninstall(struct drm_device * dev)
315{
316 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
317 u32 status;
318
319 DRM_DEBUG("\n");
320 if (dev_priv) {
321
322		/* Some more magic, oh for some data sheets! */
323
324 VIA_WRITE8(0x83d4, 0x11);
325 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
326
327 status = VIA_READ(VIA_REG_INTERRUPT);
328 VIA_WRITE(VIA_REG_INTERRUPT, status &
329 ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
330 }
331}
332
333int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
334{
335 drm_via_irqwait_t *irqwait = data;
336 struct timeval now;
337 int ret = 0;
338 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
339 drm_via_irq_t *cur_irq = dev_priv->via_irqs;
340 int force_sequence;
341
342 if (!dev->irq)
343 return -EINVAL;
344
345 if (irqwait->request.irq >= dev_priv->num_irqs) {
346 DRM_ERROR("Trying to wait on unknown irq %d\n",
347 irqwait->request.irq);
348 return -EINVAL;
349 }
350
351 cur_irq += irqwait->request.irq;
352
353 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
354 case VIA_IRQ_RELATIVE:
355 irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
356 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
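		/* fall through */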
357 case VIA_IRQ_ABSOLUTE:
358 break;
359 default:
360 return -EINVAL;
361 }
362
363 if (irqwait->request.type & VIA_IRQ_SIGNAL) {
364 DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
365 return -EINVAL;
366 }
367
368 force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
369
370 ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
371 &irqwait->request.sequence);
372 do_gettimeofday(&now);
373 irqwait->reply.tval_sec = now.tv_sec;
374 irqwait->reply.tval_usec = now.tv_usec;
375
376 return ret;
377}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
new file mode 100644
index 000000000000..a967556be014
--- /dev/null
+++ b/drivers/gpu/drm/via/via_map.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#include "drmP.h"
25#include "via_drm.h"
26#include "via_drv.h"
27
28static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
29{
30 drm_via_private_t *dev_priv = dev->dev_private;
31
32 DRM_DEBUG("\n");
33
34 dev_priv->sarea = drm_getsarea(dev);
35 if (!dev_priv->sarea) {
36 DRM_ERROR("could not find sarea!\n");
37 dev->dev_private = (void *)dev_priv;
38 via_do_cleanup_map(dev);
39 return -EINVAL;
40 }
41
42 dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
43 if (!dev_priv->fb) {
44 DRM_ERROR("could not find framebuffer!\n");
45 dev->dev_private = (void *)dev_priv;
46 via_do_cleanup_map(dev);
47 return -EINVAL;
48 }
49 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
50 if (!dev_priv->mmio) {
51 DRM_ERROR("could not find mmio region!\n");
52 dev->dev_private = (void *)dev_priv;
53 via_do_cleanup_map(dev);
54 return -EINVAL;
55 }
56
57 dev_priv->sarea_priv =
58 (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
59 init->sarea_priv_offset);
60
61 dev_priv->agpAddr = init->agpAddr;
62
63 via_init_futex(dev_priv);
64
65 via_init_dmablit(dev);
66
67 dev->dev_private = (void *)dev_priv;
68 return 0;
69}
70
71int via_do_cleanup_map(struct drm_device * dev)
72{
73 via_dma_cleanup(dev);
74
75 return 0;
76}
77
78int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
79{
80 drm_via_init_t *init = data;
81
82 DRM_DEBUG("\n");
83
84 switch (init->func) {
85 case VIA_INIT_MAP:
86 return via_do_init_map(dev, init);
87 case VIA_CLEANUP_MAP:
88 return via_do_cleanup_map(dev);
89 }
90
91 return -EINVAL;
92}
93
94int via_driver_load(struct drm_device *dev, unsigned long chipset)
95{
96 drm_via_private_t *dev_priv;
97 int ret = 0;
98
99 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
100 if (dev_priv == NULL)
101 return -ENOMEM;
102
103 dev->dev_private = (void *)dev_priv;
104
105 dev_priv->chipset = chipset;
106
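	/* Two memory types are managed (VIA_MEM_VIDEO and VIA_MEM_AGP); the
	 * 12 and 8 are hash-table orders for drm_sman's user and owner
	 * hashes (editor's note; meaning per the drm_sman interface). */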
107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
108 if (ret) {
109 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
110 }
111 return ret;
112}
113
114int via_driver_unload(struct drm_device *dev)
115{
116 drm_via_private_t *dev_priv = dev->dev_private;
117
118 drm_sman_takedown(&dev_priv->sman);
119
120 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
121
122 return 0;
123}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
new file mode 100644
index 000000000000..e64094916e4f
--- /dev/null
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
3 * All rights reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
26 */
27
28#include "drmP.h"
29#include "via_drm.h"
30#include "via_drv.h"
31#include "drm_sman.h"
32
33#define VIA_MM_ALIGN_SHIFT 4
34#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
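/* Memory is managed in 16-byte units: requested sizes are rounded up with
 * VIA_MM_ALIGN_MASK, and manager offsets are scaled back to byte addresses
 * with VIA_MM_ALIGN_SHIFT (see via_mem_alloc() below). */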
35
36int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{
38 drm_via_agp_t *agp = data;
39 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
40 int ret;
41
42 mutex_lock(&dev->struct_mutex);
43 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
44 agp->size >> VIA_MM_ALIGN_SHIFT);
45
46 if (ret) {
47 DRM_ERROR("AGP memory manager initialisation error\n");
48 mutex_unlock(&dev->struct_mutex);
49 return ret;
50 }
51
52 dev_priv->agp_initialized = 1;
53 dev_priv->agp_offset = agp->offset;
54 mutex_unlock(&dev->struct_mutex);
55
56 DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
57 return 0;
58}
59
60int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
61{
62 drm_via_fb_t *fb = data;
63 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
64 int ret;
65
66 mutex_lock(&dev->struct_mutex);
67 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
68 fb->size >> VIA_MM_ALIGN_SHIFT);
69
70 if (ret) {
71 DRM_ERROR("VRAM memory manager initialisation error\n");
72 mutex_unlock(&dev->struct_mutex);
73 return ret;
74 }
75
76 dev_priv->vram_initialized = 1;
77 dev_priv->vram_offset = fb->offset;
78
79 mutex_unlock(&dev->struct_mutex);
80 DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
81
82 return 0;
83
84}
85
86int via_final_context(struct drm_device *dev, int context)
87{
88 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
89
90 via_release_futex(dev_priv, context);
91
92 /* Linux specific until context tracking code gets ported to BSD */
93 /* Last context, perform cleanup */
94 if (dev->ctx_count == 1 && dev->dev_private) {
95 DRM_DEBUG("Last Context\n");
96 if (dev->irq)
97 drm_irq_uninstall(dev);
98 via_cleanup_futex(dev_priv);
99 via_do_cleanup_map(dev);
100 }
101 return 1;
102}
103
104void via_lastclose(struct drm_device *dev)
105{
106 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
107
108 if (!dev_priv)
109 return;
110
111 mutex_lock(&dev->struct_mutex);
112 drm_sman_cleanup(&dev_priv->sman);
113 dev_priv->vram_initialized = 0;
114 dev_priv->agp_initialized = 0;
115 mutex_unlock(&dev->struct_mutex);
116}
117
118int via_mem_alloc(struct drm_device *dev, void *data,
119 struct drm_file *file_priv)
120{
121 drm_via_mem_t *mem = data;
122 int retval = 0;
123 struct drm_memblock_item *item;
124 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
125 unsigned long tmpSize;
126
127 if (mem->type > VIA_MEM_AGP) {
128 DRM_ERROR("Unknown memory type allocation\n");
129 return -EINVAL;
130 }
131 mutex_lock(&dev->struct_mutex);
132 if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
133 dev_priv->agp_initialized)) {
134 DRM_ERROR
135 ("Attempt to allocate from uninitialized memory manager.\n");
136 mutex_unlock(&dev->struct_mutex);
137 return -EINVAL;
138 }
139
140 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
141 item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
142 (unsigned long)file_priv);
143 mutex_unlock(&dev->struct_mutex);
144 if (item) {
145 mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
146 dev_priv->vram_offset : dev_priv->agp_offset) +
147 (item->mm->
148 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
149 mem->index = item->user_hash.key;
150 } else {
151 mem->offset = 0;
152 mem->size = 0;
153 mem->index = 0;
154 DRM_DEBUG("Video memory allocation failed\n");
155 retval = -ENOMEM;
156 }
157
158 return retval;
159}
160
161int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
162{
163 drm_via_private_t *dev_priv = dev->dev_private;
164 drm_via_mem_t *mem = data;
165 int ret;
166
167 mutex_lock(&dev->struct_mutex);
168 ret = drm_sman_free_key(&dev_priv->sman, mem->index);
169 mutex_unlock(&dev->struct_mutex);
170 DRM_DEBUG("free = 0x%lx\n", mem->index);
171
172 return ret;
173}
174
175
176void via_reclaim_buffers_locked(struct drm_device * dev,
177 struct drm_file *file_priv)
178{
179 drm_via_private_t *dev_priv = dev->dev_private;
180
181 mutex_lock(&dev->struct_mutex);
182 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
183 mutex_unlock(&dev->struct_mutex);
184 return;
185 }
186
187 if (dev->driver->dma_quiescent) {
188 dev->driver->dma_quiescent(dev);
189 }
190
191 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
192 mutex_unlock(&dev->struct_mutex);
193 return;
194}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
new file mode 100644
index 000000000000..46a579198747
--- /dev/null
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -0,0 +1,1116 @@
1/*
2 * Copyright 2004 The Unichrome Project. All Rights Reserved.
3 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Thomas Hellstrom 2004, 2005.
25 * This code was written using docs obtained under NDA from VIA Inc.
26 *
27 * Don't run this code directly on an AGP buffer. Due to cache problems it will
28 * be very slow.
29 */
30
31#include "via_3d_reg.h"
32#include "drmP.h"
33#include "drm.h"
34#include "via_drm.h"
35#include "via_verifier.h"
36#include "via_drv.h"
37
38typedef enum {
39 state_command,
40 state_header2,
41 state_header1,
42 state_vheader5,
43 state_vheader6,
44 state_error
45} verifier_state_t;
46
47typedef enum {
48 no_check = 0,
49 check_for_header2,
50 check_for_header1,
51 check_for_header2_err,
52 check_for_header1_err,
53 check_for_fire,
54 check_z_buffer_addr0,
55 check_z_buffer_addr1,
56 check_z_buffer_addr_mode,
57 check_destination_addr0,
58 check_destination_addr1,
59 check_destination_addr_mode,
60 check_for_dummy,
61 check_for_dd,
62 check_texture_addr0,
63 check_texture_addr1,
64 check_texture_addr2,
65 check_texture_addr3,
66 check_texture_addr4,
67 check_texture_addr5,
68 check_texture_addr6,
69 check_texture_addr7,
70 check_texture_addr8,
71 check_texture_addr_mode,
72 check_for_vertex_count,
73 check_number_texunits,
74 forbidden_command
75} hazard_t;
76
77/*
78 * Associates each hazard above with a possible multi-command
79 * sequence; the table is indexed by hazard_t. For example, an address
80 * may be split over multiple commands and then needs to be checked at
81 * the first command that does not include any part of the address.
82 */
83
84static drm_via_sequence_t seqs[] = {
85 no_sequence,
86 no_sequence,
87 no_sequence,
88 no_sequence,
89 no_sequence,
90 no_sequence,
91 z_address,
92 z_address,
93 z_address,
94 dest_address,
95 dest_address,
96 dest_address,
97 no_sequence,
98 no_sequence,
99 tex_address,
100 tex_address,
101 tex_address,
102 tex_address,
103 tex_address,
104 tex_address,
105 tex_address,
106 tex_address,
107 tex_address,
108 tex_address,
109 no_sequence
110};
111
112typedef struct {
113 unsigned int code;
114 hazard_t hz;
115} hz_init_t;
116
117static hz_init_t init_table1[] = {
118 {0xf2, check_for_header2_err},
119 {0xf0, check_for_header1_err},
120 {0xee, check_for_fire},
121 {0xcc, check_for_dummy},
122 {0xdd, check_for_dd},
123 {0x00, no_check},
124 {0x10, check_z_buffer_addr0},
125 {0x11, check_z_buffer_addr1},
126 {0x12, check_z_buffer_addr_mode},
127 {0x13, no_check},
128 {0x14, no_check},
129 {0x15, no_check},
130 {0x23, no_check},
131 {0x24, no_check},
132 {0x33, no_check},
133 {0x34, no_check},
134 {0x35, no_check},
135 {0x36, no_check},
136 {0x37, no_check},
137 {0x38, no_check},
138 {0x39, no_check},
139 {0x3A, no_check},
140 {0x3B, no_check},
141 {0x3C, no_check},
142 {0x3D, no_check},
143 {0x3E, no_check},
144 {0x40, check_destination_addr0},
145 {0x41, check_destination_addr1},
146 {0x42, check_destination_addr_mode},
147 {0x43, no_check},
148 {0x44, no_check},
149 {0x50, no_check},
150 {0x51, no_check},
151 {0x52, no_check},
152 {0x53, no_check},
153 {0x54, no_check},
154 {0x55, no_check},
155 {0x56, no_check},
156 {0x57, no_check},
157 {0x58, no_check},
158 {0x70, no_check},
159 {0x71, no_check},
160 {0x78, no_check},
161 {0x79, no_check},
162 {0x7A, no_check},
163 {0x7B, no_check},
164 {0x7C, no_check},
165 {0x7D, check_for_vertex_count}
166};
167
168static hz_init_t init_table2[] = {
169 {0xf2, check_for_header2_err},
170 {0xf0, check_for_header1_err},
171 {0xee, check_for_fire},
172 {0xcc, check_for_dummy},
173 {0x00, check_texture_addr0},
174 {0x01, check_texture_addr0},
175 {0x02, check_texture_addr0},
176 {0x03, check_texture_addr0},
177 {0x04, check_texture_addr0},
178 {0x05, check_texture_addr0},
179 {0x06, check_texture_addr0},
180 {0x07, check_texture_addr0},
181 {0x08, check_texture_addr0},
182 {0x09, check_texture_addr0},
183 {0x20, check_texture_addr1},
184 {0x21, check_texture_addr1},
185 {0x22, check_texture_addr1},
186 {0x23, check_texture_addr4},
187 {0x2B, check_texture_addr3},
188 {0x2C, check_texture_addr3},
189 {0x2D, check_texture_addr3},
190 {0x2E, check_texture_addr3},
191 {0x2F, check_texture_addr3},
192 {0x30, check_texture_addr3},
193 {0x31, check_texture_addr3},
194 {0x32, check_texture_addr3},
195 {0x33, check_texture_addr3},
196 {0x34, check_texture_addr3},
197 {0x4B, check_texture_addr5},
198 {0x4C, check_texture_addr6},
199 {0x51, check_texture_addr7},
200 {0x52, check_texture_addr8},
201 {0x77, check_texture_addr2},
202 {0x78, no_check},
203 {0x79, no_check},
204 {0x7A, no_check},
205 {0x7B, check_texture_addr_mode},
206 {0x7C, no_check},
207 {0x7D, no_check},
208 {0x7E, no_check},
209 {0x7F, no_check},
210 {0x80, no_check},
211 {0x81, no_check},
212 {0x82, no_check},
213 {0x83, no_check},
214 {0x85, no_check},
215 {0x86, no_check},
216 {0x87, no_check},
217 {0x88, no_check},
218 {0x89, no_check},
219 {0x8A, no_check},
220 {0x90, no_check},
221 {0x91, no_check},
222 {0x92, no_check},
223 {0x93, no_check}
224};
225
226static hz_init_t init_table3[] = {
227 {0xf2, check_for_header2_err},
228 {0xf0, check_for_header1_err},
229 {0xcc, check_for_dummy},
230 {0x00, check_number_texunits}
231};
232
233static hazard_t table1[256];
234static hazard_t table2[256];
235static hazard_t table3[256];
236
237static __inline__ int
238eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
239{
240 if ((buf_end - *buf) >= num_words) {
241 *buf += num_words;
242 return 0;
243 }
244 DRM_ERROR("Illegal termination of DMA command buffer\n");
245 return 1;
246}
247
248/*
249 * Partially stolen from drm_memory.h
250 */
251
252static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
253 unsigned long offset,
254 unsigned long size,
255 struct drm_device * dev)
256{
257 struct drm_map_list *r_list;
258 drm_local_map_t *map = seq->map_cache;
259
260 if (map && map->offset <= offset
261 && (offset + size) <= (map->offset + map->size)) {
262 return map;
263 }
264
265 list_for_each_entry(r_list, &dev->maplist, head) {
266 map = r_list->map;
267 if (!map)
268 continue;
269 if (map->offset <= offset
270 && (offset + size) <= (map->offset + map->size)
271 && !(map->flags & _DRM_RESTRICTED)
272 && (map->type == _DRM_AGP)) {
273 seq->map_cache = map;
274 return map;
275 }
276 }
277 return NULL;
278}
279
280/*
281 * Require that all AGP texture levels reside in the same AGP map, which should
282 * be mappable by the client. This is not a big restriction.
283 * FIXME: To actually enforce this security policy strictly, drm_rmmap
284 * would have to wait for dma quiescent before removing an AGP map.
285 * The via_drm_lookup_agp_map call in reality seems to take
286 * very little CPU time.
287 */
288
289static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
290{
291 switch (cur_seq->unfinished) {
292 case z_address:
293 DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
294 break;
295 case dest_address:
296 DRM_DEBUG("Destination start address is 0x%x\n",
297 cur_seq->d_addr);
298 break;
299 case tex_address:
300 if (cur_seq->agp_texture) {
301 unsigned start =
302 cur_seq->tex_level_lo[cur_seq->texture];
303 unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
304 unsigned long lo = ~0, hi = 0, tmp;
305 uint32_t *addr, *pitch, *height, tex;
306 unsigned i;
307 int npot;
308
309 if (end > 9)
310 end = 9;
311 if (start > 9)
312 start = 9;
313
314 addr =
315 &(cur_seq->t_addr[tex = cur_seq->texture][start]);
316 pitch = &(cur_seq->pitch[tex][start]);
317 height = &(cur_seq->height[tex][start]);
318 npot = cur_seq->tex_npot[tex];
319 for (i = start; i <= end; ++i) {
320 tmp = *addr++;
321 if (tmp < lo)
322 lo = tmp;
323 if (i == 0 && npot)
324 tmp += (*height++ * *pitch++);
325 else
326 tmp += (*height++ << *pitch++);
327 if (tmp > hi)
328 hi = tmp;
329 }
330
331 if (!via_drm_lookup_agp_map
332 (cur_seq, lo, hi - lo, cur_seq->dev)) {
333 DRM_ERROR
334 ("AGP texture is not in allowed map\n");
335 return 2;
336 }
337 }
338 break;
339 default:
340 break;
341 }
342 cur_seq->unfinished = no_sequence;
343 return 0;
344}
345
346static __inline__ int
347investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
348{
349 register uint32_t tmp, *tmp_addr;
350
351 if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
352 int ret;
353 if ((ret = finish_current_sequence(cur_seq)))
354 return ret;
355 }
356
357 switch (hz) {
358 case check_for_header2:
359 if (cmd == HALCYON_HEADER2)
360 return 1;
361 return 0;
362 case check_for_header1:
363 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
364 return 1;
365 return 0;
366 case check_for_header2_err:
367 if (cmd == HALCYON_HEADER2)
368 return 1;
369 DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
370 break;
371 case check_for_header1_err:
372 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
373 return 1;
374 DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
375 break;
376 case check_for_fire:
377 if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
378 return 1;
379 DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
380 break;
381 case check_for_dummy:
382 if (HC_DUMMY == cmd)
383 return 0;
384 DRM_ERROR("Illegal DMA HC_DUMMY command\n");
385 break;
386 case check_for_dd:
387 if (0xdddddddd == cmd)
388 return 0;
389 DRM_ERROR("Illegal DMA 0xdddddddd command\n");
390 break;
391 case check_z_buffer_addr0:
392 cur_seq->unfinished = z_address;
393 cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
394 (cmd & 0x00FFFFFF);
395 return 0;
396 case check_z_buffer_addr1:
397 cur_seq->unfinished = z_address;
398 cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
399 ((cmd & 0xFF) << 24);
400 return 0;
401 case check_z_buffer_addr_mode:
402 cur_seq->unfinished = z_address;
403 if ((cmd & 0x0000C000) == 0)
404 return 0;
405 DRM_ERROR("Attempt to place Z buffer in system memory\n");
406 return 2;
407 case check_destination_addr0:
408 cur_seq->unfinished = dest_address;
409 cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
410 (cmd & 0x00FFFFFF);
411 return 0;
412 case check_destination_addr1:
413 cur_seq->unfinished = dest_address;
414 cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
415 ((cmd & 0xFF) << 24);
416 return 0;
417 case check_destination_addr_mode:
418 cur_seq->unfinished = dest_address;
419 if ((cmd & 0x0000C000) == 0)
420 return 0;
421 DRM_ERROR
422 ("Attempt to place 3D drawing buffer in system memory\n");
423 return 2;
424 case check_texture_addr0:
425 cur_seq->unfinished = tex_address;
426 tmp = (cmd >> 24);
427 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
428 *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
429 return 0;
430 case check_texture_addr1:
431 cur_seq->unfinished = tex_address;
432 tmp = ((cmd >> 24) - 0x20);
433 tmp += tmp << 1;
434 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
435 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
436 tmp_addr++;
437 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
438 tmp_addr++;
439 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
440 return 0;
441 case check_texture_addr2:
442 cur_seq->unfinished = tex_address;
443 cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
444 cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
445 return 0;
446 case check_texture_addr3:
447 cur_seq->unfinished = tex_address;
448 tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
449 if (tmp == 0 &&
450 (cmd & HC_HTXnEnPit_MASK)) {
451 cur_seq->pitch[cur_seq->texture][tmp] =
452 (cmd & HC_HTXnLnPit_MASK);
453 cur_seq->tex_npot[cur_seq->texture] = 1;
454 } else {
455 cur_seq->pitch[cur_seq->texture][tmp] =
456 (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
457 cur_seq->tex_npot[cur_seq->texture] = 0;
458 if (cmd & 0x000FFFFF) {
459 DRM_ERROR
460 ("Unimplemented texture level 0 pitch mode.\n");
461 return 2;
462 }
463 }
464 return 0;
465 case check_texture_addr4:
466 cur_seq->unfinished = tex_address;
467 tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
468 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
469 return 0;
470 case check_texture_addr5:
471 case check_texture_addr6:
472 cur_seq->unfinished = tex_address;
473 /*
474 * Texture width. We don't care since we have the pitch.
475 */
476 return 0;
477 case check_texture_addr7:
478 cur_seq->unfinished = tex_address;
479 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
480 tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
481 tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
482 tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
483 tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
484 tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
485 tmp_addr[0] = 1 << (cmd & 0x0000000F);
486 return 0;
487 case check_texture_addr8:
488 cur_seq->unfinished = tex_address;
489 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
490 tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
491 tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
492 tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
493 tmp_addr[6] = 1 << (cmd & 0x0000000F);
494 return 0;
495 case check_texture_addr_mode:
496 cur_seq->unfinished = tex_address;
497 if (2 == (tmp = cmd & 0x00000003)) {
498 DRM_ERROR
499 ("Attempt to fetch texture from system memory.\n");
500 return 2;
501 }
502 cur_seq->agp_texture = (tmp == 3);
503 cur_seq->tex_palette_size[cur_seq->texture] =
504		    (cmd >> 16) & 0x00000007;
505 return 0;
506 case check_for_vertex_count:
507 cur_seq->vertex_count = cmd & 0x0000FFFF;
508 return 0;
509 case check_number_texunits:
510 cur_seq->multitex = (cmd >> 3) & 1;
511 return 0;
512 default:
513 DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
514 return 2;
515 }
516 return 2;
517}
518
519static __inline__ int
520via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
521 drm_via_state_t * cur_seq)
522{
523 drm_via_private_t *dev_priv =
524 (drm_via_private_t *) cur_seq->dev->dev_private;
525 uint32_t a_fire, bcmd, dw_count;
526 int ret = 0;
527 int have_fire;
528 const uint32_t *buf = *buffer;
529
530 while (buf < buf_end) {
531 have_fire = 0;
532 if ((buf_end - buf) < 2) {
533 DRM_ERROR
534 ("Unexpected termination of primitive list.\n");
535 ret = 1;
536 break;
537 }
538 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
539 break;
540 bcmd = *buf++;
541 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
542 DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
543 *buf);
544 ret = 1;
545 break;
546 }
547 a_fire =
548 *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
549 HC_HE3Fire_MASK;
550
551 /*
552 * How many dwords per vertex ?
553 */
554
555 if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
556 DRM_ERROR("Illegal B command vertex data for AGP.\n");
557 ret = 1;
558 break;
559 }
560
561 dw_count = 0;
562 if (bcmd & (1 << 7))
563 dw_count += (cur_seq->multitex) ? 2 : 1;
564 if (bcmd & (1 << 8))
565 dw_count += (cur_seq->multitex) ? 2 : 1;
566 if (bcmd & (1 << 9))
567 dw_count++;
568 if (bcmd & (1 << 10))
569 dw_count++;
570 if (bcmd & (1 << 11))
571 dw_count++;
572 if (bcmd & (1 << 12))
573 dw_count++;
574 if (bcmd & (1 << 13))
575 dw_count++;
576 if (bcmd & (1 << 14))
577 dw_count++;
578
579 while (buf < buf_end) {
580 if (*buf == a_fire) {
581 if (dev_priv->num_fire_offsets >=
582 VIA_FIRE_BUF_SIZE) {
583 DRM_ERROR("Fire offset buffer full.\n");
584 ret = 1;
585 break;
586 }
587 dev_priv->fire_offsets[dev_priv->
588 num_fire_offsets++] =
589 buf;
590 have_fire = 1;
591 buf++;
592 if (buf < buf_end && *buf == a_fire)
593 buf++;
594 break;
595 }
596 if ((*buf == HALCYON_HEADER2) ||
597 ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
598 DRM_ERROR("Missing Vertex Fire command, "
599 "Stray Vertex Fire command or verifier "
600 "lost sync.\n");
601 ret = 1;
602 break;
603 }
604 if ((ret = eat_words(&buf, buf_end, dw_count)))
605 break;
606 }
607 if (buf >= buf_end && !have_fire) {
608 DRM_ERROR("Missing Vertex Fire command or verifier "
609 "lost sync.\n");
610 ret = 1;
611 break;
612 }
613 if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
614 DRM_ERROR("AGP Primitive list end misaligned.\n");
615 ret = 1;
616 break;
617 }
618 }
619 *buffer = buf;
620 return ret;
621}
622
623static __inline__ verifier_state_t
624via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
625 drm_via_state_t * hc_state)
626{
627 uint32_t cmd;
628 int hz_mode;
629 hazard_t hz;
630 const uint32_t *buf = *buffer;
631 const hazard_t *hz_table;
632
633 if ((buf_end - buf) < 2) {
634 DRM_ERROR
635 ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
636 return state_error;
637 }
638 buf++;
639 cmd = (*buf++ & 0xFFFF0000) >> 16;
640
641 switch (cmd) {
642 case HC_ParaType_CmdVdata:
643 if (via_check_prim_list(&buf, buf_end, hc_state))
644 return state_error;
645 *buffer = buf;
646 return state_command;
647 case HC_ParaType_NotTex:
648 hz_table = table1;
649 break;
650 case HC_ParaType_Tex:
651 hc_state->texture = 0;
652 hz_table = table2;
653 break;
654 case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
655 hc_state->texture = 1;
656 hz_table = table2;
657 break;
658 case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
659 hz_table = table3;
660 break;
661 case HC_ParaType_Auto:
662 if (eat_words(&buf, buf_end, 2))
663 return state_error;
664 *buffer = buf;
665 return state_command;
666 case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
667 if (eat_words(&buf, buf_end, 32))
668 return state_error;
669 *buffer = buf;
670 return state_command;
671 case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
672 case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
673 DRM_ERROR("Texture palettes are rejected because of "
674 "lack of info how to determine their size.\n");
675 return state_error;
676 case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
677 DRM_ERROR("Fog factor palettes are rejected because of "
678 "lack of info how to determine their size.\n");
679 return state_error;
680 default:
681
682 /*
683		 * There are some unimplemented HC_ParaTypes here that
684 * need to be implemented if the Mesa driver is extended.
685 */
686
687 DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
688 "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
689 cmd, *(buf - 2));
690 *buffer = buf;
691 return state_error;
692 }
693
694 while (buf < buf_end) {
695 cmd = *buf++;
696 if ((hz = hz_table[cmd >> 24])) {
697 if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
698 if (hz_mode == 1) {
699 buf--;
700 break;
701 }
702 return state_error;
703 }
704 } else if (hc_state->unfinished &&
705 finish_current_sequence(hc_state)) {
706 return state_error;
707 }
708 }
709 if (hc_state->unfinished && finish_current_sequence(hc_state)) {
710 return state_error;
711 }
712 *buffer = buf;
713 return state_command;
714}
715
716static __inline__ verifier_state_t
717via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
718 const uint32_t * buf_end, int *fire_count)
719{
720 uint32_t cmd;
721 const uint32_t *buf = *buffer;
722 const uint32_t *next_fire;
723 int burst = 0;
724
725 next_fire = dev_priv->fire_offsets[*fire_count];
726 buf++;
727 cmd = (*buf & 0xFFFF0000) >> 16;
728 VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
729 switch (cmd) {
730 case HC_ParaType_CmdVdata:
731 while ((buf < buf_end) &&
732 (*fire_count < dev_priv->num_fire_offsets) &&
733 (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
734 while (buf <= next_fire) {
735 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
736 (burst & 63), *buf++);
737 burst += 4;
738 }
739 if ((buf < buf_end)
740 && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
741 buf++;
742
743 if (++(*fire_count) < dev_priv->num_fire_offsets)
744 next_fire = dev_priv->fire_offsets[*fire_count];
745 }
746 break;
747 default:
748 while (buf < buf_end) {
749
750 if (*buf == HC_HEADER2 ||
751 (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
752 (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
753 (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
754 break;
755
756 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
757 (burst & 63), *buf++);
758 burst += 4;
759 }
760 }
761 *buffer = buf;
762 return state_command;
763}
764
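/*
 * Video DMA commands may only target the 2D / video register ranges.
 * Reject addresses in the 3D and command burst window (0x400-0xBFF),
 * the PCI DMA window (0xD00-0x12FF) and everything above 0x13FF, which
 * is treated as VGA register space.
 */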
765static __inline__ int verify_mmio_address(uint32_t address)
766{
767 if ((address > 0x3FF) && (address < 0xC00)) {
768 DRM_ERROR("Invalid VIDEO DMA command. "
769 "Attempt to access 3D- or command burst area.\n");
770 return 1;
771 } else if ((address > 0xCFF) && (address < 0x1300)) {
772 DRM_ERROR("Invalid VIDEO DMA command. "
773 "Attempt to access PCI DMA area.\n");
774 return 1;
775 } else if (address > 0x13FF) {
776 DRM_ERROR("Invalid VIDEO DMA command. "
777 "Attempt to access VGA registers.\n");
778 return 1;
779 }
780 return 0;
781}
782
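/*
 * The tail that pads a video command to a multiple of four dwords must
 * consist entirely of zero dwords.
 */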
783static __inline__ int
784verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
785 uint32_t dwords)
786{
787 const uint32_t *buf = *buffer;
788
789 if (buf_end - buf < dwords) {
790 DRM_ERROR("Illegal termination of video command.\n");
791 return 1;
792 }
793 while (dwords--) {
794 if (*buf++) {
795 DRM_ERROR("Illegal video command tail.\n");
796 return 1;
797 }
798 }
799 *buffer = buf;
800 return 0;
801}
802
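/*
 * Check a run of HALCYON_HEADER1 register writes. The low bits of each
 * header dword encode a register offset (in units of four bytes);
 * offsets that fall in the 3D / command burst window are refused, and
 * so is everything above 0xCFF, which is treated as VGA register space.
 */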
803static __inline__ verifier_state_t
804via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
805{
806 uint32_t cmd;
807 const uint32_t *buf = *buffer;
808 verifier_state_t ret = state_command;
809
810 while (buf < buf_end) {
811 cmd = *buf;
812 if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
813 (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
814 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
815 break;
816 DRM_ERROR("Invalid HALCYON_HEADER1 command. "
817 "Attempt to access 3D- or command burst area.\n");
818 ret = state_error;
819 break;
820 } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
821 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
822 break;
823 DRM_ERROR("Invalid HALCYON_HEADER1 command. "
824 "Attempt to access VGA registers.\n");
825 ret = state_error;
826 break;
827 } else {
828 buf += 2;
829 }
830 }
831 *buffer = buf;
832 return ret;
833}
834
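/*
 * Replay verified HALCYON_HEADER1 register writes: the low bits of each
 * header dword, shifted left by two, give the MMIO offset for the data
 * dword that follows it.
 */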
835static __inline__ verifier_state_t
836via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
837 const uint32_t * buf_end)
838{
839	uint32_t cmd;
840 const uint32_t *buf = *buffer;
841
842 while (buf < buf_end) {
843 cmd = *buf;
844 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
845 break;
846 VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
847 buf++;
848 }
849 *buffer = buf;
850 return state_command;
851}
852
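/*
 * Check a video header5 command: one MMIO destination address, a dword
 * count, two fixed header dwords (0x00F50000 and 0), the payload itself
 * and, when the count is not a multiple of four, a zero tail that pads
 * it to one.
 */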
853static __inline__ verifier_state_t
854via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
855{
856 uint32_t data;
857 const uint32_t *buf = *buffer;
858
859 if (buf_end - buf < 4) {
860 DRM_ERROR("Illegal termination of video header5 command\n");
861 return state_error;
862 }
863
864 data = *buf++ & ~VIA_VIDEOMASK;
865 if (verify_mmio_address(data))
866 return state_error;
867
868 data = *buf++;
869 if (*buf++ != 0x00F50000) {
870 DRM_ERROR("Illegal header5 header data\n");
871 return state_error;
872 }
873 if (*buf++ != 0x00000000) {
874 DRM_ERROR("Illegal header5 header data\n");
875 return state_error;
876 }
877 if (eat_words(&buf, buf_end, data))
878 return state_error;
879 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
880 return state_error;
881 *buffer = buf;
882 return state_command;
883
884}
885
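/*
 * Replay a verified header5 command: write every payload dword to the
 * single destination register and skip the zero tail, if any.
 */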
886static __inline__ verifier_state_t
887via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
888 const uint32_t * buf_end)
889{
890 uint32_t addr, count, i;
891 const uint32_t *buf = *buffer;
892
893 addr = *buf++ & ~VIA_VIDEOMASK;
894 i = count = *buf;
895 buf += 3;
896 while (i--) {
897 VIA_WRITE(addr, *buf++);
898 }
899 if (count & 3)
900 buf += 4 - (count & 3);
901 *buffer = buf;
902 return state_command;
903}
904
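/*
 * Check a video header6 command. Unlike header5 it carries a list of
 * (address, data) pairs, so every other payload dword is run through
 * verify_mmio_address(); the fixed header dwords here are 0x00F60000
 * and 0.
 */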
905static __inline__ verifier_state_t
906via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
907{
908 uint32_t data;
909 const uint32_t *buf = *buffer;
910 uint32_t i;
911
912 if (buf_end - buf < 4) {
913 DRM_ERROR("Illegal termination of video header6 command\n");
914 return state_error;
915 }
916 buf++;
917 data = *buf++;
918 if (*buf++ != 0x00F60000) {
919 DRM_ERROR("Illegal header6 header data\n");
920 return state_error;
921 }
922 if (*buf++ != 0x00000000) {
923 DRM_ERROR("Illegal header6 header data\n");
924 return state_error;
925 }
926 if ((buf_end - buf) < (data << 1)) {
927 DRM_ERROR("Illegal termination of video header6 command\n");
928 return state_error;
929 }
930 for (i = 0; i < data; ++i) {
931 if (verify_mmio_address(*buf++))
932 return state_error;
933 buf++;
934 }
935 data <<= 1;
936 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
937 return state_error;
938 *buffer = buf;
939 return state_command;
940}
941
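/*
 * Replay a verified header6 command: each payload pair is an MMIO
 * address followed by the value to write to it.
 */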
942static __inline__ verifier_state_t
943via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
944 const uint32_t * buf_end)
945{
946
947 uint32_t addr, count, i;
948 const uint32_t *buf = *buffer;
949
950 i = count = *++buf;
951 buf += 3;
952 while (i--) {
953 addr = *buf++;
954 VIA_WRITE(addr, *buf++);
955 }
956 count <<= 1;
957 if (count & 3)
958 buf += 4 - (count & 3);
959 *buffer = buf;
960 return state_command;
961}
962
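/*
 * Top-level entry point of the verifier: walk the command stream as a
 * state machine keyed on the header type of the next dword, restoring
 * the saved verifier state and returning -EINVAL on the first
 * violation. Video headers are only accepted on chipsets with CME video
 * (PRO_GROUP_A and DX9_0), and 3D commands are refused on DX9_0.
 */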
963int
964via_verify_command_stream(const uint32_t * buf, unsigned int size,
965 struct drm_device * dev, int agp)
966{
967
968 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
969 drm_via_state_t *hc_state = &dev_priv->hc_state;
970 drm_via_state_t saved_state = *hc_state;
971 uint32_t cmd;
972 const uint32_t *buf_end = buf + (size >> 2);
973 verifier_state_t state = state_command;
974 int cme_video;
975 int supported_3d;
976
977 cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
978 dev_priv->chipset == VIA_DX9_0);
979
980 supported_3d = dev_priv->chipset != VIA_DX9_0;
981
982 hc_state->dev = dev;
983 hc_state->unfinished = no_sequence;
984 hc_state->map_cache = NULL;
985 hc_state->agp = agp;
986 hc_state->buf_start = buf;
987 dev_priv->num_fire_offsets = 0;
988
989 while (buf < buf_end) {
990
991 switch (state) {
992 case state_header2:
993 state = via_check_header2(&buf, buf_end, hc_state);
994 break;
995 case state_header1:
996 state = via_check_header1(&buf, buf_end);
997 break;
998 case state_vheader5:
999 state = via_check_vheader5(&buf, buf_end);
1000 break;
1001 case state_vheader6:
1002 state = via_check_vheader6(&buf, buf_end);
1003 break;
1004 case state_command:
1005 if ((HALCYON_HEADER2 == (cmd = *buf)) &&
1006 supported_3d)
1007 state = state_header2;
1008 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
1009 state = state_header1;
1010 else if (cme_video
1011 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
1012 state = state_vheader5;
1013 else if (cme_video
1014 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
1015 state = state_vheader6;
1016 else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
1017 DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
1018 state = state_error;
1019 } else {
1020 DRM_ERROR
1021 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
1022 cmd);
1023 state = state_error;
1024 }
1025 break;
1026 case state_error:
1027 default:
1028 *hc_state = saved_state;
1029 return -EINVAL;
1030 }
1031 }
1032 if (state == state_error) {
1033 *hc_state = saved_state;
1034 return -EINVAL;
1035 }
1036 return 0;
1037}
1038
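/*
 * Walk a previously verified command stream and replay it through MMIO
 * with the via_parse_* helpers above, tracking the fire offsets that
 * were recorded during verification.
 */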
1039int
1040via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
1041 unsigned int size)
1042{
1043
1044 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
1045 uint32_t cmd;
1046 const uint32_t *buf_end = buf + (size >> 2);
1047 verifier_state_t state = state_command;
1048 int fire_count = 0;
1049
1050 while (buf < buf_end) {
1051
1052 switch (state) {
1053 case state_header2:
1054 state =
1055 via_parse_header2(dev_priv, &buf, buf_end,
1056 &fire_count);
1057 break;
1058 case state_header1:
1059 state = via_parse_header1(dev_priv, &buf, buf_end);
1060 break;
1061 case state_vheader5:
1062 state = via_parse_vheader5(dev_priv, &buf, buf_end);
1063 break;
1064 case state_vheader6:
1065 state = via_parse_vheader6(dev_priv, &buf, buf_end);
1066 break;
1067 case state_command:
1068 if (HALCYON_HEADER2 == (cmd = *buf))
1069 state = state_header2;
1070 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
1071 state = state_header1;
1072 else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
1073 state = state_vheader5;
1074 else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
1075 state = state_vheader6;
1076 else {
1077 DRM_ERROR
1078 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
1079 cmd);
1080 state = state_error;
1081 }
1082 break;
1083 case state_error:
1084 default:
1085 return -EINVAL;
1086 }
1087 }
1088 if (state == state_error) {
1089 return -EINVAL;
1090 }
1091 return 0;
1092}
1093
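/*
 * Build a 256-entry hazard table indexed by the top byte of a register
 * command: every opcode defaults to forbidden_command, and only the
 * entries listed in the init table are opened up with their associated
 * hazard.
 */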
1094static void
1095setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
1096{
1097 int i;
1098
1099 for (i = 0; i < 256; ++i) {
1100 table[i] = forbidden_command;
1101 }
1102
1103 for (i = 0; i < size; ++i) {
1104 table[init_table[i].code] = init_table[i].hz;
1105 }
1106}
1107
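/*
 * Build the three hazard tables used by via_check_header2().
 */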
1108void via_init_command_verifier(void)
1109{
1110 setup_hazard_table(init_table1, table1,
1111 sizeof(init_table1) / sizeof(hz_init_t));
1112 setup_hazard_table(init_table2, table2,
1113 sizeof(init_table2) / sizeof(hz_init_t));
1114 setup_hazard_table(init_table3, table3,
1115 sizeof(init_table3) / sizeof(hz_init_t));
1116}
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
new file mode 100644
index 000000000000..d6f8214b69f5
--- /dev/null
+++ b/drivers/gpu/drm/via/via_verifier.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2004 The Unichrome Project. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Thomas Hellström 2004.
24 */
25
26#ifndef _VIA_VERIFIER_H_
27#define _VIA_VERIFIER_H_
28
29typedef enum {
30 no_sequence = 0,
31 z_address,
32 dest_address,
33 tex_address
34} drm_via_sequence_t;
35
36typedef struct {
37 unsigned texture;
38 uint32_t z_addr;
39 uint32_t d_addr;
40 uint32_t t_addr[2][10];
41 uint32_t pitch[2][10];
42 uint32_t height[2][10];
43 uint32_t tex_level_lo[2];
44 uint32_t tex_level_hi[2];
45 uint32_t tex_palette_size[2];
46 uint32_t tex_npot[2];
47 drm_via_sequence_t unfinished;
48 int agp_texture;
49 int multitex;
50 struct drm_device *dev;
51 drm_local_map_t *map_cache;
52 uint32_t vertex_count;
53 int agp;
54 const uint32_t *buf_start;
55} drm_via_state_t;
56
57extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
58 struct drm_device * dev, int agp);
59extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
60 unsigned int size);
61
62#endif
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
new file mode 100644
index 000000000000..6ec04ac12459
--- /dev/null
+++ b/drivers/gpu/drm/via/via_video.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Thomas Hellstrom 2005.
24 *
25 * Video and XvMC related functions.
26 */
27
28#include "drmP.h"
29#include "via_drm.h"
30#include "via_drv.h"
31
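/*
 * Initialize the XvMC "futexes": one wait queue per lock, with the
 * corresponding lock words in the SAREA cleared.
 */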
32void via_init_futex(drm_via_private_t * dev_priv)
33{
34 unsigned int i;
35
36 DRM_DEBUG("\n");
37
38 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
39 DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
40 XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
41 }
42}
43
44void via_cleanup_futex(drm_via_private_t * dev_priv)
45{
46}
47
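/*
 * When a context is released, force-clear any XvMC lock word it still
 * owns, waking up waiters if the lock was marked contended.
 */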
48void via_release_futex(drm_via_private_t * dev_priv, int context)
49{
50 unsigned int i;
51 volatile int *lock;
52
53 if (!dev_priv->sarea_priv)
54 return;
55
56 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
57 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
58		if (_DRM_LOCKING_CONTEXT(*lock) == context) {
59 if (_DRM_LOCK_IS_HELD(*lock)
60 && (*lock & _DRM_LOCK_CONT)) {
61 DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
62 }
63 *lock = 0;
64 }
65 }
66}
67
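/*
 * Futex-like ioctl backing the XvMC locks: VIA_FUTEX_WAIT sleeps, with
 * a timeout given in milliseconds, until the lock word no longer holds
 * the expected value; VIA_FUTEX_WAKE wakes all waiters on that lock.
 * Lock indices at or beyond VIA_NR_XVMC_LOCKS are rejected.
 */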
68int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
69{
70 drm_via_futex_t *fx = data;
71 volatile int *lock;
72 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
73 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
74 int ret = 0;
75
76 DRM_DEBUG("\n");
77
78	if (fx->lock >= VIA_NR_XVMC_LOCKS)
79 return -EFAULT;
80
81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
82
83 switch (fx->func) {
84 case VIA_FUTEX_WAIT:
85 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
86 (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
87 return ret;
88 case VIA_FUTEX_WAKE:
89 DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
90 return 0;
91 }
92 return 0;
93}