aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2008-05-28 20:09:59 -0400
committerDave Airlie <airlied@redhat.com>2008-07-13 20:45:01 -0400
commitc0e09200dc0813972442e550a5905a132768e56c (patch)
treed38e635a30ff8b0a2b98b9d7f97cab1501f8209e /drivers/gpu
parentbce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
drm: reorganise drm tree to be more future proof.
With the coming of kernel-based modesetting and the memory manager stuff, the everything-in-one-directory approach was getting very ugly and starting to be unmanageable. This restructures the drm along the lines of other kernel components. It creates a drivers/gpu/drm directory and moves the hw drivers into subdirectories. It moves the includes into an include/drm, and sets up the unifdef for the userspace headers we should be exporting. Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/Makefile1
-rw-r--r--drivers/gpu/drm/Kconfig107
-rw-r--r--drivers/gpu/drm/Makefile26
-rw-r--r--drivers/gpu/drm/README.drm43
-rw-r--r--drivers/gpu/drm/ati_pcigart.c181
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c455
-rw-r--r--drivers/gpu/drm/drm_auth.c190
-rw-r--r--drivers/gpu/drm/drm_bufs.c1601
-rw-r--r--drivers/gpu/drm/drm_context.c471
-rw-r--r--drivers/gpu/drm/drm_dma.c180
-rw-r--r--drivers/gpu/drm/drm_drawable.c192
-rw-r--r--drivers/gpu/drm/drm_drv.c540
-rw-r--r--drivers/gpu/drm/drm_fops.c466
-rw-r--r--drivers/gpu/drm/drm_hashtab.c202
-rw-r--r--drivers/gpu/drm/drm_ioc32.c1073
-rw-r--r--drivers/gpu/drm/drm_ioctl.c352
-rw-r--r--drivers/gpu/drm/drm_irq.c462
-rw-r--r--drivers/gpu/drm/drm_lock.c391
-rw-r--r--drivers/gpu/drm/drm_memory.c181
-rw-r--r--drivers/gpu/drm/drm_mm.c295
-rw-r--r--drivers/gpu/drm/drm_pci.c183
-rw-r--r--drivers/gpu/drm/drm_proc.c557
-rw-r--r--drivers/gpu/drm/drm_scatter.c227
-rw-r--r--drivers/gpu/drm/drm_sman.c353
-rw-r--r--drivers/gpu/drm/drm_stub.c331
-rw-r--r--drivers/gpu/drm/drm_sysfs.c208
-rw-r--r--drivers/gpu/drm/drm_vm.c673
-rw-r--r--drivers/gpu/drm/i810/Makefile8
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c1283
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c97
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h242
-rw-r--r--drivers/gpu/drm/i830/Makefile8
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c1553
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c108
-rw-r--r--drivers/gpu/drm/i830/i830_drv.h292
-rw-r--r--drivers/gpu/drm/i830/i830_irq.c186
-rw-r--r--drivers/gpu/drm/i915/Makefile10
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c858
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c605
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1142
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c222
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c623
-rw-r--r--drivers/gpu/drm/i915/i915_mem.c386
-rw-r--r--drivers/gpu/drm/mga/Makefile11
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c1162
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c141
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h687
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c231
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c148
-rw-r--r--drivers/gpu/drm/mga/mga_state.c1104
-rw-r--r--drivers/gpu/drm/mga/mga_ucode.h11645
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c193
-rw-r--r--drivers/gpu/drm/r128/Makefile10
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c935
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c103
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h522
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c221
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c101
-rw-r--r--drivers/gpu/drm/r128/r128_state.c1681
-rw-r--r--drivers/gpu/drm/radeon/Makefile10
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c1071
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h1772
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c1773
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c126
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1406
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c424
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c320
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c302
-rw-r--r--drivers/gpu/drm/radeon/radeon_microcode.h1844
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c3203
-rw-r--r--drivers/gpu/drm/savage/Makefile9
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c1095
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c88
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h575
-rw-r--r--drivers/gpu/drm/savage/savage_state.c1163
-rw-r--r--drivers/gpu/drm/sis/Makefile10
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c117
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h73
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c333
-rw-r--r--drivers/gpu/drm/tdfx/Makefile8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c84
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.h47
-rw-r--r--drivers/gpu/drm/via/Makefile8
-rw-r--r--drivers/gpu/drm/via/via_3d_reg.h1650
-rw-r--r--drivers/gpu/drm/via/via_dma.c755
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c816
-rw-r--r--drivers/gpu/drm/via/via_dmablit.h140
-rw-r--r--drivers/gpu/drm/via/via_drv.c100
-rw-r--r--drivers/gpu/drm/via/via_drv.h153
-rw-r--r--drivers/gpu/drm/via/via_irq.c377
-rw-r--r--drivers/gpu/drm/via/via_map.c123
-rw-r--r--drivers/gpu/drm/via/via_mm.c194
-rw-r--r--drivers/gpu/drm/via/via_verifier.c1116
-rw-r--r--drivers/gpu/drm/via/via_verifier.h62
-rw-r--r--drivers/gpu/drm/via/via_video.c93
95 files changed, 57899 insertions, 0 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
new file mode 100644
index 000000000000..de566cf0414c
--- /dev/null
+++ b/drivers/gpu/Makefile
@@ -0,0 +1 @@
obj-y += drm/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
new file mode 100644
index 000000000000..610d6fd5bb50
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig
@@ -0,0 +1,107 @@
1#
2# DRM device configuration
3#
4# This driver provides support for the
5# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
6#
7menuconfig DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
10 help
11 Kernel-level support for the Direct Rendering Infrastructure (DRI)
12 introduced in XFree86 4.0. If you say Y here, you need to select
13 the module that's right for your graphics card from the list below.
14 These modules provide support for synchronization, security, and
15 DMA transfers. Please see <http://dri.sourceforge.net/> for more
16 details. You should also select and configure AGP
17 (/dev/agpgart) support.
18
19config DRM_TDFX
20 tristate "3dfx Banshee/Voodoo3+"
21 depends on DRM && PCI
22 help
23 Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
24 graphics card. If M is selected, the module will be called tdfx.
25
26config DRM_R128
27 tristate "ATI Rage 128"
28 depends on DRM && PCI
29 help
30 Choose this option if you have an ATI Rage 128 graphics card. If M
31 is selected, the module will be called r128. AGP support for
32 this card is strongly suggested (unless you have a PCI version).
33
34config DRM_RADEON
35 tristate "ATI Radeon"
36 depends on DRM && PCI
37 help
38 Choose this option if you have an ATI Radeon graphics card. There
39 are both PCI and AGP versions. You don't need to choose this to
40 run the Radeon in plain VGA mode.
41
42 If M is selected, the module will be called radeon.
43
44config DRM_I810
45 tristate "Intel I810"
46 depends on DRM && AGP && AGP_INTEL
47 help
48 Choose this option if you have an Intel I810 graphics card. If M is
49 selected, the module will be called i810. AGP support is required
50 for this driver to work.
51
52choice
53 prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
54 depends on DRM && AGP && AGP_INTEL
55 optional
56
57config DRM_I830
58 tristate "i830 driver"
59 help
60 Choose this option if you have a system that has Intel 830M, 845G,
61 852GM, 855GM or 865G integrated graphics. If M is selected, the
62 module will be called i830. AGP support is required for this driver
63 to work. This driver is used by the older X releases X.org 6.7 and
64 XFree86 4.3. If unsure, build this and i915 as modules and the X server
65 will load the correct one.
66
67config DRM_I915
68 tristate "i915 driver"
69 help
70 Choose this option if you have a system that has Intel 830M, 845G,
71 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
72 module will be called i915. AGP support is required for this driver
73 to work. This driver is used by the Intel driver in X.org 6.8 and
74 XFree86 4.4 and above. If unsure, build this and i830 as modules and
75 the X server will load the correct one.
76
77endchoice
78
79config DRM_MGA
80 tristate "Matrox g200/g400"
81 depends on DRM
82 help
83 Choose this option if you have a Matrox G200, G400 or G450 graphics
84 card. If M is selected, the module will be called mga. AGP
85 support is required for this driver to work.
86
87config DRM_SIS
88 tristate "SiS video cards"
89 depends on DRM && AGP
90 help
91 Choose this option if you have a SiS 630 or compatible video
92 chipset. If M is selected the module will be called sis. AGP
93 support is required for this driver to work.
94
95config DRM_VIA
96 tristate "Via unichrome video cards"
97 depends on DRM
98 help
99 Choose this option if you have a Via unichrome or compatible video
100 chipset. If M is selected the module will be called via.
101
102config DRM_SAVAGE
103 tristate "Savage video cards"
104 depends on DRM
105 help
106 Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
107 chipset. If M is selected the module will be called savage.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
new file mode 100644
index 000000000000..e9f9a97ae00a
--- /dev/null
+++ b/drivers/gpu/drm/Makefile
@@ -0,0 +1,26 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6
# Core DRM objects, always built into the drm module.
7drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
8 drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
9 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
10 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
11 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
12
# drm_ioc32.o supplies the 32-bit compat ioctl translations (CONFIG_COMPAT only).
13drm-$(CONFIG_COMPAT) += drm_ioc32.o
14
# Per-driver subdirectories, each gated by its own Kconfig symbol.
15obj-$(CONFIG_DRM) += drm.o
16obj-$(CONFIG_DRM_TDFX) += tdfx/
17obj-$(CONFIG_DRM_R128) += r128/
18obj-$(CONFIG_DRM_RADEON)+= radeon/
19obj-$(CONFIG_DRM_MGA) += mga/
20obj-$(CONFIG_DRM_I810) += i810/
21obj-$(CONFIG_DRM_I830) += i830/
22obj-$(CONFIG_DRM_I915) += i915/
23obj-$(CONFIG_DRM_SIS) += sis/
24obj-$(CONFIG_DRM_SAVAGE)+= savage/
25obj-$(CONFIG_DRM_VIA) +=via/
26
diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm
new file mode 100644
index 000000000000..b5b332722581
--- /dev/null
+++ b/drivers/gpu/drm/README.drm
@@ -0,0 +1,43 @@
1************************************************************
2* For the very latest on DRI development, please see: *
3* http://dri.freedesktop.org/ *
4************************************************************
5
6The Direct Rendering Manager (drm) is a device-independent kernel-level
7device driver that provides support for the XFree86 Direct Rendering
8Infrastructure (DRI).
9
10The DRM supports the Direct Rendering Infrastructure (DRI) in four major
11ways:
12
13 1. The DRM provides synchronized access to the graphics hardware via
14 the use of an optimized two-tiered lock.
15
16 2. The DRM enforces the DRI security policy for access to the graphics
17 hardware by only allowing authenticated X11 clients access to
18 restricted regions of memory.
19
20 3. The DRM provides a generic DMA engine, complete with multiple
21 queues and the ability to detect the need for an OpenGL context
22 switch.
23
24 4. The DRM is extensible via the use of small device-specific modules
25 that rely extensively on the API exported by the DRM module.
26
27
28Documentation on the DRI is available from:
29 http://dri.freedesktop.org/wiki/Documentation
30 http://sourceforge.net/project/showfiles.php?group_id=387
31 http://dri.sourceforge.net/doc/
32
33For specific information about kernel-level support, see:
34
35 The Direct Rendering Manager, Kernel Support for the Direct Rendering
36 Infrastructure
37 http://dri.sourceforge.net/doc/drm_low_level.html
38
39 Hardware Locking for the Direct Rendering Infrastructure
40 http://dri.sourceforge.net/doc/hardware_locking_low_level.html
41
42 A Security Analysis of the Direct Rendering Infrastructure
43 http://dri.sourceforge.net/doc/security_low_level.html
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
new file mode 100644
index 000000000000..c533d0c9ec61
--- /dev/null
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -0,0 +1,181 @@
1/**
2 * \file ati_pcigart.c
3 * ATI PCI GART support
4 *
5 * \author Gareth Hughes <gareth@valinux.com>
6 */
7
8/*
9 * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
10 *
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
31 * DEALINGS IN THE SOFTWARE.
32 */
33
34#include "drmP.h"
35
36# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
37
/**
 * Allocate the PCI GART table as DMA-consistent, PAGE_SIZE-aligned memory.
 * On success the allocation handle is stored in gart_info->table_handle and
 * 0 is returned; -ENOMEM if drm_pci_alloc() fails.
 */
38static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
39 struct drm_ati_pcigart_info *gart_info)
40{
41 gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
42 PAGE_SIZE,
43 gart_info->table_mask);
44 if (gart_info->table_handle == NULL)
45 return -ENOMEM;
46
47 return 0;
48}
49
/**
 * Free the table allocated by drm_ati_alloc_pcigart_table() and clear the
 * handle so a later cleanup cannot double-free it.
 */
50static void drm_ati_free_pcigart_table(struct drm_device *dev,
51 struct drm_ati_pcigart_info *gart_info)
52{
53 drm_pci_free(dev, gart_info->table_handle);
54 gart_info->table_handle = NULL;
55}
56
/**
 * Tear down the ATI PCI GART mapping.
 *
 * Unmaps every scatter/gather page mapped by drm_ati_pcigart_init()
 * (stopping at the first slot with a zero bus address) and, for a
 * main-memory table, frees the table itself.
 *
 * Historical return convention: 0 when there is no SG memory to clean up,
 * 1 otherwise — not a negative errno.
 */
57int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
58{
59 struct drm_sg_mem *entry = dev->sg;
60 unsigned long pages;
61 int i;
62 int max_pages;
63
64 /* we need to support large memory configurations */
65 if (!entry) {
66 DRM_ERROR("no scatter/gather memory!\n");
67 return 0;
68 }
69
70 if (gart_info->bus_addr) {
71
/* Never unmap more slots than the table can describe. */
72 max_pages = (gart_info->table_size / sizeof(u32));
73 pages = (entry->pages <= max_pages)
74 ? entry->pages : max_pages;
75
/* A zero bus address marks the first never-mapped slot. */
76 for (i = 0; i < pages; i++) {
77 if (!entry->busaddr[i])
78 break;
79 pci_unmap_page(dev->pdev, entry->busaddr[i],
80 PAGE_SIZE, PCI_DMA_TODEVICE);
81 }
82
83 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
84 gart_info->bus_addr = 0;
85 }
86
/* The table itself is only owned by us when it lives in main memory. */
87 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
88 gart_info->table_handle) {
89 drm_ati_free_pcigart_table(dev, gart_info);
90 }
91
92 return 1;
93}
94EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
95
/**
 * Build and publish the ATI PCI GART table.
 *
 * Chooses the table location (freshly allocated main RAM vs. a caller-set
 * VRAM address), DMA-maps each scatter/gather page and writes one 32-bit
 * GART entry per ATI_PCIGART_PAGE_SIZE chunk, formatted for the
 * IGP/PCIE/PCI register interface selected in gart_info->gart_reg_if.
 *
 * Historical return convention: 1 on success, 0 on failure (not a negative
 * errno). gart_info->addr and ->bus_addr are updated on both paths.
 */
96int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
97{
98 struct drm_sg_mem *entry = dev->sg;
99 void *address = NULL;
100 unsigned long pages;
101 u32 *pci_gart, page_base;
102 dma_addr_t bus_address = 0;
103 int i, j, ret = 0;
104 int max_pages;
105
106 if (!entry) {
107 DRM_ERROR("no scatter/gather memory!\n");
108 goto done;
109 }
110
111 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
112 DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
113
114 ret = drm_ati_alloc_pcigart_table(dev, gart_info);
115 if (ret) {
116 DRM_ERROR("cannot allocate PCI GART page!\n");
117 goto done;
118 }
119
120 address = gart_info->table_handle->vaddr;
121 bus_address = gart_info->table_handle->busaddr;
122 } else {
/* VRAM table: addresses were pre-set by the caller in gart_info. */
123 address = gart_info->addr;
124 bus_address = gart_info->bus_addr;
125 DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
126 (unsigned long long)bus_address,
127 (unsigned long)address);
128 }
129
130 pci_gart = (u32 *) address;
131
/* Clamp to what the table can hold, then zero every slot. */
132 max_pages = (gart_info->table_size / sizeof(u32));
133 pages = (entry->pages <= max_pages)
134 ? entry->pages : max_pages;
135
136 memset(pci_gart, 0, max_pages * sizeof(u32));
137
138 for (i = 0; i < pages; i++) {
139 /* we need to support large memory configurations */
140 entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
141 0, PAGE_SIZE, PCI_DMA_TODEVICE);
142 if (entry->busaddr[i] == 0) {
143 DRM_ERROR("unable to map PCIGART pages!\n");
144 drm_ati_pcigart_cleanup(dev, gart_info);
145 address = NULL;
146 bus_address = 0;
147 goto done;
148 }
/* NOTE(review): bus address is truncated to 32 bits here — assumes
 * DMA addresses below 4GB; confirm against the DMA mask in use. */
149 page_base = (u32) entry->busaddr[i];
150
/* One CPU page may span several (smaller) GART pages. */
151 for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
152 switch(gart_info->gart_reg_if) {
153 case DRM_ATI_GART_IGP:
154 *pci_gart = cpu_to_le32((page_base) | 0xc);
155 break;
156 case DRM_ATI_GART_PCIE:
157 *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
158 break;
159 default:
160 case DRM_ATI_GART_PCI:
161 *pci_gart = cpu_to_le32(page_base);
162 break;
163 }
164 pci_gart++;
165 page_base += ATI_PCIGART_PAGE_SIZE;
166 }
167 }
168 ret = 1;
169
/* Write the new table back out of the CPU caches (x86) or at least
 * order the table writes before returning. */
170#if defined(__i386__) || defined(__x86_64__)
171 wbinvd();
172#else
173 mb();
174#endif
175
176 done:
177 gart_info->addr = address;
178 gart_info->bus_addr = bus_address;
179 return ret;
180}
181EXPORT_SYMBOL(drm_ati_pcigart_init);
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
new file mode 100644
index 000000000000..aefa5ac4c0b1
--- /dev/null
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -0,0 +1,455 @@
1/**
2 * \file drm_agpsupport.c
3 * DRM support for AGP/GART backend
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
31 * OTHER DEALINGS IN THE SOFTWARE.
32 */
33
34#include "drmP.h"
35#include <linux/module.h>
36
37#if __OS_HAS_AGP
38
39/**
40 * Get AGP information.
41 *
42 * \param inode device inode.
43 * \param file_priv DRM file private.
44 * \param cmd command.
45 * \param arg pointer to a (output) drm_agp_info structure.
46 * \return zero on success or a negative number on failure.
47 *
48 * Verifies the AGP device has been initialized and acquired and fills in the
49 * drm_agp_info structure with the information in drm_agp_head::agp_info.
50 */
51int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
52{
53 DRM_AGP_KERN *kern;
54
55 if (!dev->agp || !dev->agp->acquired)
56 return -EINVAL;
57
58 kern = &dev->agp->agp_info;
59 info->agp_version_major = kern->version.major;
60 info->agp_version_minor = kern->version.minor;
61 info->mode = kern->mode;
62 info->aperture_base = kern->aper_base;
/* aper_size is presumably in MiB — scaled to bytes here; confirm. */
63 info->aperture_size = kern->aper_size * 1024 * 1024;
64 info->memory_allowed = kern->max_memory << PAGE_SHIFT;
65 info->memory_used = kern->current_memory << PAGE_SHIFT;
66 info->id_vendor = kern->device->vendor;
67 info->id_device = kern->device->device;
68
69 return 0;
70}
71
72EXPORT_SYMBOL(drm_agp_info);
73
/**
 * Ioctl wrapper around drm_agp_info(); 'data' is the userspace
 * drm_agp_info structure.
 * NOTE(review): could simply be 'return drm_agp_info(dev, info);' for
 * consistency with the other ioctl wrappers in this file.
 */
74int drm_agp_info_ioctl(struct drm_device *dev, void *data,
75 struct drm_file *file_priv)
76{
77 struct drm_agp_info *info = data;
78 int err;
79
80 err = drm_agp_info(dev, info);
81 if (err)
82 return err;
83
84 return 0;
85}
86
87/**
88 * Acquire the AGP device.
89 *
90 * \param dev DRM device that is to acquire AGP.
91 * \return zero on success or a negative number on failure.
92 *
93 * Verifies the AGP device hasn't been acquired before and calls
94 * \c agp_backend_acquire.
95 */
96int drm_agp_acquire(struct drm_device * dev)
97{
98 if (!dev->agp)
99 return -ENODEV;
100 if (dev->agp->acquired)
101 return -EBUSY;
102 if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
103 return -ENODEV;
104 dev->agp->acquired = 1;
105 return 0;
106}
107
108EXPORT_SYMBOL(drm_agp_acquire);
109
110/**
111 * Acquire the AGP device (ioctl).
112 *
113 * \param inode device inode.
114 * \param file_priv DRM file private.
115 * \param cmd command.
116 * \param arg user argument.
117 * \return zero on success or a negative number on failure.
118 *
119 * Verifies the AGP device hasn't been acquired before and calls
120 * \c agp_backend_acquire.
121 */
122int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
123 struct drm_file *file_priv)
124{
/* NOTE(review): file_priv->minor->dev should equal 'dev'; the extra
 * indirection and cast look redundant — confirm before simplifying. */
125 return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
126}
127
128/**
129 * Release the AGP device.
130 *
131 * \param dev DRM device that is to release AGP.
132 * \return zero on success or a negative number on failure.
133 *
134 * Verifies the AGP device has been acquired and calls \c agp_backend_release.
135 */
136int drm_agp_release(struct drm_device * dev)
137{
138 if (!dev->agp || !dev->agp->acquired)
139 return -EINVAL;
140 agp_backend_release(dev->agp->bridge);
141 dev->agp->acquired = 0;
142 return 0;
143}
144EXPORT_SYMBOL(drm_agp_release);
145
/** Thin ioctl wrapper around drm_agp_release(); 'data' is unused. */
146int drm_agp_release_ioctl(struct drm_device *dev, void *data,
147 struct drm_file *file_priv)
148{
149 return drm_agp_release(dev);
150}
151
152/**
153 * Enable the AGP bus.
154 *
155 * \param dev DRM device that has previously acquired AGP.
156 * \param mode Requested AGP mode.
157 * \return zero on success or a negative number on failure.
158 *
159 * Verifies the AGP device has been acquired but not enabled, and calls
160 * \c agp_enable.
161 */
/* Note: 'mode' is deliberately passed by value (small struct copy). */
162int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
163{
164 if (!dev->agp || !dev->agp->acquired)
165 return -EINVAL;
166
167 dev->agp->mode = mode.mode;
168 agp_enable(dev->agp->bridge, mode.mode);
169 dev->agp->enabled = 1;
170 return 0;
171}
172
173EXPORT_SYMBOL(drm_agp_enable);
174
/** Ioctl wrapper: dereferences the userspace drm_agp_mode and enables AGP. */
175int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
176 struct drm_file *file_priv)
177{
178 struct drm_agp_mode *mode = data;
179
180 return drm_agp_enable(dev, *mode);
181}
182
183/**
184 * Allocate AGP memory.
185 *
186 * \param inode device inode.
187 * \param file_priv file private pointer.
188 * \param cmd command.
189 * \param arg pointer to a drm_agp_buffer structure.
190 * \return zero on success or a negative number on failure.
191 *
192 * Verifies the AGP device is present and has been acquired, allocates the
193 * memory via alloc_agp() and creates a drm_agp_mem entry for it.
194 */
195int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
196{
197 struct drm_agp_mem *entry;
198 DRM_AGP_MEM *memory;
199 unsigned long pages;
200 u32 type;
201
202 if (!dev->agp || !dev->agp->acquired)
203 return -EINVAL;
204 if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
205 return -ENOMEM;
206
207 memset(entry, 0, sizeof(*entry));
208
/* Round the requested byte count up to whole pages. */
209 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
210 type = (u32) request->type;
/* On AGP allocation failure the tracking entry must be released too. */
211 if (!(memory = drm_alloc_agp(dev, pages, type))) {
212 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
213 return -ENOMEM;
214 }
215
/* handle = key + 1, presumably so that a valid handle is never 0. */
216 entry->handle = (unsigned long)memory->key + 1;
217 entry->memory = memory;
218 entry->bound = 0;
219 entry->pages = pages;
220 list_add(&entry->head, &dev->agp->memory);
221
/* Report the handle and physical address back to the caller. */
222 request->handle = entry->handle;
223 request->physical = memory->physical;
224
225 return 0;
226}
227EXPORT_SYMBOL(drm_agp_alloc);
228
229
/** Thin ioctl wrapper around drm_agp_alloc(). */
230int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
231 struct drm_file *file_priv)
232{
233 struct drm_agp_buffer *request = data;
234
235 return drm_agp_alloc(dev, request);
236}
237
238/**
239 * Search for the AGP memory entry associated with a handle.
240 *
241 * \param dev DRM device structure.
242 * \param handle AGP memory handle.
243 * \return pointer to the drm_agp_mem structure associated with \p handle.
244 *
245 * Walks through drm_agp_head::memory until finding a matching handle.
246 */
/* Linear scan; returns NULL when the handle is unknown. */
247static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
248 unsigned long handle)
249{
250 struct drm_agp_mem *entry;
251
252 list_for_each_entry(entry, &dev->agp->memory, head) {
253 if (entry->handle == handle)
254 return entry;
255 }
256 return NULL;
257}
258
259/**
260 * Unbind AGP memory from the GATT (ioctl).
261 *
262 * \param inode device inode.
263 * \param file_priv DRM file private.
264 * \param cmd command.
265 * \param arg pointer to a drm_agp_binding structure.
266 * \return zero on success or a negative number on failure.
267 *
268 * Verifies the AGP device is present and acquired, looks-up the AGP memory
269 * entry and passes it to the unbind_agp() function.
270 */
271int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
272{
273 struct drm_agp_mem *entry;
274 int ret;
275
276 if (!dev->agp || !dev->agp->acquired)
277 return -EINVAL;
278 if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
279 return -EINVAL;
280 if (!entry->bound)
281 return -EINVAL;
282 ret = drm_unbind_agp(entry->memory);
/* Only mark the entry unbound if the backend unbind succeeded. */
283 if (ret == 0)
284 entry->bound = 0;
285 return ret;
286}
287EXPORT_SYMBOL(drm_agp_unbind);
288
289
/** Thin ioctl wrapper around drm_agp_unbind(). */
290int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
291 struct drm_file *file_priv)
292{
293 struct drm_agp_binding *request = data;
294
295 return drm_agp_unbind(dev, request);
296}
297
298/**
299 * Bind AGP memory into the GATT (ioctl)
300 *
301 * \param inode device inode.
302 * \param file_priv DRM file private.
303 * \param cmd command.
304 * \param arg pointer to a drm_agp_binding structure.
305 * \return zero on success or a negative number on failure.
306 *
307 * Verifies the AGP device is present and has been acquired and that no memory
308 * is currently bound into the GATT. Looks-up the AGP memory entry and passes
309 * it to bind_agp() function.
310 */
311int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
312{
313 struct drm_agp_mem *entry;
314 int retcode;
315 int page;
316
317 if (!dev->agp || !dev->agp->acquired)
318 return -EINVAL;
319 if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
320 return -EINVAL;
321 if (entry->bound)
322 return -EINVAL;
/* Round the requested offset up to a whole GATT page index. */
323 page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
324 if ((retcode = drm_bind_agp(entry->memory, page)))
325 return retcode;
/* 'bound' doubles as the bus address of the binding (non-zero == bound),
 * as the !entry->bound checks above and in drm_agp_unbind() rely on. */
326 entry->bound = dev->agp->base + (page << PAGE_SHIFT);
327 DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
328 dev->agp->base, entry->bound);
329 return 0;
330}
331EXPORT_SYMBOL(drm_agp_bind);
332
333
/** Thin ioctl wrapper around drm_agp_bind(). */
334int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
335 struct drm_file *file_priv)
336{
337 struct drm_agp_binding *request = data;
338
339 return drm_agp_bind(dev, request);
340}
341
342/**
343 * Free AGP memory (ioctl).
344 *
345 * \param inode device inode.
346 * \param file_priv DRM file private.
347 * \param cmd command.
348 * \param arg pointer to a drm_agp_buffer structure.
349 * \return zero on success or a negative number on failure.
350 *
351 * Verifies the AGP device is present and has been acquired and looks up the
352 * AGP memory entry. If the memory it's currently bound, unbind it via
353 * unbind_agp(). Frees it via free_agp() as well as the entry itself
354 * and unlinks from the doubly linked list it's inserted in.
355 */
356int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
357{
358 struct drm_agp_mem *entry;
359
360 if (!dev->agp || !dev->agp->acquired)
361 return -EINVAL;
362 if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
363 return -EINVAL;
364 if (entry->bound)
/* NOTE(review): return value of drm_unbind_agp() is ignored and the
 * memory is freed regardless — confirm that is intended. */
365 drm_unbind_agp(entry->memory);
366
367 list_del(&entry->head);
368
369 drm_free_agp(entry->memory, entry->pages);
370 drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
371 return 0;
372}
373EXPORT_SYMBOL(drm_agp_free);
374
375
376
/** Thin ioctl wrapper around drm_agp_free(). */
377int drm_agp_free_ioctl(struct drm_device *dev, void *data,
378 struct drm_file *file_priv)
379{
380 struct drm_agp_buffer *request = data;
381
382 return drm_agp_free(dev, request);
383}
384
385/**
386 * Initialize the AGP resources.
387 *
388 * \return pointer to a drm_agp_head structure.
389 *
390 * Gets the drm_agp_t structure which is made available by the agpgart module
391 * via the inter_module_* functions. Creates and initializes a drm_agp_head
392 * structure.
393 */
/* Returns NULL on any failure: allocation, no backend, or unsupported chipset. */
394struct drm_agp_head *drm_agp_init(struct drm_device *dev)
395{
396 struct drm_agp_head *head = NULL;
397
398 if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
399 return NULL;
400 memset((void *)head, 0, sizeof(*head));
401 head->bridge = agp_find_bridge(dev->pdev);
/* No registered bridge for this device: acquire the backend just long
 * enough to snapshot its info, then release it again. */
402 if (!head->bridge) {
403 if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
404 drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
405 return NULL;
406 }
407 agp_copy_info(head->bridge, &head->agp_info);
408 agp_backend_release(head->bridge);
409 } else {
410 agp_copy_info(head->bridge, &head->agp_info);
411 }
/* Bail out if agpgart does not support this chipset. */
412 if (head->agp_info.chipset == NOT_SUPPORTED) {
413 drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
414 return NULL;
415 }
416 INIT_LIST_HEAD(&head->memory);
417 head->cant_use_aperture = head->agp_info.cant_use_aperture;
418 head->page_mask = head->agp_info.page_mask;
419 head->base = head->agp_info.aper_base;
420 return head;
421}
422
/*
 * Trivial NULL-checked pass-throughs to the agpgart API.
 * Note the mixed return conventions below: drm_agp_free_memory() returns
 * 0/1, while the bind/unbind wrappers return errno-style values.
 */
423/** Calls agp_allocate_memory() */
424DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
425 size_t pages, u32 type)
426{
427 return agp_allocate_memory(bridge, pages, type);
428}
429
430/** Calls agp_free_memory() */
431int drm_agp_free_memory(DRM_AGP_MEM * handle)
432{
433 if (!handle)
434 return 0;
435 agp_free_memory(handle);
436 return 1;
437}
438
439/** Calls agp_bind_memory() */
440int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
441{
442 if (!handle)
443 return -EINVAL;
444 return agp_bind_memory(handle, start);
445}
446
447/** Calls agp_unbind_memory() */
448int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
449{
450 if (!handle)
451 return -EINVAL;
452 return agp_unbind_memory(handle);
453}
454
455#endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
new file mode 100644
index 000000000000..a73462723d2d
--- /dev/null
+++ b/drivers/gpu/drm/drm_auth.c
@@ -0,0 +1,190 @@
1/**
2 * \file drm_auth.c
3 * IOCTLs for authentication
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37
38/**
39 * Find the file with the given magic number.
40 *
41 * \param dev DRM device.
42 * \param magic magic number.
43 *
44 * Searches in drm_device::magiclist within all files with the same hash key
45 * the one with matching magic number, while holding the drm_device::struct_mutex
46 * lock.
47 */
48static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
49{
50 struct drm_file *retval = NULL;
51 struct drm_magic_entry *pt;
52 struct drm_hash_item *hash;
53
54 mutex_lock(&dev->struct_mutex);
55 if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
56 pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
57 retval = pt->priv;
58 }
59 mutex_unlock(&dev->struct_mutex);
60 return retval;
61}
62
63/**
64 * Adds a magic number.
65 *
66 * \param dev DRM device.
67 * \param priv file private data.
68 * \param magic magic number.
69 *
70 * Creates a drm_magic_entry structure and appends to the linked list
71 * associated the magic number hash key in drm_device::magiclist, while holding
72 * the drm_device::struct_mutex lock.
73 */
74static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
75 drm_magic_t magic)
76{
77 struct drm_magic_entry *entry;
78
79 DRM_DEBUG("%d\n", magic);
80
81 entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
82 if (!entry)
83 return -ENOMEM;
84 memset(entry, 0, sizeof(*entry));
85 entry->priv = priv;
86
87 entry->hash_item.key = (unsigned long)magic;
88 mutex_lock(&dev->struct_mutex);
89 drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
90 list_add_tail(&entry->head, &dev->magicfree);
91 mutex_unlock(&dev->struct_mutex);
92
93 return 0;
94}
95
96/**
97 * Remove a magic number.
98 *
99 * \param dev DRM device.
100 * \param magic magic number.
101 *
102 * Searches and unlinks the entry in drm_device::magiclist with the magic
103 * number hash key, while holding the drm_device::struct_mutex lock.
104 */
105static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
106{
107 struct drm_magic_entry *pt;
108 struct drm_hash_item *hash;
109
110 DRM_DEBUG("%d\n", magic);
111
112 mutex_lock(&dev->struct_mutex);
113 if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
114 mutex_unlock(&dev->struct_mutex);
115 return -EINVAL;
116 }
117 pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
118 drm_ht_remove_item(&dev->magiclist, hash);
119 list_del(&pt->head);
120 mutex_unlock(&dev->struct_mutex);
121
122 drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
123
124 return 0;
125}
126
127/**
128 * Get a unique magic number (ioctl).
129 *
130 * \param inode device inode.
131 * \param file_priv DRM file private.
132 * \param cmd command.
133 * \param arg pointer to a resulting drm_auth structure.
134 * \return zero on success, or a negative number on failure.
135 *
136 * If there is a magic number in drm_file::magic then use it, otherwise
137 * searches an unique non-zero magic number and add it associating it with \p
138 * file_priv.
139 */
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	/* Shared across all devices/callers; the spinlock guards only
	 * 'sequence' itself, not the subsequent add to the magic list. */
	static drm_magic_t sequence = 0;
	static DEFINE_SPINLOCK(lock);
	struct drm_auth *auth = data;

	/* Find unique magic */
	if (file_priv->magic) {
		/* This file already has a magic; just hand it back. */
		auth->magic = file_priv->magic;
	} else {
		/* Advance the sequence until we find a value not already
		 * registered on this device.
		 * NOTE(review): the uniqueness check (drm_find_file) and the
		 * later drm_add_magic are not atomic with respect to each
		 * other, so two racing callers could in principle claim the
		 * same magic — appears tolerated here; confirm. */
		do {
			spin_lock(&lock);
			if (!sequence)
				++sequence;	/* reserve 0 */
			auth->magic = sequence++;
			spin_unlock(&lock);
		} while (drm_find_file(dev, auth->magic));
		file_priv->magic = auth->magic;
		drm_add_magic(dev, file_priv, auth->magic);
	}

	DRM_DEBUG("%u\n", auth->magic);

	return 0;
}
165
166/**
167 * Authenticate with a magic.
168 *
169 * \param inode device inode.
170 * \param file_priv DRM file private.
171 * \param cmd command.
172 * \param arg pointer to a drm_auth structure.
173 * \return zero if authentication successed, or a negative number otherwise.
174 *
175 * Checks if \p file_priv is associated with the magic number passed in \arg.
176 */
177int drm_authmagic(struct drm_device *dev, void *data,
178 struct drm_file *file_priv)
179{
180 struct drm_auth *auth = data;
181 struct drm_file *file;
182
183 DRM_DEBUG("%u\n", auth->magic);
184 if ((file = drm_find_file(dev, auth->magic))) {
185 file->authenticated = 1;
186 drm_remove_magic(dev, auth->magic);
187 return 0;
188 }
189 return -EINVAL;
190}
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
new file mode 100644
index 000000000000..bde64b84166e
--- /dev/null
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -0,0 +1,1601 @@
1/**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/vmalloc.h>
37#include "drmP.h"
38
/** Bus start address of PCI BAR \p resource on the device's pci_dev. */
unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
44
/** Length in bytes of PCI BAR \p resource on the device's pci_dev. */
unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}

EXPORT_SYMBOL(drm_get_resource_len);
51
/**
 * Find an existing entry on dev->maplist matching \p map.
 *
 * Two maps match when they have the same type and either the same offset,
 * or the candidate is a _DRM_SHM map whose flags are exactly
 * _DRM_CONTAINS_LOCK (the per-device lock map is matched by flags alone,
 * ignoring its offset).
 *
 * NOTE(review): the SHM case compares flags with '==' rather than '&', so
 * a lock map carrying any extra flag bit would not match here — looks
 * intentional for the single lock page, but confirm.
 * NOTE(review): callers in this file walk maplist without taking
 * dev->struct_mutex first — confirm list stability is guaranteed elsewhere.
 *
 * \return the matching map list entry, or NULL if none.
 */
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  drm_local_map_t *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && map->type == entry->map->type &&
		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
			return entry;
		}
	}

	return NULL;
}
66
/**
 * Insert \p hash into dev->map_hash, choosing a 32-bit user handle.
 *
 * The handle is normally the user_token shifted down by PAGE_SHIFT.  A
 * hashed (synthetic) key is used instead when the caller asks for one via
 * \p hashed_handle, or — on 64-bit kernels — when the token has bits above
 * 32 and therefore cannot survive the round-trip through a 32-bit handle.
 */
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;
#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		/* -EINVAL means the key is already taken: fall through and
		 * pick a synthetic key instead.  Other errors are fatal. */
		if (ret != -EINVAL)
			return ret;
	}
	/* Let the hash table pick a free key near user_token. */
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
90
91/**
92 * Ioctl to specify a range of memory that is available for mapping by a non-root process.
93 *
94 * \param inode device inode.
95 * \param file_priv DRM file private.
96 * \param cmd command.
97 * \param arg pointer to a drm_map structure.
98 * \return zero on success or a negative value on error.
99 *
100 * Adjusts the memory offset to its absolute value according to the mapping
101 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
102 * applicable and if supported by the kernel.
103 */
104static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
105 unsigned int size, enum drm_map_type type,
106 enum drm_map_flags flags,
107 struct drm_map_list ** maplist)
108{
109 struct drm_map *map;
110 struct drm_map_list *list;
111 drm_dma_handle_t *dmah;
112 unsigned long user_token;
113 int ret;
114
115 map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
116 if (!map)
117 return -ENOMEM;
118
119 map->offset = offset;
120 map->size = size;
121 map->flags = flags;
122 map->type = type;
123
124 /* Only allow shared memory to be removable since we only keep enough
125 * book keeping information about shared memory to allow for removal
126 * when processes fork.
127 */
128 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
129 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
130 return -EINVAL;
131 }
132 DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
133 map->offset, map->size, map->type);
134 if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
135 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
136 return -EINVAL;
137 }
138 map->mtrr = -1;
139 map->handle = NULL;
140
141 switch (map->type) {
142 case _DRM_REGISTERS:
143 case _DRM_FRAME_BUFFER:
144#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
145 if (map->offset + (map->size-1) < map->offset ||
146 map->offset < virt_to_phys(high_memory)) {
147 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
148 return -EINVAL;
149 }
150#endif
151#ifdef __alpha__
152 map->offset += dev->hose->mem_space->start;
153#endif
154 /* Some drivers preinitialize some maps, without the X Server
155 * needing to be aware of it. Therefore, we just return success
156 * when the server tries to create a duplicate map.
157 */
158 list = drm_find_matching_map(dev, map);
159 if (list != NULL) {
160 if (list->map->size != map->size) {
161 DRM_DEBUG("Matching maps of type %d with "
162 "mismatched sizes, (%ld vs %ld)\n",
163 map->type, map->size,
164 list->map->size);
165 list->map->size = map->size;
166 }
167
168 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
169 *maplist = list;
170 return 0;
171 }
172
173 if (drm_core_has_MTRR(dev)) {
174 if (map->type == _DRM_FRAME_BUFFER ||
175 (map->flags & _DRM_WRITE_COMBINING)) {
176 map->mtrr = mtrr_add(map->offset, map->size,
177 MTRR_TYPE_WRCOMB, 1);
178 }
179 }
180 if (map->type == _DRM_REGISTERS) {
181 map->handle = ioremap(map->offset, map->size);
182 if (!map->handle) {
183 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
184 return -ENOMEM;
185 }
186 }
187
188 break;
189 case _DRM_SHM:
190 list = drm_find_matching_map(dev, map);
191 if (list != NULL) {
192 if(list->map->size != map->size) {
193 DRM_DEBUG("Matching maps of type %d with "
194 "mismatched sizes, (%ld vs %ld)\n",
195 map->type, map->size, list->map->size);
196 list->map->size = map->size;
197 }
198
199 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
200 *maplist = list;
201 return 0;
202 }
203 map->handle = vmalloc_user(map->size);
204 DRM_DEBUG("%lu %d %p\n",
205 map->size, drm_order(map->size), map->handle);
206 if (!map->handle) {
207 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
208 return -ENOMEM;
209 }
210 map->offset = (unsigned long)map->handle;
211 if (map->flags & _DRM_CONTAINS_LOCK) {
212 /* Prevent a 2nd X Server from creating a 2nd lock */
213 if (dev->lock.hw_lock != NULL) {
214 vfree(map->handle);
215 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
216 return -EBUSY;
217 }
218 dev->sigdata.lock = dev->lock.hw_lock = map->handle; /* Pointer to lock */
219 }
220 break;
221 case _DRM_AGP: {
222 struct drm_agp_mem *entry;
223 int valid = 0;
224
225 if (!drm_core_has_AGP(dev)) {
226 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
227 return -EINVAL;
228 }
229#ifdef __alpha__
230 map->offset += dev->hose->mem_space->start;
231#endif
232 /* In some cases (i810 driver), user space may have already
233 * added the AGP base itself, because dev->agp->base previously
234 * only got set during AGP enable. So, only add the base
235 * address if the map's offset isn't already within the
236 * aperture.
237 */
238 if (map->offset < dev->agp->base ||
239 map->offset > dev->agp->base +
240 dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
241 map->offset += dev->agp->base;
242 }
243 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
244
245 /* This assumes the DRM is in total control of AGP space.
246 * It's not always the case as AGP can be in the control
247 * of user space (i.e. i810 driver). So this loop will get
248 * skipped and we double check that dev->agp->memory is
249 * actually set as well as being invalid before EPERM'ing
250 */
251 list_for_each_entry(entry, &dev->agp->memory, head) {
252 if ((map->offset >= entry->bound) &&
253 (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
254 valid = 1;
255 break;
256 }
257 }
258 if (!list_empty(&dev->agp->memory) && !valid) {
259 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260 return -EPERM;
261 }
262 DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
263
264 break;
265 }
266 case _DRM_SCATTER_GATHER:
267 if (!dev->sg) {
268 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
269 return -EINVAL;
270 }
271 map->offset += (unsigned long)dev->sg->virtual;
272 break;
273 case _DRM_CONSISTENT:
274 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
275 * As we're limiting the address to 2^32-1 (or less),
276 * casting it down to 32 bits is no problem, but we
277 * need to point to a 64bit variable first. */
278 dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
279 if (!dmah) {
280 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
281 return -ENOMEM;
282 }
283 map->handle = dmah->vaddr;
284 map->offset = (unsigned long)dmah->busaddr;
285 kfree(dmah);
286 break;
287 default:
288 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
289 return -EINVAL;
290 }
291
292 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
293 if (!list) {
294 if (map->type == _DRM_REGISTERS)
295 iounmap(map->handle);
296 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
297 return -EINVAL;
298 }
299 memset(list, 0, sizeof(*list));
300 list->map = map;
301
302 mutex_lock(&dev->struct_mutex);
303 list_add(&list->head, &dev->maplist);
304
305 /* Assign a 32-bit handle */
306 /* We do it here so that dev->struct_mutex protects the increment */
307 user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
308 map->offset;
309 ret = drm_map_handle(dev, &list->hash, user_token, 0);
310 if (ret) {
311 if (map->type == _DRM_REGISTERS)
312 iounmap(map->handle);
313 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
314 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
315 mutex_unlock(&dev->struct_mutex);
316 return ret;
317 }
318
319 list->user_token = list->hash.key << PAGE_SHIFT;
320 mutex_unlock(&dev->struct_mutex);
321
322 *maplist = list;
323 return 0;
324 }
325
326int drm_addmap(struct drm_device * dev, unsigned int offset,
327 unsigned int size, enum drm_map_type type,
328 enum drm_map_flags flags, drm_local_map_t ** map_ptr)
329{
330 struct drm_map_list *list;
331 int rc;
332
333 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
334 if (!rc)
335 *map_ptr = list->map;
336 return rc;
337}
338
339EXPORT_SYMBOL(drm_addmap);
340
341int drm_addmap_ioctl(struct drm_device *dev, void *data,
342 struct drm_file *file_priv)
343{
344 struct drm_map *map = data;
345 struct drm_map_list *maplist;
346 int err;
347
348 if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
349 return -EPERM;
350
351 err = drm_addmap_core(dev, map->offset, map->size, map->type,
352 map->flags, &maplist);
353
354 if (err)
355 return err;
356
357 /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
358 map->handle = (void *)(unsigned long)maplist->user_token;
359 return 0;
360}
361
362/**
363 * Remove a map private from list and deallocate resources if the mapping
364 * isn't in use.
365 *
366 * \param inode device inode.
367 * \param file_priv DRM file private.
368 * \param cmd command.
369 * \param arg pointer to a struct drm_map structure.
370 * \return zero on success or a negative value on error.
371 *
372 * Searches the map on drm_device::maplist, removes it from the list, see if
373 * its being used, and free any associate resource (such as MTRR's) if it's not
374 * being on use.
375 *
376 * \sa drm_addmap
377 */
int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	/* The map wasn't on this device's list at all. */
	if (!found)
		return -EINVAL;

	/* Release the backing resource according to the map type. */
	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		/* Backing memory is managed elsewhere; nothing extra here. */
		break;
	case _DRM_CONSISTENT:
		/* Rebuild the dma handle that drm_addmap_core() kfree'd
		 * (vaddr/busaddr/size live on in the map) so
		 * __drm_pci_free() can release the DMA memory. */
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
427
/** Locked wrapper: removes \p map under dev->struct_mutex via
 *  drm_rmmap_locked(). */
int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
439
440/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
441 * the last close of the device, and this is necessary for cleanup when things
442 * exit uncleanly. Therefore, having userland manually remove mappings seems
443 * like a pointless exercise since they're going away anyway.
444 *
445 * One use case might be after addmap is allowed for normal users for SHM and
446 * gets used by drivers that the server doesn't need to care about. This seems
447 * unlikely.
448 */
449int drm_rmmap_ioctl(struct drm_device *dev, void *data,
450 struct drm_file *file_priv)
451{
452 struct drm_map *request = data;
453 drm_local_map_t *map = NULL;
454 struct drm_map_list *r_list;
455 int ret;
456
457 mutex_lock(&dev->struct_mutex);
458 list_for_each_entry(r_list, &dev->maplist, head) {
459 if (r_list->map &&
460 r_list->user_token == (unsigned long)request->handle &&
461 r_list->map->flags & _DRM_REMOVABLE) {
462 map = r_list->map;
463 break;
464 }
465 }
466
467 /* List has wrapped around to the head pointer, or its empty we didn't
468 * find anything.
469 */
470 if (list_empty(&dev->maplist) || !map) {
471 mutex_unlock(&dev->struct_mutex);
472 return -EINVAL;
473 }
474
475 /* Register and framebuffer maps are permanent */
476 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
477 mutex_unlock(&dev->struct_mutex);
478 return 0;
479 }
480
481 ret = drm_rmmap_locked(dev, map);
482
483 mutex_unlock(&dev->struct_mutex);
484
485 return ret;
486}
487
488/**
489 * Cleanup after an error on one of the addbufs() functions.
490 *
491 * \param dev DRM device.
492 * \param entry buffer entry where the error occurred.
493 *
494 * Frees any pages and buffers associated with the given entry.
495 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	/* Free any DMA segments, then the segment list itself.  Callers on
	 * partial-failure paths set seg_count to the full count first so
	 * every allocated (non-NULL) slot gets released. */
	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	/* Free per-buffer private data, then the buffer list itself. */
	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
529
530#if __OS_HAS_AGP
531/**
532 * Add AGP buffers for DMA transfers.
533 *
534 * \param dev struct drm_device to which the buffers are to be added.
535 * \param request pointer to a struct drm_buf_desc describing the request.
536 * \return zero on success or a negative number on failure.
537 *
538 * After some sanity checks creates a drm_buf structure for each buffer and
539 * reallocates the buffer list of the same size order to accommodate the new
540 * buffers.
541 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);	/* log2 of per-buffer size */
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	/* Buffers live in the AGP aperture starting at request->agp_start. */
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	/* An empty dev->agp->memory means user space manages AGP (e.g.
	 * i810), so the ownership check is skipped rather than failing. */
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	/* buf_alloc marks an allocation in progress; decremented on every
	 * exit path below. */
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	/* Carve the aperture range into 'count' buffers of 'alignment'
	 * bytes each and initialize their bookkeeping. */
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* Grow the device-wide buffer pointer array to cover the new bufs. */
	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	/* Report back how many buffers of what size were really created. */
	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
710#endif /* __OS_HAS_AGP */
711
712int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
713{
714 struct drm_device_dma *dma = dev->dma;
715 int count;
716 int order;
717 int size;
718 int total;
719 int page_order;
720 struct drm_buf_entry *entry;
721 drm_dma_handle_t *dmah;
722 struct drm_buf *buf;
723 int alignment;
724 unsigned long offset;
725 int i;
726 int byte_count;
727 int page_count;
728 unsigned long *temp_pagelist;
729 struct drm_buf **temp_buflist;
730
731 if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
732 return -EINVAL;
733
734 if (!dma)
735 return -EINVAL;
736
737 if (!capable(CAP_SYS_ADMIN))
738 return -EPERM;
739
740 count = request->count;
741 order = drm_order(request->size);
742 size = 1 << order;
743
744 DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
745 request->count, request->size, size, order, dev->queue_count);
746
747 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
748 return -EINVAL;
749 if (dev->queue_count)
750 return -EBUSY; /* Not while in use */
751
752 alignment = (request->flags & _DRM_PAGE_ALIGN)
753 ? PAGE_ALIGN(size) : size;
754 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
755 total = PAGE_SIZE << page_order;
756
757 spin_lock(&dev->count_lock);
758 if (dev->buf_use) {
759 spin_unlock(&dev->count_lock);
760 return -EBUSY;
761 }
762 atomic_inc(&dev->buf_alloc);
763 spin_unlock(&dev->count_lock);
764
765 mutex_lock(&dev->struct_mutex);
766 entry = &dma->bufs[order];
767 if (entry->buf_count) {
768 mutex_unlock(&dev->struct_mutex);
769 atomic_dec(&dev->buf_alloc);
770 return -ENOMEM; /* May only call once for each order */
771 }
772
773 if (count < 0 || count > 4096) {
774 mutex_unlock(&dev->struct_mutex);
775 atomic_dec(&dev->buf_alloc);
776 return -EINVAL;
777 }
778
779 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
780 DRM_MEM_BUFS);
781 if (!entry->buflist) {
782 mutex_unlock(&dev->struct_mutex);
783 atomic_dec(&dev->buf_alloc);
784 return -ENOMEM;
785 }
786 memset(entry->buflist, 0, count * sizeof(*entry->buflist));
787
788 entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
789 DRM_MEM_SEGS);
790 if (!entry->seglist) {
791 drm_free(entry->buflist,
792 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
793 mutex_unlock(&dev->struct_mutex);
794 atomic_dec(&dev->buf_alloc);
795 return -ENOMEM;
796 }
797 memset(entry->seglist, 0, count * sizeof(*entry->seglist));
798
799 /* Keep the original pagelist until we know all the allocations
800 * have succeeded
801 */
802 temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
803 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
804 if (!temp_pagelist) {
805 drm_free(entry->buflist,
806 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
807 drm_free(entry->seglist,
808 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
809 mutex_unlock(&dev->struct_mutex);
810 atomic_dec(&dev->buf_alloc);
811 return -ENOMEM;
812 }
813 memcpy(temp_pagelist,
814 dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
815 DRM_DEBUG("pagelist: %d entries\n",
816 dma->page_count + (count << page_order));
817
818 entry->buf_size = size;
819 entry->page_order = page_order;
820 byte_count = 0;
821 page_count = 0;
822
823 while (entry->buf_count < count) {
824
825 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
826
827 if (!dmah) {
828 /* Set count correctly so we free the proper amount. */
829 entry->buf_count = count;
830 entry->seg_count = count;
831 drm_cleanup_buf_error(dev, entry);
832 drm_free(temp_pagelist,
833 (dma->page_count + (count << page_order))
834 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
835 mutex_unlock(&dev->struct_mutex);
836 atomic_dec(&dev->buf_alloc);
837 return -ENOMEM;
838 }
839 entry->seglist[entry->seg_count++] = dmah;
840 for (i = 0; i < (1 << page_order); i++) {
841 DRM_DEBUG("page %d @ 0x%08lx\n",
842 dma->page_count + page_count,
843 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
844 temp_pagelist[dma->page_count + page_count++]
845 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
846 }
847 for (offset = 0;
848 offset + size <= total && entry->buf_count < count;
849 offset += alignment, ++entry->buf_count) {
850 buf = &entry->buflist[entry->buf_count];
851 buf->idx = dma->buf_count + entry->buf_count;
852 buf->total = alignment;
853 buf->order = order;
854 buf->used = 0;
855 buf->offset = (dma->byte_count + byte_count + offset);
856 buf->address = (void *)(dmah->vaddr + offset);
857 buf->bus_address = dmah->busaddr + offset;
858 buf->next = NULL;
859 buf->waiting = 0;
860 buf->pending = 0;
861 init_waitqueue_head(&buf->dma_wait);
862 buf->file_priv = NULL;
863
864 buf->dev_priv_size = dev->driver->dev_priv_size;
865 buf->dev_private = drm_alloc(buf->dev_priv_size,
866 DRM_MEM_BUFS);
867 if (!buf->dev_private) {
868 /* Set count correctly so we free the proper amount. */
869 entry->buf_count = count;
870 entry->seg_count = count;
871 drm_cleanup_buf_error(dev, entry);
872 drm_free(temp_pagelist,
873 (dma->page_count +
874 (count << page_order))
875 * sizeof(*dma->pagelist),
876 DRM_MEM_PAGES);
877 mutex_unlock(&dev->struct_mutex);
878 atomic_dec(&dev->buf_alloc);
879 return -ENOMEM;
880 }
881 memset(buf->dev_private, 0, buf->dev_priv_size);
882
883 DRM_DEBUG("buffer %d @ %p\n",
884 entry->buf_count, buf->address);
885 }
886 byte_count += PAGE_SIZE << page_order;
887 }
888
889 temp_buflist = drm_realloc(dma->buflist,
890 dma->buf_count * sizeof(*dma->buflist),
891 (dma->buf_count + entry->buf_count)
892 * sizeof(*dma->buflist), DRM_MEM_BUFS);
893 if (!temp_buflist) {
894 /* Free the entry because it isn't valid */
895 drm_cleanup_buf_error(dev, entry);
896 drm_free(temp_pagelist,
897 (dma->page_count + (count << page_order))
898 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
899 mutex_unlock(&dev->struct_mutex);
900 atomic_dec(&dev->buf_alloc);
901 return -ENOMEM;
902 }
903 dma->buflist = temp_buflist;
904
905 for (i = 0; i < entry->buf_count; i++) {
906 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
907 }
908
909 /* No allocations failed, so now we can replace the orginal pagelist
910 * with the new one.
911 */
912 if (dma->page_count) {
913 drm_free(dma->pagelist,
914 dma->page_count * sizeof(*dma->pagelist),
915 DRM_MEM_PAGES);
916 }
917 dma->pagelist = temp_pagelist;
918
919 dma->buf_count += entry->buf_count;
920 dma->seg_count += entry->seg_count;
921 dma->page_count += entry->seg_count << page_order;
922 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
923
924 mutex_unlock(&dev->struct_mutex);
925
926 request->count = entry->buf_count;
927 request->size = size;
928
929 if (request->flags & _DRM_PCI_BUFFER_RO)
930 dma->flags = _DRM_DMA_USE_PCI_RO;
931
932 atomic_dec(&dev->buf_alloc);
933 return 0;
934
935}
936EXPORT_SYMBOL(drm_addbufs_pci);
937
/*
 * Allocate DMA buffers backed by the device's scatter/gather area (dev->sg).
 *
 * request->count / request->size describe what userspace asked for; on
 * success they are updated with what was actually allocated.  Returns zero
 * or a negative errno.
 *
 * NOTE(review): this mirrors drm_addbufs_fb() below almost line for line;
 * keep the two in sync when changing either.
 */
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	/* Only drivers that advertise SG support may allocate SG buffers. */
	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Buffer allocation is a privileged operation. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;	/* per-buffer size, rounded up to a power of two */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	/* Despite the name, this is the byte offset of the buffers within the
	 * SG area for this (legacy) interface. */
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* buf_use is raised by consumers (infobufs/mapbufs) to freeze the
	 * buffer set; buf_alloc marks an allocation in flight. */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	/* Carve `count` buffers out of the SG area, `alignment` bytes apart. */
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* Grow the device-wide buffer index to include the new entries. */
	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	/* Report back what was actually allocated. */
	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
1099
/*
 * Allocate DMA buffers carved out of framebuffer memory.
 *
 * Identical in structure to drm_addbufs_sg() above, except the buffers'
 * CPU address is the raw framebuffer offset (no dev->sg translation) and
 * the dma flags are set to _DRM_DMA_USE_FB.  Keep the two in sync.
 */
static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	/* Only drivers that advertise FB DMA may allocate FB buffers. */
	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Buffer allocation is a privileged operation. */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;	/* per-buffer size, rounded up to a power of two */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;	/* byte offset within the FB aperture */

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Fail if consumers froze the buffer set; otherwise mark an
	 * allocation in flight via buf_alloc. */
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	/* Carve `count` buffers out of the FB region, `alignment` bytes apart. */
	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* Grow the device-wide buffer index to include the new entries. */
	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	/* Report back what was actually allocated. */
	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
1259
1260
1261/**
1262 * Add buffers for DMA transfers (ioctl).
1263 *
1264 * \param inode device inode.
1265 * \param file_priv DRM file private.
1266 * \param cmd command.
1267 * \param arg pointer to a struct drm_buf_desc request.
1268 * \return zero on success or a negative number on failure.
1269 *
1270 * According with the memory type specified in drm_buf_desc::flags and the
1271 * build options, it dispatches the call either to addbufs_agp(),
1272 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1273 * PCI memory respectively.
1274 */
1275int drm_addbufs(struct drm_device *dev, void *data,
1276 struct drm_file *file_priv)
1277{
1278 struct drm_buf_desc *request = data;
1279 int ret;
1280
1281 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1282 return -EINVAL;
1283
1284#if __OS_HAS_AGP
1285 if (request->flags & _DRM_AGP_BUFFER)
1286 ret = drm_addbufs_agp(dev, request);
1287 else
1288#endif
1289 if (request->flags & _DRM_SG_BUFFER)
1290 ret = drm_addbufs_sg(dev, request);
1291 else if (request->flags & _DRM_FB_BUFFER)
1292 ret = drm_addbufs_fb(dev, request);
1293 else
1294 ret = drm_addbufs_pci(dev, request);
1295
1296 return ret;
1297}
1298
1299/**
1300 * Get information about the buffer mappings.
1301 *
 * This was originally meant for debugging purposes, or by a sophisticated
1303 * client library to determine how best to use the available buffers (e.g.,
1304 * large buffers can be used for image transfer).
1305 *
1306 * \param inode device inode.
1307 * \param file_priv DRM file private.
1308 * \param cmd command.
1309 * \param arg pointer to a drm_buf_info structure.
1310 * \return zero on success or a negative number on failure.
1311 *
1312 * Increments drm_device::buf_use while holding the drm_device::count_lock
1313 * lock, preventing of allocating more buffers after this call. Information
1314 * about each requested buffer is then copied into user space.
1315 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Refuse while an allocation is in flight; otherwise raise buf_use so
	 * no new allocation can start while userspace relies on this snapshot. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	/* First pass: count the orders that actually have buffers. */
	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	/* Second pass: copy one descriptor per populated order, but only if
	 * the caller supplied enough slots; otherwise just report the count
	 * so userspace can retry with a bigger list. */
	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
1380
1381/**
1382 * Specifies a low and high water mark for buffer allocation
1383 *
1384 * \param inode device inode.
1385 * \param file_priv DRM file private.
1386 * \param cmd command.
1387 * \param arg a pointer to a drm_buf_desc structure.
1388 * \return zero on success or a negative number on failure.
1389 *
1390 * Verifies that the size order is bounded between the admissible orders and
1391 * updates the respective drm_device_dma::bufs entry low and high water mark.
1392 *
1393 * \note This ioctl is deprecated and mostly never used.
1394 */
1395int drm_markbufs(struct drm_device *dev, void *data,
1396 struct drm_file *file_priv)
1397{
1398 struct drm_device_dma *dma = dev->dma;
1399 struct drm_buf_desc *request = data;
1400 int order;
1401 struct drm_buf_entry *entry;
1402
1403 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1404 return -EINVAL;
1405
1406 if (!dma)
1407 return -EINVAL;
1408
1409 DRM_DEBUG("%d, %d, %d\n",
1410 request->size, request->low_mark, request->high_mark);
1411 order = drm_order(request->size);
1412 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1413 return -EINVAL;
1414 entry = &dma->bufs[order];
1415
1416 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1417 return -EINVAL;
1418 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1419 return -EINVAL;
1420
1421 entry->freelist.low_mark = request->low_mark;
1422 entry->freelist.high_mark = request->high_mark;
1423
1424 return 0;
1425}
1426
1427/**
1428 * Unreserve the buffers in list, previously reserved using drmDMA.
1429 *
1430 * \param inode device inode.
1431 * \param file_priv DRM file private.
1432 * \param cmd command.
1433 * \param arg pointer to a drm_buf_free structure.
1434 * \return zero on success or a negative number on failure.
1435 *
1436 * Calls free_buffer() for each used buffer.
1437 * This function is primarily used for debugging.
1438 */
1439int drm_freebufs(struct drm_device *dev, void *data,
1440 struct drm_file *file_priv)
1441{
1442 struct drm_device_dma *dma = dev->dma;
1443 struct drm_buf_free *request = data;
1444 int i;
1445 int idx;
1446 struct drm_buf *buf;
1447
1448 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1449 return -EINVAL;
1450
1451 if (!dma)
1452 return -EINVAL;
1453
1454 DRM_DEBUG("%d\n", request->count);
1455 for (i = 0; i < request->count; i++) {
1456 if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1457 return -EFAULT;
1458 if (idx < 0 || idx >= dma->buf_count) {
1459 DRM_ERROR("Index %d (of %d max)\n",
1460 idx, dma->buf_count - 1);
1461 return -EINVAL;
1462 }
1463 buf = dma->buflist[idx];
1464 if (buf->file_priv != file_priv) {
1465 DRM_ERROR("Process %d freeing buffer not owned\n",
1466 task_pid_nr(current));
1467 return -EINVAL;
1468 }
1469 drm_free_buffer(dev, buf);
1470 }
1471
1472 return 0;
1473}
1474
1475/**
1476 * Maps all of the DMA buffers into client-virtual space (ioctl).
1477 *
1478 * \param inode device inode.
1479 * \param file_priv DRM file private.
1480 * \param cmd command.
1481 * \param arg pointer to a drm_buf_map structure.
1482 * \return zero on success or a negative number on failure.
1483 *
1484 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1485 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1487 * drm_mmap_dma().
1488 */
int drm_mapbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/* Freeze the buffer set: fail if an allocation is in flight, then
	 * raise buf_use so none can start while the mapping is handed out. */
	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	/* Only map when the caller asked for at least all current buffers. */
	if (request->count >= dma->buf_count) {
		/* AGP/SG/FB buffers are mapped through agp_buffer_map; plain
		 * PCI buffers use offset 0, which drm_mmap() treats as the
		 * DMA buffer region. */
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		/* do_mmap() returns a negative errno encoded in the pointer;
		 * anything above -1024UL is an error code, not an address. */
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		/* Describe every buffer to userspace: index, size, used=0,
		 * and its address inside the fresh mapping. */
		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
1579
1580/**
1581 * Compute size order. Returns the exponent of the smaller power of two which
1582 * is greater or equal to given number.
1583 *
1584 * \param size size.
1585 * \return order.
1586 *
1587 * \todo Can be made faster.
1588 */
/*
 * Return the exponent of the smallest power of two >= size
 * (i.e. ceil(log2(size))); returns 0 for size 0 or 1.
 */
int drm_order(unsigned long size)
{
	unsigned long tmp;
	int order = 0;

	/* floor(log2(size)) by counting halvings down to 1. */
	for (tmp = size; tmp > 1; tmp >>= 1)
		order++;

	/* Round up when size is not an exact power of two. */
	if (size & (size - 1))
		order++;

	return order;
}
1601EXPORT_SYMBOL(drm_order);
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
new file mode 100644
index 000000000000..d505f695421f
--- /dev/null
+++ b/drivers/gpu/drm/drm_context.c
@@ -0,0 +1,471 @@
1/**
2 * \file drm_context.c
3 * IOCTLs for generic contexts
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36/*
37 * ChangeLog:
38 * 2001-11-16 Torsten Duwe <duwe@caldera.de>
39 * added context constructor/destructor hooks,
40 * needed by SiS driver's memory management.
41 */
42
43#include "drmP.h"
44
45/******************************************************************/
46/** \name Context bitmap support */
47/*@{*/
48
49/**
50 * Free a handle from the context bitmap.
51 *
52 * \param dev DRM device.
53 * \param ctx_handle context handle.
54 *
55 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
56 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
57 * lock.
58 */
void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
	/* Removing the idr entry releases both the handle and its
	 * context -> SAREA map association, under struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	idr_remove(&dev->ctx_idr, ctx_handle);
	mutex_unlock(&dev->struct_mutex);
}
65
66/**
67 * Context bitmap allocation.
68 *
69 * \param dev DRM device.
70 * \return (non-negative) context handle on success or a negative number on failure.
71 *
72 * Allocate a new idr from drm_device::ctx_idr while holding the
73 * drm_device::struct_mutex lock.
74 */
static int drm_ctxbitmap_next(struct drm_device * dev)
{
	int new_id;
	int ret;

again:
	/* Pre-allocate idr memory outside the mutex; GFP_KERNEL may sleep. */
	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("Out of memory expanding drawable idr\n");
		return -ENOMEM;
	}
	mutex_lock(&dev->struct_mutex);
	/* Allocate an id above the reserved range; the NULL pointer is
	 * replaced later by drm_setsareactx() via idr_replace(). */
	ret = idr_get_new_above(&dev->ctx_idr, NULL,
				DRM_RESERVED_CONTEXTS, &new_id);
	if (ret == -EAGAIN) {
		/* Another thread consumed the pre-allocated node; retry. */
		mutex_unlock(&dev->struct_mutex);
		goto again;
	}
	mutex_unlock(&dev->struct_mutex);
	return new_id;
}
95
96/**
97 * Context bitmap initialization.
98 *
99 * \param dev DRM device.
100 *
101 * Initialise the drm_device::ctx_idr
102 */
int drm_ctxbitmap_init(struct drm_device * dev)
{
	/* idr_init() cannot fail; always returns success. */
	idr_init(&dev->ctx_idr);
	return 0;
}
108
109/**
110 * Context bitmap cleanup.
111 *
112 * \param dev DRM device.
113 *
114 * Free all idr members using drm_ctx_sarea_free helper function
115 * while holding the drm_device::struct_mutex lock.
116 */
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
	/* Drop every remaining context handle in one sweep at teardown. */
	mutex_lock(&dev->struct_mutex);
	idr_remove_all(&dev->ctx_idr);
	mutex_unlock(&dev->struct_mutex);
}
123
124/*@}*/
125
126/******************************************************************/
127/** \name Per Context SAREA Support */
128/*@{*/
129
130/**
131 * Get per-context SAREA.
132 *
133 * \param inode device inode.
134 * \param file_priv DRM file private.
135 * \param cmd command.
136 * \param arg user argument pointing to a drm_ctx_priv_map structure.
137 * \return zero on success or a negative number on failure.
138 *
139 * Gets the map from drm_device::ctx_idr with the handle specified and
140 * returns its handle.
141 */
142int drm_getsareactx(struct drm_device *dev, void *data,
143 struct drm_file *file_priv)
144{
145 struct drm_ctx_priv_map *request = data;
146 struct drm_map *map;
147 struct drm_map_list *_entry;
148
149 mutex_lock(&dev->struct_mutex);
150
151 map = idr_find(&dev->ctx_idr, request->ctx_id);
152 if (!map) {
153 mutex_unlock(&dev->struct_mutex);
154 return -EINVAL;
155 }
156
157 mutex_unlock(&dev->struct_mutex);
158
159 request->handle = NULL;
160 list_for_each_entry(_entry, &dev->maplist, head) {
161 if (_entry->map == map) {
162 request->handle =
163 (void *)(unsigned long)_entry->user_token;
164 break;
165 }
166 }
167 if (request->handle == NULL)
168 return -EINVAL;
169
170 return 0;
171}
172
173/**
174 * Set per-context SAREA.
175 *
176 * \param inode device inode.
177 * \param file_priv DRM file private.
178 * \param cmd command.
179 * \param arg user argument pointing to a drm_ctx_priv_map structure.
180 * \return zero on success or a negative number on failure.
181 *
182 * Searches the mapping specified in \p arg and update the entry in
183 * drm_device::ctx_idr with it.
184 */
185int drm_setsareactx(struct drm_device *dev, void *data,
186 struct drm_file *file_priv)
187{
188 struct drm_ctx_priv_map *request = data;
189 struct drm_map *map = NULL;
190 struct drm_map_list *r_list = NULL;
191
192 mutex_lock(&dev->struct_mutex);
193 list_for_each_entry(r_list, &dev->maplist, head) {
194 if (r_list->map
195 && r_list->user_token == (unsigned long) request->handle)
196 goto found;
197 }
198 bad:
199 mutex_unlock(&dev->struct_mutex);
200 return -EINVAL;
201
202 found:
203 map = r_list->map;
204 if (!map)
205 goto bad;
206
207 if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
208 goto bad;
209
210 mutex_unlock(&dev->struct_mutex);
211
212 return 0;
213}
214
215/*@}*/
216
217/******************************************************************/
218/** \name The actual DRM context handling routines */
219/*@{*/
220
221/**
222 * Switch context.
223 *
224 * \param dev DRM device.
225 * \param old old context handle.
226 * \param new new context handle.
227 * \return zero on success or a negative number on failure.
228 *
229 * Attempt to set drm_device::context_flag.
230 */
231static int drm_context_switch(struct drm_device * dev, int old, int new)
232{
233 if (test_and_set_bit(0, &dev->context_flag)) {
234 DRM_ERROR("Reentering -- FIXME\n");
235 return -EBUSY;
236 }
237
238 DRM_DEBUG("Context switch from %d to %d\n", old, new);
239
240 if (new == dev->last_context) {
241 clear_bit(0, &dev->context_flag);
242 return 0;
243 }
244
245 return 0;
246}
247
248/**
249 * Complete context switch.
250 *
251 * \param dev DRM device.
252 * \param new new context handle.
253 * \return zero on success or a negative number on failure.
254 *
255 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
256 * hardware lock is held, clears the drm_device::context_flag and wakes up
257 * drm_device::context_wait.
258 */
static int drm_context_switch_complete(struct drm_device * dev, int new)
{
	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
	dev->last_switch = jiffies;

	/* The caller is expected to hold the hardware lock; warn (but do not
	 * fail) if it does not. */
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* If a context switch is ever initiated
	   when the kernel holds the lock, release
	   that lock here. */
	clear_bit(0, &dev->context_flag);
	wake_up(&dev->context_wait);

	return 0;
}
276
277/**
278 * Reserve contexts.
279 *
280 * \param inode device inode.
281 * \param file_priv DRM file private.
282 * \param cmd command.
283 * \param arg user argument pointing to a drm_ctx_res structure.
284 * \return zero on success or a negative number on failure.
285 */
286int drm_resctx(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
288{
289 struct drm_ctx_res *res = data;
290 struct drm_ctx ctx;
291 int i;
292
293 if (res->count >= DRM_RESERVED_CONTEXTS) {
294 memset(&ctx, 0, sizeof(ctx));
295 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
296 ctx.handle = i;
297 if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
298 return -EFAULT;
299 }
300 }
301 res->count = DRM_RESERVED_CONTEXTS;
302
303 return 0;
304}
305
306/**
307 * Add context.
308 *
309 * \param inode device inode.
310 * \param file_priv DRM file private.
311 * \param cmd command.
312 * \param arg user argument pointing to a drm_ctx structure.
313 * \return zero on success or a negative number on failure.
314 *
315 * Get a new handle for the context and copy to userspace.
316 */
317int drm_addctx(struct drm_device *dev, void *data,
318 struct drm_file *file_priv)
319{
320 struct drm_ctx_list *ctx_entry;
321 struct drm_ctx *ctx = data;
322
323 ctx->handle = drm_ctxbitmap_next(dev);
324 if (ctx->handle == DRM_KERNEL_CONTEXT) {
325 /* Skip kernel's context and get a new one. */
326 ctx->handle = drm_ctxbitmap_next(dev);
327 }
328 DRM_DEBUG("%d\n", ctx->handle);
329 if (ctx->handle == -1) {
330 DRM_DEBUG("Not enough free contexts.\n");
331 /* Should this return -EBUSY instead? */
332 return -ENOMEM;
333 }
334
335 if (ctx->handle != DRM_KERNEL_CONTEXT) {
336 if (dev->driver->context_ctor)
337 if (!dev->driver->context_ctor(dev, ctx->handle)) {
338 DRM_DEBUG("Running out of ctxs or memory.\n");
339 return -ENOMEM;
340 }
341 }
342
343 ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
344 if (!ctx_entry) {
345 DRM_DEBUG("out of memory\n");
346 return -ENOMEM;
347 }
348
349 INIT_LIST_HEAD(&ctx_entry->head);
350 ctx_entry->handle = ctx->handle;
351 ctx_entry->tag = file_priv;
352
353 mutex_lock(&dev->ctxlist_mutex);
354 list_add(&ctx_entry->head, &dev->ctxlist);
355 ++dev->ctx_count;
356 mutex_unlock(&dev->ctxlist_mutex);
357
358 return 0;
359}
360
/* Legacy ioctl kept for ABI compatibility; intentionally a no-op. */
int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	/* This does nothing */
	return 0;
}
366
367/**
368 * Get context.
369 *
370 * \param inode device inode.
371 * \param file_priv DRM file private.
372 * \param cmd command.
373 * \param arg user argument pointing to a drm_ctx structure.
374 * \return zero on success or a negative number on failure.
375 */
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	/* This is 0, because we don't handle any context flags */
	ctx->flags = 0;

	return 0;
}
385
386/**
387 * Switch context.
388 *
389 * \param inode device inode.
390 * \param file_priv DRM file private.
391 * \param cmd command.
392 * \param arg user argument pointing to a drm_ctx structure.
393 * \return zero on success or a negative number on failure.
394 *
395 * Calls context_switch().
396 */
int drm_switchctx(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	/* Thin ioctl wrapper: begin switching from the current context. */
	return drm_context_switch(dev, dev->last_context, ctx->handle);
}
405
406/**
407 * New context.
408 *
409 * \param inode device inode.
410 * \param file_priv DRM file private.
411 * \param cmd command.
412 * \param arg user argument pointing to a drm_ctx structure.
413 * \return zero on success or a negative number on failure.
414 *
415 * Calls context_switch_complete().
416 */
int drm_newctx(struct drm_device *dev, void *data,
	       struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	/* Finish the switch started by drm_switchctx(); always succeeds. */
	drm_context_switch_complete(dev, ctx->handle);

	return 0;
}
427
428/**
429 * Remove context.
430 *
431 * \param inode device inode.
432 * \param file_priv DRM file private.
433 * \param cmd command.
434 * \param arg user argument pointing to a drm_ctx structure.
435 * \return zero on success or a negative number on failure.
436 *
437 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
438 */
int drm_rmctx(struct drm_device *dev, void *data,
	      struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	/* Handle DRM_KERNEL_CONTEXT + 1 is special-cased here: removing it
	 * also drops the file's authentication on close.
	 * NOTE(review): the reason for this exact handle value is not
	 * visible in this file — confirm against the DRI auth protocol. */
	if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
		file_priv->remove_auth_on_close = 1;
	}
	/* The kernel context itself is never torn down or freed. */
	if (ctx->handle != DRM_KERNEL_CONTEXT) {
		if (dev->driver->context_dtor)
			dev->driver->context_dtor(dev, ctx->handle);
		drm_ctxbitmap_free(dev, ctx->handle);
	}

	/* Drop any matching bookkeeping entries from dev->ctxlist. */
	mutex_lock(&dev->ctxlist_mutex);
	if (!list_empty(&dev->ctxlist)) {
		struct drm_ctx_list *pos, *n;

		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
			if (pos->handle == ctx->handle) {
				list_del(&pos->head);
				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
				--dev->ctx_count;
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);

	return 0;
}
470
471/*@}*/
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
new file mode 100644
index 000000000000..7a8e2fba4678
--- /dev/null
+++ b/drivers/gpu/drm/drm_dma.c
@@ -0,0 +1,180 @@
1/**
2 * \file drm_dma.c
3 * DMA IOCTL and function support
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37
38/**
39 * Initialize the DMA data.
40 *
41 * \param dev DRM device.
42 * \return zero on success or a negative value on failure.
43 *
44 * Allocate and initialize a drm_device_dma structure.
45 */
46int drm_dma_setup(struct drm_device *dev)
47{
48 int i;
49
50 dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
51 if (!dev->dma)
52 return -ENOMEM;
53
54 memset(dev->dma, 0, sizeof(*dev->dma));
55
56 for (i = 0; i <= DRM_MAX_ORDER; i++)
57 memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
58
59 return 0;
60}
61
/**
 * Cleanup the DMA resources.
 *
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 */
void drm_dma_takedown(struct drm_device *dev)
{
	struct drm_device_dma *dma = dev->dma;
	int i, j;

	/* Nothing to do if DMA was never set up for this device. */
	if (!dma)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			/* Release each PCI-consistent segment backing this
			 * order, then the segment list itself. */
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				if (dma->bufs[i].seglist[j]) {
					drm_pci_free(dev, dma->bufs[i].seglist[j]);
				}
			}
			drm_free(dma->bufs[i].seglist,
				 dma->bufs[i].seg_count
				 * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			/* Free per-buffer driver-private data before the
			 * buffer list that owns it. */
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					drm_free(dma->bufs[i].buflist[j].
						 dev_private,
						 dma->bufs[i].buflist[j].
						 dev_priv_size, DRM_MEM_BUFS);
				}
			}
			drm_free(dma->bufs[i].buflist,
				 dma->bufs[i].buf_count *
				 sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
		}
	}

	/* Global buffer and page bookkeeping arrays. */
	if (dma->buflist) {
		drm_free(dma->buflist,
			 dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}
123
124/**
125 * Free a buffer.
126 *
127 * \param dev DRM device.
128 * \param buf buffer to free.
129 *
130 * Resets the fields of \p buf.
131 */
132void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
133{
134 if (!buf)
135 return;
136
137 buf->waiting = 0;
138 buf->pending = 0;
139 buf->file_priv = NULL;
140 buf->used = 0;
141
142 if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
143 && waitqueue_active(&buf->dma_wait)) {
144 wake_up_interruptible(&buf->dma_wait);
145 }
146}
147
148/**
149 * Reclaim the buffers.
150 *
151 * \param file_priv DRM file private.
152 *
153 * Frees each buffer associated with \p file_priv not already on the hardware.
154 */
155void drm_core_reclaim_buffers(struct drm_device *dev,
156 struct drm_file *file_priv)
157{
158 struct drm_device_dma *dma = dev->dma;
159 int i;
160
161 if (!dma)
162 return;
163 for (i = 0; i < dma->buf_count; i++) {
164 if (dma->buflist[i]->file_priv == file_priv) {
165 switch (dma->buflist[i]->list) {
166 case DRM_LIST_NONE:
167 drm_free_buffer(dev, dma->buflist[i]);
168 break;
169 case DRM_LIST_WAIT:
170 dma->buflist[i]->list = DRM_LIST_RECLAIM;
171 break;
172 default:
173 /* Buffer already on hardware. */
174 break;
175 }
176 }
177 }
178}
179
180EXPORT_SYMBOL(drm_core_reclaim_buffers);
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
new file mode 100644
index 000000000000..1839c57663c5
--- /dev/null
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -0,0 +1,192 @@
1/**
2 * \file drm_drawable.c
3 * IOCTLs for drawables
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 * \author Michel Dänzer <michel@tungstengraphics.com>
8 */
9
10/*
11 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
12 *
13 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
14 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
15 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
16 * All Rights Reserved.
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a
19 * copy of this software and associated documentation files (the "Software"),
20 * to deal in the Software without restriction, including without limitation
21 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22 * and/or sell copies of the Software, and to permit persons to whom the
23 * Software is furnished to do so, subject to the following conditions:
24 *
25 * The above copyright notice and this permission notice (including the next
26 * paragraph) shall be included in all copies or substantial portions of the
27 * Software.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
32 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
33 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
34 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
35 * OTHER DEALINGS IN THE SOFTWARE.
36 */
37
38#include "drmP.h"
39
/**
 * Allocate drawable ID and memory to store information about it.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_draw structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 */
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	unsigned long irqflags;
	struct drm_draw *draw = data;
	int new_id = 0;
	int ret;

again:
	/* Preallocate idr memory with GFP_KERNEL outside the spinlock;
	 * the allocation inside the lock below must not sleep. */
	if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("Out of memory expanding drawable idr\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);
	/* IDs are allocated starting at 1.  The stored pointer stays NULL
	 * until drm_update_drawable_info() attaches the info struct. */
	ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
	if (ret == -EAGAIN) {
		/* Preallocation was consumed by a concurrent caller: retry. */
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		goto again;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	draw->handle = new_id;

	DRM_DEBUG("%d\n", draw->handle);

	return 0;
}
71
/**
 * Free drawable ID and memory to store information about it.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_draw structure.
 * \param file_priv DRM file private.
 * \return always zero.
 */
int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_draw *draw = data;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	/* drm_get_drawable_info() may return NULL for a handle that never
	 * received cliprects -- assumes drm_free() tolerates NULL; confirm.
	 * NOTE(review): info->rects is not freed here; presumably
	 * drm_drawable_free_all() reaps leftovers at lastclose, but a
	 * runtime removal appears to leak the cliprect array -- verify. */
	drm_free(drm_get_drawable_info(dev, draw->handle),
		 sizeof(struct drm_drawable_info), DRM_MEM_BUFS);

	idr_remove(&dev->drw_idr, draw->handle);

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
	DRM_DEBUG("%d\n", draw->handle);
	return 0;
}
91
92int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
93{
94 struct drm_update_draw *update = data;
95 unsigned long irqflags;
96 struct drm_clip_rect *rects;
97 struct drm_drawable_info *info;
98 int err;
99
100 info = idr_find(&dev->drw_idr, update->handle);
101 if (!info) {
102 info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
103 if (!info)
104 return -ENOMEM;
105 if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
106 DRM_ERROR("No such drawable %d\n", update->handle);
107 drm_free(info, sizeof(*info), DRM_MEM_BUFS);
108 return -EINVAL;
109 }
110 }
111
112 switch (update->type) {
113 case DRM_DRAWABLE_CLIPRECTS:
114 if (update->num != info->num_rects) {
115 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
116 DRM_MEM_BUFS);
117 } else
118 rects = info->rects;
119
120 if (update->num && !rects) {
121 DRM_ERROR("Failed to allocate cliprect memory\n");
122 err = -ENOMEM;
123 goto error;
124 }
125
126 if (update->num && DRM_COPY_FROM_USER(rects,
127 (struct drm_clip_rect __user *)
128 (unsigned long)update->data,
129 update->num *
130 sizeof(*rects))) {
131 DRM_ERROR("Failed to copy cliprects from userspace\n");
132 err = -EFAULT;
133 goto error;
134 }
135
136 spin_lock_irqsave(&dev->drw_lock, irqflags);
137
138 if (rects != info->rects) {
139 drm_free(info->rects, info->num_rects *
140 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
141 }
142
143 info->rects = rects;
144 info->num_rects = update->num;
145
146 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
147
148 DRM_DEBUG("Updated %d cliprects for drawable %d\n",
149 info->num_rects, update->handle);
150 break;
151 default:
152 DRM_ERROR("Invalid update type %d\n", update->type);
153 return -EINVAL;
154 }
155
156 return 0;
157
158error:
159 if (rects != info->rects)
160 drm_free(rects, update->num * sizeof(struct drm_clip_rect),
161 DRM_MEM_BUFS);
162
163 return err;
164}
165
166/**
167 * Caller must hold the drawable spinlock!
168 */
169struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
170{
171 return idr_find(&dev->drw_idr, id);
172}
173EXPORT_SYMBOL(drm_get_drawable_info);
174
175static int drm_drawable_free(int idr, void *p, void *data)
176{
177 struct drm_drawable_info *info = p;
178
179 if (info) {
180 drm_free(info->rects, info->num_rects *
181 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
182 drm_free(info, sizeof(*info), DRM_MEM_BUFS);
183 }
184
185 return 0;
186}
187
/* Free every drawable's info (and cliprects) and empty the idr.
 * Called from drm_lastclose() with dev->struct_mutex held. */
void drm_drawable_free_all(struct drm_device *dev)
{
	idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
	idr_remove_all(&dev->drw_idr);
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
new file mode 100644
index 000000000000..564138714bb5
--- /dev/null
+++ b/drivers/gpu/drm/drm_drv.c
@@ -0,0 +1,540 @@
1/**
2 * \file drm_drv.c
3 * Generic driver template
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 *
8 * To use this template, you must at least define the following (samples
9 * given for the MGA driver):
10 *
11 * \code
12 * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
13 *
14 * #define DRIVER_NAME "mga"
15 * #define DRIVER_DESC "Matrox G200/G400"
16 * #define DRIVER_DATE "20001127"
17 *
18 * #define drm_x mga_##x
19 * \endcode
20 */
21
22/*
23 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
24 *
25 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
26 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
27 * All Rights Reserved.
28 *
29 * Permission is hereby granted, free of charge, to any person obtaining a
30 * copy of this software and associated documentation files (the "Software"),
31 * to deal in the Software without restriction, including without limitation
32 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
33 * and/or sell copies of the Software, and to permit persons to whom the
34 * Software is furnished to do so, subject to the following conditions:
35 *
36 * The above copyright notice and this permission notice (including the next
37 * paragraph) shall be included in all copies or substantial portions of the
38 * Software.
39 *
40 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
42 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
43 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
44 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
45 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
46 * OTHER DEALINGS IN THE SOFTWARE.
47 */
48
49#include "drmP.h"
50#include "drm_core.h"
51
52static int drm_version(struct drm_device *dev, void *data,
53 struct drm_file *file_priv);
54
/** Ioctl table: maps each core DRM ioctl number to its handler and the
 *  permission flags (DRM_AUTH / DRM_MASTER / DRM_ROOT_ONLY) that
 *  drm_ioctl() enforces before dispatch. */
static struct drm_ioctl_desc drm_ioctls[] = {
	/* Version, identification and device queries. */
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

	/* Authentication (magic cookie) handling. */
	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	/* Memory maps and the SAREA context. */
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	/* Hardware contexts. */
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	/* Drawables. */
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	/* Hardware lock. */
	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	/* DMA buffer management. */
	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	/* AGP management (only when the kernel has AGP support). */
#if __OS_HAS_AGP
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif

	/* Scatter/gather memory. */
	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),

	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};

#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
123
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 * \return always zero.
 *
 * Frees every resource in \p dev: driver state, unique name, IRQ,
 * drawables, magic cookies, AGP/SG memory, vmas, maps, DMA queues and
 * the hardware lock.
 *
 * \sa drm_device
 */
int drm_lastclose(struct drm_device * dev)
{
	struct drm_magic_entry *pt, *next;
	struct drm_map_list *r_list, *list_t;
	struct drm_vma_entry *vma, *vma_temp;
	int i;

	DRM_DEBUG("\n");

	/* Give the driver first shot at its own cleanup. */
	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* IRQ must be torn down before the structures it signals. */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	/* Free drawable information memory */
	drm_drawable_free_all(dev);
	del_timer(&dev->timer);

	/* Clear pid list */
	if (dev->magicfree.next) {
		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
			list_del(&pt->head);
			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		drm_ht_remove(&dev->magiclist);
	}

	/* Clear AGP information */
	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	/* Clear vma list (only built for debugging) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
	}

	/* Remove user-added maps; driver-owned (_DRM_DRIVER) maps persist
	 * until the driver itself unloads. */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (!(r_list->map->flags & _DRM_DRIVER)) {
			drm_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {
			if (dev->queuelist[i]) {
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist = NULL;
	}
	dev->queue_count = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_dma_takedown(dev);

	/* Drop the hardware lock; its backing SHM map was removed above. */
	if (dev->lock.hw_lock) {
		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
		dev->lock.file_priv = NULL;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("lastclose completed\n");
	return 0;
}
238
/**
 * Module initialization.  Called from the driver's module_init hook.
 *
 * \param driver DRM driver whose PCI ID table is scanned.
 * \return always zero.
 *
 * Walks the driver's PCI ID table and manually probes every matching
 * device: the driver is not registered with the PCI core ("stealth
 * mode"), so no automatic probing happens.
 */
int drm_init(struct drm_driver *driver)
{
	struct pci_dev *pdev = NULL;
	struct pci_device_id *pid;
	int i;

	DRM_DEBUG("\n");

	/* The ID table is terminated by an all-zero entry (vendor == 0). */
	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];

		pdev = NULL;
		/* pass back in pdev to account for multiple identical cards */
		while ((pdev =
			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
				       pid->subdevice, pdev)) != NULL) {
			/* stealth mode requires a manual probe */
			/* NOTE(review): pci_get_subsys() already returns a
			 * referenced device; this extra pci_dev_get() appears
			 * to be dropped in drm_minors_cleanup() -- confirm
			 * refcounting balances if drm_get_dev() fails. */
			pci_dev_get(pdev);
			drm_get_dev(pdev, pid, driver);
		}
	}
	return 0;
}

EXPORT_SYMBOL(drm_init);
277
/**
 * Called at module unload time, once per minor via drm_minors_cleanup().
 *
 * \param dev DRM device to tear down; NULL is reported and ignored.
 *
 * Cleans up one DRM device: runs drm_lastclose(), removes the AGP
 * aperture MTRR, frees AGP state, calls the driver's unload hook, and
 * finally drops the minor and the device itself.
 *
 * \sa drm_init
 */
static void drm_cleanup(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_lastclose(dev);

	/* Remove the write-combining MTRR covering the AGP aperture. */
	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->agp_mtrr >= 0) {
		int retval;
		retval = mtrr_del(dev->agp->agp_mtrr,
				  dev->agp->agp_info.aper_base,
				  dev->agp->agp_info.aper_size * 1024 * 1024);
		DRM_DEBUG("mtrr_del=%d\n", retval);
	}

	if (drm_core_has_AGP(dev) && dev->agp) {
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}

	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_ht_remove(&dev->map_hash);
	drm_ctxbitmap_cleanup(dev);

	drm_put_minor(&dev->primary);
	if (drm_put_dev(dev))
		DRM_ERROR("Cannot unload module\n");
}
320
321int drm_minors_cleanup(int id, void *ptr, void *data)
322{
323 struct drm_minor *minor = ptr;
324 struct drm_device *dev;
325 struct drm_driver *driver = data;
326
327 dev = minor->dev;
328 if (minor->dev->driver != driver)
329 return 0;
330
331 if (minor->type != DRM_MINOR_LEGACY)
332 return 0;
333
334 if (dev)
335 pci_dev_put(dev->pdev);
336 drm_cleanup(dev);
337 return 1;
338}
339
/**
 * Called from the driver's module_exit hook at module unload time.
 *
 * \param driver DRM driver being unloaded.
 *
 * Walks the global minor idr and cleans up every legacy minor that
 * belongs to \p driver (see drm_minors_cleanup()).
 *
 * \sa drm_init
 */
void drm_exit(struct drm_driver *driver)
{
	DRM_DEBUG("\n");

	idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);

	DRM_INFO("Module unloaded\n");
}

EXPORT_SYMBOL(drm_exit);
350
/** File operations for the shared DRM major: only open() is provided;
 *  drm_stub_open() presumably re-routes the file to the owning driver's
 *  fops -- see its definition in drm_stub.c. */
static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open
};
356
357static int __init drm_core_init(void)
358{
359 int ret = -ENOMEM;
360
361 idr_init(&drm_minors_idr);
362
363 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
364 goto err_p1;
365
366 drm_class = drm_sysfs_create(THIS_MODULE, "drm");
367 if (IS_ERR(drm_class)) {
368 printk(KERN_ERR "DRM: Error creating drm class.\n");
369 ret = PTR_ERR(drm_class);
370 goto err_p2;
371 }
372
373 drm_proc_root = proc_mkdir("dri", NULL);
374 if (!drm_proc_root) {
375 DRM_ERROR("Cannot create /proc/dri\n");
376 ret = -1;
377 goto err_p3;
378 }
379
380 drm_mem_init();
381
382 DRM_INFO("Initialized %s %d.%d.%d %s\n",
383 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
384 return 0;
385err_p3:
386 drm_sysfs_destroy();
387err_p2:
388 unregister_chrdev(DRM_MAJOR, "drm");
389
390 idr_destroy(&drm_minors_idr);
391err_p1:
392 return ret;
393}
394
/** Core DRM module exit: unwind drm_core_init() in reverse order
 *  (proc root, sysfs class, char major, minor idr). */
static void __exit drm_core_exit(void)
{
	remove_proc_entry("dri", NULL);
	drm_sysfs_destroy();

	unregister_chrdev(DRM_MAJOR, "drm");

	idr_destroy(&drm_minors_idr);
}

module_init(drm_core_init);
module_exit(drm_core_exit);
407
/**
 * Get version information.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_version structure.
 * \param file_priv DRM file private.
 * \return zero on success or negative number on failure.
 *
 * Fills in the driver's version numbers and identification strings.
 */
static int drm_version(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_version *version = data;
	/* 'len' looks unused, but the DRM_COPY() macro below expands to
	 * code that assigns it (and may return -EFAULT) -- do not remove.
	 * See the DRM_COPY definition in drmP.h. */
	int len;

	version->version_major = dev->driver->major;
	version->version_minor = dev->driver->minor;
	version->version_patchlevel = dev->driver->patchlevel;
	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}
434
/**
 * Called whenever a process performs an ioctl on /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 *
 * Looks up the ioctl function in the ::ioctls table (core table or the
 * driver's private range), checks the DRM_ROOT_ONLY / DRM_AUTH /
 * DRM_MASTER permission flags, marshals the argument through a kernel
 * bounce buffer, and dispatches to the respective function.
 */
int drm_ioctl(struct inode *inode, struct file *filp,
	      unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_ioctl_desc *ioctl;
	drm_ioctl_t *func;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	int retcode = -EINVAL;
	char *kdata = NULL;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
		  task_pid_nr(current), cmd, nr,
		  (long)old_encode_dev(file_priv->minor->device),
		  file_priv->authenticated);

	/* Reject numbers outside both the core table and the
	 * driver-private [DRM_COMMAND_BASE, DRM_COMMAND_END) range. */
	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
		goto err_i1;
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
		ioctl = &drm_ioctls[nr];
		/* For core ioctls, override the user-supplied cmd encoding
		 * with the table's own (size/direction bits included). */
		cmd = ioctl->cmd;
	} else
		goto err_i1;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;
	/* is there a local override? */
	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
		func = dev->driver->dma_ioctl;

	if (!func) {
		DRM_DEBUG("no function\n");
		retcode = -EINVAL;
	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
		   ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
		retcode = -EACCES;
	} else {
		/* Bounce the user argument through a kernel buffer sized
		 * by the cmd encoding. */
		if (cmd & (IOC_IN | IOC_OUT)) {
			kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}

		if (cmd & IOC_IN) {
			if (copy_from_user(kdata, (void __user *)arg,
					   _IOC_SIZE(cmd)) != 0) {
				retcode = -EFAULT;
				goto err_i1;
			}
		}
		retcode = func(dev, kdata, file_priv);

		/* Copy results back to userspace only on success. */
		if ((retcode == 0) && (cmd & IOC_OUT)) {
			if (copy_to_user((void __user *)arg, kdata,
					 _IOC_SIZE(cmd)) != 0)
				retcode = -EFAULT;
		}
	}

      err_i1:
	if (kdata)
		kfree(kdata);
	atomic_dec(&dev->ioctl_count);
	if (retcode)
		DRM_DEBUG("ret = %x\n", retcode);
	return retcode;
}

EXPORT_SYMBOL(drm_ioctl);
527
528drm_local_map_t *drm_getsarea(struct drm_device *dev)
529{
530 struct drm_map_list *entry;
531
532 list_for_each_entry(entry, &dev->maplist, head) {
533 if (entry->map && entry->map->type == _DRM_SHM &&
534 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
535 return entry->map;
536 }
537 }
538 return NULL;
539}
540EXPORT_SYMBOL(drm_getsarea);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
new file mode 100644
index 000000000000..d2e6da85f58a
--- /dev/null
+++ b/drivers/gpu/drm/drm_fops.c
@@ -0,0 +1,466 @@
1/**
2 * \file drm_fops.c
3 * File operations for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Daryll Strauss <daryll@valinux.com>
7 * \author Gareth Hughes <gareth@valinux.com>
8 */
9
10/*
11 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
12 *
13 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
14 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
15 * All Rights Reserved.
16 *
17 * Permission is hereby granted, free of charge, to any person obtaining a
18 * copy of this software and associated documentation files (the "Software"),
19 * to deal in the Software without restriction, including without limitation
20 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
21 * and/or sell copies of the Software, and to permit persons to whom the
22 * Software is furnished to do so, subject to the following conditions:
23 *
24 * The above copyright notice and this permission notice (including the next
25 * paragraph) shall be included in all copies or substantial portions of the
26 * Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
31 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
32 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
33 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
34 * OTHER DEALINGS IN THE SOFTWARE.
35 */
36
37#include "drmP.h"
38#include "drm_sarea.h"
39#include <linux/poll.h>
40
41static int drm_open_helper(struct inode *inode, struct file *filp,
42 struct drm_device * dev);
43
/*
 * First-open initialization, called from drm_open() when open_count goes
 * from 0 to 1.  Runs the driver's firstopen() hook, prebuilds the SAREA
 * map, and resets per-device bookkeeping to a clean state.
 * Returns 0 on success or a negative errno from the hook / map creation.
 */
static int drm_setup(struct drm_device * dev)
{
	drm_local_map_t *map;
	int i;
	int ret;
	u32 sareapage;

	/* Driver-specific first-open hook runs before any generic setup. */
	if (dev->driver->firstopen) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	dev->magicfree.next = NULL;

	/* prebuild the SAREA: at least one page, SAREA_MAX if that is larger */
	sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
	i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
		i = drm_dma_setup(dev);
		if (i < 0)
			return i;
	}

	/* Reset all per-device statistics counters. */
	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	/* NOTE(review): drm_ht_create()'s return value is ignored; a failed
	 * allocation would leave magiclist unusable — confirm intended. */
	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&dev->magicfree);

	/* Reset lock, queue, context and DMA bookkeeping to defaults. */
	dev->sigdata.lock = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count = 0;
	dev->queue_reserved = 0;
	dev->queue_slots = 0;
	dev->queuelist = NULL;
	dev->irq_enabled = 0;
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;
	dev->last_context = 0;
	dev->last_switch = 0;
	dev->last_checked = 0;
	init_waitqueue_head(&dev->context_wait);
	dev->if_version = 0;

	dev->ctx_start = 0;
	dev->lck_start = 0;

	dev->buf_async = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	DRM_DEBUG("\n");

	/*
	 * The kernel's context could be created here, but is now created
	 * in drm_dma_enqueue. This is more resource-efficient for
	 * hardware that does not do DMA, but may mean that
	 * drm_select_queue fails between the time the interrupt is
	 * initialized and the time the queues are initialized.
	 */

	return 0;
}
117
/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches the DRM device with the same minor number, calls open_helper(), and
 * increments the device open count. If the open count was previously at zero,
 * i.e., this is the first time the device is opened, then calls drm_setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev = NULL;
	int minor_id = iminor(inode);
	struct drm_minor *minor;
	int retcode = 0;

	/* Map the character-device minor back to its drm_minor/drm_device. */
	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return -ENODEV;

	if (!(dev = minor->dev))
		return -ENODEV;

	retcode = drm_open_helper(inode, filp, dev);
	if (!retcode) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		spin_lock(&dev->count_lock);
		/* First opener runs one-time setup.  count_lock is released
		 * before drm_setup() runs — NOTE(review): presumably because
		 * drm_setup() can sleep; confirm. */
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return drm_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	return retcode;
}
EXPORT_SYMBOL(drm_open);
157
/**
 * File \c open operation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 *
 * Puts the dev->fops corresponding to the device minor number into
 * \p filp, calls the driver's \c open method, and releases the stub fops.
 */
int drm_stub_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev = NULL;
	struct drm_minor *minor;
	int minor_id = iminor(inode);
	int err = -ENODEV;
	const struct file_operations *old_fops;

	DRM_DEBUG("\n");

	/* Resolve minor -> drm_minor -> drm_device. */
	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return -ENODEV;

	if (!(dev = minor->dev))
		return -ENODEV;

	/* Swap in the driver's file_operations and forward the open to it;
	 * on failure restore the previous fops.  Module refcounts on both
	 * fops owners are balanced via fops_get()/fops_put(). */
	old_fops = filp->f_op;
	filp->f_op = fops_get(&dev->driver->fops);
	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
		fops_put(filp->f_op);
		filp->f_op = fops_get(old_fops);
	}
	fops_put(old_fops);

	/* NOTE(review): if the driver fops has no .open, err stays -ENODEV
	 * even though the fops swap succeeded — confirm this is intended. */
	return err;
}
194
/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__i386__)
	if (boot_cpu_data.x86 == 3)
		return 0;	/* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* No cmpxchg before v9 sparc. */
#endif
	/* All other architectures are assumed capable. */
	return 1;
}
211
/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it into the doubly linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
			   struct drm_device * dev)
{
	int minor_id = iminor(inode);
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);

	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
	if (!priv)
		return -ENOMEM;

	/* Start from a zeroed drm_file, then fill in the identity fields. */
	memset(priv, 0, sizeof(*priv));
	filp->private_data = priv;
	priv->filp = filp;
	priv->uid = current->euid;
	priv->pid = task_pid_nr(current);
	priv->minor = idr_find(&drm_minors_idr, minor_id);
	priv->ioctl_count = 0;
	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);

	/* Optional per-driver open hook; failure unwinds the allocation. */
	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)
			goto out_free;
	}

	mutex_lock(&dev->struct_mutex);
	/* The first opener of the device becomes its master. */
	if (list_empty(&dev->filelist))
		priv->master = 1;

	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->struct_mutex);

#ifdef __alpha__
	/*
	 * Default the hose: take it from the first VGA-class PCI device,
	 * falling back to the first root bus if none is found.
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = pci_bus_b(pci_root_buses.next);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
      out_free:
	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
	filp->private_data = NULL;
	return ret;
}
292
293/** No-op. */
294int drm_fasync(int fd, struct file *filp, int on)
295{
296 struct drm_file *priv = filp->private_data;
297 struct drm_device *dev = priv->minor->dev;
298 int retcode;
299
300 DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
301 (long)old_encode_dev(priv->minor->device));
302 retcode = fasync_helper(fd, filp, on, &dev->buf_async);
303 if (retcode < 0)
304 return retcode;
305 return 0;
306}
307EXPORT_SYMBOL(drm_fasync);
308
/**
 * Release file.
 *
 * \param inode device inode
 * \param filp file pointer (the DRM file private lives in filp->private_data).
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	int retcode = 0;

	/* Serialize the whole teardown under the big kernel lock. */
	lock_kernel();

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	/* Let the driver clean up before generic buffer reclamation. */
	if (dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file_priv->minor->device),
		  dev->open_count);

	/* Drivers that must reclaim while holding the HW lock: if we already
	 * own the lock, reclaim directly; otherwise take the idlelock and
	 * poll for up to 3 * DRM_HZ jiffies before reclaiming anyway. */
	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
		if (drm_i_have_hw_lock(dev, file_priv)) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
		} else {
			unsigned long endtime = jiffies + 3 * DRM_HZ;
			int locked = 0;

			drm_idlelock_take(&dev->lock);

			/*
			 * Wait for a while.
			 */

			do{
				spin_lock_bh(&dev->lock.spinlock);
				locked = dev->lock.idle_has_lock;
				spin_unlock_bh(&dev->lock.spinlock);
				if (locked)
					break;
				schedule();
			} while (!time_after_eq(jiffies, endtime));

			if (!locked) {
				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
					  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
					  "\tI will go on reclaiming the buffers anyway.\n");
			}

			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_idlelock_release(&dev->lock);
		}
	}

	/* Drivers using the idlelock variant always reclaim under it. */
	if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {

		drm_idlelock_take(&dev->lock);
		dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
		drm_idlelock_release(&dev->lock);

	}

	/* Release the hardware lock if this file still holds it. */
	if (drm_i_have_hw_lock(dev, file_priv)) {
		DRM_DEBUG("File %p released, freeing lock for context %d\n",
			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		drm_lock_free(&dev->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	}


	/* Generic DMA-buffer reclamation, unless the driver already did the
	 * locked variant above. */
	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked) {
		dev->driver->reclaim_buffers(dev, file_priv);
	}

	/* Remove this file from the fasync notification list. */
	drm_fasync(-1, filp, 0);

	/* Destroy every context this file created (except the kernel one). */
	mutex_lock(&dev->ctxlist_mutex);
	if (!list_empty(&dev->ctxlist)) {
		struct drm_ctx_list *pos, *n;

		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
			if (pos->tag == file_priv &&
			    pos->handle != DRM_KERNEL_CONTEXT) {
				if (dev->driver->context_dtor)
					dev->driver->context_dtor(dev,
								  pos->handle);

				drm_ctxbitmap_free(dev, pos->handle);

				list_del(&pos->head);
				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
				--dev->ctx_count;
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);

	mutex_lock(&dev->struct_mutex);
	/* Optionally revoke authentication from all remaining clients. */
	if (file_priv->remove_auth_on_close == 1) {
		struct drm_file *temp;

		list_for_each_entry(temp, &dev->filelist, lhead)
			temp->authenticated = 0;
	}
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->struct_mutex);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);
	drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	spin_lock(&dev->count_lock);
	/* Last closer: refuse if ioctls are still in flight, otherwise run
	 * the device-wide lastclose teardown. */
	if (!--dev->open_count) {
		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
			DRM_ERROR("Device busy: %d %d\n",
				  atomic_read(&dev->ioctl_count), dev->blocked);
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock(&dev->count_lock);
		unlock_kernel();
		return drm_lastclose(dev);
	}
	spin_unlock(&dev->count_lock);

	unlock_kernel();

	return retcode;
}
EXPORT_SYMBOL(drm_release);
460
/** No-op poll: always reports no events ready (mask 0). */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	return 0;
}
EXPORT_SYMBOL(drm_poll);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
new file mode 100644
index 000000000000..33160673a7b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -0,0 +1,202 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple open hash tab implementation.
30 *
31 * Authors:
32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
33 */
34
35#include "drmP.h"
36#include "drm_hashtab.h"
37#include <linux/hash.h>
38
39int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
40{
41 unsigned int i;
42
43 ht->size = 1 << order;
44 ht->order = order;
45 ht->fill = 0;
46 ht->table = NULL;
47 ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
48 if (!ht->use_vmalloc) {
49 ht->table = drm_calloc(ht->size, sizeof(*ht->table),
50 DRM_MEM_HASHTAB);
51 }
52 if (!ht->table) {
53 ht->use_vmalloc = 1;
54 ht->table = vmalloc(ht->size*sizeof(*ht->table));
55 }
56 if (!ht->table) {
57 DRM_ERROR("Out of memory for hash table\n");
58 return -ENOMEM;
59 }
60 for (i=0; i< ht->size; ++i) {
61 INIT_HLIST_HEAD(&ht->table[i]);
62 }
63 return 0;
64}
65
66void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
67{
68 struct drm_hash_item *entry;
69 struct hlist_head *h_list;
70 struct hlist_node *list;
71 unsigned int hashed_key;
72 int count = 0;
73
74 hashed_key = hash_long(key, ht->order);
75 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
76 h_list = &ht->table[hashed_key];
77 hlist_for_each(list, h_list) {
78 entry = hlist_entry(list, struct drm_hash_item, head);
79 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
80 }
81}
82
83static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
84 unsigned long key)
85{
86 struct drm_hash_item *entry;
87 struct hlist_head *h_list;
88 struct hlist_node *list;
89 unsigned int hashed_key;
90
91 hashed_key = hash_long(key, ht->order);
92 h_list = &ht->table[hashed_key];
93 hlist_for_each(list, h_list) {
94 entry = hlist_entry(list, struct drm_hash_item, head);
95 if (entry->key == key)
96 return list;
97 if (entry->key > key)
98 break;
99 }
100 return NULL;
101}
102
103
104int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
105{
106 struct drm_hash_item *entry;
107 struct hlist_head *h_list;
108 struct hlist_node *list, *parent;
109 unsigned int hashed_key;
110 unsigned long key = item->key;
111
112 hashed_key = hash_long(key, ht->order);
113 h_list = &ht->table[hashed_key];
114 parent = NULL;
115 hlist_for_each(list, h_list) {
116 entry = hlist_entry(list, struct drm_hash_item, head);
117 if (entry->key == key)
118 return -EINVAL;
119 if (entry->key > key)
120 break;
121 parent = list;
122 }
123 if (parent) {
124 hlist_add_after(parent, &item->head);
125 } else {
126 hlist_add_head(&item->head, h_list);
127 }
128 return 0;
129}
130
131/*
132 * Just insert an item and return any "bits" bit key that hasn't been
133 * used before.
134 */
135int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
136 unsigned long seed, int bits, int shift,
137 unsigned long add)
138{
139 int ret;
140 unsigned long mask = (1 << bits) - 1;
141 unsigned long first, unshifted_key;
142
143 unshifted_key = hash_long(seed, bits);
144 first = unshifted_key;
145 do {
146 item->key = (unshifted_key << shift) + add;
147 ret = drm_ht_insert_item(ht, item);
148 if (ret)
149 unshifted_key = (unshifted_key + 1) & mask;
150 } while(ret && (unshifted_key != first));
151
152 if (ret) {
153 DRM_ERROR("Available key bit space exhausted\n");
154 return -EINVAL;
155 }
156 return 0;
157}
158
159int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
160 struct drm_hash_item **item)
161{
162 struct hlist_node *list;
163
164 list = drm_ht_find_key(ht, key);
165 if (!list)
166 return -EINVAL;
167
168 *item = hlist_entry(list, struct drm_hash_item, head);
169 return 0;
170}
171
172int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
173{
174 struct hlist_node *list;
175
176 list = drm_ht_find_key(ht, key);
177 if (list) {
178 hlist_del_init(list);
179 ht->fill--;
180 return 0;
181 }
182 return -EINVAL;
183}
184
/* Unlink a known-present item directly (no lookup) and drop the fill count. */
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
	hlist_del_init(&item->head);
	ht->fill--;
	return 0;
}
191
192void drm_ht_remove(struct drm_open_hash *ht)
193{
194 if (ht->table) {
195 if (ht->use_vmalloc)
196 vfree(ht->table);
197 else
198 drm_free(ht->table, ht->size * sizeof(*ht->table),
199 DRM_MEM_HASHTAB);
200 ht->table = NULL;
201 }
202}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
new file mode 100644
index 000000000000..90f5a8d9bdcb
--- /dev/null
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -0,0 +1,1073 @@
1/**
2 * \file drm_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the DRM.
5 *
6 * \author Paul Mackerras <paulus@samba.org>
7 *
8 * Copyright (C) Paul Mackerras 2005.
9 * All Rights Reserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30#include <linux/compat.h>
31
32#include "drmP.h"
33#include "drm_core.h"
34
35#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
36#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
37#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t)
38#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t)
39#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t)
40
41#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t)
42#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t)
43#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t)
44#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t)
45#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t)
46#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t)
47#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t)
48
49#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t)
50
51#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
52#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
53
54#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t)
55#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t)
56
57#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t)
58#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t)
59#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t)
60#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t)
61#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t)
62#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t)
63
64#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
65#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
66
67#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
68
69typedef struct drm_version_32 {
70 int version_major; /**< Major version */
71 int version_minor; /**< Minor version */
72 int version_patchlevel; /**< Patch level */
73 u32 name_len; /**< Length of name buffer */
74 u32 name; /**< Name of driver */
75 u32 date_len; /**< Length of date buffer */
76 u32 date; /**< User-space buffer to hold date */
77 u32 desc_len; /**< Length of desc buffer */
78 u32 desc; /**< User-space buffer to hold desc */
79} drm_version32_t;
80
/*
 * 32-bit DRM_IOCTL_VERSION: widen drm_version32_t into a native
 * struct drm_version in the compat user-space scratch area, forward to
 * drm_ioctl(), then narrow the results back to the 32-bit layout.
 */
static int compat_drm_version(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_version32_t v32;
	struct drm_version __user *version;
	int err;

	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
		return -EFAULT;

	/* Scratch space in the user address range for the native struct. */
	version = compat_alloc_user_space(sizeof(*version));
	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
		return -EFAULT;
	/* u32 "pointer" fields are widened into real user pointers. */
	if (__put_user(v32.name_len, &version->name_len)
	    || __put_user((void __user *)(unsigned long)v32.name,
			  &version->name)
	    || __put_user(v32.date_len, &version->date_len)
	    || __put_user((void __user *)(unsigned long)v32.date,
			  &version->date)
	    || __put_user(v32.desc_len, &version->desc_len)
	    || __put_user((void __user *)(unsigned long)v32.desc,
			  &version->desc))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_VERSION, (unsigned long)version);
	if (err)
		return err;

	/* Read back the fields the native ioctl filled in. */
	if (__get_user(v32.version_major, &version->version_major)
	    || __get_user(v32.version_minor, &version->version_minor)
	    || __get_user(v32.version_patchlevel, &version->version_patchlevel)
	    || __get_user(v32.name_len, &version->name_len)
	    || __get_user(v32.date_len, &version->date_len)
	    || __get_user(v32.desc_len, &version->desc_len))
		return -EFAULT;

	if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
		return -EFAULT;
	return 0;
}
122
123typedef struct drm_unique32 {
124 u32 unique_len; /**< Length of unique */
125 u32 unique; /**< Unique name for driver instantiation */
126} drm_unique32_t;
127
/*
 * 32-bit DRM_IOCTL_GET_UNIQUE: forward through a native struct drm_unique
 * in compat scratch space; only unique_len can change, the name is written
 * directly into the caller-supplied buffer.
 */
static int compat_drm_getunique(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_unique32_t uq32;
	struct drm_unique __user *u;
	int err;

	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
		return -EFAULT;

	u = compat_alloc_user_space(sizeof(*u));
	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
		return -EFAULT;
	if (__put_user(uq32.unique_len, &u->unique_len)
	    || __put_user((void __user *)(unsigned long)uq32.unique,
			  &u->unique))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
	if (err)
		return err;

	/* Propagate the (possibly updated) length back to userspace. */
	if (__get_user(uq32.unique_len, &u->unique_len))
		return -EFAULT;
	if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
		return -EFAULT;
	return 0;
}
157
/*
 * 32-bit DRM_IOCTL_SET_UNIQUE: purely input — widen the 32-bit struct and
 * forward; nothing is copied back.
 */
static int compat_drm_setunique(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_unique32_t uq32;
	struct drm_unique __user *u;

	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
		return -EFAULT;

	u = compat_alloc_user_space(sizeof(*u));
	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
		return -EFAULT;
	if (__put_user(uq32.unique_len, &u->unique_len)
	    || __put_user((void __user *)(unsigned long)uq32.unique,
			  &u->unique))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
}
178
179typedef struct drm_map32 {
180 u32 offset; /**< Requested physical address (0 for SAREA)*/
181 u32 size; /**< Requested physical size (bytes) */
182 enum drm_map_type type; /**< Type of memory to map */
183 enum drm_map_flags flags; /**< Flags */
184 u32 handle; /**< User-space: "Handle" to pass to mmap() */
185 int mtrr; /**< MTRR slot used */
186} drm_map32_t;
187
/*
 * 32-bit DRM_IOCTL_GET_MAP: the 32-bit offset field carries the lookup
 * index on input; the native result is narrowed back, truncating the
 * kernel map handle pointer to 32 bits.
 */
static int compat_drm_getmap(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_map32_t __user *argp = (void __user *)arg;
	drm_map32_t m32;
	struct drm_map __user *map;
	int idx, err;
	void *handle;

	/* NOTE(review): idx is read from the u32 offset field — GET_MAP
	 * uses offset as an index on input; confirm against drm_getmap(). */
	if (get_user(idx, &argp->offset))
		return -EFAULT;

	map = compat_alloc_user_space(sizeof(*map));
	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
		return -EFAULT;
	if (__put_user(idx, &map->offset))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_GET_MAP, (unsigned long)map);
	if (err)
		return err;

	if (__get_user(m32.offset, &map->offset)
	    || __get_user(m32.size, &map->size)
	    || __get_user(m32.type, &map->type)
	    || __get_user(m32.flags, &map->flags)
	    || __get_user(handle, &map->handle)
	    || __get_user(m32.mtrr, &map->mtrr))
		return -EFAULT;

	/* Handle is narrowed to 32 bits for the compat caller. */
	m32.handle = (unsigned long)handle;
	if (copy_to_user(argp, &m32, sizeof(m32)))
		return -EFAULT;
	return 0;

}
225
/*
 * 32-bit DRM_IOCTL_ADD_MAP: widen the request, forward, and narrow the
 * resulting handle; warn (rate-limited) when the 64-bit handle does not
 * fit in the 32-bit field.
 */
static int compat_drm_addmap(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_map32_t __user *argp = (void __user *)arg;
	drm_map32_t m32;
	struct drm_map __user *map;
	int err;
	void *handle;

	if (copy_from_user(&m32, argp, sizeof(m32)))
		return -EFAULT;

	map = compat_alloc_user_space(sizeof(*map));
	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
		return -EFAULT;
	if (__put_user(m32.offset, &map->offset)
	    || __put_user(m32.size, &map->size)
	    || __put_user(m32.type, &map->type)
	    || __put_user(m32.flags, &map->flags))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_ADD_MAP, (unsigned long)map);
	if (err)
		return err;

	if (__get_user(m32.offset, &map->offset)
	    || __get_user(m32.mtrr, &map->mtrr)
	    || __get_user(handle, &map->handle))
		return -EFAULT;

	/* Detect a handle that was truncated by the u32 narrowing. */
	m32.handle = (unsigned long)handle;
	if (m32.handle != (unsigned long)handle && printk_ratelimit())
		printk(KERN_ERR "compat_drm_addmap truncated handle"
		       " %p for type %d offset %x\n",
		       handle, m32.type, m32.offset);

	if (copy_to_user(argp, &m32, sizeof(m32)))
		return -EFAULT;

	return 0;
}
268
/*
 * 32-bit DRM_IOCTL_RM_MAP: only the handle matters — widen it into a
 * native struct drm_map and forward.
 */
static int compat_drm_rmmap(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	drm_map32_t __user *argp = (void __user *)arg;
	struct drm_map __user *map;
	u32 handle;

	if (get_user(handle, &argp->handle))
		return -EFAULT;

	map = compat_alloc_user_space(sizeof(*map));
	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
		return -EFAULT;
	if (__put_user((void *)(unsigned long)handle, &map->handle))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RM_MAP, (unsigned long)map);
}
288
289typedef struct drm_client32 {
290 int idx; /**< Which client desired? */
291 int auth; /**< Is client authenticated? */
292 u32 pid; /**< Process ID */
293 u32 uid; /**< User ID */
294 u32 magic; /**< Magic */
295 u32 iocs; /**< Ioctl count */
296} drm_client32_t;
297
298static int compat_drm_getclient(struct file *file, unsigned int cmd,
299 unsigned long arg)
300{
301 drm_client32_t c32;
302 drm_client32_t __user *argp = (void __user *)arg;
303 struct drm_client __user *client;
304 int idx, err;
305
306 if (get_user(idx, &argp->idx))
307 return -EFAULT;
308
309 client = compat_alloc_user_space(sizeof(*client));
310 if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
311 return -EFAULT;
312 if (__put_user(idx, &client->idx))
313 return -EFAULT;
314
315 err = drm_ioctl(file->f_path.dentry->d_inode, file,
316 DRM_IOCTL_GET_CLIENT, (unsigned long)client);
317 if (err)
318 return err;
319
320 if (__get_user(c32.auth, &client->auth)
321 || __get_user(c32.pid, &client->pid)
322 || __get_user(c32.uid, &client->uid)
323 || __get_user(c32.magic, &client->magic)
324 || __get_user(c32.iocs, &client->iocs))
325 return -EFAULT;
326
327 if (copy_to_user(argp, &c32, sizeof(c32)))
328 return -EFAULT;
329 return 0;
330}
331
332typedef struct drm_stats32 {
333 u32 count;
334 struct {
335 u32 value;
336 enum drm_stat_type type;
337 } data[15];
338} drm_stats32_t;
339
/*
 * 32-bit DRM_IOCTL_GET_STATS: the request carries no input, so go
 * straight to the native ioctl and narrow all 15 counter slots back.
 */
static int compat_drm_getstats(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_stats32_t s32;
	drm_stats32_t __user *argp = (void __user *)arg;
	struct drm_stats __user *stats;
	int i, err;

	stats = compat_alloc_user_space(sizeof(*stats));
	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_GET_STATS, (unsigned long)stats);
	if (err)
		return err;

	/* All named fields of s32 are written below; NOTE(review): any
	 * struct padding in drm_stats32_t would still be uninitialized
	 * when copied out — confirm the layout has none. */
	if (__get_user(s32.count, &stats->count))
		return -EFAULT;
	for (i = 0; i < 15; ++i)
		if (__get_user(s32.data[i].value, &stats->data[i].value)
		    || __get_user(s32.data[i].type, &stats->data[i].type))
			return -EFAULT;

	if (copy_to_user(argp, &s32, sizeof(s32)))
		return -EFAULT;
	return 0;
}
368
369typedef struct drm_buf_desc32 {
370 int count; /**< Number of buffers of this size */
371 int size; /**< Size in bytes */
372 int low_mark; /**< Low water mark */
373 int high_mark; /**< High water mark */
374 int flags;
375 u32 agp_start; /**< Start address in the AGP aperture */
376} drm_buf_desc32_t;
377
/*
 * 32-bit DRM_IOCTL_ADD_BUFS: the leading fields (count..flags) are
 * layout-identical in both structs, so they are block-copied with
 * __copy_in_user; only agp_start needs u32 <-> unsigned long widening.
 */
static int compat_drm_addbufs(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_buf_desc32_t __user *argp = (void __user *)arg;
	struct drm_buf_desc __user *buf;
	int err;
	unsigned long agp_start;

	buf = compat_alloc_user_space(sizeof(*buf));
	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
		return -EFAULT;

	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
	    || __get_user(agp_start, &argp->agp_start)
	    || __put_user(agp_start, &buf->agp_start))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
	if (err)
		return err;

	/* Mirror the native result back into the 32-bit layout. */
	if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
	    || __get_user(agp_start, &buf->agp_start)
	    || __put_user(agp_start, &argp->agp_start))
		return -EFAULT;

	return 0;
}
408
/*
 * DRM_IOCTL_MARK_BUFS compat wrapper.  Only size/low_mark/high_mark are
 * consumed by the native ioctl, so only those fields are forwarded.
 */
static int compat_drm_markbufs(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_buf_desc32_t b32;
	drm_buf_desc32_t __user *argp = (void __user *)arg;
	struct drm_buf_desc __user *buf;

	if (copy_from_user(&b32, argp, sizeof(b32)))
		return -EFAULT;

	buf = compat_alloc_user_space(sizeof(*buf));
	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
		return -EFAULT;

	if (__put_user(b32.size, &buf->size)
	    || __put_user(b32.low_mark, &buf->low_mark)
	    || __put_user(b32.high_mark, &buf->high_mark))
		return -EFAULT;

	/* Nothing to copy back: MARK_BUFS is input-only. */
	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
}
431
/* 32-bit layout of struct drm_buf_info: list is a 32-bit user pointer. */
typedef struct drm_buf_info32 {
	int count;		/**< Entries in list */
	u32 list;
} drm_buf_info32_t;

/*
 * DRM_IOCTL_INFO_BUFS compat wrapper.  A native request plus a native
 * drm_buf_desc array are carved out of one compat_alloc_user_space()
 * allocation; on success each native entry is narrowed into the caller's
 * 32-bit array.
 *
 * NOTE(review): nbytes is derived from the user-supplied count; overflow
 * protection is presumably provided by compat_alloc_user_space()/access_ok
 * failing for oversized requests — confirm for very large count values.
 */
static int compat_drm_infobufs(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_buf_info32_t req32;
	drm_buf_info32_t __user *argp = (void __user *)arg;
	drm_buf_desc32_t __user *to;
	struct drm_buf_info __user *request;
	struct drm_buf_desc __user *list;
	size_t nbytes;
	int i, err;
	int count, actual;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	count = req32.count;
	to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
	/* A negative count degenerates to a pure size query. */
	if (count < 0)
		count = 0;
	if (count > 0
	    && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
		return -EFAULT;

	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
	request = compat_alloc_user_space(nbytes);
	if (!access_ok(VERIFY_WRITE, request, nbytes))
		return -EFAULT;
	/* Native descriptor array lives immediately after the request. */
	list = (struct drm_buf_desc *) (request + 1);

	if (__put_user(count, &request->count)
	    || __put_user(list, &request->list))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_INFO_BUFS, (unsigned long)request);
	if (err)
		return err;

	if (__get_user(actual, &request->count))
		return -EFAULT;
	/* Only copy entries back when the caller's array was big enough. */
	if (count >= actual)
		for (i = 0; i < actual; ++i)
			if (__copy_in_user(&to[i], &list[i],
					   offsetof(struct drm_buf_desc, flags)))
				return -EFAULT;

	if (__put_user(actual, &argp->count))
		return -EFAULT;

	return 0;
}
488
/* 32-bit layout of struct drm_buf_pub: address is a 32-bit user pointer. */
typedef struct drm_buf_pub32 {
	int idx;		/**< Index into the master buffer list */
	int total;		/**< Buffer size */
	int used;		/**< Amount of buffer in use (for DMA) */
	u32 address;		/**< Address of buffer */
} drm_buf_pub32_t;

/* 32-bit layout of struct drm_buf_map: virtual and list are 32-bit
 * user pointers. */
typedef struct drm_buf_map32 {
	int count;		/**< Length of the buffer list */
	u32 virtual;		/**< Mmap'd area in user-virtual */
	u32 list;		/**< Buffer information */
} drm_buf_map32_t;

/*
 * DRM_IOCTL_MAP_BUFS compat wrapper.  Like compat_drm_infobufs(), the
 * native request and its drm_buf_pub array share one scratch allocation;
 * each returned entry and the mmap'd base address are narrowed to 32 bits
 * for the caller.
 */
static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_buf_map32_t __user *argp = (void __user *)arg;
	drm_buf_map32_t req32;
	drm_buf_pub32_t __user *list32;
	struct drm_buf_map __user *request;
	struct drm_buf_pub __user *list;
	int i, err;
	int count, actual;
	size_t nbytes;
	void __user *addr;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;
	count = req32.count;
	list32 = (void __user *)(unsigned long)req32.list;

	if (count < 0)
		return -EINVAL;
	/* NOTE(review): nbytes comes from a user-controlled count; confirm
	 * compat_alloc_user_space()/access_ok reject oversized requests. */
	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
	request = compat_alloc_user_space(nbytes);
	if (!access_ok(VERIFY_WRITE, request, nbytes))
		return -EFAULT;
	/* Native buffer array lives immediately after the request. */
	list = (struct drm_buf_pub *) (request + 1);

	if (__put_user(count, &request->count)
	    || __put_user(list, &request->list))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_MAP_BUFS, (unsigned long)request);
	if (err)
		return err;

	if (__get_user(actual, &request->count))
		return -EFAULT;
	/* Only copy entries back when the caller's array was big enough. */
	if (count >= actual)
		for (i = 0; i < actual; ++i)
			if (__copy_in_user(&list32[i], &list[i],
					   offsetof(struct drm_buf_pub, address))
			    || __get_user(addr, &list[i].address)
			    || __put_user((unsigned long)addr,
					  &list32[i].address))
				return -EFAULT;

	if (__put_user(actual, &argp->count)
	    || __get_user(addr, &request->virtual)
	    || __put_user((unsigned long)addr, &argp->virtual))
		return -EFAULT;

	return 0;
}
555
/* 32-bit layout of struct drm_buf_free: list is a 32-bit user pointer. */
typedef struct drm_buf_free32 {
	int count;
	u32 list;
} drm_buf_free32_t;

/*
 * DRM_IOCTL_FREE_BUFS compat wrapper: widen the 32-bit list pointer into
 * a native request and forward it.  Input-only; nothing is copied back.
 */
static int compat_drm_freebufs(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_buf_free32_t req32;
	struct drm_buf_free __user *request;
	drm_buf_free32_t __user *argp = (void __user *)arg;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;
	if (__put_user(req32.count, &request->count)
	    || __put_user((int __user *)(unsigned long)req32.list,
			  &request->list))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_FREE_BUFS, (unsigned long)request);
}
582
/* 32-bit layout of struct drm_ctx_priv_map: handle is a 32-bit pointer. */
typedef struct drm_ctx_priv_map32 {
	unsigned int ctx_id;	/**< Context requesting private mapping */
	u32 handle;		/**< Handle of map */
} drm_ctx_priv_map32_t;

/*
 * DRM_IOCTL_SET_SAREA_CTX compat wrapper: widen the 32-bit map handle
 * into a native request and forward it.  Input-only.
 */
static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_ctx_priv_map32_t req32;
	struct drm_ctx_priv_map __user *request;
	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;
	if (__put_user(req32.ctx_id, &request->ctx_id)
	    || __put_user((void *)(unsigned long)req32.handle,
			  &request->handle))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
}
609
/*
 * DRM_IOCTL_GET_SAREA_CTX compat wrapper: forward ctx_id, then narrow the
 * returned map handle to 32 bits for the caller.
 */
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct drm_ctx_priv_map __user *request;
	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
	int err;
	unsigned int ctx_id;
	void *handle;

	if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
	    || __get_user(ctx_id, &argp->ctx_id))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;
	if (__put_user(ctx_id, &request->ctx_id))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
	if (err)
		return err;

	/* The handle is a user token, so truncation to 32 bits is the
	 * intended narrowing for a compat caller. */
	if (__get_user(handle, &request->handle)
	    || __put_user((unsigned long)handle, &argp->handle))
		return -EFAULT;

	return 0;
}
640
/* 32-bit layout of union drm_ctx_res: contexts is a 32-bit user pointer. */
typedef struct drm_ctx_res32 {
	int count;
	u32 contexts;
} drm_ctx_res32_t;

/*
 * DRM_IOCTL_RES_CTX compat wrapper.  The contexts array itself has the
 * same layout for 32- and 64-bit callers, so only the pointer is widened
 * in and only count is copied back out.
 */
static int compat_drm_resctx(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_ctx_res32_t __user *argp = (void __user *)arg;
	drm_ctx_res32_t res32;
	struct drm_ctx_res __user *res;
	int err;

	if (copy_from_user(&res32, argp, sizeof(res32)))
		return -EFAULT;

	res = compat_alloc_user_space(sizeof(*res));
	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
		return -EFAULT;
	if (__put_user(res32.count, &res->count)
	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
			  &res->contexts))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_RES_CTX, (unsigned long)res);
	if (err)
		return err;

	if (__get_user(res32.count, &res->count)
	    || __put_user(res32.count, &argp->count))
		return -EFAULT;

	return 0;
}
676
/* 32-bit layout of struct drm_dma: all four list pointers are u32. */
typedef struct drm_dma32 {
	int context;		  /**< Context handle */
	int send_count;		  /**< Number of buffers to send */
	u32 send_indices;	  /**< List of handles to buffers */
	u32 send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags; /**< Flags */
	int request_count;	  /**< Number of buffers requested */
	int request_size;	  /**< Desired size for buffers */
	u32 request_indices;	  /**< Buffer information */
	u32 request_sizes;
	int granted_count;	  /**< Number of buffers granted */
} drm_dma32_t;

/*
 * DRM_IOCTL_DMA compat wrapper: widen the four index/size array pointers
 * into a native request, forward it, then copy back the two scalar output
 * fields (request_size, granted_count).
 */
static int compat_drm_dma(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	drm_dma32_t d32;
	drm_dma32_t __user *argp = (void __user *)arg;
	struct drm_dma __user *d;
	int err;

	if (copy_from_user(&d32, argp, sizeof(d32)))
		return -EFAULT;

	d = compat_alloc_user_space(sizeof(*d));
	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	if (__put_user(d32.context, &d->context)
	    || __put_user(d32.send_count, &d->send_count)
	    || __put_user((int __user *)(unsigned long)d32.send_indices,
			  &d->send_indices)
	    || __put_user((int __user *)(unsigned long)d32.send_sizes,
			  &d->send_sizes)
	    || __put_user(d32.flags, &d->flags)
	    || __put_user(d32.request_count, &d->request_count)
	    || __put_user((int __user *)(unsigned long)d32.request_indices,
			  &d->request_indices)
	    || __put_user((int __user *)(unsigned long)d32.request_sizes,
			  &d->request_sizes))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_DMA, (unsigned long)d);
	if (err)
		return err;

	/* The index/size arrays are written through the pointers above;
	 * only the scalar results need explicit copy-back. */
	if (__get_user(d32.request_size, &d->request_size)
	    || __get_user(d32.granted_count, &d->granted_count)
	    || __put_user(d32.request_size, &argp->request_size)
	    || __put_user(d32.granted_count, &argp->granted_count))
		return -EFAULT;

	return 0;
}
732
733#if __OS_HAS_AGP
/* 32-bit layout of struct drm_agp_mode: u32 mode instead of unsigned long. */
typedef struct drm_agp_mode32 {
	u32 mode;		/**< AGP mode */
} drm_agp_mode32_t;

/*
 * DRM_IOCTL_AGP_ENABLE compat wrapper: widen the single mode word and
 * forward.  Input-only; uses checking get_user/put_user, so no explicit
 * access_ok is needed.
 */
static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	drm_agp_mode32_t __user *argp = (void __user *)arg;
	drm_agp_mode32_t m32;
	struct drm_agp_mode __user *mode;

	if (get_user(m32.mode, &argp->mode))
		return -EFAULT;

	mode = compat_alloc_user_space(sizeof(*mode));
	if (put_user(m32.mode, &mode->mode))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
}
755
/* 32-bit layout of struct drm_agp_info: the unsigned long fields (mode,
 * aperture/memory figures) are narrowed to u32. */
typedef struct drm_agp_info32 {
	int agp_version_major;
	int agp_version_minor;
	u32 mode;
	u32 aperture_base;	/* physical address */
	u32 aperture_size;	/* bytes */
	u32 memory_allowed;	/* bytes */
	u32 memory_used;

	/* PCI information */
	unsigned short id_vendor;
	unsigned short id_device;
} drm_agp_info32_t;

/*
 * DRM_IOCTL_AGP_INFO compat wrapper: output-only ioctl; run the native
 * query into a scratch buffer, then narrow every field into the 32-bit
 * layout for the caller.
 */
static int compat_drm_agp_info(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_agp_info32_t __user *argp = (void __user *)arg;
	drm_agp_info32_t i32;
	struct drm_agp_info __user *info;
	int err;

	info = compat_alloc_user_space(sizeof(*info));
	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_AGP_INFO, (unsigned long)info);
	if (err)
		return err;

	if (__get_user(i32.agp_version_major, &info->agp_version_major)
	    || __get_user(i32.agp_version_minor, &info->agp_version_minor)
	    || __get_user(i32.mode, &info->mode)
	    || __get_user(i32.aperture_base, &info->aperture_base)
	    || __get_user(i32.aperture_size, &info->aperture_size)
	    || __get_user(i32.memory_allowed, &info->memory_allowed)
	    || __get_user(i32.memory_used, &info->memory_used)
	    || __get_user(i32.id_vendor, &info->id_vendor)
	    || __get_user(i32.id_device, &info->id_device))
		return -EFAULT;

	if (copy_to_user(argp, &i32, sizeof(i32)))
		return -EFAULT;

	return 0;
}
803
/* 32-bit layout of struct drm_agp_buffer: all unsigned long fields
 * narrowed to u32. */
typedef struct drm_agp_buffer32 {
	u32 size;	/**< In bytes -- will round to page boundary */
	u32 handle;	/**< Used for binding / unbinding */
	u32 type;	/**< Type of memory to allocate */
	u32 physical;	/**< Physical used by i810 */
} drm_agp_buffer32_t;

/*
 * DRM_IOCTL_AGP_ALLOC compat wrapper.  If copying the results back to
 * the 32-bit caller fails, the freshly allocated AGP buffer is released
 * again so it is not leaked.
 */
static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_agp_buffer32_t __user *argp = (void __user *)arg;
	drm_agp_buffer32_t req32;
	struct drm_agp_buffer __user *request;
	int err;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.size, &request->size)
	    || __put_user(req32.type, &request->type))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
	if (err)
		return err;

	if (__get_user(req32.handle, &request->handle)
	    || __get_user(req32.physical, &request->physical)
	    || copy_to_user(argp, &req32, sizeof(req32))) {
		/* Undo the allocation; the caller never saw the handle. */
		drm_ioctl(file->f_path.dentry->d_inode, file,
			  DRM_IOCTL_AGP_FREE, (unsigned long)request);
		return -EFAULT;
	}

	return 0;
}
843
844static int compat_drm_agp_free(struct file *file, unsigned int cmd,
845 unsigned long arg)
846{
847 drm_agp_buffer32_t __user *argp = (void __user *)arg;
848 struct drm_agp_buffer __user *request;
849 u32 handle;
850
851 request = compat_alloc_user_space(sizeof(*request));
852 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
853 || get_user(handle, &argp->handle)
854 || __put_user(handle, &request->handle))
855 return -EFAULT;
856
857 return drm_ioctl(file->f_path.dentry->d_inode, file,
858 DRM_IOCTL_AGP_FREE, (unsigned long)request);
859}
860
/* 32-bit layout of struct drm_agp_binding. */
typedef struct drm_agp_binding32 {
	u32 handle;	/**< From drm_agp_buffer */
	u32 offset;	/**< In bytes -- will round to page boundary */
} drm_agp_binding32_t;

/*
 * DRM_IOCTL_AGP_BIND compat wrapper: widen handle and offset into a
 * native request and forward.  Input-only.
 */
static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_agp_binding32_t __user *argp = (void __user *)arg;
	drm_agp_binding32_t req32;
	struct drm_agp_binding __user *request;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.handle, &request->handle)
	    || __put_user(req32.offset, &request->offset))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_AGP_BIND, (unsigned long)request);
}
885
886static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
887 unsigned long arg)
888{
889 drm_agp_binding32_t __user *argp = (void __user *)arg;
890 struct drm_agp_binding __user *request;
891 u32 handle;
892
893 request = compat_alloc_user_space(sizeof(*request));
894 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
895 || get_user(handle, &argp->handle)
896 || __put_user(handle, &request->handle))
897 return -EFAULT;
898
899 return drm_ioctl(file->f_path.dentry->d_inode, file,
900 DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
901}
902#endif /* __OS_HAS_AGP */
903
/* 32-bit layout of struct drm_scatter_gather. */
typedef struct drm_scatter_gather32 {
	u32 size;	/**< In bytes -- will round to page boundary */
	u32 handle;	/**< Used for mapping / unmapping */
} drm_scatter_gather32_t;

/*
 * DRM_IOCTL_SG_ALLOC compat wrapper: forward the requested size, then
 * return the handle shifted down by PAGE_SHIFT so it fits in 32 bits
 * (compat_drm_sg_free shifts it back up).
 */
static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_scatter_gather32_t __user *argp = (void __user *)arg;
	struct drm_scatter_gather __user *request;
	int err;
	unsigned long x;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
	    || __get_user(x, &argp->size)
	    || __put_user(x, &request->size))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_SG_ALLOC, (unsigned long)request);
	if (err)
		return err;

	/* XXX not sure about the handle conversion here... */
	if (__get_user(x, &request->handle)
	    || __put_user(x >> PAGE_SHIFT, &argp->handle))
		return -EFAULT;

	return 0;
}
936
/*
 * DRM_IOCTL_SG_FREE compat wrapper: reverse the PAGE_SHIFT narrowing
 * done by compat_drm_sg_alloc() to reconstruct the native handle.
 */
static int compat_drm_sg_free(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_scatter_gather32_t __user *argp = (void __user *)arg;
	struct drm_scatter_gather __user *request;
	unsigned long x;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
	    || __get_user(x, &argp->handle)
	    || __put_user(x << PAGE_SHIFT, &request->handle))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_SG_FREE, (unsigned long)request);
}
954
/* 32-bit layouts of union drm_wait_vblank: signal is a 32-bit value and
 * the reply timestamp uses s32 seconds/microseconds. */
struct drm_wait_vblank_request32 {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	u32 signal;
};

struct drm_wait_vblank_reply32 {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	s32 tval_sec;
	s32 tval_usec;
};

typedef union drm_wait_vblank32 {
	struct drm_wait_vblank_request32 request;
	struct drm_wait_vblank_reply32 reply;
} drm_wait_vblank32_t;

/*
 * DRM_IOCTL_WAIT_VBLANK compat wrapper: widen the request fields into a
 * native union, wait, then narrow the reply (including the timestamp)
 * back into the caller's 32-bit union.
 */
static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_wait_vblank32_t __user *argp = (void __user *)arg;
	drm_wait_vblank32_t req32;
	union drm_wait_vblank __user *request;
	int err;

	if (copy_from_user(&req32, argp, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.request.type, &request->request.type)
	    || __put_user(req32.request.sequence, &request->request.sequence)
	    || __put_user(req32.request.signal, &request->request.signal))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
	if (err)
		return err;

	/* req32 is a union: reading the reply overlays the request data. */
	if (__get_user(req32.reply.type, &request->reply.type)
	    || __get_user(req32.reply.sequence, &request->reply.sequence)
	    || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
	    || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
		return -EFAULT;

	if (copy_to_user(argp, &req32, sizeof(req32)))
		return -EFAULT;

	return 0;
}
1007
/*
 * Table of compat handlers, indexed by ioctl number.  Designated
 * initializers leave unlisted slots NULL; drm_compat_ioctl() falls back
 * to the native drm_ioctl() for those entries.
 */
drm_ioctl_compat_t *drm_compat_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
	[DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
#if __OS_HAS_AGP
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
#endif
	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
};
1038
1039/**
1040 * Called whenever a 32-bit process running under a 64-bit kernel
1041 * performs an ioctl on /dev/drm.
1042 *
1043 * \param file_priv DRM file private.
1044 * \param cmd command.
1045 * \param arg user argument.
1046 * \return zero on success or negative number on failure.
1047 */
1048long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1049{
1050 unsigned int nr = DRM_IOCTL_NR(cmd);
1051 drm_ioctl_compat_t *fn;
1052 int ret;
1053
1054 /* Assume that ioctls without an explicit compat routine will just
1055 * work. This may not always be a good assumption, but it's better
1056 * than always failing.
1057 */
1058 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
1059 return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
1060
1061 fn = drm_compat_ioctls[nr];
1062
1063 lock_kernel(); /* XXX for now */
1064 if (fn != NULL)
1065 ret = (*fn) (filp, cmd, arg);
1066 else
1067 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
1068 unlock_kernel();
1069
1070 return ret;
1071}
1072
1073EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
new file mode 100644
index 000000000000..16829fb3089d
--- /dev/null
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -0,0 +1,352 @@
1/**
2 * \file drm_ioctl.c
3 * IOCTL processing for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37#include "drm_core.h"
38
39#include "linux/pci.h"
40
41/**
42 * Get the bus id.
43 *
44 * \param inode device inode.
45 * \param file_priv DRM file private.
46 * \param cmd command.
47 * \param arg user argument, pointing to a drm_unique structure.
48 * \return zero on success or a negative number on failure.
49 *
50 * Copies the bus id from drm_device::unique into user space.
51 */
52int drm_getunique(struct drm_device *dev, void *data,
53 struct drm_file *file_priv)
54{
55 struct drm_unique *u = data;
56
57 if (u->unique_len >= dev->unique_len) {
58 if (copy_to_user(u->unique, dev->unique, dev->unique_len))
59 return -EFAULT;
60 }
61 u->unique_len = dev->unique_len;
62
63 return 0;
64}
65
66/**
67 * Set the bus id.
68 *
69 * \param inode device inode.
70 * \param file_priv DRM file private.
71 * \param cmd command.
72 * \param arg user argument, pointing to a drm_unique structure.
73 * \return zero on success or a negative number on failure.
74 *
75 * Copies the bus id from userspace into drm_device::unique, and verifies that
76 * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
77 * in interface version 1.1 and will return EBUSY when setversion has requested
78 * version 1.1 or greater.
79 */
80int drm_setunique(struct drm_device *dev, void *data,
81 struct drm_file *file_priv)
82{
83 struct drm_unique *u = data;
84 int domain, bus, slot, func, ret;
85
86 if (dev->unique_len || dev->unique)
87 return -EBUSY;
88
89 if (!u->unique_len || u->unique_len > 1024)
90 return -EINVAL;
91
92 dev->unique_len = u->unique_len;
93 dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
94 if (!dev->unique)
95 return -ENOMEM;
96 if (copy_from_user(dev->unique, u->unique, dev->unique_len))
97 return -EFAULT;
98
99 dev->unique[dev->unique_len] = '\0';
100
101 dev->devname =
102 drm_alloc(strlen(dev->driver->pci_driver.name) +
103 strlen(dev->unique) + 2, DRM_MEM_DRIVER);
104 if (!dev->devname)
105 return -ENOMEM;
106
107 sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
108 dev->unique);
109
110 /* Return error if the busid submitted doesn't match the device's actual
111 * busid.
112 */
113 ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
114 if (ret != 3)
115 return -EINVAL;
116 domain = bus >> 8;
117 bus &= 0xff;
118
119 if ((domain != drm_get_pci_domain(dev)) ||
120 (bus != dev->pdev->bus->number) ||
121 (slot != PCI_SLOT(dev->pdev->devfn)) ||
122 (func != PCI_FUNC(dev->pdev->devfn)))
123 return -EINVAL;
124
125 return 0;
126}
127
/*
 * Generate drm_device::unique ("pci:DDDD:BB:SS.F") and drm_device::devname
 * from the device's actual PCI location.  No-op if a busid is already set.
 * Called from drm_setversion() when the caller requests interface >= 1.1.
 */
static int drm_set_busid(struct drm_device * dev)
{
	int len;

	if (dev->unique != NULL)
		return 0;

	/* 40 chars plus NUL is ample for "pci:%04x:%02x:%02x.%d". */
	dev->unique_len = 40;
	dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
	if (dev->unique == NULL)
		return -ENOMEM;

	len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
		       drm_get_pci_domain(dev), dev->pdev->bus->number,
		       PCI_SLOT(dev->pdev->devfn),
		       PCI_FUNC(dev->pdev->devfn));

	/* snprintf returns the would-be length, so > unique_len means the
	 * output was truncated. */
	if (len > dev->unique_len)
		DRM_ERROR("Unique buffer overflowed\n");

	dev->devname =
	    drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
		      2, DRM_MEM_DRIVER);
	if (dev->devname == NULL)
		return -ENOMEM;

	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
		dev->unique);

	return 0;
}
159
160/**
161 * Get a mapping information.
162 *
163 * \param inode device inode.
164 * \param file_priv DRM file private.
165 * \param cmd command.
166 * \param arg user argument, pointing to a drm_map structure.
167 *
168 * \return zero on success or a negative number on failure.
169 *
170 * Searches for the mapping with the specified offset and copies its information
171 * into userspace
172 */
173int drm_getmap(struct drm_device *dev, void *data,
174 struct drm_file *file_priv)
175{
176 struct drm_map *map = data;
177 struct drm_map_list *r_list = NULL;
178 struct list_head *list;
179 int idx;
180 int i;
181
182 idx = map->offset;
183
184 mutex_lock(&dev->struct_mutex);
185 if (idx < 0) {
186 mutex_unlock(&dev->struct_mutex);
187 return -EINVAL;
188 }
189
190 i = 0;
191 list_for_each(list, &dev->maplist) {
192 if (i == idx) {
193 r_list = list_entry(list, struct drm_map_list, head);
194 break;
195 }
196 i++;
197 }
198 if (!r_list || !r_list->map) {
199 mutex_unlock(&dev->struct_mutex);
200 return -EINVAL;
201 }
202
203 map->offset = r_list->map->offset;
204 map->size = r_list->map->size;
205 map->type = r_list->map->type;
206 map->flags = r_list->map->flags;
207 map->handle = (void *)(unsigned long) r_list->user_token;
208 map->mtrr = r_list->map->mtrr;
209 mutex_unlock(&dev->struct_mutex);
210
211 return 0;
212}
213
214/**
215 * Get client information.
216 *
217 * \param inode device inode.
218 * \param file_priv DRM file private.
219 * \param cmd command.
220 * \param arg user argument, pointing to a drm_client structure.
221 *
222 * \return zero on success or a negative number on failure.
223 *
224 * Searches for the client with the specified index and copies its information
225 * into userspace
226 */
227int drm_getclient(struct drm_device *dev, void *data,
228 struct drm_file *file_priv)
229{
230 struct drm_client *client = data;
231 struct drm_file *pt;
232 int idx;
233 int i;
234
235 idx = client->idx;
236 mutex_lock(&dev->struct_mutex);
237
238 i = 0;
239 list_for_each_entry(pt, &dev->filelist, lhead) {
240 if (i++ >= idx) {
241 client->auth = pt->authenticated;
242 client->pid = pt->pid;
243 client->uid = pt->uid;
244 client->magic = pt->magic;
245 client->iocs = pt->ioctl_count;
246 mutex_unlock(&dev->struct_mutex);
247
248 return 0;
249 }
250 }
251 mutex_unlock(&dev->struct_mutex);
252
253 return -EINVAL;
254}
255
256/**
257 * Get statistics information.
258 *
259 * \param inode device inode.
260 * \param file_priv DRM file private.
261 * \param cmd command.
262 * \param arg user argument, pointing to a drm_stats structure.
263 *
264 * \return zero on success or a negative number on failure.
265 */
266int drm_getstats(struct drm_device *dev, void *data,
267 struct drm_file *file_priv)
268{
269 struct drm_stats *stats = data;
270 int i;
271
272 memset(stats, 0, sizeof(*stats));
273
274 mutex_lock(&dev->struct_mutex);
275
276 for (i = 0; i < dev->counters; i++) {
277 if (dev->types[i] == _DRM_STAT_LOCK)
278 stats->data[i].value =
279 (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
280 else
281 stats->data[i].value = atomic_read(&dev->counts[i]);
282 stats->data[i].type = dev->types[i];
283 }
284
285 stats->count = dev->counters;
286
287 mutex_unlock(&dev->struct_mutex);
288
289 return 0;
290}
291
292/**
293 * Setversion ioctl.
294 *
295 * \param inode device inode.
296 * \param file_priv DRM file private.
297 * \param cmd command.
298 * \param arg user argument, pointing to a drm_lock structure.
299 * \return zero on success or negative number on failure.
300 *
301 * Sets the requested interface version
302 */
303int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
304{
305 struct drm_set_version *sv = data;
306 int if_version, retcode = 0;
307
308 if (sv->drm_di_major != -1) {
309 if (sv->drm_di_major != DRM_IF_MAJOR ||
310 sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
311 retcode = -EINVAL;
312 goto done;
313 }
314 if_version = DRM_IF_VERSION(sv->drm_di_major,
315 sv->drm_di_minor);
316 dev->if_version = max(if_version, dev->if_version);
317 if (sv->drm_di_minor >= 1) {
318 /*
319 * Version 1.1 includes tying of DRM to specific device
320 */
321 drm_set_busid(dev);
322 }
323 }
324
325 if (sv->drm_dd_major != -1) {
326 if (sv->drm_dd_major != dev->driver->major ||
327 sv->drm_dd_minor < 0 || sv->drm_dd_minor >
328 dev->driver->minor) {
329 retcode = -EINVAL;
330 goto done;
331 }
332
333 if (dev->driver->set_version)
334 dev->driver->set_version(dev, sv);
335 }
336
337done:
338 sv->drm_di_major = DRM_IF_MAJOR;
339 sv->drm_di_minor = DRM_IF_MINOR;
340 sv->drm_dd_major = dev->driver->major;
341 sv->drm_dd_minor = dev->driver->minor;
342
343 return retcode;
344}
345
/** No-op ioctl: always succeeds.  Used as the handler for obsolete
 * ioctls that must remain accepted for backwards compatibility. */
int drm_noop(struct drm_device *dev, void *data,
	     struct drm_file *file_priv)
{
	DRM_DEBUG("\n");
	return 0;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
new file mode 100644
index 000000000000..089c015c01d1
--- /dev/null
+++ b/drivers/gpu/drm/drm_irq.c
@@ -0,0 +1,462 @@
1/**
2 * \file drm_irq.c
3 * IRQ support
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37
38#include <linux/interrupt.h> /* For task queue support */
39
40/**
41 * Get interrupt from bus id.
42 *
43 * \param inode device inode.
44 * \param file_priv DRM file private.
45 * \param cmd command.
46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure.
48 *
49 * Finds the PCI device with the specified bus id and gets its IRQ number.
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to.
52 */
53int drm_irq_by_busid(struct drm_device *dev, void *data,
54 struct drm_file *file_priv)
55{
56 struct drm_irq_busid *p = data;
57
58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
59 return -EINVAL;
60
61 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
62 (p->busnum & 0xff) != dev->pdev->bus->number ||
63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
64 return -EINVAL;
65
66 p->irq = dev->irq;
67
68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
69 p->irq);
70
71 return 0;
72}
73
/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 * \return zero on success, or a negative error code on failure.
 *
 * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
 * before and after the installation.
 */
static int drm_irq_install(struct drm_device * dev)
{
	int ret;
	unsigned long sh_flags = 0;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	if (dev->irq == 0)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);

	/* Driver must have been initialized */
	if (!dev->dev_private) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (dev->irq_enabled) {
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}
	/* Claim the IRQ under the mutex before dropping it, so a concurrent
	 * installer sees -EBUSY instead of racing request_irq(). */
	dev->irq_enabled = 1;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("irq=%d\n", dev->irq);

	/* Vblank bookkeeping only exists for drivers advertising vblank
	 * IRQ support. */
	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
		init_waitqueue_head(&dev->vbl_queue);

		spin_lock_init(&dev->vbl_lock);

		INIT_LIST_HEAD(&dev->vbl_sigs);
		INIT_LIST_HEAD(&dev->vbl_sigs2);

		dev->vbl_pending = 0;
	}

	/* Before installing handler */
	dev->driver->irq_preinstall(dev);

	/* Install handler */
	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
		sh_flags = IRQF_SHARED;

	ret = request_irq(dev->irq, dev->driver->irq_handler,
			  sh_flags, dev->devname, dev);
	if (ret < 0) {
		/* Roll back the irq_enabled claim taken above. */
		mutex_lock(&dev->struct_mutex);
		dev->irq_enabled = 0;
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* After installing handler */
	dev->driver->irq_postinstall(dev);

	return 0;
}
144
/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 * \return zero on success, or -EINVAL if no IRQ was installed.
 *
 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
 */
int drm_irq_uninstall(struct drm_device * dev)
{
	int irq_enabled;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	/* Atomically take ownership of the enabled flag so only one
	 * caller performs the actual teardown. */
	mutex_lock(&dev->struct_mutex);
	irq_enabled = dev->irq_enabled;
	dev->irq_enabled = 0;
	mutex_unlock(&dev->struct_mutex);

	if (!irq_enabled)
		return -EINVAL;

	DRM_DEBUG("irq=%d\n", dev->irq);

	dev->driver->irq_uninstall(dev);

	free_irq(dev->irq, dev);

	/* Drop any driver callback still queued for the locked tasklet. */
	dev->locked_tasklet_func = NULL;

	return 0;
}

EXPORT_SYMBOL(drm_irq_uninstall);
179
180/**
181 * IRQ control ioctl.
182 *
183 * \param inode device inode.
184 * \param file_priv DRM file private.
185 * \param cmd command.
186 * \param arg user argument, pointing to a drm_control structure.
187 * \return zero on success or a negative number on failure.
188 *
189 * Calls irq_install() or irq_uninstall() according to \p arg.
190 */
191int drm_control(struct drm_device *dev, void *data,
192 struct drm_file *file_priv)
193{
194 struct drm_control *ctl = data;
195
196 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
197
198
199 switch (ctl->func) {
200 case DRM_INST_HANDLER:
201 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
202 return 0;
203 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
204 ctl->irq != dev->irq)
205 return -EINVAL;
206 return drm_irq_install(dev);
207 case DRM_UNINST_HANDLER:
208 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
209 return 0;
210 return drm_irq_uninstall(dev);
211 default:
212 return -EINVAL;
213 }
214}
215
/**
 * Wait for VBLANK.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_wait_vblank union.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested checks if this task has already scheduled the same signal
 * for the same vblank sequence number - nothing to be done in
 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
 * task.
 *
 * If a signal is not requested, then calls vblank_wait().
 */
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	union drm_wait_vblank *vblwait = data;
	struct timeval now;
	int ret = 0;
	unsigned int flags, seq;

	/* Waiting only makes sense once the IRQ handler is installed. */
	if ((!dev->irq) || (!dev->irq_enabled))
		return -EINVAL;

	if (vblwait->request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
			  vblwait->request.type,
			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
		return -EINVAL;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;

	/* The secondary pipe requires its own driver capability flag. */
	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
		return -EINVAL;

	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
			  : &dev->vbl_received);

	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
		/* Convert a relative request into an absolute one against
		 * the current counter. */
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fallthrough */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	/* If the requested sequence already passed (within a 2^23 window
	 * to cope with counter wraparound), retarget the next vblank. */
	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1<<23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		unsigned long irqflags;
		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
		struct drm_vbl_sig *vbl_sig;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry(vbl_sig, vbl_sigs, head) {
			if (vbl_sig->sequence == vblwait->request.sequence
			    && vbl_sig->info.si_signo ==
			    vblwait->request.signal
			    && vbl_sig->task == current) {
				spin_unlock_irqrestore(&dev->vbl_lock,
						       irqflags);
				vblwait->reply.sequence = seq;
				goto done;
			}
		}

		/* Cap the number of outstanding signal requests. */
		if (dev->vbl_pending >= 100) {
			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
			return -EBUSY;
		}

		dev->vbl_pending++;

		/* Allocation may sleep, so drop the spinlock around it. */
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		if (!
		    (vbl_sig =
		     drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
			return -ENOMEM;
		}

		memset((void *)vbl_sig, 0, sizeof(*vbl_sig));

		vbl_sig->sequence = vblwait->request.sequence;
		vbl_sig->info.si_signo = vblwait->request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* drm_vbl_send_signals() will fire and free this entry. */
		list_add_tail(&vbl_sig->head, vbl_sigs);

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vblwait->reply.sequence = seq;
	} else {
		/* Synchronous path: block in the driver until the sequence
		 * is reached, then timestamp the reply. */
		if (flags & _DRM_VBLANK_SECONDARY) {
			if (dev->driver->vblank_wait2)
				ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
		} else if (dev->driver->vblank_wait)
			ret =
			    dev->driver->vblank_wait(dev,
						     &vblwait->request.sequence);

		do_gettimeofday(&now);
		vblwait->reply.tval_sec = now.tv_sec;
		vblwait->reply.tval_usec = now.tv_usec;
	}

      done:
	return ret;
}
346
/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 *
 * Sends a signal for each task in drm_device::vbl_sigs (and the secondary
 * list drm_device::vbl_sigs2) whose requested sequence has been reached,
 * and removes those entries from the lists.
 */
void drm_vbl_send_signals(struct drm_device * dev)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev->vbl_lock, flags);

	/* i == 0: primary list/counter, i == 1: secondary. */
	for (i = 0; i < 2; i++) {
		struct drm_vbl_sig *vbl_sig, *tmp;
		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
						   &dev->vbl_received);

		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
			/* Fire once the counter reaches the requested
			 * sequence; the 2^23 window handles wraparound. */
			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
				vbl_sig->info.si_code = vbl_seq;
				send_sig_info(vbl_sig->info.si_signo,
					      &vbl_sig->info, vbl_sig->task);

				list_del(&vbl_sig->head);

				drm_free(vbl_sig, sizeof(*vbl_sig),
					 DRM_MEM_DRIVER);

				dev->vbl_pending--;
			}
		}
	}

	spin_unlock_irqrestore(&dev->vbl_lock, flags);
}

EXPORT_SYMBOL(drm_vbl_send_signals);
389
/**
 * Tasklet wrapper function.
 *
 * \param data DRM device in disguise.
 *
 * Attempts to grab the HW lock and calls the driver callback on success. On
 * failure, leave the lock marked as contended so the callback can be called
 * from drm_unlock().
 */
static void drm_locked_tasklet_func(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	/* Nothing queued, or the lock is busy.  In the busy case
	 * drm_lock_take() has set the contention bit, so drm_unlock()
	 * will run the callback instead. */
	if (!dev->locked_tasklet_func ||
	    !drm_lock_take(&dev->lock,
			   DRM_KERNEL_CONTEXT)) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->lock.lock_time = jiffies;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	/* Invoke the driver callback with the HW lock held. */
	dev->locked_tasklet_func(dev);

	drm_lock_free(&dev->lock,
		      DRM_KERNEL_CONTEXT);

	dev->locked_tasklet_func = NULL;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}
425
/**
 * Schedule a tasklet to call back a driver hook with the HW lock held.
 *
 * \param dev DRM device.
 * \param func Driver callback.
 *
 * This is intended for triggering actions that require the HW lock from an
 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
 * completes. Note that the callback may be called from interrupt or process
 * context, it must not make any assumptions about this. Also, the HW lock will
 * be held with the kernel context or any client context.
 */
void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
	unsigned long irqflags;
	/* NOTE: a single tasklet shared by ALL DRM devices -- only one
	 * locked-tasklet callback can be pending system-wide. */
	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
		return;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	/* A callback is already pending for this device; drop the new one. */
	if (dev->locked_tasklet_func) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->locked_tasklet_func = func;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	/* NOTE(review): drm_tasklet.data is written after the SCHED-bit
	 * test above and outside any lock; a concurrent caller on another
	 * device could race here -- confirm. */
	drm_tasklet.data = (unsigned long)dev;

	tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
new file mode 100644
index 000000000000..0998723cde79
--- /dev/null
+++ b/drivers/gpu/drm/drm_lock.c
@@ -0,0 +1,391 @@
1/**
2 * \file drm_lock.c
3 * IOCTLs for locking
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37
38static int drm_notifier(void *priv);
39
/**
 * Lock ioctl.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_lock structure.
 * \param file_priv DRM file private.
 * \return zero on success or negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take to lock.
 */
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	DECLARE_WAITQUEUE(entry, current);
	struct drm_lock *lock = data;
	int ret = 0;

	++file_priv->lock_count;

	/* Userspace may never lock with the kernel's reserved context. */
	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock->context, task_pid_nr(current),
		  dev->lock.hw_lock->lock, lock->flags);

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
		if (lock->context < 0)
			return -EINVAL;

	/* Sleep on the lock queue until the lock is acquired or a signal
	 * arrives.  user_waiters lets drm_lock_take() set the contention
	 * flag when more than one task is waiting. */
	add_wait_queue(&dev->lock.lock_queue, &entry);
	spin_lock_bh(&dev->lock.spinlock);
	dev->lock.user_waiters++;
	spin_unlock_bh(&dev->lock.spinlock);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!dev->lock.hw_lock) {
			/* Device has been unregistered */
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&dev->lock, lock->context)) {
			dev->lock.file_priv = file_priv;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
			break;	/* Got lock */
		}

		/* Contention */
		schedule();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	spin_lock_bh(&dev->lock.spinlock);
	dev->lock.user_waiters--;
	spin_unlock_bh(&dev->lock.spinlock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->lock.lock_queue, &entry);

	DRM_DEBUG("%d %s\n", lock->context,
		  ret ? "interrupted" : "has lock");
	if (ret) return ret;

	/* Block job-control signals while the lock is held; drm_notifier()
	 * decides per-signal whether delivery may proceed. */
	sigemptyset(&dev->sigmask);
	sigaddset(&dev->sigmask, SIGSTOP);
	sigaddset(&dev->sigmask, SIGTSTP);
	sigaddset(&dev->sigmask, SIGTTIN);
	sigaddset(&dev->sigmask, SIGTTOU);
	dev->sigdata.context = lock->context;
	dev->sigdata.lock = dev->lock.hw_lock;
	block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

	if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
		dev->driver->dma_ready(dev);

	/* NOTE(review): returning -EBUSY here leaves the lock held and
	 * signals blocked -- the caller is expected to unlock; confirm. */
	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
	{
		if (dev->driver->dma_quiescent(dev)) {
			DRM_DEBUG("%d waiting for DMA quiescent\n",
				  lock->context);
			return -EBUSY;
		}
	}

	if (dev->driver->kernel_context_switch &&
	    dev->last_context != lock->context) {
		dev->driver->kernel_context_switch(dev, dev->last_context,
						   lock->context);
	}

	return 0;
}
137
/**
 * Unlock ioctl.
 *
 * \param dev DRM device.
 * \param data user argument, pointing to a drm_lock structure.
 * \param file_priv DRM file private.
 * \return zero on success or negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_lock *lock = data;
	unsigned long irqflags;

	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	/* Run any driver callback queued by drm_locked_tasklet() now,
	 * while the HW lock is still held by this context. */
	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	if (dev->locked_tasklet_func) {
		dev->locked_tasklet_func(dev);

		dev->locked_tasklet_func = NULL;
	}

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

	/* kernel_context_switch isn't used by any of the x86 drm
	 * modules but is required by the Sparc driver.
	 */
	if (dev->driver->kernel_context_switch_unlock)
		dev->driver->kernel_context_switch_unlock(dev);
	else {
		if (drm_lock_free(&dev->lock,lock->context)) {
			/* FIXME: Should really bail out here. */
		}
	}

	/* Matches block_all_signals() performed in drm_lock(). */
	unblock_all_signals();
	return 0;
}
186
/**
 * Take the heavyweight lock.
 *
 * \param lock_data lock data.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
int drm_lock_take(struct drm_lock_data *lock_data,
		  unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD)
			/* Already held by someone: only record contention. */
			new = old | _DRM_LOCK_CONT;
		else {
			/* Free: claim it, pre-setting the contention bit
			 * when more than one waiter is queued. */
			new = context | _DRM_LOCK_HELD |
				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
				 _DRM_LOCK_CONT : 0);
		}
		prev = cmpxchg(lock, old, new);
	} while (prev != old);	/* retry if the lock word changed under us */
	spin_unlock_bh(&lock_data->spinlock);

	if (_DRM_LOCKING_CONTEXT(old) == context) {
		if (old & _DRM_LOCK_HELD) {
			/* Recursive lock attempt by the same context. */
			if (context != DRM_KERNEL_CONTEXT) {
				DRM_ERROR("%d holds heavyweight lock\n",
					  context);
			}
			return 0;
		}
	}

	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
		/* Have lock */
		return 1;
	}
	return 0;
}
232
/**
 * This takes a lock forcibly and hands it to context. Should ONLY be used
 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
 *
 * \param lock_data lock data.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(struct drm_lock_data *lock_data,
			     unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	/* The previous holder's file pointer is no longer meaningful. */
	lock_data->file_priv = NULL;
	do {
		old = *lock;
		/* Unconditionally reassign the held lock to 'context'. */
		new = context | _DRM_LOCK_HELD;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	return 1;
}
259
/**
 * Free lock.
 *
 * \param lock_data lock data.
 * \param context context.
 * \return one if the lock was handed to the kernel or freed on behalf of
 * another context, zero on a normal release.
 *
 * Resets the lock file pointer.
 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
 * waiting on the lock queue.
 */
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	/* If the idlelock is waiting, hand the lock to the kernel context
	 * instead of releasing it (see drm_idlelock_take()). */
	spin_lock_bh(&lock_data->spinlock);
	if (lock_data->kernel_waiters != 0) {
		drm_lock_transfer(lock_data, 0);
		lock_data->idle_has_lock = 1;
		spin_unlock_bh(&lock_data->spinlock);
		return 1;
	}
	spin_unlock_bh(&lock_data->spinlock);

	/* Clear HELD/CONT, keeping only the context bits of the old word. */
	do {
		old = *lock;
		new = _DRM_LOCKING_CONTEXT(old);
		prev = cmpxchg(lock, old, new);
	} while (prev != old);

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context, _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	wake_up_interruptible(&lock_data->lock_queue);
	return 0;
}
299
/**
 * If we get here, it means that the process has called DRM_IOCTL_LOCK
 * without calling DRM_IOCTL_UNLOCK.
 *
 * If the lock is not held, then let the signal proceed as usual. If the lock
 * is held, then set the contended flag and keep the signal blocked.
 *
 * \param priv pointer to a drm_sigdata structure.
 * \return one if the signal should be delivered normally, or zero if the
 * signal should be blocked.
 */
static int drm_notifier(void *priv)
{
	struct drm_sigdata *s = (struct drm_sigdata *) priv;
	unsigned int old, new, prev;

	/* Allow signal delivery if lock isn't held */
	if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
		return 1;

	/* Otherwise, set flag to force call to
	   drmUnlock */
	do {
		old = s->lock->lock;
		new = old | _DRM_LOCK_CONT;
		prev = cmpxchg(&s->lock->lock, old, new);
	} while (prev != old);	/* retry on concurrent lock-word update */
	return 0;
}
330
/**
 * This function returns immediately and takes the hw lock
 * with the kernel context if it is free, otherwise it gets the highest priority when and if
 * it is eventually released.
 *
 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
 * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
 * a deadlock, which is why the "idlelock" was invented).
 *
 * This should be sufficient to wait for GPU idle without
 * having to worry about starvation.
 */

void drm_idlelock_take(struct drm_lock_data *lock_data)
{
	int ret = 0;

	spin_lock_bh(&lock_data->spinlock);
	/* kernel_waiters > 0 makes drm_lock_free() transfer the lock to
	 * the kernel rather than releasing it. */
	lock_data->kernel_waiters++;
	if (!lock_data->idle_has_lock) {

		/* drm_lock_take() takes the spinlock itself, so drop it
		 * around the call. */
		spin_unlock_bh(&lock_data->spinlock);
		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
		spin_lock_bh(&lock_data->spinlock);

		if (ret == 1)
			lock_data->idle_has_lock = 1;
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);
362
/**
 * Release the idlelock taken with drm_idlelock_take().
 *
 * \param lock_data lock data.
 *
 * When the last kernel waiter goes away and the kernel holds the lock on
 * its behalf, reset the lock word and wake any tasks sleeping on the queue.
 */
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
	unsigned int old, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			/* Reset the lock word to the (unheld) kernel
			 * context and wake sleeping lockers. */
			do {
				old = *lock;
				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
			} while (prev != old);
			wake_up_interruptible(&lock_data->lock_queue);
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
382
383
384int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
385{
386 return (file_priv->lock_count && dev->lock.hw_lock &&
387 _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
388 dev->lock.file_priv == file_priv);
389}
390
391EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
new file mode 100644
index 000000000000..845081b44f63
--- /dev/null
+++ b/drivers/gpu/drm/drm_memory.c
@@ -0,0 +1,181 @@
1/**
2 * \file drm_memory.c
3 * Memory management wrappers for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/highmem.h>
37#include "drmP.h"
38
39#ifdef DEBUG_MEMORY
40#include "drm_memory_debug.h"
41#else
42
/** No-op: memory-usage tracking only exists in the DEBUG_MEMORY build. */
void drm_mem_init(void)
{
}
47
/**
 * Called when "/proc/dri/%dev%/mem" is read.
 *
 * \param buf output buffer.
 * \param start start of output data.
 * \param offset requested start offset.
 * \param len requested number of bytes.
 * \param eof whether there is no more data to return.
 * \param data private data.
 * \return number of written bytes -- always zero, since this non-debug
 *  build keeps no memory statistics.
 */
int drm_mem_info(char *buf, char **start, off_t offset,
		 int len, int *eof, void *data)
{
	return 0;
}
66
67/** Wrapper around kmalloc() and kfree() */
68void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
69{
70 void *pt;
71
72 if (!(pt = kmalloc(size, GFP_KERNEL)))
73 return NULL;
74 if (oldpt && oldsize) {
75 memcpy(pt, oldpt, oldsize);
76 kfree(oldpt);
77 }
78 return pt;
79}
80
81#if __OS_HAS_AGP
82static void *agp_remap(unsigned long offset, unsigned long size,
83 struct drm_device * dev)
84{
85 unsigned long *phys_addr_map, i, num_pages =
86 PAGE_ALIGN(size) / PAGE_SIZE;
87 struct drm_agp_mem *agpmem;
88 struct page **page_map;
89 void *addr;
90
91 size = PAGE_ALIGN(size);
92
93#ifdef __alpha__
94 offset -= dev->hose->mem_space->start;
95#endif
96
97 list_for_each_entry(agpmem, &dev->agp->memory, head)
98 if (agpmem->bound <= offset
99 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
100 (offset + size))
101 break;
102 if (!agpmem)
103 return NULL;
104
105 /*
106 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
107 * the CPU do not get remapped by the GART. We fix this by using the kernel's
108 * page-table instead (that's probably faster anyhow...).
109 */
110 /* note: use vmalloc() because num_pages could be large... */
111 page_map = vmalloc(num_pages * sizeof(struct page *));
112 if (!page_map)
113 return NULL;
114
115 phys_addr_map =
116 agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
117 for (i = 0; i < num_pages; ++i)
118 page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
119 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
120 vfree(page_map);
121
122 return addr;
123}
124
125/** Wrapper around agp_allocate_memory() */
126DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
127{
128 return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
129}
130
131/** Wrapper around agp_free_memory() */
132int drm_free_agp(DRM_AGP_MEM * handle, int pages)
133{
134 return drm_agp_free_memory(handle) ? 0 : -EINVAL;
135}
136
137/** Wrapper around agp_bind_memory() */
138int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
139{
140 return drm_agp_bind_memory(handle, start);
141}
142
143/** Wrapper around agp_unbind_memory() */
144int drm_unbind_agp(DRM_AGP_MEM * handle)
145{
146 return drm_agp_unbind_memory(handle);
147}
148
149#else /* __OS_HAS_AGP */
/* Stub used when AGP support is compiled out: mapping always fails. */
static inline void *agp_remap(unsigned long offset, unsigned long size,
			      struct drm_device * dev)
{
	return NULL;
}
155
156#endif /* agp */
157
158#endif /* debug_memory */
159
160void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
161{
162 if (drm_core_has_AGP(dev) &&
163 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
164 map->handle = agp_remap(map->offset, map->size, dev);
165 else
166 map->handle = ioremap(map->offset, map->size);
167}
168EXPORT_SYMBOL(drm_core_ioremap);
169
170void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
171{
172 if (!map->handle || !map->size)
173 return;
174
175 if (drm_core_has_AGP(dev) &&
176 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
177 vunmap(map->handle);
178 else
179 iounmap(map->handle);
180}
181EXPORT_SYMBOL(drm_core_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
new file mode 100644
index 000000000000..dcff9e9b52e3
--- /dev/null
+++ b/drivers/gpu/drm/drm_mm.c
@@ -0,0 +1,295 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28
29/*
30 * Generic simple memory manager implementation. Intended to be used as a base
31 * class implementation for more advanced memory managers.
32 *
33 * Note that the algorithm used is quite simple and there might be substantial
34 * performance gains if a smarter free list is implemented. Currently it is just an
35 * unordered stack of free regions. This could easily be improved if an RB-tree
36 * is used instead. At least if we expect heavy fragmentation.
37 *
38 * Aligned allocations can also see improvement.
39 *
40 * Authors:
41 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42 */
43
44#include "drmP.h"
45#include <linux/slab.h>
46
47unsigned long drm_mm_tail_space(struct drm_mm *mm)
48{
49 struct list_head *tail_node;
50 struct drm_mm_node *entry;
51
52 tail_node = mm->ml_entry.prev;
53 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
54 if (!entry->free)
55 return 0;
56
57 return entry->size;
58}
59
60int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
61{
62 struct list_head *tail_node;
63 struct drm_mm_node *entry;
64
65 tail_node = mm->ml_entry.prev;
66 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
67 if (!entry->free)
68 return -ENOMEM;
69
70 if (entry->size <= size)
71 return -ENOMEM;
72
73 entry->size -= size;
74 return 0;
75}
76
77
78static int drm_mm_create_tail_node(struct drm_mm *mm,
79 unsigned long start,
80 unsigned long size)
81{
82 struct drm_mm_node *child;
83
84 child = (struct drm_mm_node *)
85 drm_alloc(sizeof(*child), DRM_MEM_MM);
86 if (!child)
87 return -ENOMEM;
88
89 child->free = 1;
90 child->size = size;
91 child->start = start;
92 child->mm = mm;
93
94 list_add_tail(&child->ml_entry, &mm->ml_entry);
95 list_add_tail(&child->fl_entry, &mm->fl_entry);
96
97 return 0;
98}
99
100
101int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
102{
103 struct list_head *tail_node;
104 struct drm_mm_node *entry;
105
106 tail_node = mm->ml_entry.prev;
107 entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
108 if (!entry->free) {
109 return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
110 }
111 entry->size += size;
112 return 0;
113}
114
115static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
116 unsigned long size)
117{
118 struct drm_mm_node *child;
119
120 child = (struct drm_mm_node *)
121 drm_alloc(sizeof(*child), DRM_MEM_MM);
122 if (!child)
123 return NULL;
124
125 INIT_LIST_HEAD(&child->fl_entry);
126
127 child->free = 0;
128 child->size = size;
129 child->start = parent->start;
130 child->mm = parent->mm;
131
132 list_add_tail(&child->ml_entry, &parent->ml_entry);
133 INIT_LIST_HEAD(&child->fl_entry);
134
135 parent->size -= size;
136 parent->start += size;
137 return child;
138}
139
140
141
142struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
143 unsigned long size, unsigned alignment)
144{
145
146 struct drm_mm_node *align_splitoff = NULL;
147 struct drm_mm_node *child;
148 unsigned tmp = 0;
149
150 if (alignment)
151 tmp = parent->start % alignment;
152
153 if (tmp) {
154 align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
155 if (!align_splitoff)
156 return NULL;
157 }
158
159 if (parent->size == size) {
160 list_del_init(&parent->fl_entry);
161 parent->free = 0;
162 return parent;
163 } else {
164 child = drm_mm_split_at_start(parent, size);
165 }
166
167 if (align_splitoff)
168 drm_mm_put_block(align_splitoff);
169
170 return child;
171}
172
173/*
174 * Put a block. Merge with the previous and / or next block if they are free.
175 * Otherwise add to the free stack.
176 */
177
178void drm_mm_put_block(struct drm_mm_node * cur)
179{
180
181 struct drm_mm *mm = cur->mm;
182 struct list_head *cur_head = &cur->ml_entry;
183 struct list_head *root_head = &mm->ml_entry;
184 struct drm_mm_node *prev_node = NULL;
185 struct drm_mm_node *next_node;
186
187 int merged = 0;
188
189 if (cur_head->prev != root_head) {
190 prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
191 if (prev_node->free) {
192 prev_node->size += cur->size;
193 merged = 1;
194 }
195 }
196 if (cur_head->next != root_head) {
197 next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
198 if (next_node->free) {
199 if (merged) {
200 prev_node->size += next_node->size;
201 list_del(&next_node->ml_entry);
202 list_del(&next_node->fl_entry);
203 drm_free(next_node, sizeof(*next_node),
204 DRM_MEM_MM);
205 } else {
206 next_node->size += cur->size;
207 next_node->start = cur->start;
208 merged = 1;
209 }
210 }
211 }
212 if (!merged) {
213 cur->free = 1;
214 list_add(&cur->fl_entry, &mm->fl_entry);
215 } else {
216 list_del(&cur->ml_entry);
217 drm_free(cur, sizeof(*cur), DRM_MEM_MM);
218 }
219}
220
221struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
222 unsigned long size,
223 unsigned alignment, int best_match)
224{
225 struct list_head *list;
226 const struct list_head *free_stack = &mm->fl_entry;
227 struct drm_mm_node *entry;
228 struct drm_mm_node *best;
229 unsigned long best_size;
230 unsigned wasted;
231
232 best = NULL;
233 best_size = ~0UL;
234
235 list_for_each(list, free_stack) {
236 entry = list_entry(list, struct drm_mm_node, fl_entry);
237 wasted = 0;
238
239 if (entry->size < size)
240 continue;
241
242 if (alignment) {
243 register unsigned tmp = entry->start % alignment;
244 if (tmp)
245 wasted += alignment - tmp;
246 }
247
248
249 if (entry->size >= size + wasted) {
250 if (!best_match)
251 return entry;
252 if (size < best_size) {
253 best = entry;
254 best_size = entry->size;
255 }
256 }
257 }
258
259 return best;
260}
261
262int drm_mm_clean(struct drm_mm * mm)
263{
264 struct list_head *head = &mm->ml_entry;
265
266 return (head->next->next == head);
267}
268
269int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
270{
271 INIT_LIST_HEAD(&mm->ml_entry);
272 INIT_LIST_HEAD(&mm->fl_entry);
273
274 return drm_mm_create_tail_node(mm, start, size);
275}
276
277
278void drm_mm_takedown(struct drm_mm * mm)
279{
280 struct list_head *bnode = mm->fl_entry.next;
281 struct drm_mm_node *entry;
282
283 entry = list_entry(bnode, struct drm_mm_node, fl_entry);
284
285 if (entry->ml_entry.next != &mm->ml_entry ||
286 entry->fl_entry.next != &mm->fl_entry) {
287 DRM_ERROR("Memory manager not clean. Delaying takedown\n");
288 return;
289 }
290
291 list_del(&entry->fl_entry);
292 list_del(&entry->ml_entry);
293
294 drm_free(entry, sizeof(*entry), DRM_MEM_MM);
295}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
new file mode 100644
index 000000000000..b55d5bc6ea61
--- /dev/null
+++ b/drivers/gpu/drm/drm_pci.c
@@ -0,0 +1,183 @@
1/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
2/**
3 * \file drm_pci.c
4 * \brief Functions and ioctls to manage PCI memory
5 *
6 * \warning These interfaces aren't stable yet.
7 *
8 * \todo Implement the remaining ioctl's for the PCI pools.
9 * \todo The wrappers here are so thin that they would be better off inlined..
10 *
11 * \author José Fonseca <jrfonseca@tungstengraphics.com>
12 * \author Leif Delgass <ldelgass@retinalburn.net>
13 */
14
15/*
16 * Copyright 2003 José Fonseca.
17 * Copyright 2003 Leif Delgass.
18 * All Rights Reserved.
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining a
21 * copy of this software and associated documentation files (the "Software"),
22 * to deal in the Software without restriction, including without limitation
23 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 * and/or sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following conditions:
26 *
27 * The above copyright notice and this permission notice (including the next
28 * paragraph) shall be included in all copies or substantial portions of the
29 * Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
34 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
35 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
36 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
37 */
38
39#include <linux/pci.h>
40#include <linux/dma-mapping.h>
41#include "drmP.h"
42
43/**********************************************************************/
44/** \name PCI memory */
45/*@{*/
46
47/**
48 * \brief Allocate a PCI consistent memory block, for DMA.
49 */
50drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
51 dma_addr_t maxaddr)
52{
53 drm_dma_handle_t *dmah;
54#if 1
55 unsigned long addr;
56 size_t sz;
57#endif
58#ifdef DRM_DEBUG_MEMORY
59 int area = DRM_MEM_DMA;
60
61 spin_lock(&drm_mem_lock);
62 if ((drm_ram_used >> PAGE_SHIFT)
63 > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
64 spin_unlock(&drm_mem_lock);
65 return 0;
66 }
67 spin_unlock(&drm_mem_lock);
68#endif
69
70 /* pci_alloc_consistent only guarantees alignment to the smallest
71 * PAGE_SIZE order which is greater than or equal to the requested size.
72 * Return NULL here for now to make sure nobody tries for larger alignment
73 */
74 if (align > size)
75 return NULL;
76
77 if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
78 DRM_ERROR("Setting pci dma mask failed\n");
79 return NULL;
80 }
81
82 dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
83 if (!dmah)
84 return NULL;
85
86 dmah->size = size;
87 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
88
89#ifdef DRM_DEBUG_MEMORY
90 if (dmah->vaddr == NULL) {
91 spin_lock(&drm_mem_lock);
92 ++drm_mem_stats[area].fail_count;
93 spin_unlock(&drm_mem_lock);
94 kfree(dmah);
95 return NULL;
96 }
97
98 spin_lock(&drm_mem_lock);
99 ++drm_mem_stats[area].succeed_count;
100 drm_mem_stats[area].bytes_allocated += size;
101 drm_ram_used += size;
102 spin_unlock(&drm_mem_lock);
103#else
104 if (dmah->vaddr == NULL) {
105 kfree(dmah);
106 return NULL;
107 }
108#endif
109
110 memset(dmah->vaddr, 0, size);
111
112 /* XXX - Is virt_to_page() legal for consistent mem? */
113 /* Reserve */
114 for (addr = (unsigned long)dmah->vaddr, sz = size;
115 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
116 SetPageReserved(virt_to_page(addr));
117 }
118
119 return dmah;
120}
121
122EXPORT_SYMBOL(drm_pci_alloc);
123
124/**
125 * \brief Free a PCI consistent memory block without freeing its descriptor.
126 *
127 * This function is for internal use in the Linux-specific DRM core code.
128 */
129void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
130{
131#if 1
132 unsigned long addr;
133 size_t sz;
134#endif
135#ifdef DRM_DEBUG_MEMORY
136 int area = DRM_MEM_DMA;
137 int alloc_count;
138 int free_count;
139#endif
140
141 if (!dmah->vaddr) {
142#ifdef DRM_DEBUG_MEMORY
143 DRM_MEM_ERROR(area, "Attempt to free address 0\n");
144#endif
145 } else {
146 /* XXX - Is virt_to_page() legal for consistent mem? */
147 /* Unreserve */
148 for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
149 sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
150 ClearPageReserved(virt_to_page(addr));
151 }
152 dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
153 dmah->busaddr);
154 }
155
156#ifdef DRM_DEBUG_MEMORY
157 spin_lock(&drm_mem_lock);
158 free_count = ++drm_mem_stats[area].free_count;
159 alloc_count = drm_mem_stats[area].succeed_count;
160 drm_mem_stats[area].bytes_freed += size;
161 drm_ram_used -= size;
162 spin_unlock(&drm_mem_lock);
163 if (free_count > alloc_count) {
164 DRM_MEM_ERROR(area,
165 "Excess frees: %d frees, %d allocs\n",
166 free_count, alloc_count);
167 }
168#endif
169
170}
171
172/**
173 * \brief Free a PCI consistent memory block
174 */
175void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
176{
177 __drm_pci_free(dev, dmah);
178 kfree(dmah);
179}
180
181EXPORT_SYMBOL(drm_pci_free);
182
183/*@}*/
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
new file mode 100644
index 000000000000..93b1e0475c93
--- /dev/null
+++ b/drivers/gpu/drm/drm_proc.c
@@ -0,0 +1,557 @@
1/**
2 * \file drm_proc.c
3 * /proc support for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 *
8 * \par Acknowledgements:
9 * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
10 * the problem with the proc files not outputting all their information.
11 */
12
13/*
14 * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
15 *
16 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
17 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
18 * All Rights Reserved.
19 *
20 * Permission is hereby granted, free of charge, to any person obtaining a
21 * copy of this software and associated documentation files (the "Software"),
22 * to deal in the Software without restriction, including without limitation
23 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
24 * and/or sell copies of the Software, and to permit persons to whom the
25 * Software is furnished to do so, subject to the following conditions:
26 *
27 * The above copyright notice and this permission notice (including the next
28 * paragraph) shall be included in all copies or substantial portions of the
29 * Software.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
34 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
35 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
36 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
37 * OTHER DEALINGS IN THE SOFTWARE.
38 */
39
40#include "drmP.h"
41
42static int drm_name_info(char *buf, char **start, off_t offset,
43 int request, int *eof, void *data);
44static int drm_vm_info(char *buf, char **start, off_t offset,
45 int request, int *eof, void *data);
46static int drm_clients_info(char *buf, char **start, off_t offset,
47 int request, int *eof, void *data);
48static int drm_queues_info(char *buf, char **start, off_t offset,
49 int request, int *eof, void *data);
50static int drm_bufs_info(char *buf, char **start, off_t offset,
51 int request, int *eof, void *data);
52#if DRM_DEBUG_CODE
53static int drm_vma_info(char *buf, char **start, off_t offset,
54 int request, int *eof, void *data);
55#endif
56
57/**
58 * Proc file list.
59 */
60static struct drm_proc_list {
61 const char *name; /**< file name */
62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
63} drm_proc_list[] = {
64 {"name", drm_name_info},
65 {"mem", drm_mem_info},
66 {"vm", drm_vm_info},
67 {"clients", drm_clients_info},
68 {"queues", drm_queues_info},
69 {"bufs", drm_bufs_info},
70#if DRM_DEBUG_CODE
71 {"vma", drm_vma_info},
72#endif
73};
74
75#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
76
77/**
78 * Initialize the DRI proc filesystem for a device.
79 *
80 * \param dev DRM device.
81 * \param minor device minor number.
82 * \param root DRI proc dir entry.
83 * \param dev_root resulting DRI device proc dir entry.
84 * \return root entry pointer on success, or NULL on failure.
85 *
86 * Create the DRI proc root entry "/proc/dri", the device proc root entry
87 * "/proc/dri/%minor%/", and each entry in proc_list as
88 * "/proc/dri/%minor%/%name%".
89 */
90int drm_proc_init(struct drm_minor *minor, int minor_id,
91 struct proc_dir_entry *root)
92{
93 struct proc_dir_entry *ent;
94 int i, j;
95 char name[64];
96
97 sprintf(name, "%d", minor_id);
98 minor->dev_root = proc_mkdir(name, root);
99 if (!minor->dev_root) {
100 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
101 return -1;
102 }
103
104 for (i = 0; i < DRM_PROC_ENTRIES; i++) {
105 ent = create_proc_entry(drm_proc_list[i].name,
106 S_IFREG | S_IRUGO, minor->dev_root);
107 if (!ent) {
108 DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
109 name, drm_proc_list[i].name);
110 for (j = 0; j < i; j++)
111 remove_proc_entry(drm_proc_list[i].name,
112 minor->dev_root);
113 remove_proc_entry(name, root);
114 minor->dev_root = NULL;
115 return -1;
116 }
117 ent->read_proc = drm_proc_list[i].f;
118 ent->data = minor;
119 }
120
121 return 0;
122}
123
124/**
125 * Cleanup the proc filesystem resources.
126 *
127 * \param minor device minor number.
128 * \param root DRI proc dir entry.
129 * \param dev_root DRI device proc dir entry.
130 * \return always zero.
131 *
132 * Remove all proc entries created by proc_init().
133 */
134int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
135{
136 int i;
137 char name[64];
138
139 if (!root || !minor->dev_root)
140 return 0;
141
142 for (i = 0; i < DRM_PROC_ENTRIES; i++)
143 remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
144 sprintf(name, "%d", minor->index);
145 remove_proc_entry(name, root);
146
147 return 0;
148}
149
150/**
151 * Called when "/proc/dri/.../name" is read.
152 *
153 * \param buf output buffer.
154 * \param start start of output data.
155 * \param offset requested start offset.
156 * \param request requested number of bytes.
157 * \param eof whether there is no more data to return.
158 * \param data private data.
159 * \return number of written bytes.
160 *
161 * Prints the device name together with the bus id if available.
162 */
163static int drm_name_info(char *buf, char **start, off_t offset, int request,
164 int *eof, void *data)
165{
166 struct drm_minor *minor = (struct drm_minor *) data;
167 struct drm_device *dev = minor->dev;
168 int len = 0;
169
170 if (offset > DRM_PROC_LIMIT) {
171 *eof = 1;
172 return 0;
173 }
174
175 *start = &buf[offset];
176 *eof = 0;
177
178 if (dev->unique) {
179 DRM_PROC_PRINT("%s %s %s\n",
180 dev->driver->pci_driver.name,
181 pci_name(dev->pdev), dev->unique);
182 } else {
183 DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
184 pci_name(dev->pdev));
185 }
186
187 if (len > request + offset)
188 return request;
189 *eof = 1;
190 return len - offset;
191}
192
193/**
194 * Called when "/proc/dri/.../vm" is read.
195 *
196 * \param buf output buffer.
197 * \param start start of output data.
198 * \param offset requested start offset.
199 * \param request requested number of bytes.
200 * \param eof whether there is no more data to return.
201 * \param data private data.
202 * \return number of written bytes.
203 *
204 * Prints information about all mappings in drm_device::maplist.
205 */
206static int drm__vm_info(char *buf, char **start, off_t offset, int request,
207 int *eof, void *data)
208{
209 struct drm_minor *minor = (struct drm_minor *) data;
210 struct drm_device *dev = minor->dev;
211 int len = 0;
212 struct drm_map *map;
213 struct drm_map_list *r_list;
214
215 /* Hardcoded from _DRM_FRAME_BUFFER,
216 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
217 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
218 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
219 const char *type;
220 int i;
221
222 if (offset > DRM_PROC_LIMIT) {
223 *eof = 1;
224 return 0;
225 }
226
227 *start = &buf[offset];
228 *eof = 0;
229
230 DRM_PROC_PRINT("slot offset size type flags "
231 "address mtrr\n\n");
232 i = 0;
233 list_for_each_entry(r_list, &dev->maplist, head) {
234 map = r_list->map;
235 if (!map)
236 continue;
237 if (map->type < 0 || map->type > 5)
238 type = "??";
239 else
240 type = types[map->type];
241 DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
242 i,
243 map->offset,
244 map->size, type, map->flags,
245 (unsigned long) r_list->user_token);
246 if (map->mtrr < 0) {
247 DRM_PROC_PRINT("none\n");
248 } else {
249 DRM_PROC_PRINT("%4d\n", map->mtrr);
250 }
251 i++;
252 }
253
254 if (len > request + offset)
255 return request;
256 *eof = 1;
257 return len - offset;
258}
259
260/**
261 * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
262 */
263static int drm_vm_info(char *buf, char **start, off_t offset, int request,
264 int *eof, void *data)
265{
266 struct drm_minor *minor = (struct drm_minor *) data;
267 struct drm_device *dev = minor->dev;
268 int ret;
269
270 mutex_lock(&dev->struct_mutex);
271 ret = drm__vm_info(buf, start, offset, request, eof, data);
272 mutex_unlock(&dev->struct_mutex);
273 return ret;
274}
275
276/**
277 * Called when "/proc/dri/.../queues" is read.
278 *
279 * \param buf output buffer.
280 * \param start start of output data.
281 * \param offset requested start offset.
282 * \param request requested number of bytes.
283 * \param eof whether there is no more data to return.
284 * \param data private data.
285 * \return number of written bytes.
286 */
287static int drm__queues_info(char *buf, char **start, off_t offset,
288 int request, int *eof, void *data)
289{
290 struct drm_minor *minor = (struct drm_minor *) data;
291 struct drm_device *dev = minor->dev;
292 int len = 0;
293 int i;
294 struct drm_queue *q;
295
296 if (offset > DRM_PROC_LIMIT) {
297 *eof = 1;
298 return 0;
299 }
300
301 *start = &buf[offset];
302 *eof = 0;
303
304 DRM_PROC_PRINT(" ctx/flags use fin"
305 " blk/rw/rwf wait flushed queued"
306 " locks\n\n");
307 for (i = 0; i < dev->queue_count; i++) {
308 q = dev->queuelist[i];
309 atomic_inc(&q->use_count);
310 DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
311 "%5d/0x%03x %5d %5d"
312 " %5d/%c%c/%c%c%c %5Zd\n",
313 i,
314 q->flags,
315 atomic_read(&q->use_count),
316 atomic_read(&q->finalization),
317 atomic_read(&q->block_count),
318 atomic_read(&q->block_read) ? 'r' : '-',
319 atomic_read(&q->block_write) ? 'w' : '-',
320 waitqueue_active(&q->read_queue) ? 'r' : '-',
321 waitqueue_active(&q->
322 write_queue) ? 'w' : '-',
323 waitqueue_active(&q->
324 flush_queue) ? 'f' : '-',
325 DRM_BUFCOUNT(&q->waitlist));
326 atomic_dec(&q->use_count);
327 }
328
329 if (len > request + offset)
330 return request;
331 *eof = 1;
332 return len - offset;
333}
334
335/**
336 * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
337 */
338static int drm_queues_info(char *buf, char **start, off_t offset, int request,
339 int *eof, void *data)
340{
341 struct drm_minor *minor = (struct drm_minor *) data;
342 struct drm_device *dev = minor->dev;
343 int ret;
344
345 mutex_lock(&dev->struct_mutex);
346 ret = drm__queues_info(buf, start, offset, request, eof, data);
347 mutex_unlock(&dev->struct_mutex);
348 return ret;
349}
350
351/**
352 * Called when "/proc/dri/.../bufs" is read.
353 *
354 * \param buf output buffer.
355 * \param start start of output data.
356 * \param offset requested start offset.
357 * \param request requested number of bytes.
358 * \param eof whether there is no more data to return.
359 * \param data private data.
360 * \return number of written bytes.
361 */
362static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
363 int *eof, void *data)
364{
365 struct drm_minor *minor = (struct drm_minor *) data;
366 struct drm_device *dev = minor->dev;
367 int len = 0;
368 struct drm_device_dma *dma = dev->dma;
369 int i;
370
371 if (!dma || offset > DRM_PROC_LIMIT) {
372 *eof = 1;
373 return 0;
374 }
375
376 *start = &buf[offset];
377 *eof = 0;
378
379 DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
380 for (i = 0; i <= DRM_MAX_ORDER; i++) {
381 if (dma->bufs[i].buf_count)
382 DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
383 i,
384 dma->bufs[i].buf_size,
385 dma->bufs[i].buf_count,
386 atomic_read(&dma->bufs[i]
387 .freelist.count),
388 dma->bufs[i].seg_count,
389 dma->bufs[i].seg_count
390 * (1 << dma->bufs[i].page_order),
391 (dma->bufs[i].seg_count
392 * (1 << dma->bufs[i].page_order))
393 * PAGE_SIZE / 1024);
394 }
395 DRM_PROC_PRINT("\n");
396 for (i = 0; i < dma->buf_count; i++) {
397 if (i && !(i % 32))
398 DRM_PROC_PRINT("\n");
399 DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
400 }
401 DRM_PROC_PRINT("\n");
402
403 if (len > request + offset)
404 return request;
405 *eof = 1;
406 return len - offset;
407}
408
409/**
410 * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
411 */
412static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
413 int *eof, void *data)
414{
415 struct drm_minor *minor = (struct drm_minor *) data;
416 struct drm_device *dev = minor->dev;
417 int ret;
418
419 mutex_lock(&dev->struct_mutex);
420 ret = drm__bufs_info(buf, start, offset, request, eof, data);
421 mutex_unlock(&dev->struct_mutex);
422 return ret;
423}
424
425/**
426 * Called when "/proc/dri/.../clients" is read.
427 *
428 * \param buf output buffer.
429 * \param start start of output data.
430 * \param offset requested start offset.
431 * \param request requested number of bytes.
432 * \param eof whether there is no more data to return.
433 * \param data private data.
434 * \return number of written bytes.
435 */
436static int drm__clients_info(char *buf, char **start, off_t offset,
437 int request, int *eof, void *data)
438{
439 struct drm_minor *minor = (struct drm_minor *) data;
440 struct drm_device *dev = minor->dev;
441 int len = 0;
442 struct drm_file *priv;
443
444 if (offset > DRM_PROC_LIMIT) {
445 *eof = 1;
446 return 0;
447 }
448
449 *start = &buf[offset];
450 *eof = 0;
451
452 DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
453 list_for_each_entry(priv, &dev->filelist, lhead) {
454 DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
455 priv->authenticated ? 'y' : 'n',
456 priv->minor->index,
457 priv->pid,
458 priv->uid, priv->magic, priv->ioctl_count);
459 }
460
461 if (len > request + offset)
462 return request;
463 *eof = 1;
464 return len - offset;
465}
466
467/**
468 * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
469 */
470static int drm_clients_info(char *buf, char **start, off_t offset,
471 int request, int *eof, void *data)
472{
473 struct drm_minor *minor = (struct drm_minor *) data;
474 struct drm_device *dev = minor->dev;
475 int ret;
476
477 mutex_lock(&dev->struct_mutex);
478 ret = drm__clients_info(buf, start, offset, request, eof, data);
479 mutex_unlock(&dev->struct_mutex);
480 return ret;
481}
482
483#if DRM_DEBUG_CODE
484
/**
 * Called when "/proc/dri/.../vma" is read (DRM_DEBUG_CODE builds only).
 *
 * Prints every VMA tracked in drm_device::vmalist with its address range
 * and permission flags; on i386 the raw page-protection bits are decoded
 * as well.  Caller must hold drm_device::struct_mutex.
 */
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
			 int *eof, void *data)
{
	struct drm_minor *minor = (struct drm_minor *) data;
	struct drm_device *dev = minor->dev;
	int len = 0;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	if (offset > DRM_PROC_LIMIT) {
		*eof = 1;
		return 0;
	}

	*start = &buf[offset];
	*eof = 0;

	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
		       atomic_read(&dev->vma_count),
		       high_memory, virt_to_phys(high_memory));
	list_for_each_entry(pt, &dev->vmalist, head) {
		/* Entries whose VMA has gone away are skipped. */
		if (!(vma = pt->vma))
			continue;
		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
			       pt->pid,
			       vma->vm_start,
			       vma->vm_end,
			       vma->vm_flags & VM_READ ? 'r' : '-',
			       vma->vm_flags & VM_WRITE ? 'w' : '-',
			       vma->vm_flags & VM_EXEC ? 'x' : '-',
			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			       vma->vm_flags & VM_LOCKED ? 'l' : '-',
			       vma->vm_flags & VM_IO ? 'i' : '-',
			       vma->vm_pgoff);

#if defined(__i386__)
		/* Decode the x86 PTE protection bits symbolically. */
		pgprot = pgprot_val(vma->vm_page_prot);
		DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
			       pgprot & _PAGE_PRESENT ? 'p' : '-',
			       pgprot & _PAGE_RW ? 'w' : 'r',
			       pgprot & _PAGE_USER ? 'u' : 's',
			       pgprot & _PAGE_PWT ? 't' : 'b',
			       pgprot & _PAGE_PCD ? 'u' : 'c',
			       pgprot & _PAGE_ACCESSED ? 'a' : '-',
			       pgprot & _PAGE_DIRTY ? 'd' : '-',
			       pgprot & _PAGE_PSE ? 'm' : 'k',
			       pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		DRM_PROC_PRINT("\n");
	}

	/* Standard read_proc epilogue: clamp to the requested window. */
	if (len > request + offset)
		return request;
	*eof = 1;
	return len - offset;
}
544
545static int drm_vma_info(char *buf, char **start, off_t offset, int request,
546 int *eof, void *data)
547{
548 struct drm_minor *minor = (struct drm_minor *) data;
549 struct drm_device *dev = minor->dev;
550 int ret;
551
552 mutex_lock(&dev->struct_mutex);
553 ret = drm__vma_info(buf, start, offset, request, eof, data);
554 mutex_unlock(&dev->struct_mutex);
555 return ret;
556}
557#endif
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
new file mode 100644
index 000000000000..b2b0f3d41714
--- /dev/null
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -0,0 +1,227 @@
1/**
2 * \file drm_scatter.c
3 * IOCTLs to manage scatter/gather memory
4 *
5 * \author Gareth Hughes <gareth@valinux.com>
6 */
7
8/*
9 * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
10 *
11 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
31 * DEALINGS IN THE SOFTWARE.
32 */
33
34#include <linux/vmalloc.h>
35#include "drmP.h"
36
37#define DEBUG_SCATTER 0
38
/* Allocate @size bytes of vmalloc'ed memory suitable for DMA backing:
 * on PowerPC with a non-coherent cache the mapping is made uncacheable
 * (_PAGE_NO_CACHE); elsewhere vmalloc_32() keeps the pages addressable
 * by 32-bit DMA. Returns NULL on failure. */
static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
#else
	return vmalloc_32(size);
#endif
}
47
48void drm_sg_cleanup(struct drm_sg_mem * entry)
49{
50 struct page *page;
51 int i;
52
53 for (i = 0; i < entry->pages; i++) {
54 page = entry->pagelist[i];
55 if (page)
56 ClearPageReserved(page);
57 }
58
59 vfree(entry->virtual);
60
61 drm_free(entry->busaddr,
62 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
63 drm_free(entry->pagelist,
64 entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
65 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
66}
67
/* Fold a kernel virtual address into the 32-bit handle returned to
 * userspace: on LP64 the high and low 32-bit halves are summed, on
 * 32-bit targets the address is used as-is. */
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
73
74int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
75{
76 struct drm_sg_mem *entry;
77 unsigned long pages, i, j;
78
79 DRM_DEBUG("\n");
80
81 if (!drm_core_check_feature(dev, DRIVER_SG))
82 return -EINVAL;
83
84 if (dev->sg)
85 return -EINVAL;
86
87 entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
88 if (!entry)
89 return -ENOMEM;
90
91 memset(entry, 0, sizeof(*entry));
92 pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
93 DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
94
95 entry->pages = pages;
96 entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
97 DRM_MEM_PAGES);
98 if (!entry->pagelist) {
99 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
100 return -ENOMEM;
101 }
102
103 memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
104
105 entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
106 DRM_MEM_PAGES);
107 if (!entry->busaddr) {
108 drm_free(entry->pagelist,
109 entry->pages * sizeof(*entry->pagelist),
110 DRM_MEM_PAGES);
111 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
112 return -ENOMEM;
113 }
114 memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
115
116 entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
117 if (!entry->virtual) {
118 drm_free(entry->busaddr,
119 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
120 drm_free(entry->pagelist,
121 entry->pages * sizeof(*entry->pagelist),
122 DRM_MEM_PAGES);
123 drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
124 return -ENOMEM;
125 }
126
127 /* This also forces the mapping of COW pages, so our page list
128 * will be valid. Please don't remove it...
129 */
130 memset(entry->virtual, 0, pages << PAGE_SHIFT);
131
132 entry->handle = ScatterHandle((unsigned long)entry->virtual);
133
134 DRM_DEBUG("handle = %08lx\n", entry->handle);
135 DRM_DEBUG("virtual = %p\n", entry->virtual);
136
137 for (i = (unsigned long)entry->virtual, j = 0; j < pages;
138 i += PAGE_SIZE, j++) {
139 entry->pagelist[j] = vmalloc_to_page((void *)i);
140 if (!entry->pagelist[j])
141 goto failed;
142 SetPageReserved(entry->pagelist[j]);
143 }
144
145 request->handle = entry->handle;
146
147 dev->sg = entry;
148
149#if DEBUG_SCATTER
150 /* Verify that each page points to its virtual address, and vice
151 * versa.
152 */
153 {
154 int error = 0;
155
156 for (i = 0; i < pages; i++) {
157 unsigned long *tmp;
158
159 tmp = page_address(entry->pagelist[i]);
160 for (j = 0;
161 j < PAGE_SIZE / sizeof(unsigned long);
162 j++, tmp++) {
163 *tmp = 0xcafebabe;
164 }
165 tmp = (unsigned long *)((u8 *) entry->virtual +
166 (PAGE_SIZE * i));
167 for (j = 0;
168 j < PAGE_SIZE / sizeof(unsigned long);
169 j++, tmp++) {
170 if (*tmp != 0xcafebabe && error == 0) {
171 error = 1;
172 DRM_ERROR("Scatter allocation error, "
173 "pagelist does not match "
174 "virtual mapping\n");
175 }
176 }
177 tmp = page_address(entry->pagelist[i]);
178 for (j = 0;
179 j < PAGE_SIZE / sizeof(unsigned long);
180 j++, tmp++) {
181 *tmp = 0;
182 }
183 }
184 if (error == 0)
185 DRM_ERROR("Scatter allocation matches pagelist\n");
186 }
187#endif
188
189 return 0;
190
191 failed:
192 drm_sg_cleanup(entry);
193 return -ENOMEM;
194}
195EXPORT_SYMBOL(drm_sg_alloc);
196
197
/* DRM_IOCTL_SG_ALLOC entry point; @data is a struct drm_scatter_gather
 * already copied in from userspace. */
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	return drm_sg_alloc(dev, data);
}
206
/* DRM_IOCTL_SG_FREE: release the device's scatter/gather area.
 *
 * Note: dev->sg is detached *before* the handle is validated, so a
 * mismatched handle leaves the device with no SG area while the entry
 * itself is not freed (preserved historical behavior). */
int drm_sg_free(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;
	struct drm_sg_mem *entry;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	entry = dev->sg;
	dev->sg = NULL;

	if (!entry || entry->handle != request->handle)
		return -EINVAL;

	DRM_DEBUG("virtual  = %p\n", entry->virtual);

	drm_sg_cleanup(entry);

	return 0;
}
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
new file mode 100644
index 000000000000..926f146390ce
--- /dev/null
+++ b/drivers/gpu/drm/drm_sman.c
@@ -0,0 +1,353 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 *
27 **************************************************************************/
28/*
29 * Simple memory manager interface that keeps track on allocate regions on a
30 * per "owner" basis. All regions associated with an "owner" can be released
31 * with a simple call. Typically if the "owner" exists. The owner is any
32 * "unsigned long" identifier. Can typically be a pointer to a file private
33 * struct or a context identifier.
34 *
35 * Authors:
36 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
37 */
38
39#include "drm_sman.h"
40
/* Per-owner bookkeeping: hashed by the owner identifier, linked into the
 * sman's owner list, and heading the list of blocks the owner holds. */
struct drm_owner_item {
	struct drm_hash_item owner_hash;	/* keyed by the owner id */
	struct list_head sman_list;		/* entry in sman->owner_items */
	struct list_head mem_blocks;		/* this owner's allocations */
};
46
/* Final teardown of a simple memory manager: destroy both hash tables
 * and free the sub-manager array. Does not free outstanding allocations
 * (see drm_sman_cleanup()). */
void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	if (sman->mm)
		drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
			 DRM_MEM_MM);
}

EXPORT_SYMBOL(drm_sman_takedown);
57
58int
59drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
60 unsigned int user_order, unsigned int owner_order)
61{
62 int ret = 0;
63
64 sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
65 DRM_MEM_MM);
66 if (!sman->mm) {
67 ret = -ENOMEM;
68 goto out;
69 }
70 sman->num_managers = num_managers;
71 INIT_LIST_HEAD(&sman->owner_items);
72 ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
73 if (ret)
74 goto out1;
75 ret = drm_ht_create(&sman->user_hash_tab, user_order);
76 if (!ret)
77 goto out;
78
79 drm_ht_remove(&sman->owner_hash_tab);
80out1:
81 drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
82out:
83 return ret;
84}
85
86EXPORT_SYMBOL(drm_sman_init);
87
88static void *drm_sman_mm_allocate(void *private, unsigned long size,
89 unsigned alignment)
90{
91 struct drm_mm *mm = (struct drm_mm *) private;
92 struct drm_mm_node *tmp;
93
94 tmp = drm_mm_search_free(mm, size, alignment, 1);
95 if (!tmp) {
96 return NULL;
97 }
98 tmp = drm_mm_get_block(tmp, size, alignment);
99 return tmp;
100}
101
/* Default ->free hook: hand the drm_mm_node back to the range manager. */
static void drm_sman_mm_free(void *private, void *ref)
{
	drm_mm_put_block((struct drm_mm_node *) ref);
}
108
109static void drm_sman_mm_destroy(void *private)
110{
111 struct drm_mm *mm = (struct drm_mm *) private;
112 drm_mm_takedown(mm);
113 drm_free(mm, sizeof(*mm), DRM_MEM_MM);
114}
115
116static unsigned long drm_sman_mm_offset(void *private, void *ref)
117{
118 struct drm_mm_node *node = (struct drm_mm_node *) ref;
119 return node->start;
120}
121
122int
123drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
124 unsigned long start, unsigned long size)
125{
126 struct drm_sman_mm *sman_mm;
127 struct drm_mm *mm;
128 int ret;
129
130 BUG_ON(manager >= sman->num_managers);
131
132 sman_mm = &sman->mm[manager];
133 mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
134 if (!mm) {
135 return -ENOMEM;
136 }
137 sman_mm->private = mm;
138 ret = drm_mm_init(mm, start, size);
139
140 if (ret) {
141 drm_free(mm, sizeof(*mm), DRM_MEM_MM);
142 return ret;
143 }
144
145 sman_mm->allocate = drm_sman_mm_allocate;
146 sman_mm->free = drm_sman_mm_free;
147 sman_mm->destroy = drm_sman_mm_destroy;
148 sman_mm->offset = drm_sman_mm_offset;
149
150 return 0;
151}
152
153EXPORT_SYMBOL(drm_sman_set_range);
154
/* Install a caller-provided allocator backend for slot @manager by
 * copying the hook structure wholesale. Always returns 0. */
int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
		     struct drm_sman_mm * allocator)
{
	BUG_ON(manager >= sman->num_managers);
	sman->mm[manager] = *allocator;

	return 0;
}
EXPORT_SYMBOL(drm_sman_set_manager);
165
166static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
167 unsigned long owner)
168{
169 int ret;
170 struct drm_hash_item *owner_hash_item;
171 struct drm_owner_item *owner_item;
172
173 ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
174 if (!ret) {
175 return drm_hash_entry(owner_hash_item, struct drm_owner_item,
176 owner_hash);
177 }
178
179 owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
180 if (!owner_item)
181 goto out;
182
183 INIT_LIST_HEAD(&owner_item->mem_blocks);
184 owner_item->owner_hash.key = owner;
185 if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
186 goto out1;
187
188 list_add_tail(&owner_item->sman_list, &sman->owner_items);
189 return owner_item;
190
191out1:
192 drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
193out:
194 return NULL;
195}
196
/* Allocate @size bytes (aligned to @alignment) from sub-manager @manager
 * on behalf of @owner. On success the block is registered in the user
 * hash table (its key becomes the user-visible handle) and linked into
 * the owner's block list. Returns the new item or NULL on failure, in
 * which case everything acquired along the way is unwound in reverse
 * order via the out* labels. */
struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
					 unsigned long size, unsigned alignment,
					 unsigned long owner)
{
	void *tmp;
	struct drm_sman_mm *sman_mm;
	struct drm_owner_item *owner_item;
	struct drm_memblock_item *memblock;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	/* Raw allocation from the backend; opaque to this layer. */
	tmp = sman_mm->allocate(sman_mm->private, size, alignment);

	if (!tmp) {
		return NULL;
	}

	memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);

	if (!memblock)
		goto out;

	memblock->mm_info = tmp;
	memblock->mm = sman_mm;
	memblock->sman = sman;

	/* Pick a free 32-bit key in the user hash table as the handle. */
	if (drm_ht_just_insert_please
	    (&sman->user_hash_tab, &memblock->user_hash,
	     (unsigned long)memblock, 32, 0, 0))
		goto out1;

	owner_item = drm_sman_get_owner_item(sman, owner);
	if (!owner_item)
		goto out2;

	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);

	return memblock;

	/* Unwind in strict reverse order of acquisition. */
out2:
	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
	drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
out:
	sman_mm->free(sman_mm->private, tmp);

	return NULL;
}

EXPORT_SYMBOL(drm_sman_alloc);
248
249static void drm_sman_free(struct drm_memblock_item *item)
250{
251 struct drm_sman *sman = item->sman;
252
253 list_del(&item->owner_list);
254 drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
255 item->mm->free(item->mm->private, item->mm_info);
256 drm_free(item, sizeof(*item), DRM_MEM_MM);
257}
258
259int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
260{
261 struct drm_hash_item *hash_item;
262 struct drm_memblock_item *memblock_item;
263
264 if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
265 return -EINVAL;
266
267 memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
268 user_hash);
269 drm_sman_free(memblock_item);
270 return 0;
271}
272
273EXPORT_SYMBOL(drm_sman_free_key);
274
/* Unlink an owner item from the owner list and the owner hash table and
 * free it. Callers first ensure mem_blocks is empty (see
 * drm_sman_owner_clean() / drm_sman_do_owner_cleanup()). */
static void drm_sman_remove_owner(struct drm_sman *sman,
				  struct drm_owner_item *owner_item)
{
	list_del(&owner_item->sman_list);
	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
}
282
283int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
284{
285
286 struct drm_hash_item *hash_item;
287 struct drm_owner_item *owner_item;
288
289 if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
290 return -1;
291 }
292
293 owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
294 if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
295 drm_sman_remove_owner(sman, owner_item);
296 return -1;
297 }
298
299 return 0;
300}
301
302EXPORT_SYMBOL(drm_sman_owner_clean);
303
/* Free every memory block the owner still holds (safe iteration, since
 * drm_sman_free() unlinks each entry), then remove the owner item. */
static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
				      struct drm_owner_item *owner_item)
{
	struct drm_memblock_item *entry, *next;

	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
				 owner_list) {
		drm_sman_free(entry);
	}
	drm_sman_remove_owner(sman, owner_item);
}
315
316void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
317{
318
319 struct drm_hash_item *hash_item;
320 struct drm_owner_item *owner_item;
321
322 if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
323
324 return;
325 }
326
327 owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
328 drm_sman_do_owner_cleanup(sman, owner_item);
329}
330
331EXPORT_SYMBOL(drm_sman_owner_cleanup);
332
333void drm_sman_cleanup(struct drm_sman *sman)
334{
335 struct drm_owner_item *entry, *next;
336 unsigned int i;
337 struct drm_sman_mm *sman_mm;
338
339 list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
340 drm_sman_do_owner_cleanup(sman, entry);
341 }
342 if (sman->mm) {
343 for (i = 0; i < sman->num_managers; ++i) {
344 sman_mm = &sman->mm[i];
345 if (sman_mm->private) {
346 sman_mm->destroy(sman_mm->private);
347 sman_mm->private = NULL;
348 }
349 }
350 }
351}
352
353EXPORT_SYMBOL(drm_sman_cleanup);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
new file mode 100644
index 000000000000..c2f584f3b46c
--- /dev/null
+++ b/drivers/gpu/drm/drm_stub.c
@@ -0,0 +1,331 @@
1/**
2 * \file drm_stub.h
3 * Stub support
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 */
7
8/*
9 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
10 *
11 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
29 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
30 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
31 * DEALINGS IN THE SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include "drmP.h"
37#include "drm_core.h"
38
39unsigned int drm_debug = 0; /* 1 to enable debug output */
40EXPORT_SYMBOL(drm_debug);
41
42MODULE_AUTHOR(CORE_AUTHOR);
43MODULE_DESCRIPTION(CORE_DESC);
44MODULE_LICENSE("GPL and additional rights");
45MODULE_PARM_DESC(debug, "Enable debug output");
46
47module_param_named(debug, drm_debug, int, 0600);
48
49struct idr drm_minors_idr;
50
51struct class *drm_class;
52struct proc_dir_entry *drm_proc_root;
53
54static int drm_minor_get_id(struct drm_device *dev, int type)
55{
56 int new_id;
57 int ret;
58 int base = 0, limit = 63;
59
60again:
61 if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
62 DRM_ERROR("Out of memory expanding drawable idr\n");
63 return -ENOMEM;
64 }
65 mutex_lock(&dev->struct_mutex);
66 ret = idr_get_new_above(&drm_minors_idr, NULL,
67 base, &new_id);
68 mutex_unlock(&dev->struct_mutex);
69 if (ret == -EAGAIN) {
70 goto again;
71 } else if (ret) {
72 return ret;
73 }
74
75 if (new_id >= limit) {
76 idr_remove(&drm_minors_idr, new_id);
77 return -EINVAL;
78 }
79 return new_id;
80}
81
/* Initialize the core fields of a freshly allocated drm_device: lists,
 * locks, the drawable idr, PCI identity, the map hash and the statistic
 * counters; then set up AGP/MTRR if the driver uses them, run the
 * driver's ->load() hook and the context bitmap. On any failure after
 * the map hash is created, drm_lastclose() unwinds. Returns 0 or a
 * negative errno. */
static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
			   const struct pci_device_id *ent,
			   struct drm_driver *driver)
{
	int retcode;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);

	spin_lock_init(&dev->count_lock);
	spin_lock_init(&dev->drw_lock);
	spin_lock_init(&dev->tasklet_lock);
	spin_lock_init(&dev->lock.spinlock);
	init_timer(&dev->timer);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);

	idr_init(&dev->drw_idr);

	/* Cache the PCI identity of the device. */
	dev->pdev = pdev;
	dev->pci_device = pdev->device;
	dev->pci_vendor = pdev->vendor;

#ifdef __alpha__
	dev->hose = pdev->sysdata;
#endif
	dev->irq = pdev->irq;

	/* Hash table for map handles, 2^12 buckets. */
	if (drm_ht_create(&dev->map_hash, 12)) {
		return -ENOMEM;
	}

	/* the DRM has 6 basic counters */
	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	dev->driver = driver;

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		/* A driver that requires AGP cannot continue without it. */
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			retcode = -EINVAL;
			goto error_out_unreg;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp)
				dev->agp->agp_mtrr =
				    mtrr_add(dev->agp->agp_info.aper_base,
					     dev->agp->agp_info.aper_size *
					     1024 * 1024, MTRR_TYPE_WRCOMB, 1);
		}
	}

	if (dev->driver->load)
		if ((retcode = dev->driver->load(dev, ent->driver_data)))
			goto error_out_unreg;

	retcode = drm_ctxbitmap_init(dev);
	if (retcode) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error_out_unreg;
	}

	return 0;

	/* drm_lastclose() tears down everything set up above. */
 error_out_unreg:
	drm_lastclose(dev);
	return retcode;
}
161
162
163/**
164 * Get a secondary minor number.
165 *
166 * \param dev device data structure
167 * \param sec-minor structure to hold the assigned minor
168 * \return negative number on failure.
169 *
170 * Search an empty entry and initialize it to the given parameters, and
171 * create the proc init entry via proc_init(). This routines assigns
172 * minor numbers to secondary heads of multi-headed cards
173 */
static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
	struct drm_minor *new_minor;
	int ret;
	int minor_id;

	DRM_DEBUG("\n");

	/* Reserve an id first (inserts NULL into the idr); the real
	 * pointer is published below with idr_replace(). */
	minor_id = drm_minor_get_id(dev, type);
	if (minor_id < 0)
		return minor_id;

	new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
	if (!new_minor) {
		ret = -ENOMEM;
		goto err_idr;
	}

	new_minor->type = type;
	new_minor->device = MKDEV(DRM_MAJOR, minor_id);
	new_minor->dev = dev;
	new_minor->index = minor_id;

	idr_replace(&drm_minors_idr, new_minor, minor_id);

	/* Only legacy (primary) minors get a /proc/dri entry. */
	if (type == DRM_MINOR_LEGACY) {
		ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
		if (ret) {
			DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
			goto err_mem;
		}
	} else
		new_minor->dev_root = NULL;

	ret = drm_sysfs_device_add(new_minor);
	if (ret) {
		printk(KERN_ERR
		       "DRM: Error sysfs_device_add.\n");
		goto err_g2;
	}
	*minor = new_minor;

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;


	/* Unwind in reverse order: proc entry, minor struct, idr slot. */
err_g2:
	if (new_minor->type == DRM_MINOR_LEGACY)
		drm_proc_cleanup(new_minor, drm_proc_root);
err_mem:
	kfree(new_minor);
err_idr:
	idr_remove(&drm_minors_idr, minor_id);
	*minor = NULL;
	return ret;
}
230
231/**
232 * Register.
233 *
234 * \param pdev - PCI device structure
235 * \param ent entry from the PCI ID table with device type flags
236 * \return zero on success or a negative number on failure.
237 *
238 * Attempt to gets inter module "drm" information. If we are first
239 * then register the character device and inter module information.
240 * Try and register, if we fail to register, backout previous work.
241 */
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
		struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
	if (!dev)
		return -ENOMEM;

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_g1;

	pci_set_master(pdev);
	/* Core init, driver ->load() and context bitmap. */
	if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
		goto err_g2;
	}
	/* Register the primary (legacy) minor: /proc and sysfs nodes. */
	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g2;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, dev->primary->index);

	return 0;

err_g2:
	pci_disable_device(pdev);
err_g1:
	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
	return ret;
}
278
279/**
280 * Put a device minor number.
281 *
282 * \param dev device data structure
283 * \return always zero
284 *
285 * Cleans up the proc resources. If it is the last minor then release the foreign
286 * "drm" data, otherwise unregisters the "drm" data, frees the dev list and
287 * unregisters the character device.
288 */
int drm_put_dev(struct drm_device * dev)
{
	DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);

	/* Release the identification strings before the device itself. */
	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname) + 1,
			 DRM_MEM_DRIVER);
		dev->devname = NULL;
	}
	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
	return 0;
}
306
307/**
308 * Put a secondary minor number.
309 *
310 * \param sec_minor - structure to be released
311 * \return always zero
312 *
313 * Cleans up the proc resources. Not legal for this to be the
314 * last minor released.
315 *
316 */
int drm_put_minor(struct drm_minor **minor_p)
{
	struct drm_minor *minor = *minor_p;
	DRM_DEBUG("release secondary minor %d\n", minor->index);

	/* Legacy minors own a /proc/dri entry that must be removed first. */
	if (minor->type == DRM_MINOR_LEGACY)
		drm_proc_cleanup(minor, drm_proc_root);
	drm_sysfs_device_remove(minor);

	/* Give the minor number back to the global idr. */
	idr_remove(&drm_minors_idr, minor->index);

	kfree(minor);
	*minor_p = NULL;
	return 0;
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
new file mode 100644
index 000000000000..af211a0ef179
--- /dev/null
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -0,0 +1,208 @@
1
2/*
3 * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
4 * extra sysfs attribute from DRM. Normal drm_sysfs_class
5 * does not allow adding attributes.
6 *
7 * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
8 * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
9 * Copyright (c) 2003-2004 IBM Corp.
10 *
11 * This file is released under the GPLv2
12 *
13 */
14
15#include <linux/device.h>
16#include <linux/kdev_t.h>
17#include <linux/err.h>
18
19#include "drm_core.h"
20#include "drmP.h"
21
22#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
23
24/**
25 * drm_sysfs_suspend - DRM class suspend hook
26 * @dev: Linux device to suspend
27 * @state: power state to enter
28 *
29 * Just figures out what the actual struct drm_device associated with
30 * @dev is and calls its suspend hook, if present.
31 */
32static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
33{
34 struct drm_minor *drm_minor = to_drm_minor(dev);
35 struct drm_device *drm_dev = drm_minor->dev;
36
37 if (drm_dev->driver->suspend)
38 return drm_dev->driver->suspend(drm_dev, state);
39
40 return 0;
41}
42
43/**
44 * drm_sysfs_resume - DRM class resume hook
45 * @dev: Linux device to resume
46 *
47 * Just figures out what the actual struct drm_device associated with
48 * @dev is and calls its resume hook, if present.
49 */
50static int drm_sysfs_resume(struct device *dev)
51{
52 struct drm_minor *drm_minor = to_drm_minor(dev);
53 struct drm_device *drm_dev = drm_minor->dev;
54
55 if (drm_dev->driver->resume)
56 return drm_dev->driver->resume(drm_dev);
57
58 return 0;
59}
60
61/* Display the version of drm_core. This doesn't work right in current design */
/* Emits "<name> <major>.<minor>.<patchlevel> <date>" for the class
 * "version" attribute. */
static ssize_t version_show(struct class *dev, char *buf)
{
	return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
		       CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
}
67
68static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
69
70/**
71 * drm_sysfs_create - create a struct drm_sysfs_class structure
72 * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
73 * @name: pointer to a string for the name of this class.
74 *
75 * This is used to create DRM class pointer that can then be used
76 * in calls to drm_sysfs_device_add().
77 *
78 * Note, the pointer created here is to be destroyed when finished by making a
79 * call to drm_sysfs_destroy().
80 */
81struct class *drm_sysfs_create(struct module *owner, char *name)
82{
83 struct class *class;
84 int err;
85
86 class = class_create(owner, name);
87 if (IS_ERR(class)) {
88 err = PTR_ERR(class);
89 goto err_out;
90 }
91
92 class->suspend = drm_sysfs_suspend;
93 class->resume = drm_sysfs_resume;
94
95 err = class_create_file(class, &class_attr_version);
96 if (err)
97 goto err_out_class;
98
99 return class;
100
101err_out_class:
102 class_destroy(class);
103err_out:
104 return ERR_PTR(err);
105}
106
107/**
108 * drm_sysfs_destroy - destroys DRM class
109 *
110 * Destroy the DRM device class.
111 */
112void drm_sysfs_destroy(void)
113{
114 if ((drm_class == NULL) || (IS_ERR(drm_class)))
115 return;
116 class_remove_file(drm_class, &class_attr_version);
117 class_destroy(drm_class);
118}
119
120static ssize_t show_dri(struct device *device, struct device_attribute *attr,
121 char *buf)
122{
123 struct drm_minor *drm_minor = to_drm_minor(device);
124 struct drm_device *drm_dev = drm_minor->dev;
125 if (drm_dev->driver->dri_library_name)
126 return drm_dev->driver->dri_library_name(drm_dev, buf);
127 return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
128}
129
/* Per-minor sysfs attributes installed by drm_sysfs_device_add(). */
static struct device_attribute device_attrs[] = {
	__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
};
133
134/**
135 * drm_sysfs_device_release - do nothing
136 * @dev: Linux device
137 *
138 * Normally, this would free the DRM device associated with @dev, along
139 * with cleaning up any other stuff. But we do that in the DRM core, so
140 * this function can just return and hope that the core does its job.
141 */
static void drm_sysfs_device_release(struct device *dev)
{
	/* Intentionally empty: the DRM core owns the minor's lifetime. */
}
146
147/**
148 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
149 * @dev: DRM device to be added
150 * @head: DRM head in question
151 *
152 * Add a DRM device to the DRM's device model class. We use @dev's PCI device
153 * as the parent for the Linux device, and make sure it has a file containing
154 * the driver we're using (for userspace compatibility).
155 */
156int drm_sysfs_device_add(struct drm_minor *minor)
157{
158 int err;
159 int i, j;
160 char *minor_str;
161
162 minor->kdev.parent = &minor->dev->pdev->dev;
163 minor->kdev.class = drm_class;
164 minor->kdev.release = drm_sysfs_device_release;
165 minor->kdev.devt = minor->device;
166 minor_str = "card%d";
167
168 snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
169
170 err = device_register(&minor->kdev);
171 if (err) {
172 DRM_ERROR("device add failed: %d\n", err);
173 goto err_out;
174 }
175
176 for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
177 err = device_create_file(&minor->kdev, &device_attrs[i]);
178 if (err)
179 goto err_out_files;
180 }
181
182 return 0;
183
184err_out_files:
185 if (i > 0)
186 for (j = 0; j < i; j++)
187 device_remove_file(&minor->kdev, &device_attrs[i]);
188 device_unregister(&minor->kdev);
189err_out:
190
191 return err;
192}
193
194/**
195 * drm_sysfs_device_remove - remove DRM device
196 * @dev: DRM device to remove
197 *
198 * This call unregisters and cleans up a class device that was created with a
199 * call to drm_sysfs_device_add()
200 */
201void drm_sysfs_device_remove(struct drm_minor *minor)
202{
203 int i;
204
205 for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
206 device_remove_file(&minor->kdev, &device_attrs[i]);
207 device_unregister(&minor->kdev);
208}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
new file mode 100644
index 000000000000..c234c6f24a8d
--- /dev/null
+++ b/drivers/gpu/drm/drm_vm.c
@@ -0,0 +1,673 @@
1/**
2 * \file drm_vm.c
3 * Memory mapping for DRM
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
11 *
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include "drmP.h"
37#if defined(__ia64__)
38#include <linux/efi.h>
39#endif
40
41static void drm_vm_open(struct vm_area_struct *vma);
42static void drm_vm_close(struct vm_area_struct *vma);
43
/* Compute page protection bits for an I/O mapping (registers or frame
 * buffer).  Starts from the generic protection for the VMA's flags and
 * applies per-architecture uncached / write-combine attributes. */
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	/* Set cache-disable (PCD) and clear write-through (PWT) for
	 * everything except AGP maps; skipped on pre-486 CPUs. */
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	/* Registers additionally get guarded (no speculative access). */
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	/* Honour the EFI memory map: write-combine where allowed,
	 * uncached otherwise. */
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
68
69static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
70{
71 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
72
73#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
74 tmp |= _PAGE_NO_CACHE;
75#endif
76 return tmp;
77}
78
79/**
80 * \c fault method for AGP virtual memory.
81 *
82 * \param vma virtual memory area.
83 * \param address access address.
84 * \return pointer to the page structure.
85 *
86 * Find the right map and if it's AGP memory find the real physical page to
87 * map, get the page, increment the use count and return it.
88 */
89#if __OS_HAS_AGP
90static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
91{
92 struct drm_file *priv = vma->vm_file->private_data;
93 struct drm_device *dev = priv->minor->dev;
94 struct drm_map *map = NULL;
95 struct drm_map_list *r_list;
96 struct drm_hash_item *hash;
97
98 /*
99 * Find the right map
100 */
101 if (!drm_core_has_AGP(dev))
102 goto vm_fault_error;
103
104 if (!dev->agp || !dev->agp->cant_use_aperture)
105 goto vm_fault_error;
106
107 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
108 goto vm_fault_error;
109
110 r_list = drm_hash_entry(hash, struct drm_map_list, hash);
111 map = r_list->map;
112
113 if (map && map->type == _DRM_AGP) {
114 /*
115 * Using vm_pgoff as a selector forces us to use this unusual
116 * addressing scheme.
117 */
118 unsigned long offset = (unsigned long)vmf->virtual_address -
119 vma->vm_start;
120 unsigned long baddr = map->offset + offset;
121 struct drm_agp_mem *agpmem;
122 struct page *page;
123
124#ifdef __alpha__
125 /*
126 * Adjust to a bus-relative address
127 */
128 baddr -= dev->hose->mem_space->start;
129#endif
130
131 /*
132 * It's AGP memory - find the real physical page to map
133 */
134 list_for_each_entry(agpmem, &dev->agp->memory, head) {
135 if (agpmem->bound <= baddr &&
136 agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
137 break;
138 }
139
140 if (!agpmem)
141 goto vm_fault_error;
142
143 /*
144 * Get the page, inc the use count, and return it
145 */
146 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
147 page = virt_to_page(__va(agpmem->memory->memory[offset]));
148 get_page(page);
149 vmf->page = page;
150
151 DRM_DEBUG
152 ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
153 baddr, __va(agpmem->memory->memory[offset]), offset,
154 page_count(page));
155 return 0;
156 }
157vm_fault_error:
158 return VM_FAULT_SIGBUS; /* Disallow mremap */
159}
160#else /* __OS_HAS_AGP */
161static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
162{
163 return VM_FAULT_SIGBUS;
164}
165#endif /* __OS_HAS_AGP */
166
167/**
168 * \c nopage method for shared virtual memory.
169 *
170 * \param vma virtual memory area.
171 * \param address access address.
172 * \return pointer to the page structure.
173 *
174 * Get the mapping, find the real physical page to map, get the page, and
175 * return it.
176 */
177static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
178{
179 struct drm_map *map = (struct drm_map *) vma->vm_private_data;
180 unsigned long offset;
181 unsigned long i;
182 struct page *page;
183
184 if (!map)
185 return VM_FAULT_SIGBUS; /* Nothing allocated */
186
187 offset = (unsigned long)vmf->virtual_address - vma->vm_start;
188 i = (unsigned long)map->handle + offset;
189 page = vmalloc_to_page((void *)i);
190 if (!page)
191 return VM_FAULT_SIGBUS;
192 get_page(page);
193 vmf->page = page;
194
195 DRM_DEBUG("shm_fault 0x%lx\n", offset);
196 return 0;
197}
198
199/**
200 * \c close method for shared virtual memory.
201 *
202 * \param vma virtual memory area.
203 *
204 * Deletes map information if we are the last
205 * person to close a mapping and it's not in the global maplist.
206 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	/* Drop this VMA from the tracking list while counting how many
	 * live VMAs (including this one) still reference the same map. */
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			/* Release the backing resource according to the
			 * map type before freeing the map itself. */
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				/* Backing memory is owned elsewhere;
				 * nothing to free here. */
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
276
277/**
278 * \c fault method for DMA virtual memory.
279 *
280 * \param vma virtual memory area.
281 * \param address access address.
282 * \return pointer to the page structure.
283 *
284 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
285 */
286static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
287{
288 struct drm_file *priv = vma->vm_file->private_data;
289 struct drm_device *dev = priv->minor->dev;
290 struct drm_device_dma *dma = dev->dma;
291 unsigned long offset;
292 unsigned long page_nr;
293 struct page *page;
294
295 if (!dma)
296 return VM_FAULT_SIGBUS; /* Error */
297 if (!dma->pagelist)
298 return VM_FAULT_SIGBUS; /* Nothing allocated */
299
300 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
301 page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
302 page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
303
304 get_page(page);
305 vmf->page = page;
306
307 DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
308 return 0;
309}
310
311/**
312 * \c fault method for scatter-gather virtual memory.
313 *
314 * \param vma virtual memory area.
315 * \param address access address.
316 * \return pointer to the page structure.
317 *
318 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
319 */
320static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
321{
322 struct drm_map *map = (struct drm_map *) vma->vm_private_data;
323 struct drm_file *priv = vma->vm_file->private_data;
324 struct drm_device *dev = priv->minor->dev;
325 struct drm_sg_mem *entry = dev->sg;
326 unsigned long offset;
327 unsigned long map_offset;
328 unsigned long page_offset;
329 struct page *page;
330
331 if (!entry)
332 return VM_FAULT_SIGBUS; /* Error */
333 if (!entry->pagelist)
334 return VM_FAULT_SIGBUS; /* Nothing allocated */
335
336 offset = (unsigned long)vmf->virtual_address - vma->vm_start;
337 map_offset = map->offset - (unsigned long)dev->sg->virtual;
338 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
339 page = entry->pagelist[page_offset];
340 get_page(page);
341 vmf->page = page;
342
343 return 0;
344}
345
/* vm_operations fault hook: delegate to the AGP fault implementation. */
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}
350
/* vm_operations fault hook: delegate to the SHM fault implementation. */
static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}
355
/* vm_operations fault hook: delegate to the DMA fault implementation. */
static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}
360
/* vm_operations fault hook: delegate to the scatter-gather fault
 * implementation. */
static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}
365
366/** AGP virtual memory operations */
367static struct vm_operations_struct drm_vm_ops = {
368 .fault = drm_vm_fault,
369 .open = drm_vm_open,
370 .close = drm_vm_close,
371};
372
373/** Shared virtual memory operations */
374static struct vm_operations_struct drm_vm_shm_ops = {
375 .fault = drm_vm_shm_fault,
376 .open = drm_vm_open,
377 .close = drm_vm_shm_close,
378};
379
380/** DMA virtual memory operations */
381static struct vm_operations_struct drm_vm_dma_ops = {
382 .fault = drm_vm_dma_fault,
383 .open = drm_vm_open,
384 .close = drm_vm_close,
385};
386
387/** Scatter-gather virtual memory operations */
388static struct vm_operations_struct drm_vm_sg_ops = {
389 .fault = drm_vm_sg_fault,
390 .open = drm_vm_open,
391 .close = drm_vm_close,
392};
393
394/**
395 * \c open method for shared virtual memory.
396 *
397 * \param vma virtual memory area.
398 *
399 * Create a new drm_vma_entry structure as the \p vma private data entry and
400 * add it to drm_device::vmalist.
401 */
402static void drm_vm_open_locked(struct vm_area_struct *vma)
403{
404 struct drm_file *priv = vma->vm_file->private_data;
405 struct drm_device *dev = priv->minor->dev;
406 struct drm_vma_entry *vma_entry;
407
408 DRM_DEBUG("0x%08lx,0x%08lx\n",
409 vma->vm_start, vma->vm_end - vma->vm_start);
410 atomic_inc(&dev->vma_count);
411
412 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
413 if (vma_entry) {
414 vma_entry->vma = vma;
415 vma_entry->pid = current->pid;
416 list_add(&vma_entry->head, &dev->vmalist);
417 }
418}
419
420static void drm_vm_open(struct vm_area_struct *vma)
421{
422 struct drm_file *priv = vma->vm_file->private_data;
423 struct drm_device *dev = priv->minor->dev;
424
425 mutex_lock(&dev->struct_mutex);
426 drm_vm_open_locked(vma);
427 mutex_unlock(&dev->struct_mutex);
428}
429
430/**
431 * \c close method for all virtual memory types.
432 *
433 * \param vma virtual memory area.
434 *
435 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
436 * free it.
437 */
438static void drm_vm_close(struct vm_area_struct *vma)
439{
440 struct drm_file *priv = vma->vm_file->private_data;
441 struct drm_device *dev = priv->minor->dev;
442 struct drm_vma_entry *pt, *temp;
443
444 DRM_DEBUG("0x%08lx,0x%08lx\n",
445 vma->vm_start, vma->vm_end - vma->vm_start);
446 atomic_dec(&dev->vma_count);
447
448 mutex_lock(&dev->struct_mutex);
449 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
450 if (pt->vma == vma) {
451 list_del(&pt->head);
452 drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
453 break;
454 }
455 }
456 mutex_unlock(&dev->struct_mutex);
457}
458
459/**
460 * mmap DMA memory.
461 *
462 * \param file_priv DRM file private.
463 * \param vma virtual memory area.
464 * \return zero on success or a negative number on failure.
465 *
466 * Sets the virtual memory area operations structure to vm_dma_ops, the file
467 * pointer, and calls vm_open().
468 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	/* Force the mapping read-only for unprivileged processes when
	 * the DMA buffers were allocated PCI read-only. */
	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	/* Caller (drm_mmap) already holds dev->struct_mutex. */
	drm_vm_open_locked(vma);
	return 0;
}
511
/* Default driver hook: a map's mmap offset is simply its offset field. */
unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);
518
/* Default driver hook for the register-space offset added when
 * remapping _DRM_REGISTERS / _DRM_FRAME_BUFFER maps.  Only alpha needs
 * a correction (dense-mapping base relative to the bus window). */
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
529
530/**
531 * mmap DMA memory.
532 *
533 * \param file_priv DRM file private.
534 * \param vma virtual memory area.
535 * \return zero on success or a negative number on failure.
536 *
537 * If the virtual memory area has no offset associated with it then it's a DMA
538 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
539 * checks that the restricted flag is not set, sets the virtual memory operations
540 * according to the mapping type and remaps the pages. Finally sets the file
541 * pointer and calls vm_open().
542 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	/* vm_pgoff is the hash key identifying the requested map. */
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Read-only maps: strip write permission for unprivileged users. */
	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		/* NOTE(review): vm_page_prot is only updated after
		 * remap_pfn_range() has consumed the old value -
		 * confirm whether that ordering is intentional. */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
660
661int drm_mmap(struct file *filp, struct vm_area_struct *vma)
662{
663 struct drm_file *priv = filp->private_data;
664 struct drm_device *dev = priv->minor->dev;
665 int ret;
666
667 mutex_lock(&dev->struct_mutex);
668 ret = drm_mmap_locked(filp, vma);
669 mutex_unlock(&dev->struct_mutex);
670
671 return ret;
672}
673EXPORT_SYMBOL(drm_mmap);
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
new file mode 100644
index 000000000000..43844ecafcc5
--- /dev/null
+++ b/drivers/gpu/drm/i810/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

# Objects composing the i810.ko module.
ccflags-y := -Iinclude/drm
i810-y := i810_drv.o i810_dma.o

obj-$(CONFIG_DRM_I810) += i810.o
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
new file mode 100644
index 000000000000..e5de8ea41544
--- /dev/null
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -0,0 +1,1283 @@
1/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "i810_drm.h"
36#include "i810_drv.h"
37#include <linux/interrupt.h> /* For task queue support */
38#include <linux/delay.h>
39#include <linux/pagemap.h>
40
41#define I810_BUF_FREE 2
42#define I810_BUF_CLIENT 1
43#define I810_BUF_HARDWARE 0
44
45#define I810_BUF_UNMAPPED 0
46#define I810_BUF_MAPPED 1
47
48static struct drm_buf *i810_freelist_get(struct drm_device * dev)
49{
50 struct drm_device_dma *dma = dev->dma;
51 int i;
52 int used;
53
54 /* Linear search might not be the best solution */
55
56 for (i = 0; i < dma->buf_count; i++) {
57 struct drm_buf *buf = dma->buflist[i];
58 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
59 /* In use is already a pointer */
60 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
61 I810_BUF_CLIENT);
62 if (used == I810_BUF_FREE) {
63 return buf;
64 }
65 }
66 return NULL;
67}
68
69/* This should only be called if the buffer is not sent to the hardware
70 * yet, the hardware updates in use for us once its on the ring buffer.
71 */
72
73static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
74{
75 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
76 int used;
77
78 /* In use is already a pointer */
79 used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
80 if (used != I810_BUF_CLIENT) {
81 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
82 return -EINVAL;
83 }
84
85 return 0;
86}
87
/* mmap handler installed temporarily by i810_map_buffer(): maps the
 * single buffer stashed in dev_priv->mmap_buffer into userspace. */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i810_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i810_buf_priv_t *buf_priv;

	lock_kernel();
	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	/* Set by i810_map_buffer() just before it calls do_mmap().
	 * NOTE(review): assumed non-NULL on this path - confirm. */
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I810_BUF_MAPPED;
	unlock_kernel();

	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
114
/* File operations used only while a buffer mmap is in flight: identical
 * to the normal DRM fops except that mmap is redirected to
 * i810_mmap_buffers (see the fops swap in i810_map_buffer). */
static const struct file_operations i810_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.ioctl = drm_ioctl,
	.mmap = i810_mmap_buffers,
	.fasync = drm_fasync,
};
122
/* Map a DMA buffer into the calling process: temporarily swap the
 * file's fops so that do_mmap() lands in i810_mmap_buffers() for this
 * one call, then restore the original fops. */
static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	old_fops = file_priv->filp->f_op;
	file_priv->filp->f_op = &i810_buffer_fops;
	/* Communicates the target buffer to i810_mmap_buffers(). */
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
					    PROT_READ | PROT_WRITE,
					    MAP_SHARED, buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	file_priv->filp->f_op = old_fops;
	if (IS_ERR(buf_priv->virtual)) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR(buf_priv->virtual);
		buf_priv->virtual = NULL;
	}
	up_write(&current->mm->mmap_sem);

	return retcode;
}
153
154static int i810_unmap_buffer(struct drm_buf * buf)
155{
156 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
157 int retcode = 0;
158
159 if (buf_priv->currently_mapped != I810_BUF_MAPPED)
160 return -EINVAL;
161
162 down_write(&current->mm->mmap_sem);
163 retcode = do_munmap(current->mm,
164 (unsigned long)buf_priv->virtual,
165 (size_t) buf->total);
166 up_write(&current->mm->mmap_sem);
167
168 buf_priv->currently_mapped = I810_BUF_UNMAPPED;
169 buf_priv->virtual = NULL;
170
171 return retcode;
172}
173
174static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
175 struct drm_file *file_priv)
176{
177 struct drm_buf *buf;
178 drm_i810_buf_priv_t *buf_priv;
179 int retcode = 0;
180
181 buf = i810_freelist_get(dev);
182 if (!buf) {
183 retcode = -ENOMEM;
184 DRM_DEBUG("retcode=%d\n", retcode);
185 return retcode;
186 }
187
188 retcode = i810_map_buffer(buf, file_priv);
189 if (retcode) {
190 i810_freelist_put(dev, buf);
191 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
192 return retcode;
193 }
194 buf->file_priv = file_priv;
195 buf_priv = buf->dev_private;
196 d->granted = 1;
197 d->request_idx = buf->idx;
198 d->request_size = buf->total;
199 d->virtual = buf_priv->virtual;
200
201 return retcode;
202}
203
static int i810_dma_cleanup(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
		    (drm_i810_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I810_WRITE(0x02080, 0x1ffff000);
		}
		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		/* Undo the per-buffer kernel mappings set up by
		 * i810_freelist_init(). */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;

			if (buf_priv->kernel_virtual && buf->total)
				drm_core_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
244
/* Busy-wait until at least n bytes are free on the ring buffer.  The
 * 3-second timeout is restarted whenever the hardware head pointer
 * advances; on lockup the wait is abandoned after logging.  Returns
 * the number of polling iterations performed. */
static int i810_wait_ring(struct drm_device * dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;

		/* Head moved: hardware is still making progress, so
		 * give it a fresh timeout window. */
		if (ring->head != last_head) {
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
	}

      out_wait_ring:
	return iters;
}
277
278static void i810_kernel_lost_context(struct drm_device * dev)
279{
280 drm_i810_private_t *dev_priv = dev->dev_private;
281 drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
282
283 ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
284 ring->tail = I810_READ(LP_RING + RING_TAIL);
285 ring->space = ring->head - (ring->tail + 8);
286 if (ring->space < 0)
287 ring->space += ring->Size;
288}
289
/* Carve a per-buffer "in use" word out of the hardware status page
 * (starting at byte offset 24, one u32 per buffer) and ioremap each
 * buffer's AGP range so the kernel can access it. */
static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int my_idx = 24;
	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
	int i;

	/* Cap buf_count so the freelist words fit in the status page. */
	if (dma->buf_count > 1019) {
		/* Not enough space in the status page for the freelist */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		/* Describe the buffer's AGP aperture range so it can be
		 * ioremapped for kernel access. */
		buf_priv->map.offset = buf->bus_address;
		buf_priv->map.size = buf->total;
		buf_priv->map.type = _DRM_AGP;
		buf_priv->map.flags = 0;
		buf_priv->map.mtrr = 0;

		drm_core_ioremap(&buf_priv->map, dev);
		buf_priv->kernel_virtual = buf_priv->map.handle;

	}
	return 0;
}
324
/* Set up the driver-private DMA state from userspace init parameters:
 * locate the SAREA / MMIO / buffer maps, ioremap the ring buffer,
 * allocate the hardware status page and build the buffer freelist.
 * On every failure path dev_private is set before i810_dma_cleanup()
 * so the cleanup routine can tear down partial state. */
static int i810_dma_initialize(struct drm_device * dev,
			       drm_i810_private_t * dev_priv,
			       drm_i810_init_t * init)
{
	struct drm_map_list *r_list;
	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* The SAREA is the SHM map flagged as containing the lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}
	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private portion lives inside the SAREA at the given
	 * offset. */
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	/* Map the ring buffer out of the AGP aperture. */
	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	/* Precomputed destination-info words (offset | pitch bits). */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I810_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
431
432static int i810_dma_init(struct drm_device *dev, void *data,
433 struct drm_file *file_priv)
434{
435 drm_i810_private_t *dev_priv;
436 drm_i810_init_t *init = data;
437 int retcode = 0;
438
439 switch (init->func) {
440 case I810_INIT_DMA_1_4:
441 DRM_INFO("Using v1.4 init.\n");
442 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
443 DRM_MEM_DRIVER);
444 if (dev_priv == NULL)
445 return -ENOMEM;
446 retcode = i810_dma_initialize(dev, dev_priv, init);
447 break;
448
449 case I810_CLEANUP_DMA:
450 DRM_INFO("DMA Cleanup\n");
451 retcode = i810_dma_cleanup(dev);
452 break;
453 default:
454 return -EINVAL;
455 }
456
457 return retcode;
458}
459
460/* Most efficient way to verify state for the i810 is as it is
461 * emitted. Non-conformant state is silently dropped.
462 *
463 * Use 'volatile' & local var tmp to force the emitted values to be
464 * identical to the verified ones.
465 */
466static void i810EmitContextVerified(struct drm_device * dev,
467 volatile unsigned int *code)
468{
469 drm_i810_private_t *dev_priv = dev->dev_private;
470 int i, j = 0;
471 unsigned int tmp;
472 RING_LOCALS;
473
474 BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
475
476 OUT_RING(GFX_OP_COLOR_FACTOR);
477 OUT_RING(code[I810_CTXREG_CF1]);
478
479 OUT_RING(GFX_OP_STIPPLE);
480 OUT_RING(code[I810_CTXREG_ST1]);
481
482 for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
483 tmp = code[i];
484
485 if ((tmp & (7 << 29)) == (3 << 29) &&
486 (tmp & (0x1f << 24)) < (0x1d << 24)) {
487 OUT_RING(tmp);
488 j++;
489 } else
490 printk("constext state dropped!!!\n");
491 }
492
493 if (j & 1)
494 OUT_RING(0);
495
496 ADVANCE_LP_RING();
497}
498
499static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
500{
501 drm_i810_private_t *dev_priv = dev->dev_private;
502 int i, j = 0;
503 unsigned int tmp;
504 RING_LOCALS;
505
506 BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
507
508 OUT_RING(GFX_OP_MAP_INFO);
509 OUT_RING(code[I810_TEXREG_MI1]);
510 OUT_RING(code[I810_TEXREG_MI2]);
511 OUT_RING(code[I810_TEXREG_MI3]);
512
513 for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
514 tmp = code[i];
515
516 if ((tmp & (7 << 29)) == (3 << 29) &&
517 (tmp & (0x1f << 24)) < (0x1d << 24)) {
518 OUT_RING(tmp);
519 j++;
520 } else
521 printk("texture state dropped!!!\n");
522 }
523
524 if (j & 1)
525 OUT_RING(0);
526
527 ADVANCE_LP_RING();
528}
529
/* Need to do some additional checking when setting the dest buffer.
 * Only the precomputed front/back destination-info dwords are accepted;
 * the depth buffer info is always taken from dev_priv, never the client.
 */
static void i810EmitDestVerified(struct drm_device * dev,
				 volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);

	/* Reject any destination that is not exactly the front or back
	 * buffer set up at init time.
	 */
	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(tmp);
	} else
		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);

	/* invariant:
	 */
	OUT_RING(CMD_OP_Z_BUFFER_INFO);
	OUT_RING(dev_priv->zi1);

	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I810_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I810_DESTREG_DR1]);
	OUT_RING(code[I810_DESTREG_DR2]);
	OUT_RING(code[I810_DESTREG_DR3]);
	OUT_RING(code[I810_DESTREG_DR4]);
	OUT_RING(0);	/* pad to an even dword count */

	ADVANCE_LP_RING();
}
566
/*
 * Flush any dirty state from the SAREA to the hardware, clearing each
 * dirty bit as its state group is emitted.  Emission order (buffers,
 * context, tex0, tex1) is preserved deliberately.
 */
static void i810EmitState(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%x\n", dirty);

	if (dirty & I810_UPLOAD_BUFFERS) {
		i810EmitDestVerified(dev, sarea_priv->BufferState);
		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
	}

	if (dirty & I810_UPLOAD_CTX) {
		i810EmitContextVerified(dev, sarea_priv->ContextState);
		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
	}

	if (dirty & I810_UPLOAD_TEX0) {
		i810EmitTexVerified(dev, sarea_priv->TexState[0]);
		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
	}

	if (dirty & I810_UPLOAD_TEX1) {
		i810EmitTexVerified(dev, sarea_priv->TexState[1]);
		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
	}
}
595
/* need to verify
 *
 * Clear the requested buffers (front/back/depth) with solid-color blits,
 * clipped to the SAREA clip rectangles.  When page-flipped (current_page
 * == 1) the meaning of front and back is swapped before clearing.
 */
static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* 16bpp: two bytes per pixel */
	int i;
	RING_LOCALS;

	/* When flipped, front/back offsets are exchanged. */
	if (dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(I810_FRONT | I810_BACK);
		if (tmp & I810_FRONT)
			flags |= I810_BACK;
		if (tmp & I810_BACK)
			flags |= I810_FRONT;
	}

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Skip malformed or out-of-screen rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		/* Solid-pattern color blit; 0xF0 is the ROP (PATCOPY). */
		if (flags & I810_FRONT) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_BACK) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->back_offset + start);
			OUT_RING(clear_color);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}

		if (flags & I810_DEPTH) {
			BEGIN_LP_RING(6);
			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
			OUT_RING((height << 16) | width);
			OUT_RING(dev_priv->depth_offset + start);
			OUT_RING(clear_zval);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}
}
672
/*
 * Copy-blit the back buffer to the front buffer (or vice versa when page
 * flipped) for each SAREA clip rectangle.
 */
static void i810_dma_dispatch_swap(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* 16bpp: two bytes per pixel */
	int i;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
		unsigned int start = dst;

		/* Skip malformed or out-of-screen rectangles. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		/* Source-copy blit; 0xCC is the ROP (SRCCOPY).  Source and
		 * destination swap roles depending on the current page.
		 */
		BEGIN_LP_RING(6);
		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
		OUT_RING(pitch | (0xCC << 16));
		OUT_RING((h << 16) | (w * cpp));
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset + start);
		else
			OUT_RING(dev_priv->back_offset + start);
		OUT_RING(pitch);
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset + start);
		else
			OUT_RING(dev_priv->front_offset + start);
		ADVANCE_LP_RING();
	}
}
718
/*
 * Dispatch a client vertex buffer as a protected batch buffer, once per
 * clip rectangle (with a scissor programmed for each).  If `discard` is
 * set, the buffer is handed back to the freelist via a store-dword into
 * the hardware status page once the hardware is done with it.
 */
static void i810_dma_dispatch_vertex(struct drm_device * dev,
				     struct drm_buf * buf, int discard, int used)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	/* Oversized submissions are dropped rather than dispatched. */
	if (used > 4 * 1024)
		used = 0;

	if (sarea_priv->dirty)
		i810EmitState(dev);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);

		/* Write the primitive header into the first dword of the
		 * buffer; length field is (dwords - 2).
		 */
		*(u32 *) buf_priv->kernel_virtual =
		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));

		/* Pad with a NOP dword to keep the batch length even. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		do {
			if (i < nbox) {
				BEGIN_LP_RING(4);
				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					 SC_ENABLE);
				OUT_RING(GFX_OP_SCISSOR_INFO);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING((box[i].x2 -
					  1) | ((box[i].y2 - 1) << 16));
				ADVANCE_LP_RING();
			}

			BEGIN_LP_RING(4);
			OUT_RING(CMD_OP_BATCH_BUFFER);
			OUT_RING(start | BB1_PROTECTED);
			OUT_RING(start + used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		/* Mark the buffer as owned by the hardware; ignore the
		 * result — if it wasn't CLIENT there is nothing to do.
		 */
		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			      I810_BUF_HARDWARE);

		/* Have the hardware record the dispatch counter and free
		 * the buffer in the status page when it gets here.
		 */
		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I810_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
798
/*
 * Flip the scan-out between the front and back buffers: flush the map
 * cache, program the new front-buffer address, wait for the flip to
 * happen, then publish the new page number in the SAREA.
 */
static void i810_dma_dispatch_flip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int pitch = dev_priv->pitch;
	RING_LOCALS;

	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
	/* On i815 at least ASYNC is buggy */
	/* pitch<<5 is from 11.2.8 p158,
	   its the pitch / 8 then left shifted 8,
	   so (pitch >> 3) << 8 */
	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

}
844
/*
 * Drain the ring: flush the map cache, force a head report, then wait
 * until the ring is almost empty (all but 8 bytes free).
 */
static void i810_dma_quiescent(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
861
862static int i810_flush_queue(struct drm_device * dev)
863{
864 drm_i810_private_t *dev_priv = dev->dev_private;
865 struct drm_device_dma *dma = dev->dma;
866 int i, ret = 0;
867 RING_LOCALS;
868
869 i810_kernel_lost_context(dev);
870
871 BEGIN_LP_RING(2);
872 OUT_RING(CMD_REPORT_HEAD);
873 OUT_RING(0);
874 ADVANCE_LP_RING();
875
876 i810_wait_ring(dev, dev_priv->ring.Size - 8);
877
878 for (i = 0; i < dma->buf_count; i++) {
879 struct drm_buf *buf = dma->buflist[i];
880 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
881
882 int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
883 I810_BUF_FREE);
884
885 if (used == I810_BUF_HARDWARE)
886 DRM_DEBUG("reclaimed from HARDWARE\n");
887 if (used == I810_BUF_CLIENT)
888 DRM_DEBUG("still on client\n");
889 }
890
891 return ret;
892}
893
/* Must be called with the lock held */
/*
 * Reclaim all DMA buffers owned by a departing client: flush the queue,
 * then flip each of the client's CLIENT-owned buffers back to FREE and
 * drop any stale mapping state.
 */
static void i810_reclaim_buffers(struct drm_device * dev,
				 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int i;

	/* Nothing to do if DMA was never set up. */
	if (!dma)
		return;
	if (!dev->dev_private)
		return;
	if (!dma->buflist)
		return;

	i810_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv) {
			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
					   I810_BUF_FREE);

			if (used == I810_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
		}
	}
}
925
/* DRM_I810_FLUSH ioctl: drain the DMA queue.  Requires the HW lock. */
static int i810_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_flush_queue(dev);
	return 0;
}
934
935static int i810_dma_vertex(struct drm_device *dev, void *data,
936 struct drm_file *file_priv)
937{
938 struct drm_device_dma *dma = dev->dma;
939 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
940 u32 *hw_status = dev_priv->hw_status_page;
941 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
942 dev_priv->sarea_priv;
943 drm_i810_vertex_t *vertex = data;
944
945 LOCK_TEST_WITH_RETURN(dev, file_priv);
946
947 DRM_DEBUG("idx %d used %d discard %d\n",
948 vertex->idx, vertex->used, vertex->discard);
949
950 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
951 return -EINVAL;
952
953 i810_dma_dispatch_vertex(dev,
954 dma->buflist[vertex->idx],
955 vertex->discard, vertex->used);
956
957 atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
958 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
959 sarea_priv->last_enqueue = dev_priv->counter - 1;
960 sarea_priv->last_dispatch = (int)hw_status[5];
961
962 return 0;
963}
964
965static int i810_clear_bufs(struct drm_device *dev, void *data,
966 struct drm_file *file_priv)
967{
968 drm_i810_clear_t *clear = data;
969
970 LOCK_TEST_WITH_RETURN(dev, file_priv);
971
972 /* GH: Someone's doing nasty things... */
973 if (!dev->dev_private) {
974 return -EINVAL;
975 }
976
977 i810_dma_dispatch_clear(dev, clear->flags,
978 clear->clear_color, clear->clear_depth);
979 return 0;
980}
981
/* DRM_I810_SWAP ioctl: blit back buffer to front.  Requires the HW lock. */
static int i810_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_dma_dispatch_swap(dev);
	return 0;
}
992
993static int i810_getage(struct drm_device *dev, void *data,
994 struct drm_file *file_priv)
995{
996 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
997 u32 *hw_status = dev_priv->hw_status_page;
998 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
999 dev_priv->sarea_priv;
1000
1001 sarea_priv->last_dispatch = (int)hw_status[5];
1002 return 0;
1003}
1004
/*
 * DRM_I810_GETBUF ioctl: hand a free DMA buffer to the caller (granted is
 * set by i810_dma_get_buffer on success) and refresh the SAREA dispatch
 * age.  Requires the HW lock.
 */
static int i810_getbuf(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int retcode = 0;
	drm_i810_dma_t *d = data;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
	    dev_priv->sarea_priv;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	d->granted = 0;

	retcode = i810_dma_get_buffer(dev, d, file_priv);

	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
		  task_pid_nr(current), retcode, d->granted);

	sarea_priv->last_dispatch = (int)hw_status[5];

	return retcode;
}
1028
/* DRM_I810_COPY ioctl: intentional no-op kept for ABI compatibility. */
static int i810_copybuf(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1035
/* DRM_I810_DOCOPY ioctl: intentional no-op kept for ABI compatibility. */
static int i810_docopy(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1042
/*
 * Dispatch an XvMC (motion compensation) buffer as a protected batch
 * buffer, then have the hardware free the buffer and record last_render
 * in the status page when it is done.
 */
static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take ownership from the client; a mismatch is only logged. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT) {
		DRM_DEBUG("MC found buffer that isn't mine!\n");
	}

	/* Oversized submissions are dropped rather than dispatched. */
	if (used > 4 * 1024)
		used = 0;

	/* Mark all state dirty so it is re-emitted on the next vertex. */
	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Pad with a NOP dword to keep the batch length even. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}
	BEGIN_LP_RING(4);
	OUT_RING(CMD_OP_BATCH_BUFFER);
	OUT_RING(start | BB1_PROTECTED);
	OUT_RING(start + used - 4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Free the buffer and record last_render in the status page once
	 * the hardware reaches these commands.
	 */
	BEGIN_LP_RING(8);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(buf_priv->my_use_idx);
	OUT_RING(I810_BUF_FREE);
	OUT_RING(0);

	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(16);
	OUT_RING(last_render);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
1101
1102static int i810_dma_mc(struct drm_device *dev, void *data,
1103 struct drm_file *file_priv)
1104{
1105 struct drm_device_dma *dma = dev->dma;
1106 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1107 u32 *hw_status = dev_priv->hw_status_page;
1108 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1109 dev_priv->sarea_priv;
1110 drm_i810_mc_t *mc = data;
1111
1112 LOCK_TEST_WITH_RETURN(dev, file_priv);
1113
1114 if (mc->idx >= dma->buf_count || mc->idx < 0)
1115 return -EINVAL;
1116
1117 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
1118 mc->last_render);
1119
1120 atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
1121 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1122 sarea_priv->last_enqueue = dev_priv->counter - 1;
1123 sarea_priv->last_dispatch = (int)hw_status[5];
1124
1125 return 0;
1126}
1127
/* DRM_I810_RSTATUS ioctl: return dword 4 of the hardware status page. */
static int i810_rstatus(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
}
1135
/*
 * DRM_I810_OV0INFO ioctl: report the overlay register offset and its
 * physical address as recorded at init time.
 */
static int i810_ov0_info(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	drm_i810_overlay_t *ov = data;

	ov->offset = dev_priv->overlay_offset;
	ov->physical = dev_priv->overlay_physical;

	return 0;
}
1147
/*
 * DRM_I810_FSTATUS ioctl: read hardware register 0x30008 and return it.
 * Requires the HW lock.
 */
static int i810_fstatus(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);
	return I810_READ(0x30008);
}
1156
1157static int i810_ov0_flip(struct drm_device *dev, void *data,
1158 struct drm_file *file_priv)
1159{
1160 drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
1161
1162 LOCK_TEST_WITH_RETURN(dev, file_priv);
1163
1164 //Tell the overlay to update
1165 I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
1166
1167 return 0;
1168}
1169
/* Not sure why this isn't set all the time:
 */
/*
 * Enable page flipping, starting on page 0, and publish the current page
 * in the SAREA.
 */
static void i810_do_init_pageflip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
1181
/*
 * Disable page flipping, flipping back to the front buffer first if the
 * back buffer is currently being scanned out.  Always returns 0.
 */
static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	if (dev_priv->current_page != 0)
		i810_dma_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}
1193
/*
 * DRM_I810_FLIP ioctl: enable page flipping on first use, then perform
 * one flip.  Requires the HW lock.
 */
static int i810_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->page_flipping)
		i810_do_init_pageflip(dev);

	i810_dma_dispatch_flip(dev);
	return 0;
}
1209
/*
 * DRM load hook: register the four i810-specific statistics counter
 * types on top of the generic DRM counters.  Always returns 0.
 */
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* i810 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	return 0;
}
1221
/* DRM lastclose hook: tear down all DMA state when the last fd closes. */
void i810_driver_lastclose(struct drm_device * dev)
{
	i810_dma_cleanup(dev);
}
1226
1227void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1228{
1229 if (dev->dev_private) {
1230 drm_i810_private_t *dev_priv = dev->dev_private;
1231 if (dev_priv->page_flipping) {
1232 i810_do_cleanup_pageflip(dev);
1233 }
1234 }
1235}
1236
/* DRM reclaim hook (called with the lock held): free a client's buffers. */
void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
					struct drm_file *file_priv)
{
	i810_reclaim_buffers(dev, file_priv);
}
1242
/* DRM dma_quiescent hook: drain the ring.  Always returns 0. */
int i810_driver_dma_quiescent(struct drm_device * dev)
{
	i810_dma_quiescent(dev);
	return 0;
}
1248
/* Ioctl dispatch table.  All entries require authentication; INIT and MC
 * are additionally restricted to the master and root.
 */
struct drm_ioctl_desc i810_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
};

int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
1268
1269/**
1270 * Determine if the device really is AGP or not.
1271 *
1272 * All Intel graphics chipsets are treated as AGP, even if they are really
1273 * PCI-e.
1274 *
1275 * \param dev The device to be tested.
1276 *
1277 * \returns
 * A value of 1 is always returned to indicate every i810 is AGP.
1279 */
int i810_driver_device_is_agp(struct drm_device * dev)
{
	/* Unconditional: every i810 is treated as AGP (see comment above). */
	return 1;
}
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
new file mode 100644
index 000000000000..fabb9a817966
--- /dev/null
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -0,0 +1,97 @@
1/* i810_drv.c -- I810 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Jeff Hartmann <jhartmann@valinux.com>
30 * Gareth Hughes <gareth@valinux.com>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "i810_drm.h"
36#include "i810_drv.h"
37
38#include "drm_pciids.h"
39
/* PCI IDs this driver binds to; expanded from the shared drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	i810_PCI_IDS
};
43
/* DRM driver description: feature flags, i810-specific hooks from
 * i810_dma.c, and the file / PCI operation tables.
 */
static struct drm_driver driver = {
	/* AGP is mandatory for this hardware; DMA requests are queued. */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
	.load = i810_driver_load,
	.lastclose = i810_driver_lastclose,
	.preclose = i810_driver_preclose,
	.device_is_agp = i810_driver_device_is_agp,
	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
	.dma_quiescent = i810_driver_dma_quiescent,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = i810_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
80
/* Module entry point: finalise the ioctl count and register the driver. */
static int __init i810_init(void)
{
	driver.num_ioctls = i810_max_ioctl;
	return drm_init(&driver);
}
86
/* Module exit point: unregister the driver from the DRM core. */
static void __exit i810_exit(void)
{
	drm_exit(&driver);
}
91
92module_init(i810_init);
93module_exit(i810_exit);
94
95MODULE_AUTHOR(DRIVER_AUTHOR);
96MODULE_DESCRIPTION(DRIVER_DESC);
97MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
new file mode 100644
index 000000000000..0118849a5672
--- /dev/null
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -0,0 +1,242 @@
/* i810_drv.h -- Private header for the Intel i810 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 *
30 */
31
#ifndef _I810_DRV_H_
#define _I810_DRV_H_

/* General customization:
 */

#define DRIVER_AUTHOR "VA Linux Systems Inc."

/* Driver identification reported to user space via the DRM version
 * ioctl. */
#define DRIVER_NAME "i810"
#define DRIVER_DESC "Intel i810"
#define DRIVER_DATE "20030605"

/* Interface history
 *
 * 1.1 - XFree86 4.1
 * 1.2 - XvMC interfaces
 * - XFree86 4.2
 * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
 * - Remove requirement for interrupt (leave stubs again)
 * 1.3 - Add page flipping.
 * 1.4 - fix DRM interface
 */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 0
57
/* Per-DMA-buffer private state (dev_priv_size in the driver struct). */
typedef struct drm_i810_buf_priv {
	u32 *in_use;		/* ownership word; presumably lives in the hw
				 * status page as in the i830 — confirm in
				 * i810_dma.c */
	int my_use_idx;		/* byte offset of the in_use word */
	int currently_mapped;	/* whether user space has this buffer mmapped */
	void *virtual;		/* user-space mapping address */
	void *kernel_virtual;	/* kernel ioremap of the buffer */
	drm_local_map_t map;	/* mapping descriptor backing kernel_virtual */
} drm_i810_buf_priv_t;
66
/* Software view of the low-priority command ring. */
typedef struct _drm_i810_ring_buffer {
	int tail_mask;		/* wrap mask for the tail offset (OUT_RING
				 * does outring &= ringmask, so Size is
				 * assumed to be a power of two) */
	unsigned long Start;
	unsigned long End;
	unsigned long Size;	/* ring size in bytes */
	u8 *virtual_start;	/* kernel mapping; OUT_RING writes here */
	int head;		/* last hardware head read (byte offset) */
	int tail;		/* next software write position (byte offset) */
	int space;		/* cached free bytes; BEGIN_LP_RING consumes it */
	drm_local_map_t map;
} drm_i810_ring_buffer_t;
78
/* Device-private state; hangs off dev->dev_private. */
typedef struct drm_i810_private {
	struct drm_map *sarea_map;	/* shared-area map (SAREA) */
	struct drm_map *mmio_map;	/* register aperture; used by I810_BASE() */

	drm_i810_sarea_t *sarea_priv;	/* driver-private region inside the SAREA */
	drm_i810_ring_buffer_t ring;	/* the LP command ring */

	void *hw_status_page;		/* CPU address of the hardware status page */
	unsigned long counter;

	dma_addr_t dma_status_page;	/* bus address of hw_status_page */

	struct drm_buf *mmap_buffer;	/* buffer being handed to an mmap in flight */

	u32 front_di1, back_di1, zi1;	/* offset|pitch_bits words for each buffer */

	/* Framebuffer geometry as supplied at DMA init time. */
	int back_offset;
	int depth_offset;
	int overlay_offset;
	int overlay_physical;
	int w, h;
	int pitch;
	int back_pitch;
	int depth_pitch;

	int do_boxes;			/* draw performance boxes? */
	int dma_used;			/* bytes of DMA issued (perf accounting) */

	/* Page-flipping state. */
	int current_page;
	int page_flipping;

	/* IRQ bookkeeping. */
	wait_queue_head_t irq_queue;
	atomic_t irq_received;
	atomic_t irq_emitted;

	int front_offset;
} drm_i810_private_t;
116
117 /* i810_dma.c */
118extern int i810_driver_dma_quiescent(struct drm_device * dev);
119extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
120 struct drm_file *file_priv);
121extern int i810_driver_load(struct drm_device *, unsigned long flags);
122extern void i810_driver_lastclose(struct drm_device * dev);
123extern void i810_driver_preclose(struct drm_device * dev,
124 struct drm_file *file_priv);
125extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
126 struct drm_file *file_priv);
127extern int i810_driver_device_is_agp(struct drm_device * dev);
128
129extern struct drm_ioctl_desc i810_ioctls[];
130extern int i810_max_ioctl;
131
/* MMIO accessors.  All register access goes through the mmio_map
 * ioremap; these macros implicitly require a local variable named
 * 'dev_priv' to be in scope.  The 'reg' argument of I810_BASE is
 * unused — the base is the same for every register. */
#define I810_BASE(reg)		((unsigned long) \
				dev_priv->mmio_map->handle)
#define I810_ADDR(reg)		(I810_BASE(reg) + reg)
#define I810_DEREF(reg)		*(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg)		I810_DEREF(reg)
#define I810_WRITE(reg,val)	do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg)	*(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg)	I810_DEREF16(reg)
#define I810_WRITE16(reg,val)	do { I810_DEREF16(reg) = val; } while (0)

#define I810_VERBOSE 0
/* RING_LOCALS declares the hidden locals that BEGIN_LP_RING /
 * OUT_RING / ADVANCE_LP_RING share: the current tail offset, the wrap
 * mask, and the kernel mapping of the ring. */
#define RING_LOCALS	unsigned int outring, ringmask; \
			volatile char *virt;

/* Reserve space for n dwords, blocking in i810_wait_ring() if the
 * ring is too full, and snapshot the tail state into the RING_LOCALS. */
#define BEGIN_LP_RING(n) do {						\
	if (I810_VERBOSE)						\
		DRM_DEBUG("BEGIN_LP_RING(%d)\n", n);			\
	if (dev_priv->ring.space < n*4)					\
		i810_wait_ring(dev, n*4);				\
	dev_priv->ring.space -= n*4;					\
	outring = dev_priv->ring.tail;					\
	ringmask = dev_priv->ring.tail_mask;				\
	virt = dev_priv->ring.virtual_start;				\
} while (0)

/* Publish the new tail to the hardware; nothing emitted with OUT_RING
 * is visible to the GPU until this runs. */
#define ADVANCE_LP_RING() do {					\
	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");	\
	dev_priv->ring.tail = outring;				\
	I810_WRITE(LP_RING + RING_TAIL, outring);		\
} while(0)

/* Write one dword at the current tail and advance (with wrap). */
#define OUT_RING(n) do {					\
	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = n;		\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)
169
/* Command-stream opcodes. */
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)

#define INST_PARSER_CLIENT 0x00000000
#define INST_OP_FLUSH 0x02000000
#define INST_FLUSH_MAP_CACHE 0x00000001

/* Batch-buffer start/end dword fields. */
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)

/* Interrupt-related MMIO registers. */
#define I810REG_HWSTAM 0x02098
#define I810REG_INT_IDENTITY_R 0x020a4
#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_ENABLE_R 0x020a0

/* Ring-buffer register blocks and their field masks. */
#define LP_RING 0x2030
#define HP_RING 0x2040
#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
#define RING_HEAD 0x04
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
#define START_ADDR 0x00FFFFF8
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x000FF000
#define RING_REPORT_MASK 0x00000006
#define RING_REPORT_64K 0x00000002
#define RING_REPORT_128K 0x00000004
#define RING_NO_REPORT 0x00000000
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000

/* Scissor packets and fields. */
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)

#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
#define SCI_YMAX_MASK (0xffff<<16)
#define SCI_XMAX_MASK (0xffff<<0)

/* Misc 3D state packets. */
#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))

#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23))
#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23))
#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23))
#define CMD_OP_WAIT_FOR_EVENT ((0x0<<29)|(0x03<<23))

/* 2D blitter opcodes/fields. */
#define BR00_BITBLT_CLIENT 0x40000000
#define BR00_OP_COLOR_BLT 0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN 0x80000000

/* Event codes for CMD_OP_WAIT_FOR_EVENT. */
#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
#define WAIT_FOR_PLANE_A_FLIP (1<<2)
#define WAIT_FOR_VBLANK (1<<3)
242#endif
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
new file mode 100644
index 000000000000..c642ee0b238c
--- /dev/null
+++ b/drivers/gpu/drm/i830/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

# Shared DRM headers live under include/drm after the tree reorg.
ccflags-y := -Iinclude/drm
i830-y := i830_drv.o i830_dma.o i830_irq.o

obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
new file mode 100644
index 000000000000..a86ab30b4620
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -0,0 +1,1553 @@
1/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Abraham vd Merwe <abraham@2d3d.co.za>
31 *
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "i830_drm.h"
37#include "i830_drv.h"
38#include <linux/interrupt.h> /* For task queue support */
39#include <linux/pagemap.h>
40#include <linux/delay.h>
41#include <asm/uaccess.h>
42
/* Ownership states stored in each buffer's in_use word inside the
 * hardware status page; transitions are done with cmpxchg (see
 * i830_freelist_get/put). */
#define I830_BUF_FREE 2
#define I830_BUF_CLIENT 1
#define I830_BUF_HARDWARE 0

/* Values for drm_i830_buf_priv_t.currently_mapped. */
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED 1
49
50static struct drm_buf *i830_freelist_get(struct drm_device * dev)
51{
52 struct drm_device_dma *dma = dev->dma;
53 int i;
54 int used;
55
56 /* Linear search might not be the best solution */
57
58 for (i = 0; i < dma->buf_count; i++) {
59 struct drm_buf *buf = dma->buflist[i];
60 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
61 /* In use is already a pointer */
62 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
63 I830_BUF_CLIENT);
64 if (used == I830_BUF_FREE) {
65 return buf;
66 }
67 }
68 return NULL;
69}
70
71/* This should only be called if the buffer is not sent to the hardware
72 * yet, the hardware updates in use for us once its on the ring buffer.
73 */
74
75static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
76{
77 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
78 int used;
79
80 /* In use is already a pointer */
81 used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
82 if (used != I830_BUF_CLIENT) {
83 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
84 return -EINVAL;
85 }
86
87 return 0;
88}
89
/*
 * mmap handler installed transiently by i830_map_buffer() via the
 * f_op swap: instead of interpreting the mmap offset, it maps the DMA
 * buffer that was stashed in dev_priv->mmap_buffer.
 */
static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	drm_i830_private_t *dev_priv;
	struct drm_buf *buf;
	drm_i830_buf_priv_t *buf_priv;

	/* BKL guards the mmap_buffer handoff from i830_map_buffer(). */
	lock_kernel();
	dev = priv->minor->dev;
	dev_priv = dev->dev_private;
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I830_BUF_MAPPED;
	unlock_kernel();

	/* The buffer lives in the AGP aperture: map the raw pfn range. */
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
116
/* File ops swapped onto the DRM fd for the duration of
 * i830_map_buffer(), so that the do_mmap() there is served by
 * i830_mmap_buffers() instead of drm_mmap(). */
static const struct file_operations i830_buffer_fops = {
	.open = drm_open,
	.release = drm_release,
	.ioctl = drm_ioctl,
	.mmap = i830_mmap_buffers,
	.fasync = drm_fasync,
};
124
/*
 * Map a DMA buffer into the calling process.
 *
 * Temporarily replaces the fd's file_operations with
 * i830_buffer_fops and publishes the buffer in dev_priv->mmap_buffer,
 * so the do_mmap() below lands in i830_mmap_buffers().  The whole
 * swap happens under mmap_sem held for write, which also serializes
 * concurrent mappers.  On success the user address is recorded in
 * buf_priv->virtual.
 */
static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_private_t *dev_priv = dev->dev_private;
	const struct file_operations *old_fops;
	unsigned long virtual;
	int retcode = 0;

	if (buf_priv->currently_mapped == I830_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	old_fops = file_priv->filp->f_op;
	file_priv->filp->f_op = &i830_buffer_fops;
	dev_priv->mmap_buffer = buf;
	virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
			  MAP_SHARED, buf->bus_address);
	/* Restore the original f_op before anything else can use the fd. */
	dev_priv->mmap_buffer = NULL;
	file_priv->filp->f_op = old_fops;
	if (IS_ERR((void *)virtual)) {	/* ugh */
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = PTR_ERR((void *)virtual);
		buf_priv->virtual = NULL;
	} else {
		buf_priv->virtual = (void __user *)virtual;
	}
	up_write(&current->mm->mmap_sem);

	return retcode;
}
157
158static int i830_unmap_buffer(struct drm_buf * buf)
159{
160 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
161 int retcode = 0;
162
163 if (buf_priv->currently_mapped != I830_BUF_MAPPED)
164 return -EINVAL;
165
166 down_write(&current->mm->mmap_sem);
167 retcode = do_munmap(current->mm,
168 (unsigned long)buf_priv->virtual,
169 (size_t) buf->total);
170 up_write(&current->mm->mmap_sem);
171
172 buf_priv->currently_mapped = I830_BUF_UNMAPPED;
173 buf_priv->virtual = NULL;
174
175 return retcode;
176}
177
178static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
179 struct drm_file *file_priv)
180{
181 struct drm_buf *buf;
182 drm_i830_buf_priv_t *buf_priv;
183 int retcode = 0;
184
185 buf = i830_freelist_get(dev);
186 if (!buf) {
187 retcode = -ENOMEM;
188 DRM_DEBUG("retcode=%d\n", retcode);
189 return retcode;
190 }
191
192 retcode = i830_map_buffer(buf, file_priv);
193 if (retcode) {
194 i830_freelist_put(dev, buf);
195 DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
196 return retcode;
197 }
198 buf->file_priv = file_priv;
199 buf_priv = buf->dev_private;
200 d->granted = 1;
201 d->request_idx = buf->idx;
202 d->request_size = buf->total;
203 d->virtual = buf_priv->virtual;
204
205 return retcode;
206}
207
/*
 * Tear down everything i830_dma_initialize() built: IRQs, the ring
 * mapping, the hardware status page, the private struct, and the
 * per-buffer kernel mappings.  Safe to call on a partially
 * initialized device — every resource is checked before release.
 */
static int i830_dma_cleanup(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		int i;
		drm_i830_private_t *dev_priv =
		    (drm_i830_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}
		if (dev_priv->hw_status_page) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			/* (I830_WRITE needs dev_priv->mmio_map, so this must
			 * happen before the struct is freed below.) */
			I830_WRITE(0x02080, 0x1ffff000);
		}

		drm_free(dev->dev_private, sizeof(drm_i830_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		/* Buffer unmapping only touches buf_priv, so it is fine
		 * after dev_private is gone. */
		for (i = 0; i < dma->buf_count; i++) {
			struct drm_buf *buf = dma->buflist[i];
			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
			if (buf_priv->kernel_virtual && buf->total)
				drm_core_ioremapfree(&buf_priv->map, dev);
		}
	}
	return 0;
}
248
/*
 * Busy-wait until at least n bytes are free in the LP ring.
 *
 * The 3-second timeout restarts whenever the hardware head advances,
 * so only a head frozen for a full 3 s is reported as a lockup.  The
 * 'caller' argument is currently unused.  Returns the number of poll
 * iterations performed (callers generally ignore it).
 */
int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ * 3);
	while (ring->space < n) {
		ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* Free space is the gap between head and tail (minus a
		 * small guard band), modulo the ring size. */
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;

		if (ring->head != last_head) {
			/* Hardware made progress: restart the timeout. */
			end = jiffies + (HZ * 3);
			last_head = ring->head;
		}

		iters++;
		if (time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
	}

      out_wait_ring:
	return iters;
}
282
/*
 * Resynchronize the software ring state (head/tail/space) with the
 * hardware registers — used when another context may have emitted
 * commands behind our back.  Also records an empty ring in the
 * perf boxes.
 */
static void i830_kernel_lost_context(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
297
/*
 * Set up the buffer freelist: each DMA buffer gets a 32-bit in_use
 * word in the hardware status page (starting at byte offset 36) and a
 * kernel ioremap of its AGP backing so the driver can copy into it.
 */
static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int my_idx = 36;
	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
	int i;

	if (dma->buf_count > 1019) {
		/* Not enough space in the status page for the freelist */
		/* NOTE(review): with a 4K status page, 36 + 1019*4 = 4112
		 * runs past the page end; the bound arguably should be
		 * 1015 — confirm against PAGE_SIZE. */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I830_BUF_FREE;

		/* Map the buffer's AGP pages into kernel space. */
		buf_priv->map.offset = buf->bus_address;
		buf_priv->map.size = buf->total;
		buf_priv->map.type = _DRM_AGP;
		buf_priv->map.flags = 0;
		buf_priv->map.mtrr = 0;

		drm_core_ioremap(&buf_priv->map, dev);
		buf_priv->kernel_virtual = buf_priv->map.handle;
	}
	return 0;
}
331
/*
 * One-time DMA setup for the I830_INIT_DMA ioctl.
 *
 * Resolves the SAREA, MMIO and DMA-buffer maps, ioremaps the command
 * ring, allocates the hardware status page, and builds the buffer
 * freelist.  On every failure path dev->dev_private is assigned
 * *before* calling i830_dma_cleanup() so the cleanup code can tear
 * down whatever was built so far.
 */
static int i830_dma_initialize(struct drm_device * dev,
			       drm_i830_private_t * dev_priv,
			       drm_i830_init_t * init)
{
	struct drm_map_list *r_list;

	memset(dev_priv, 0, sizeof(drm_i830_private_t));

	/* The SAREA is the _DRM_SHM map carrying the hardware lock. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}

	if (!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private region lives at a client-supplied offset
	 * inside the SAREA. */
	dev_priv->sarea_priv = (drm_i830_sarea_t *)
	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	/* The ring sits in the AGP aperture; map it into the kernel. */
	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = _DRM_AGP;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	/* Size is used as a wrap mask, so it must be a power of two. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	/* Cache the framebuffer layout handed in by user space. */
	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
	DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
	DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
	DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);

	dev_priv->cpp = init->cpp;
	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */

	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_pitch = init->depth_pitch;
	dev_priv->do_boxes = 0;
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	/* Tell the hardware where the status page lives. */
	I830_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if (i830_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
452
453static int i830_dma_init(struct drm_device *dev, void *data,
454 struct drm_file *file_priv)
455{
456 drm_i830_private_t *dev_priv;
457 drm_i830_init_t *init = data;
458 int retcode = 0;
459
460 switch (init->func) {
461 case I830_INIT_DMA:
462 dev_priv = drm_alloc(sizeof(drm_i830_private_t),
463 DRM_MEM_DRIVER);
464 if (dev_priv == NULL)
465 return -ENOMEM;
466 retcode = i830_dma_initialize(dev, dev_priv, init);
467 break;
468 case I830_CLEANUP_DMA:
469 retcode = i830_dma_cleanup(dev);
470 break;
471 default:
472 retcode = -EINVAL;
473 break;
474 }
475
476 return retcode;
477}
478
/* Stipple packet opcode plus the fields of its second dword, checked
 * by i830EmitStippleVerified(). */
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define ST1_ENABLE (1<<16)
#define ST1_MASK (0xffff)
482
483/* Most efficient way to verify state for the i830 is as it is
484 * emitted. Non-conformant state is silently dropped.
485 */
/*
 * Emit client-supplied 3D context state, dropping any dword that is
 * not a 3D-client opcode below the 0x1d group (non-conformant state
 * is silently skipped, with a DRM_ERROR note).  The blend color and
 * map-coord setbind packets are re-emitted with kernel-owned opcodes.
 */
static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;		/* j counts dwords actually emitted */
	unsigned int tmp;
	RING_LOCALS;

	/* Worst-case reservation; rejected dwords just waste space. */
	BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);

	for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
		tmp = code[i];
		if ((tmp & (7 << 29)) == CMD_3D &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
	OUT_RING(code[I830_CTXREG_BLENDCOLR]);
	j += 2;

	for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
		tmp = code[i];
		if ((tmp & (7 << 29)) == CMD_3D &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
	OUT_RING(code[I830_CTXREG_MCSB1]);
	j += 2;

	/* Pad to an even number of dwords. */
	if (j & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();
}
530
/*
 * Validate and emit a texture-map setup packet.  Accepts either a
 * GFX_OP_MAP_INFO packet or a STATE3D_LOAD_STATE_IMMEDIATE_2 packet
 * (with any combination of the four LOAD_TEXTURE_MAP bits); anything
 * else is rejected wholesale.
 */
static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
	    (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
	    (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {

		BEGIN_LP_RING(I830_TEX_SETUP_SIZE);

		OUT_RING(code[I830_TEXREG_MI0]);	/* TM0LI */
		OUT_RING(code[I830_TEXREG_MI1]);	/* TM0S0 */
		OUT_RING(code[I830_TEXREG_MI2]);	/* TM0S1 */
		OUT_RING(code[I830_TEXREG_MI3]);	/* TM0S2 */
		OUT_RING(code[I830_TEXREG_MI4]);	/* TM0S3 */
		OUT_RING(code[I830_TEXREG_MI5]);	/* TM0S4 */

		/* Remaining dwords are passed through unchecked. */
		for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
			tmp = code[i];
			OUT_RING(tmp);
			j++;
		}

		/* Pad to an even number of dwords. */
		if (j & 1)
			OUT_RING(0);

		ADVANCE_LP_RING();
	} else
		printk("rejected packet %x\n", code[0]);
}
564
/*
 * Emit 'num' texture-blend state dwords verbatim.  Despite the name,
 * no per-dword validation is performed here — the dwords are copied
 * straight into the ring, padded to an even count.  No-op for num==0.
 */
static void i830EmitTexBlendVerified(struct drm_device * dev,
				     unsigned int *code, unsigned int num)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	if (!num)
		return;

	BEGIN_LP_RING(num + 1);

	for (i = 0; i < num; i++) {
		tmp = code[i];
		OUT_RING(tmp);
		j++;
	}

	/* Pad to an even number of dwords. */
	if (j & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();
}
589
/*
 * Upload a 256-entry texture palette.
 *
 * Intentionally disabled: the unconditional return below skips the
 * emit entirely.  Even the skipped code would be a no-op, since it
 * never calls ADVANCE_LP_RING (see the KW comment at the bottom).
 */
static void i830EmitTexPalette(struct drm_device * dev,
			       unsigned int *palette, int number, int is_shared)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	/* Palette upload disabled — everything below is dead code. */
	return;

	BEGIN_LP_RING(258);

	if (is_shared == 1) {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
			 MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
	} else {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
	}
	for (i = 0; i < 256; i++) {
		OUT_RING(palette[i]);
	}
	OUT_RING(0);
	/* KW: WHERE IS THE ADVANCE_LP_RING?  This is effectively a noop!
	 */
}
614
615/* Need to do some additional checking when setting the dest buffer.
616 */
/*
 * Emit destination-buffer state with validation: the color buffer
 * address must match the kernel-known front or back buffer, and the
 * scissor-enable dword must be a genuine GFX_OP_SCISSOR_ENABLE.
 * Everything else (destbuffer vars, drawrect, scissor rect) is
 * passed through.
 */
static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);

	tmp = code[I830_DESTREG_CBUFADDR];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		/* Pad so the destbuffer packets start on a 16-byte
		 * boundary — presumably a hardware alignment
		 * requirement; confirm against i830 docs. */
		if (((int)outring) & 8) {
			OUT_RING(0);
			OUT_RING(0);
		}

		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(BUF_3D_ID_COLOR_BACK |
			 BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
			 BUF_3D_USE_FENCE);
		OUT_RING(tmp);
		OUT_RING(0);

		OUT_RING(CMD_OP_DESTBUFFER_INFO);
		OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
			 BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
		OUT_RING(dev_priv->zi1);
		OUT_RING(0);
	} else {
		DRM_ERROR("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);
	}

	/* invarient:
	 */

	OUT_RING(GFX_OP_DESTBUFFER_VARS);
	OUT_RING(code[I830_DESTREG_DV1]);

	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(code[I830_DESTREG_DR1]);
	OUT_RING(code[I830_DESTREG_DR2]);
	OUT_RING(code[I830_DESTREG_DR3]);
	OUT_RING(code[I830_DESTREG_DR4]);

	/* Need to verify this */
	tmp = code[I830_DESTREG_SENABLE];
	if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
		OUT_RING(tmp);
	} else {
		DRM_ERROR("bad scissor enable\n");
		OUT_RING(0);
	}

	OUT_RING(GFX_OP_SCISSOR_RECT);
	OUT_RING(code[I830_DESTREG_SR1]);
	OUT_RING(code[I830_DESTREG_SR2]);
	OUT_RING(0);

	ADVANCE_LP_RING();
}
677
/* Emit a stipple-pattern packet with a kernel-owned opcode; only the
 * pattern dword (code[1]) comes from the client. */
static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(2);
	OUT_RING(GFX_OP_STIPPLE);
	OUT_RING(code[1]);
	ADVANCE_LP_RING();
}
688
/*
 * Flush all dirty client state from the SAREA into the ring.  Each
 * I830_UPLOAD_* dirty bit selects one of the verified-emit helpers
 * above; a bit is cleared in the SAREA only after its state has been
 * emitted.
 */
static void i830EmitState(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%s %x\n", __func__, dirty);

	if (dirty & I830_UPLOAD_BUFFERS) {
		i830EmitDestVerified(dev, sarea_priv->BufferState);
		sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
	}

	if (dirty & I830_UPLOAD_CTX) {
		i830EmitContextVerified(dev, sarea_priv->ContextState);
		sarea_priv->dirty &= ~I830_UPLOAD_CTX;
	}

	if (dirty & I830_UPLOAD_TEX0) {
		i830EmitTexVerified(dev, sarea_priv->TexState[0]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
	}

	if (dirty & I830_UPLOAD_TEX1) {
		i830EmitTexVerified(dev, sarea_priv->TexState[1]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
	}

	if (dirty & I830_UPLOAD_TEXBLEND0) {
		i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
					 sarea_priv->TexBlendStateWordsUsed[0]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
	}

	if (dirty & I830_UPLOAD_TEXBLEND1) {
		i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
					 sarea_priv->TexBlendStateWordsUsed[1]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
	}

	if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
		/* Shared-palette bit is deliberately never cleared here. */
		i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
	} else {
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
			i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
		}
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
			i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
		}

		/* 1.3:
		 */
#if 0
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
			i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
		}
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
			i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
			/* NOTE(review): clears PALETTE_N(2) again rather
			 * than (3) — looks like a copy/paste slip in this
			 * disabled (#if 0) code. */
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
		}
#endif
	}

	/* 1.3:
	 */
	if (dirty & I830_UPLOAD_STIPPLE) {
		i830EmitStippleVerified(dev, sarea_priv->StippleState);
		sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
	}

	if (dirty & I830_UPLOAD_TEX2) {
		i830EmitTexVerified(dev, sarea_priv->TexState2);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
	}

	if (dirty & I830_UPLOAD_TEX3) {
		i830EmitTexVerified(dev, sarea_priv->TexState3);
		sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
	}

	if (dirty & I830_UPLOAD_TEXBLEND2) {
		i830EmitTexBlendVerified(dev,
					 sarea_priv->TexBlendState2,
					 sarea_priv->TexBlendStateWordsUsed2);

		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
	}

	if (dirty & I830_UPLOAD_TEXBLEND3) {
		i830EmitTexBlendVerified(dev,
					 sarea_priv->TexBlendState3,
					 sarea_priv->TexBlendStateWordsUsed3);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
	}
}
787
788/* ================================================================
789 * Performance monitoring functions
790 */
791
/*
 * Blit a solid-color w*h rectangle at (x,y), offset by the first
 * cliprect, into whichever buffer is currently being rendered to
 * (back buffer, or front buffer mid-flip).  Used only for the
 * on-screen performance boxes.
 */
static void i830_fill_box(struct drm_device * dev,
			  int x, int y, int w, int h, int r, int g, int b)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	u32 color;
	unsigned int BR13, CMD;
	RING_LOCALS;

	/* Solid-pattern color blit; BR13 carries raster op and pitch. */
	BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
	CMD = XY_COLOR_BLT_CMD;
	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	if (dev_priv->cpp == 4) {
		/* 32bpp: opaque alpha + 8:8:8 color. */
		BR13 |= (1 << 25);
		CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
	} else {
		/* 16bpp: pack to 5:6:5. */
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
	}

	BEGIN_LP_RING(6);
	OUT_RING(CMD);
	OUT_RING(BR13);
	OUT_RING((y << 16) | x);
	OUT_RING(((y + h) << 16) | (x + w));

	/* While page-flipped, "back" rendering targets the front offset. */
	if (dev_priv->current_page == 1) {
		OUT_RING(dev_priv->front_offset);
	} else {
		OUT_RING(dev_priv->back_offset);
	}

	OUT_RING(color);
	ADVANCE_LP_RING();
}
829
830static void i830_cp_performance_boxes(struct drm_device * dev)
831{
832 drm_i830_private_t *dev_priv = dev->dev_private;
833
834 /* Purple box for page flipping
835 */
836 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
837 i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
838
839 /* Red box if we have to wait for idle at any point
840 */
841 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
842 i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
843
844 /* Blue box: lost context?
845 */
846 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
847 i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
848
849 /* Yellow box for texture swaps
850 */
851 if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
852 i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
853
854 /* Green box if hardware never idles (as far as we can tell)
855 */
856 if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
857 i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
858
859 /* Draw bars indicating number of buffers allocated
860 * (not a great measure, easily confused)
861 */
862 if (dev_priv->dma_used) {
863 int bar = dev_priv->dma_used / 10240;
864 if (bar > 100)
865 bar = 100;
866 if (bar < 1)
867 bar = 1;
868 i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
869 dev_priv->dma_used = 0;
870 }
871
872 dev_priv->sarea_priv->perf_boxes = 0;
873}
874
/*
 * Clear the front/back/depth buffers (selected by the I830_FRONT/BACK/DEPTH
 * bits in 'flags') for every valid cliprect, using solid-colour blits.
 */
static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
				    unsigned int clear_color,
				    unsigned int clear_zval,
				    unsigned int clear_depthmask)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int BR13, CMD, D_CMD;
	RING_LOCALS;

	/* After a page flip the front and back buffers have swapped roles,
	 * so swap the corresponding clear flags to match. */
	if (dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(I830_FRONT | I830_BACK);
		if (tmp & I830_FRONT)
			flags |= I830_BACK;
		if (tmp & I830_BACK)
			flags |= I830_FRONT;
	}

	i830_kernel_lost_context(dev);

	/* Build the blit command and BR13 per depth: bit 24 always set,
	 * bit 25 added for 32bpp.  For 32bpp the depth-clear command honours
	 * clear_depthmask per channel (RGB vs alpha byte). */
	switch (cpp) {
	case 2:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	case 4:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
		CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
		       XY_COLOR_BLT_WRITE_RGB);
		D_CMD = XY_COLOR_BLT_CMD;
		if (clear_depthmask & 0x00ffffff)
			D_CMD |= XY_COLOR_BLT_WRITE_RGB;
		if (clear_depthmask & 0xff000000)
			D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
		break;
	default:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		/* Skip malformed or out-of-bounds cliprects; they come from
		 * the shared (client-writable) sarea. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		if (flags & I830_FRONT) {
			DRM_DEBUG("clear front\n");
			BEGIN_LP_RING(6);
			OUT_RING(CMD);
			OUT_RING(BR13);
			OUT_RING((pbox->y1 << 16) | pbox->x1);
			OUT_RING((pbox->y2 << 16) | pbox->x2);
			OUT_RING(dev_priv->front_offset);
			OUT_RING(clear_color);
			ADVANCE_LP_RING();
		}

		if (flags & I830_BACK) {
			DRM_DEBUG("clear back\n");
			BEGIN_LP_RING(6);
			OUT_RING(CMD);
			OUT_RING(BR13);
			OUT_RING((pbox->y1 << 16) | pbox->x1);
			OUT_RING((pbox->y2 << 16) | pbox->x2);
			OUT_RING(dev_priv->back_offset);
			OUT_RING(clear_color);
			ADVANCE_LP_RING();
		}

		if (flags & I830_DEPTH) {
			DRM_DEBUG("clear depth\n");
			BEGIN_LP_RING(6);
			OUT_RING(D_CMD);
			OUT_RING(BR13);
			OUT_RING((pbox->y1 << 16) | pbox->x1);
			OUT_RING((pbox->y2 << 16) | pbox->x2);
			OUT_RING(dev_priv->depth_offset);
			OUT_RING(clear_zval);
			ADVANCE_LP_RING();
		}
	}
}
969
/*
 * Blit the render buffer to the displayed buffer for every valid cliprect
 * (a copy-based "swap"; contrast with i830_dma_dispatch_flip below).
 */
static void i830_dma_dispatch_swap(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int CMD, BR13;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i830_kernel_lost_context(dev);

	/* Render the diagnostic overlay before copying, if enabled. */
	if (dev_priv->do_boxes)
		i830_cp_performance_boxes(dev);

	/* Raster-op 0xCC (source copy); bit 24 always, bit 25 for 32bpp.
	 * The low 16 bits of BR13 carry the pitch in bytes and double as
	 * the source pitch below. */
	switch (cpp) {
	case 2:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	case 4:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
		CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
		       XY_SRC_COPY_BLT_WRITE_RGB);
		break;
	default:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0; i < nbox; i++, pbox++) {
		/* Reject malformed/out-of-bounds cliprects from the sarea. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
			continue;

		DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
			  pbox->x1, pbox->y1, pbox->x2, pbox->y2);

		BEGIN_LP_RING(8);
		OUT_RING(CMD);
		OUT_RING(BR13);
		OUT_RING((pbox->y1 << 16) | pbox->x1);
		OUT_RING((pbox->y2 << 16) | pbox->x2);

		/* Destination: whichever buffer is currently displayed. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->front_offset);
		else
			OUT_RING(dev_priv->back_offset);

		OUT_RING((pbox->y1 << 16) | pbox->x1);
		/* Source pitch = low word of BR13 (pitch * cpp). */
		OUT_RING(BR13 & 0xffff);

		/* Source: the other (render) buffer. */
		if (dev_priv->current_page == 0)
			OUT_RING(dev_priv->back_offset);
		else
			OUT_RING(dev_priv->front_offset);

		ADVANCE_LP_RING();
	}
}
1039
/*
 * Queue an asynchronous page flip to the other buffer, toggling
 * current_page and mirroring the new state into the shared sarea.
 */
static void i830_dma_dispatch_flip(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i830_kernel_lost_context(dev);

	if (dev_priv->do_boxes) {
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
		i830_cp_performance_boxes(dev);
	}

	/* Flush the map cache before changing the display base. */
	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Point the display at the other buffer, asynchronously, and
	 * toggle our notion of the current page. */
	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Stall subsequent commands until the flip has completed. */
	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
1082
/*
 * Dispatch a client vertex buffer as a batch buffer, once per cliprect
 * (at least once even with no cliprects).  When 'discard' is set the
 * buffer is handed back to the free pool after the hardware is done.
 */
static void i830_dma_dispatch_vertex(struct drm_device * dev,
				     struct drm_buf * buf, int discard, int used)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_clip_rect *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0, u;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	if (discard) {
		/* Transfer ownership CLIENT -> HARDWARE; log if the client
		 * didn't actually own it. */
		u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			    I830_BUF_HARDWARE);
		if (u != I830_BUF_CLIENT) {
			DRM_DEBUG("xxxx 2\n");
		}
	}

	/* Treat oversized buffers as empty rather than dispatching them. */
	if (used > 4 * 1023)
		used = 0;

	/* Flush any lazily accumulated state changes first. */
	if (sarea_priv->dirty)
		i830EmitState(dev);

	DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
		  address, used, nbox);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("i830_dma_dispatch\n");
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
		u32 *vp = buf_priv->kernel_virtual;

		/* Patch the primitive header into the first dword; the
		 * length field is (dwords - 2). */
		vp[0] = (GFX_OP_PRIMITIVE |
			 sarea_priv->vertex_prim | ((used / 4) - 2));

		if (dev_priv->use_mi_batchbuffer_start) {
			vp[used / 4] = MI_BATCH_BUFFER_END;
			used += 4;
		}

		/* Pad with a zero dword so the batch ends on an 8-byte
		 * boundary. */
		if (used & 4) {
			vp[used / 4] = 0;
			used += 4;
		}

		i830_unmap_buffer(buf);
	}

	if (used) {
		/* do-while: dispatch the batch at least once even when
		 * nbox == 0 (no drawing-rectangle update in that case). */
		do {
			if (i < nbox) {
				BEGIN_LP_RING(6);
				OUT_RING(GFX_OP_DRAWRECT_INFO);
				OUT_RING(sarea_priv->
					 BufferState[I830_DESTREG_DR1]);
				OUT_RING(box[i].x1 | (box[i].y1 << 16));
				OUT_RING(box[i].x2 | (box[i].y2 << 16));
				OUT_RING(sarea_priv->
					 BufferState[I830_DESTREG_DR4]);
				OUT_RING(0);
				ADVANCE_LP_RING();
			}

			if (dev_priv->use_mi_batchbuffer_start) {
				BEGIN_LP_RING(2);
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(start | MI_BATCH_NON_SECURE);
				ADVANCE_LP_RING();
			} else {
				BEGIN_LP_RING(4);
				OUT_RING(MI_BATCH_BUFFER);
				OUT_RING(start | MI_BATCH_NON_SECURE);
				OUT_RING(start + used - 4);
				OUT_RING(0);
				ADVANCE_LP_RING();
			}

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		(void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			      I830_BUF_HARDWARE);

		/* Have the hardware store the new counter and mark the
		 * buffer FREE once it reaches this point in the ring. */
		BEGIN_LP_RING(8);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(20);
		OUT_RING(dev_priv->counter);
		OUT_RING(CMD_STORE_DWORD_IDX);
		OUT_RING(buf_priv->my_use_idx);
		OUT_RING(I830_BUF_FREE);
		OUT_RING(CMD_REPORT_HEAD);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
1194
/*
 * Flush outstanding rendering and wait until the ring is (almost) fully
 * drained, i.e. the hardware is idle.
 */
static void i830_dma_quiescent(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Wait for all but 8 bytes of the ring to be free == idle. */
	i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
1211
/*
 * Drain the ring, then reclaim every buffer the hardware has finished
 * with (HARDWARE -> FREE).  Always returns 0; the return value exists
 * only to match the callers' expectations.
 */
static int i830_flush_queue(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* Ring almost empty => all queued commands have executed. */
	i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		/* Atomically free buffers the hardware owned; leave
		 * client-owned buffers alone. */
		int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
				   I830_BUF_FREE);

		if (used == I830_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I830_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
1243
1244/* Must be called with the lock held */
1245static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
1246{
1247 struct drm_device_dma *dma = dev->dma;
1248 int i;
1249
1250 if (!dma)
1251 return;
1252 if (!dev->dev_private)
1253 return;
1254 if (!dma->buflist)
1255 return;
1256
1257 i830_flush_queue(dev);
1258
1259 for (i = 0; i < dma->buf_count; i++) {
1260 struct drm_buf *buf = dma->buflist[i];
1261 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
1262
1263 if (buf->file_priv == file_priv && buf_priv) {
1264 int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
1265 I830_BUF_FREE);
1266
1267 if (used == I830_BUF_CLIENT)
1268 DRM_DEBUG("reclaimed from client\n");
1269 if (buf_priv->currently_mapped == I830_BUF_MAPPED)
1270 buf_priv->currently_mapped = I830_BUF_UNMAPPED;
1271 }
1272 }
1273}
1274
/* Ioctl: flush the DMA queue.  LOCK_TEST_WITH_RETURN returns -EINVAL
 * from here if the caller does not hold the hardware lock. */
static int i830_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i830_flush_queue(dev);
	return 0;
}
1283
1284static int i830_dma_vertex(struct drm_device *dev, void *data,
1285 struct drm_file *file_priv)
1286{
1287 struct drm_device_dma *dma = dev->dma;
1288 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1289 u32 *hw_status = dev_priv->hw_status_page;
1290 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1291 dev_priv->sarea_priv;
1292 drm_i830_vertex_t *vertex = data;
1293
1294 LOCK_TEST_WITH_RETURN(dev, file_priv);
1295
1296 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1297 vertex->idx, vertex->used, vertex->discard);
1298
1299 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
1300 return -EINVAL;
1301
1302 i830_dma_dispatch_vertex(dev,
1303 dma->buflist[vertex->idx],
1304 vertex->discard, vertex->used);
1305
1306 sarea_priv->last_enqueue = dev_priv->counter - 1;
1307 sarea_priv->last_dispatch = (int)hw_status[5];
1308
1309 return 0;
1310}
1311
1312static int i830_clear_bufs(struct drm_device *dev, void *data,
1313 struct drm_file *file_priv)
1314{
1315 drm_i830_clear_t *clear = data;
1316
1317 LOCK_TEST_WITH_RETURN(dev, file_priv);
1318
1319 /* GH: Someone's doing nasty things... */
1320 if (!dev->dev_private) {
1321 return -EINVAL;
1322 }
1323
1324 i830_dma_dispatch_clear(dev, clear->flags,
1325 clear->clear_color,
1326 clear->clear_depth, clear->clear_depthmask);
1327 return 0;
1328}
1329
/* Ioctl: copy the back buffer to the front for every cliprect. */
static int i830_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("i830_swap_bufs\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i830_dma_dispatch_swap(dev);
	return 0;
}
1340
1341/* Not sure why this isn't set all the time:
1342 */
/* Enter page-flipping mode with page 0 (the front buffer) current, and
 * mirror that into the shared sarea. */
static void i830_do_init_pageflip(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __func__);
	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
1352
/* Leave page-flipping mode, flipping back to page 0 first if needed so
 * the front buffer is the one displayed.  Always returns 0. */
static int i830_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __func__);
	if (dev_priv->current_page != 0)
		i830_dma_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}
1364
/* Ioctl: request a page flip, lazily enabling page-flipping mode on
 * first use. */
static int i830_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->page_flipping)
		i830_do_init_pageflip(dev);

	i830_dma_dispatch_flip(dev);
	return 0;
}
1380
1381static int i830_getage(struct drm_device *dev, void *data,
1382 struct drm_file *file_priv)
1383{
1384 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1385 u32 *hw_status = dev_priv->hw_status_page;
1386 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1387 dev_priv->sarea_priv;
1388
1389 sarea_priv->last_dispatch = (int)hw_status[5];
1390 return 0;
1391}
1392
1393static int i830_getbuf(struct drm_device *dev, void *data,
1394 struct drm_file *file_priv)
1395{
1396 int retcode = 0;
1397 drm_i830_dma_t *d = data;
1398 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
1399 u32 *hw_status = dev_priv->hw_status_page;
1400 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1401 dev_priv->sarea_priv;
1402
1403 DRM_DEBUG("getbuf\n");
1404
1405 LOCK_TEST_WITH_RETURN(dev, file_priv);
1406
1407 d->granted = 0;
1408
1409 retcode = i830_dma_get_buffer(dev, d, file_priv);
1410
1411 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1412 task_pid_nr(current), retcode, d->granted);
1413
1414 sarea_priv->last_dispatch = (int)hw_status[5];
1415
1416 return retcode;
1417}
1418
/* Ioctl stub: kept for ABI compatibility; copying is never needed. */
static int i830_copybuf(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1425
/* Ioctl stub: always reports "no copy required". */
static int i830_docopy(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	return 0;
}
1431
1432static int i830_getparam(struct drm_device *dev, void *data,
1433 struct drm_file *file_priv)
1434{
1435 drm_i830_private_t *dev_priv = dev->dev_private;
1436 drm_i830_getparam_t *param = data;
1437 int value;
1438
1439 if (!dev_priv) {
1440 DRM_ERROR("%s called with no initialization\n", __func__);
1441 return -EINVAL;
1442 }
1443
1444 switch (param->param) {
1445 case I830_PARAM_IRQ_ACTIVE:
1446 value = dev->irq_enabled;
1447 break;
1448 default:
1449 return -EINVAL;
1450 }
1451
1452 if (copy_to_user(param->value, &value, sizeof(int))) {
1453 DRM_ERROR("copy_to_user\n");
1454 return -EFAULT;
1455 }
1456
1457 return 0;
1458}
1459
1460static int i830_setparam(struct drm_device *dev, void *data,
1461 struct drm_file *file_priv)
1462{
1463 drm_i830_private_t *dev_priv = dev->dev_private;
1464 drm_i830_setparam_t *param = data;
1465
1466 if (!dev_priv) {
1467 DRM_ERROR("%s called with no initialization\n", __func__);
1468 return -EINVAL;
1469 }
1470
1471 switch (param->param) {
1472 case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
1473 dev_priv->use_mi_batchbuffer_start = param->value;
1474 break;
1475 default:
1476 return -EINVAL;
1477 }
1478
1479 return 0;
1480}
1481
1482int i830_driver_load(struct drm_device *dev, unsigned long flags)
1483{
1484 /* i830 has 4 more counters */
1485 dev->counters += 4;
1486 dev->types[6] = _DRM_STAT_IRQ;
1487 dev->types[7] = _DRM_STAT_PRIMARY;
1488 dev->types[8] = _DRM_STAT_SECONDARY;
1489 dev->types[9] = _DRM_STAT_DMA;
1490
1491 return 0;
1492}
1493
/* Last-close hook: tear down all DMA state when the final fd closes. */
void i830_driver_lastclose(struct drm_device * dev)
{
	i830_dma_cleanup(dev);
}
1498
1499void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1500{
1501 if (dev->dev_private) {
1502 drm_i830_private_t *dev_priv = dev->dev_private;
1503 if (dev_priv->page_flipping) {
1504 i830_do_cleanup_pageflip(dev);
1505 }
1506 }
1507}
1508
/* Reclaim-buffers hook (called with the lock held): free the departing
 * client's DMA buffers. */
void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
{
	i830_reclaim_buffers(dev, file_priv);
}
1513
/* DMA-quiescent hook: block until the hardware is idle.  Always 0. */
int i830_driver_dma_quiescent(struct drm_device * dev)
{
	i830_dma_quiescent(dev);
	return 0;
}
1519
/* Ioctl dispatch table.  All entries require DRM_AUTH; INIT additionally
 * requires master and root. */
struct drm_ioctl_desc i830_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
};

int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
1538
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i8xx is AGP.
 */
int i830_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
new file mode 100644
index 000000000000..389597e4a623
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -0,0 +1,108 @@
/* i830_drv.c -- I830 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Jeff Hartmann <jhartmann@valinux.com>
30 * Gareth Hughes <gareth@valinux.com>
31 * Abraham vd Merwe <abraham@2d3d.co.za>
32 * Keith Whitwell <keith@tungstengraphics.com>
33 */
34
35#include "drmP.h"
36#include "drm.h"
37#include "i830_drm.h"
38#include "i830_drv.h"
39
40#include "drm_pciids.h"
41
/* PCI IDs this driver binds to (expanded from drm_pciids.h). */
static struct pci_device_id pciidlist[] = {
	i830_PCI_IDS
};
45
46static struct drm_driver driver = {
47 .driver_features =
48 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
49 DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
50#if USE_IRQS
51 .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
52#endif
53 .dev_priv_size = sizeof(drm_i830_buf_priv_t),
54 .load = i830_driver_load,
55 .lastclose = i830_driver_lastclose,
56 .preclose = i830_driver_preclose,
57 .device_is_agp = i830_driver_device_is_agp,
58 .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
59 .dma_quiescent = i830_driver_dma_quiescent,
60 .get_map_ofs = drm_core_get_map_ofs,
61 .get_reg_ofs = drm_core_get_reg_ofs,
62#if USE_IRQS
63 .irq_preinstall = i830_driver_irq_preinstall,
64 .irq_postinstall = i830_driver_irq_postinstall,
65 .irq_uninstall = i830_driver_irq_uninstall,
66 .irq_handler = i830_driver_irq_handler,
67#endif
68 .ioctls = i830_ioctls,
69 .fops = {
70 .owner = THIS_MODULE,
71 .open = drm_open,
72 .release = drm_release,
73 .ioctl = drm_ioctl,
74 .mmap = drm_mmap,
75 .poll = drm_poll,
76 .fasync = drm_fasync,
77 },
78
79 .pci_driver = {
80 .name = DRIVER_NAME,
81 .id_table = pciidlist,
82 },
83
84 .name = DRIVER_NAME,
85 .desc = DRIVER_DESC,
86 .date = DRIVER_DATE,
87 .major = DRIVER_MAJOR,
88 .minor = DRIVER_MINOR,
89 .patchlevel = DRIVER_PATCHLEVEL,
90};
91
/* Module entry point: finalize the ioctl count and register the driver. */
static int __init i830_init(void)
{
	driver.num_ioctls = i830_max_ioctl;
	return drm_init(&driver);
}
97
/* Module exit point: unregister the driver. */
static void __exit i830_exit(void)
{
	drm_exit(&driver);
}

module_init(i830_init);
module_exit(i830_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
new file mode 100644
index 000000000000..b5bf8cc0fdaa
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_drv.h
@@ -0,0 +1,292 @@
1/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 *
30 */
31
32#ifndef _I830_DRV_H_
33#define _I830_DRV_H_
34
35/* General customization:
36 */
37
38#define DRIVER_AUTHOR "VA Linux Systems Inc."
39
40#define DRIVER_NAME "i830"
41#define DRIVER_DESC "Intel 830M"
42#define DRIVER_DATE "20021108"
43
44/* Interface history:
45 *
46 * 1.1: Original.
47 * 1.2: ?
48 * 1.3: New irq emit/wait ioctls.
49 * New pageflip ioctl.
50 * New getparam ioctl.
51 * State for texunits 3&4 in sarea.
52 * New (alternative) layout for texture state.
53 */
54#define DRIVER_MAJOR 1
55#define DRIVER_MINOR 3
56#define DRIVER_PATCHLEVEL 2
57
58/* Driver will work either way: IRQ's save cpu time when waiting for
59 * the card, but are subject to subtle interactions between bios,
60 * hardware and the driver.
61 */
62/* XXX: Add vblank support? */
63#define USE_IRQS 0
64
/* Per-DMA-buffer private state. */
typedef struct drm_i830_buf_priv {
	u32 *in_use;		/* ownership word, flipped with cmpxchg()
				 * between I830_BUF_FREE/CLIENT/HARDWARE */
	int my_use_idx;		/* dword index written via
				 * CMD_STORE_DWORD_IDX to free the buffer */
	int currently_mapped;	/* I830_BUF_MAPPED / I830_BUF_UNMAPPED */
	void __user *virtual;	/* client-side mapping of the buffer */
	void *kernel_virtual;	/* kernel-side mapping of the buffer */
	drm_local_map_t map;
} drm_i830_buf_priv_t;
73
/* Software view of the hardware command ring (LP_RING). */
typedef struct _drm_i830_ring_buffer {
	int tail_mask;		/* size - 1: used to wrap the tail offset */
	unsigned long Start;
	unsigned long End;
	unsigned long Size;	/* ring size in bytes */
	u8 *virtual_start;	/* kernel mapping of the ring */
	int head;
	int tail;
	int space;		/* bytes believed free between tail and head */
	drm_local_map_t map;
} drm_i830_ring_buffer_t;
85
/* Per-device private state. */
typedef struct drm_i830_private {
	struct drm_map *sarea_map;
	struct drm_map *mmio_map;

	drm_i830_sarea_t *sarea_priv;	/* shared (client-visible) state */
	drm_i830_ring_buffer_t ring;	/* command ring bookkeeping */

	void *hw_status_page;	/* hw status page; dword 5 holds the
				 * last dispatch counter */
	unsigned long counter;	/* dispatch counter, incremented per batch */

	dma_addr_t dma_status_page;

	struct drm_buf *mmap_buffer;

	u32 front_di1, back_di1, zi1;

	/* framebuffer layout */
	int back_offset;
	int depth_offset;
	int front_offset;
	int w, h;		/* screen dimensions, used to clip cliprects */
	int pitch;		/* pitch in pixels (multiplied by cpp) */
	int back_pitch;
	int depth_pitch;
	unsigned int cpp;	/* bytes per pixel: 2 or 4 */

	int do_boxes;		/* draw performance boxes on swap */
	int dma_used;		/* DMA bytes consumed since last swap */

	/* page flipping state */
	int current_page;	/* 0 = front displayed, 1 = back displayed */
	int page_flipping;

	wait_queue_head_t irq_queue;
	atomic_t irq_received;
	atomic_t irq_emitted;

	/* use MI_BATCH_BUFFER_START rather than MI_BATCH_BUFFER */
	int use_mi_batchbuffer_start;

} drm_i830_private_t;
124
extern struct drm_ioctl_desc i830_ioctls[];
extern int i830_max_ioctl;

/* i830_irq.c */
extern int i830_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
extern int i830_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);

extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
extern void i830_driver_irq_preinstall(struct drm_device * dev);
extern void i830_driver_irq_postinstall(struct drm_device * dev);
extern void i830_driver_irq_uninstall(struct drm_device * dev);
/* drm driver entry points (i830_dma.c) */
extern int i830_driver_load(struct drm_device *, unsigned long flags);
extern void i830_driver_preclose(struct drm_device * dev,
				 struct drm_file *file_priv);
extern void i830_driver_lastclose(struct drm_device * dev);
extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
					       struct drm_file *file_priv);
extern int i830_driver_dma_quiescent(struct drm_device * dev);
extern int i830_driver_device_is_agp(struct drm_device * dev);
146
/* MMIO accessors over the mapped register aperture. */
#define I830_READ(reg)		DRM_READ32(dev_priv->mmio_map, reg)
#define I830_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, reg, val)
#define I830_READ16(reg)	DRM_READ16(dev_priv->mmio_map, reg)
#define I830_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, reg, val)

/* Set to 1 to printk every ring emission (very noisy). */
#define I830_VERBOSE 0
153
/* Locals required by the BEGIN/OUT/ADVANCE ring macros below. */
#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
			volatile char *virt;

/*
 * Reserve space for n dwords in the ring, waiting for the hardware to
 * drain if necessary.  The argument is parenthesized so expression
 * arguments (e.g. BEGIN_LP_RING(a + b)) expand correctly — the original
 * used bare "n*4".
 */
#define BEGIN_LP_RING(n) do {				\
	if (I830_VERBOSE)				\
		printk("BEGIN_LP_RING(%d)\n", (n));	\
	if (dev_priv->ring.space < (n) * 4)		\
		i830_wait_ring(dev, (n) * 4, __func__);	\
	outcount = 0;					\
	outring = dev_priv->ring.tail;			\
	ringmask = dev_priv->ring.tail_mask;		\
	virt = dev_priv->ring.virtual_start;		\
} while (0)

/* Write one dword into the ring and advance, wrapping at the mask. */
#define OUT_RING(n) do {					\
	if (I830_VERBOSE) printk("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = (n);	\
	outcount++;						\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

/* Publish the new tail to the hardware and update our space estimate. */
#define ADVANCE_LP_RING() do {					\
	if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring);	\
	dev_priv->ring.tail = outring;				\
	dev_priv->ring.space -= outcount * 4;			\
	I830_WRITE(LP_RING + RING_TAIL, outring);		\
} while (0)
182
extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);

/* Command-stream instruction opcodes. */
#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
#define CMD_REPORT_HEAD			(7<<23)
#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
#define CMD_OP_BATCH_BUFFER		((0x0<<29)|(0x30<<23)|0x1)

#define STATE3D_LOAD_STATE_IMMEDIATE_2	((0x3<<29)|(0x1d<<24)|(0x03<<16))
#define LOAD_TEXTURE_MAP0		(1<<11)

/* Instruction-parser flush bits. */
#define INST_PARSER_CLIENT		0x00000000
#define INST_OP_FLUSH			0x02000000
#define INST_FLUSH_MAP_CACHE		0x00000001

/* Batch-buffer address fields. */
#define BB1_START_ADDR_MASK		(~0x7)
#define BB1_PROTECTED			(1<<0)
#define BB1_UNPROTECTED			(0<<0)
#define BB2_END_ADDR_MASK		(~0x7)

/* Interrupt control register offsets. */
#define I830REG_HWSTAM			0x02098
#define I830REG_INT_IDENTITY_R		0x020a4
#define I830REG_INT_MASK_R		0x020a8
#define I830REG_INT_ENABLE_R		0x020a0

#define I830_IRQ_RESERVED		((1<<13)|(3<<2))
209
/* Ring-buffer register block offsets and field masks. */
#define LP_RING			0x2030
#define HP_RING			0x2040
#define RING_TAIL		0x00
#define TAIL_ADDR		0x001FFFF8
#define RING_HEAD		0x04
#define HEAD_WRAP_COUNT		0xFFE00000
#define HEAD_WRAP_ONE		0x00200000
#define HEAD_ADDR		0x001FFFFC
#define RING_START		0x08
/* Was "0x0xFFFFF000", which is not a valid C integer constant; the
 * intended mask is presumably the 4K-aligned ring start address.  It is
 * apparently unused (an invalid constant would otherwise break the
 * build). */
#define START_ADDR		0xFFFFF000
#define RING_LEN		0x0C
#define RING_NR_PAGES		0x001FF000
#define RING_REPORT_MASK	0x00000006
#define RING_REPORT_64K		0x00000002
#define RING_REPORT_128K	0x00000004
#define RING_NO_REPORT		0x00000000
#define RING_VALID_MASK		0x00000001
#define RING_VALID		0x00000001
#define RING_INVALID		0x00000000
229
/* 3D state packet opcodes and field masks. */
#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR       (0x1<<1)
#define SC_ENABLE_MASK          (0x1<<0)
#define SC_ENABLE               (0x1<<0)

#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK      (0xffff<<16)
#define SCI_XMIN_MASK      (0xffff<<0)
#define SCI_YMAX_MASK      (0xffff<<16)
#define SCI_XMAX_MASK      (0xffff<<0)

#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_PRIMITIVE         ((0x3<<29)|(0x1f<<24))

#define CMD_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)

/* Display buffer (page flip) packet. */
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP                (1<<22)

#define CMD_3D				(0x3<<29)
#define STATE3D_CONST_BLEND_COLOR_CMD	(CMD_3D|(0x1d<<24)|(0x88<<16))
#define STATE3D_MAP_COORD_SETBIND_CMD	(CMD_3D|(0x1d<<24)|(0x02<<16))

/* 2D blitter opcodes and BR13 bits. */
#define BR00_BITBLT_CLIENT   0x40000000
#define BR00_OP_COLOR_BLT    0x10000000
#define BR00_OP_SRC_COPY_BLT 0x10C00000
#define BR13_SOLID_PATTERN   0x80000000

#define BUF_3D_ID_COLOR_BACK    (0x3<<24)
#define BUF_3D_ID_DEPTH         (0x7<<24)
#define BUF_3D_USE_FENCE        (1<<23)
#define BUF_3D_PITCH(x)         (((x)/4)<<2)

#define CMD_OP_MAP_PALETTE_LOAD	((3<<29)|(0x1d<<24)|(0x82<<16)|255)
#define MAP_PALETTE_NUM(x)	((x<<8) & (1<<8))
#define MAP_PALETTE_BOTH	(1<<11)

#define XY_COLOR_BLT_CMD		((2<<29)|(0x50<<22)|0x4)
#define XY_COLOR_BLT_WRITE_ALPHA	(1<<21)
#define XY_COLOR_BLT_WRITE_RGB		(1<<20)

#define XY_SRC_COPY_BLT_CMD             ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA     (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB       (1<<20)

/* Batch-buffer dispatch instructions. */
#define MI_BATCH_BUFFER		((0x30<<23)|1)
#define MI_BATCH_BUFFER_START	(0x31<<23)
#define MI_BATCH_BUFFER_END	(0xA<<23)
#define MI_BATCH_NON_SECURE	(1)

/* Wait-for-event instruction and its condition bits. */
#define MI_WAIT_FOR_EVENT       ((0x3<<23))
#define MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)

#define MI_LOAD_SCAN_LINES_INCL  ((0x12<<23))

#endif
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
new file mode 100644
index 000000000000..91ec2bb497e9
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_irq.c
@@ -0,0 +1,186 @@
1/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
2 *
3 * Copyright 2002 Tungsten Graphics, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors: Keith Whitwell <keith@tungstengraphics.com>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i830_drm.h"
32#include "i830_drv.h"
33#include <linux/interrupt.h> /* For task queue support */
34#include <linux/delay.h>
35
36irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
37{
38 struct drm_device *dev = (struct drm_device *) arg;
39 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
40 u16 temp;
41
42 temp = I830_READ16(I830REG_INT_IDENTITY_R);
43 DRM_DEBUG("%x\n", temp);
44
45 if (!(temp & 2))
46 return IRQ_NONE;
47
48 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
49
50 atomic_inc(&dev_priv->irq_received);
51 wake_up_interruptible(&dev_priv->irq_queue);
52
53 return IRQ_HANDLED;
54}
55
56static int i830_emit_irq(struct drm_device * dev)
57{
58 drm_i830_private_t *dev_priv = dev->dev_private;
59 RING_LOCALS;
60
61 DRM_DEBUG("%s\n", __func__);
62
63 atomic_inc(&dev_priv->irq_emitted);
64
65 BEGIN_LP_RING(2);
66 OUT_RING(0);
67 OUT_RING(GFX_OP_USER_INTERRUPT);
68 ADVANCE_LP_RING();
69
70 return atomic_read(&dev_priv->irq_emitted);
71}
72
73static int i830_wait_irq(struct drm_device * dev, int irq_nr)
74{
75 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
76 DECLARE_WAITQUEUE(entry, current);
77 unsigned long end = jiffies + HZ * 3;
78 int ret = 0;
79
80 DRM_DEBUG("%s\n", __func__);
81
82 if (atomic_read(&dev_priv->irq_received) >= irq_nr)
83 return 0;
84
85 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
86
87 add_wait_queue(&dev_priv->irq_queue, &entry);
88
89 for (;;) {
90 __set_current_state(TASK_INTERRUPTIBLE);
91 if (atomic_read(&dev_priv->irq_received) >= irq_nr)
92 break;
93 if ((signed)(end - jiffies) <= 0) {
94 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
95 I830_READ16(I830REG_INT_IDENTITY_R),
96 I830_READ16(I830REG_INT_MASK_R),
97 I830_READ16(I830REG_INT_ENABLE_R),
98 I830_READ16(I830REG_HWSTAM));
99
100 ret = -EBUSY; /* Lockup? Missed irq? */
101 break;
102 }
103 schedule_timeout(HZ * 3);
104 if (signal_pending(current)) {
105 ret = -EINTR;
106 break;
107 }
108 }
109
110 __set_current_state(TASK_RUNNING);
111 remove_wait_queue(&dev_priv->irq_queue, &entry);
112 return ret;
113}
114
115/* Needs the lock as it touches the ring.
116 */
117int i830_irq_emit(struct drm_device *dev, void *data,
118 struct drm_file *file_priv)
119{
120 drm_i830_private_t *dev_priv = dev->dev_private;
121 drm_i830_irq_emit_t *emit = data;
122 int result;
123
124 LOCK_TEST_WITH_RETURN(dev, file_priv);
125
126 if (!dev_priv) {
127 DRM_ERROR("%s called with no initialization\n", __func__);
128 return -EINVAL;
129 }
130
131 result = i830_emit_irq(dev);
132
133 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
134 DRM_ERROR("copy_to_user\n");
135 return -EFAULT;
136 }
137
138 return 0;
139}
140
141/* Doesn't need the hardware lock.
142 */
143int i830_irq_wait(struct drm_device *dev, void *data,
144 struct drm_file *file_priv)
145{
146 drm_i830_private_t *dev_priv = dev->dev_private;
147 drm_i830_irq_wait_t *irqwait = data;
148
149 if (!dev_priv) {
150 DRM_ERROR("%s called with no initialization\n", __func__);
151 return -EINVAL;
152 }
153
154 return i830_wait_irq(dev, irqwait->irq_seq);
155}
156
157/* drm_dma.h hooks
158*/
159void i830_driver_irq_preinstall(struct drm_device * dev)
160{
161 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
162
163 I830_WRITE16(I830REG_HWSTAM, 0xffff);
164 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
165 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
166 atomic_set(&dev_priv->irq_received, 0);
167 atomic_set(&dev_priv->irq_emitted, 0);
168 init_waitqueue_head(&dev_priv->irq_queue);
169}
170
171void i830_driver_irq_postinstall(struct drm_device * dev)
172{
173 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
174
175 I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
176}
177
178void i830_driver_irq_uninstall(struct drm_device * dev)
179{
180 drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
181 if (!dev_priv)
182 return;
183
184 I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
185 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
186}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 000000000000..a9e60464df74
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,10 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

ccflags-y := -Iinclude/drm

# Core objects of the i915 module.
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o

# 32-bit ioctl translation layer, only with CONFIG_COMPAT.
i915-$(CONFIG_COMPAT) += i915_ioc32.o

obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
new file mode 100644
index 000000000000..88974342933c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -0,0 +1,858 @@
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
34/* Really want an OS-independent resettable timer. Would like to have
35 * this loop run for (eg) 3 sec, but have the timer reset every time
36 * the head pointer changes, so that EBUSY only happens if the ring
37 * actually stalls for (eg) 3 seconds.
38 */
39int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
40{
41 drm_i915_private_t *dev_priv = dev->dev_private;
42 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
43 u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
44 int i;
45
46 for (i = 0; i < 10000; i++) {
47 ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
48 ring->space = ring->head - (ring->tail + 8);
49 if (ring->space < 0)
50 ring->space += ring->Size;
51 if (ring->space >= n)
52 return 0;
53
54 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
55
56 if (ring->head != last_head)
57 i = 0;
58
59 last_head = ring->head;
60 }
61
62 return -EBUSY;
63}
64
65void i915_kernel_lost_context(struct drm_device * dev)
66{
67 drm_i915_private_t *dev_priv = dev->dev_private;
68 drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
69
70 ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
71 ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
72 ring->space = ring->head - (ring->tail + 8);
73 if (ring->space < 0)
74 ring->space += ring->Size;
75
76 if (ring->head == ring->tail)
77 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
78}
79
80static int i915_dma_cleanup(struct drm_device * dev)
81{
82 drm_i915_private_t *dev_priv = dev->dev_private;
83 /* Make sure interrupts are disabled here because the uninstall ioctl
84 * may not have been called from userspace and after dev_private
85 * is freed, it's too late.
86 */
87 if (dev->irq)
88 drm_irq_uninstall(dev);
89
90 if (dev_priv->ring.virtual_start) {
91 drm_core_ioremapfree(&dev_priv->ring.map, dev);
92 dev_priv->ring.virtual_start = 0;
93 dev_priv->ring.map.handle = 0;
94 dev_priv->ring.map.size = 0;
95 }
96
97 if (dev_priv->status_page_dmah) {
98 drm_pci_free(dev, dev_priv->status_page_dmah);
99 dev_priv->status_page_dmah = NULL;
100 /* Need to rewrite hardware status page */
101 I915_WRITE(0x02080, 0x1ffff000);
102 }
103
104 if (dev_priv->status_gfx_addr) {
105 dev_priv->status_gfx_addr = 0;
106 drm_core_ioremapfree(&dev_priv->hws_map, dev);
107 I915_WRITE(0x2080, 0x1ffff000);
108 }
109
110 return 0;
111}
112
113static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
114{
115 drm_i915_private_t *dev_priv = dev->dev_private;
116
117 dev_priv->sarea = drm_getsarea(dev);
118 if (!dev_priv->sarea) {
119 DRM_ERROR("can not find sarea!\n");
120 i915_dma_cleanup(dev);
121 return -EINVAL;
122 }
123
124 dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
125 if (!dev_priv->mmio_map) {
126 i915_dma_cleanup(dev);
127 DRM_ERROR("can not find mmio map!\n");
128 return -EINVAL;
129 }
130
131 dev_priv->sarea_priv = (drm_i915_sarea_t *)
132 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
133
134 dev_priv->ring.Start = init->ring_start;
135 dev_priv->ring.End = init->ring_end;
136 dev_priv->ring.Size = init->ring_size;
137 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
138
139 dev_priv->ring.map.offset = init->ring_start;
140 dev_priv->ring.map.size = init->ring_size;
141 dev_priv->ring.map.type = 0;
142 dev_priv->ring.map.flags = 0;
143 dev_priv->ring.map.mtrr = 0;
144
145 drm_core_ioremap(&dev_priv->ring.map, dev);
146
147 if (dev_priv->ring.map.handle == NULL) {
148 i915_dma_cleanup(dev);
149 DRM_ERROR("can not ioremap virtual address for"
150 " ring buffer\n");
151 return -ENOMEM;
152 }
153
154 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
155
156 dev_priv->cpp = init->cpp;
157 dev_priv->back_offset = init->back_offset;
158 dev_priv->front_offset = init->front_offset;
159 dev_priv->current_page = 0;
160 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
161
162 /* We are using separate values as placeholders for mechanisms for
163 * private backbuffer/depthbuffer usage.
164 */
165 dev_priv->use_mi_batchbuffer_start = 0;
166 if (IS_I965G(dev)) /* 965 doesn't support older method */
167 dev_priv->use_mi_batchbuffer_start = 1;
168
169 /* Allow hardware batchbuffers unless told otherwise.
170 */
171 dev_priv->allow_batchbuffer = 1;
172
173 /* Program Hardware Status Page */
174 if (!I915_NEED_GFX_HWS(dev)) {
175 dev_priv->status_page_dmah =
176 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
177
178 if (!dev_priv->status_page_dmah) {
179 i915_dma_cleanup(dev);
180 DRM_ERROR("Can not allocate hardware status page\n");
181 return -ENOMEM;
182 }
183 dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
184 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
185
186 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
187 I915_WRITE(0x02080, dev_priv->dma_status_page);
188 }
189 DRM_DEBUG("Enabled hardware status page\n");
190 return 0;
191}
192
193static int i915_dma_resume(struct drm_device * dev)
194{
195 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
196
197 DRM_DEBUG("%s\n", __func__);
198
199 if (!dev_priv->sarea) {
200 DRM_ERROR("can not find sarea!\n");
201 return -EINVAL;
202 }
203
204 if (!dev_priv->mmio_map) {
205 DRM_ERROR("can not find mmio map!\n");
206 return -EINVAL;
207 }
208
209 if (dev_priv->ring.map.handle == NULL) {
210 DRM_ERROR("can not ioremap virtual address for"
211 " ring buffer\n");
212 return -ENOMEM;
213 }
214
215 /* Program Hardware Status Page */
216 if (!dev_priv->hw_status_page) {
217 DRM_ERROR("Can not find hardware status page\n");
218 return -EINVAL;
219 }
220 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
221
222 if (dev_priv->status_gfx_addr != 0)
223 I915_WRITE(0x02080, dev_priv->status_gfx_addr);
224 else
225 I915_WRITE(0x02080, dev_priv->dma_status_page);
226 DRM_DEBUG("Enabled hardware status page\n");
227
228 return 0;
229}
230
231static int i915_dma_init(struct drm_device *dev, void *data,
232 struct drm_file *file_priv)
233{
234 drm_i915_init_t *init = data;
235 int retcode = 0;
236
237 switch (init->func) {
238 case I915_INIT_DMA:
239 retcode = i915_initialize(dev, init);
240 break;
241 case I915_CLEANUP_DMA:
242 retcode = i915_dma_cleanup(dev);
243 break;
244 case I915_RESUME_DMA:
245 retcode = i915_dma_resume(dev);
246 break;
247 default:
248 retcode = -EINVAL;
249 break;
250 }
251
252 return retcode;
253}
254
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	int client = (cmd >> 29) & 0x7;
	int sub;

	if (client == 0x0) {
		int op = (cmd >> 23) & 0x3f;
		/* Only MI_NOOP (0x0) and MI_FLUSH (0x4) are permitted. */
		return (op == 0x0 || op == 0x4) ? 1 : 0;
	}
	if (client == 0x2)
		return (cmd & 0xff) + 2;	/* 2d commands */
	if (client != 0x3)
		return 0;			/* 0x1 reserved, rest disallowed */

	/* 3d client: decode by sub-opcode. */
	sub = (cmd >> 24) & 0x1f;
	if (sub <= 0x18)
		return 1;

	switch (sub) {
	case 0x1c:
		return 1;
	case 0x1d:
		switch ((cmd >> 16) & 0xff) {
		case 0x3:
			return (cmd & 0x1f) + 2;
		case 0x4:
			return (cmd & 0xf) + 2;
		default:
			return (cmd & 0xffff) + 2;
		}
	case 0x1e:
		return (cmd & (1 << 23)) ? (cmd & 0xffff) + 1 : 1;
	case 0x1f:
		if ((cmd & (1 << 23)) == 0)	/* inline vertices */
			return (cmd & 0x1ffff) + 2;
		if (cmd & (1 << 17)) {		/* indirect random */
			if ((cmd & 0xffff) == 0)
				return 0;	/* unknown length, too hard */
			return (((cmd & 0xffff) + 1) / 2) + 1;
		}
		return 2;			/* indirect sequential */
	default:
		return 0;
	}
}
321
/* Thin wrapper around do_validate_cmd(); kept as a debug hook point. */
static int validate_cmd(int cmd)
{
	int len = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, len); */

	return len;
}
330
331static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
332{
333 drm_i915_private_t *dev_priv = dev->dev_private;
334 int i;
335 RING_LOCALS;
336
337 if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
338 return -EINVAL;
339
340 BEGIN_LP_RING((dwords+1)&~1);
341
342 for (i = 0; i < dwords;) {
343 int cmd, sz;
344
345 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
346 return -EINVAL;
347
348 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
349 return -EINVAL;
350
351 OUT_RING(cmd);
352
353 while (++i, --sz) {
354 if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
355 sizeof(cmd))) {
356 return -EINVAL;
357 }
358 OUT_RING(cmd);
359 }
360 }
361
362 if (dwords & 1)
363 OUT_RING(0);
364
365 ADVANCE_LP_RING();
366
367 return 0;
368}
369
370static int i915_emit_box(struct drm_device * dev,
371 struct drm_clip_rect __user * boxes,
372 int i, int DR1, int DR4)
373{
374 drm_i915_private_t *dev_priv = dev->dev_private;
375 struct drm_clip_rect box;
376 RING_LOCALS;
377
378 if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
379 return -EFAULT;
380 }
381
382 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
383 DRM_ERROR("Bad box %d,%d..%d,%d\n",
384 box.x1, box.y1, box.x2, box.y2);
385 return -EINVAL;
386 }
387
388 if (IS_I965G(dev)) {
389 BEGIN_LP_RING(4);
390 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
391 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
392 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
393 OUT_RING(DR4);
394 ADVANCE_LP_RING();
395 } else {
396 BEGIN_LP_RING(6);
397 OUT_RING(GFX_OP_DRAWRECT_INFO);
398 OUT_RING(DR1);
399 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
400 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
401 OUT_RING(DR4);
402 OUT_RING(0);
403 ADVANCE_LP_RING();
404 }
405
406 return 0;
407}
408
409/* XXX: Emitting the counter should really be moved to part of the IRQ
410 * emit. For now, do it in both places:
411 */
412
413static void i915_emit_breadcrumb(struct drm_device *dev)
414{
415 drm_i915_private_t *dev_priv = dev->dev_private;
416 RING_LOCALS;
417
418 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
419
420 if (dev_priv->counter > 0x7FFFFFFFUL)
421 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
422
423 BEGIN_LP_RING(4);
424 OUT_RING(CMD_STORE_DWORD_IDX);
425 OUT_RING(20);
426 OUT_RING(dev_priv->counter);
427 OUT_RING(0);
428 ADVANCE_LP_RING();
429}
430
431static int i915_dispatch_cmdbuffer(struct drm_device * dev,
432 drm_i915_cmdbuffer_t * cmd)
433{
434 int nbox = cmd->num_cliprects;
435 int i = 0, count, ret;
436
437 if (cmd->sz & 0x3) {
438 DRM_ERROR("alignment");
439 return -EINVAL;
440 }
441
442 i915_kernel_lost_context(dev);
443
444 count = nbox ? nbox : 1;
445
446 for (i = 0; i < count; i++) {
447 if (i < nbox) {
448 ret = i915_emit_box(dev, cmd->cliprects, i,
449 cmd->DR1, cmd->DR4);
450 if (ret)
451 return ret;
452 }
453
454 ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
455 if (ret)
456 return ret;
457 }
458
459 i915_emit_breadcrumb(dev);
460 return 0;
461}
462
463static int i915_dispatch_batchbuffer(struct drm_device * dev,
464 drm_i915_batchbuffer_t * batch)
465{
466 drm_i915_private_t *dev_priv = dev->dev_private;
467 struct drm_clip_rect __user *boxes = batch->cliprects;
468 int nbox = batch->num_cliprects;
469 int i = 0, count;
470 RING_LOCALS;
471
472 if ((batch->start | batch->used) & 0x7) {
473 DRM_ERROR("alignment");
474 return -EINVAL;
475 }
476
477 i915_kernel_lost_context(dev);
478
479 count = nbox ? nbox : 1;
480
481 for (i = 0; i < count; i++) {
482 if (i < nbox) {
483 int ret = i915_emit_box(dev, boxes, i,
484 batch->DR1, batch->DR4);
485 if (ret)
486 return ret;
487 }
488
489 if (dev_priv->use_mi_batchbuffer_start) {
490 BEGIN_LP_RING(2);
491 if (IS_I965G(dev)) {
492 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
493 OUT_RING(batch->start);
494 } else {
495 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
496 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
497 }
498 ADVANCE_LP_RING();
499 } else {
500 BEGIN_LP_RING(4);
501 OUT_RING(MI_BATCH_BUFFER);
502 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
503 OUT_RING(batch->start + batch->used - 4);
504 OUT_RING(0);
505 ADVANCE_LP_RING();
506 }
507 }
508
509 i915_emit_breadcrumb(dev);
510
511 return 0;
512}
513
514static int i915_dispatch_flip(struct drm_device * dev)
515{
516 drm_i915_private_t *dev_priv = dev->dev_private;
517 RING_LOCALS;
518
519 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
520 __FUNCTION__,
521 dev_priv->current_page,
522 dev_priv->sarea_priv->pf_current_page);
523
524 i915_kernel_lost_context(dev);
525
526 BEGIN_LP_RING(2);
527 OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
528 OUT_RING(0);
529 ADVANCE_LP_RING();
530
531 BEGIN_LP_RING(6);
532 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
533 OUT_RING(0);
534 if (dev_priv->current_page == 0) {
535 OUT_RING(dev_priv->back_offset);
536 dev_priv->current_page = 1;
537 } else {
538 OUT_RING(dev_priv->front_offset);
539 dev_priv->current_page = 0;
540 }
541 OUT_RING(0);
542 ADVANCE_LP_RING();
543
544 BEGIN_LP_RING(2);
545 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
546 OUT_RING(0);
547 ADVANCE_LP_RING();
548
549 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
550
551 BEGIN_LP_RING(4);
552 OUT_RING(CMD_STORE_DWORD_IDX);
553 OUT_RING(20);
554 OUT_RING(dev_priv->counter);
555 OUT_RING(0);
556 ADVANCE_LP_RING();
557
558 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
559 return 0;
560}
561
562static int i915_quiescent(struct drm_device * dev)
563{
564 drm_i915_private_t *dev_priv = dev->dev_private;
565
566 i915_kernel_lost_context(dev);
567 return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
568}
569
/* DRM_I915_FLUSH ioctl: wait for the ring to drain. */
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}
577
578static int i915_batchbuffer(struct drm_device *dev, void *data,
579 struct drm_file *file_priv)
580{
581 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
582 u32 *hw_status = dev_priv->hw_status_page;
583 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
584 dev_priv->sarea_priv;
585 drm_i915_batchbuffer_t *batch = data;
586 int ret;
587
588 if (!dev_priv->allow_batchbuffer) {
589 DRM_ERROR("Batchbuffer ioctl disabled\n");
590 return -EINVAL;
591 }
592
593 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
594 batch->start, batch->used, batch->num_cliprects);
595
596 LOCK_TEST_WITH_RETURN(dev, file_priv);
597
598 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
599 batch->num_cliprects *
600 sizeof(struct drm_clip_rect)))
601 return -EFAULT;
602
603 ret = i915_dispatch_batchbuffer(dev, batch);
604
605 sarea_priv->last_dispatch = (int)hw_status[5];
606 return ret;
607}
608
609static int i915_cmdbuffer(struct drm_device *dev, void *data,
610 struct drm_file *file_priv)
611{
612 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
613 u32 *hw_status = dev_priv->hw_status_page;
614 drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
615 dev_priv->sarea_priv;
616 drm_i915_cmdbuffer_t *cmdbuf = data;
617 int ret;
618
619 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
620 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
621
622 LOCK_TEST_WITH_RETURN(dev, file_priv);
623
624 if (cmdbuf->num_cliprects &&
625 DRM_VERIFYAREA_READ(cmdbuf->cliprects,
626 cmdbuf->num_cliprects *
627 sizeof(struct drm_clip_rect))) {
628 DRM_ERROR("Fault accessing cliprects\n");
629 return -EFAULT;
630 }
631
632 ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
633 if (ret) {
634 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
635 return ret;
636 }
637
638 sarea_priv->last_dispatch = (int)hw_status[5];
639 return 0;
640}
641
/* DRM_I915_FLIP ioctl: schedule an asynchronous page flip.
 *
 * Fixed: use standard C99 __func__ instead of the GNU-specific
 * __FUNCTION__, matching the rest of this file. */
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("%s\n", __func__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_dispatch_flip(dev);
}
651
652static int i915_getparam(struct drm_device *dev, void *data,
653 struct drm_file *file_priv)
654{
655 drm_i915_private_t *dev_priv = dev->dev_private;
656 drm_i915_getparam_t *param = data;
657 int value;
658
659 if (!dev_priv) {
660 DRM_ERROR("called with no initialization\n");
661 return -EINVAL;
662 }
663
664 switch (param->param) {
665 case I915_PARAM_IRQ_ACTIVE:
666 value = dev->irq ? 1 : 0;
667 break;
668 case I915_PARAM_ALLOW_BATCHBUFFER:
669 value = dev_priv->allow_batchbuffer ? 1 : 0;
670 break;
671 case I915_PARAM_LAST_DISPATCH:
672 value = READ_BREADCRUMB(dev_priv);
673 break;
674 default:
675 DRM_ERROR("Unknown parameter %d\n", param->param);
676 return -EINVAL;
677 }
678
679 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
680 DRM_ERROR("DRM_COPY_TO_USER failed\n");
681 return -EFAULT;
682 }
683
684 return 0;
685}
686
687static int i915_setparam(struct drm_device *dev, void *data,
688 struct drm_file *file_priv)
689{
690 drm_i915_private_t *dev_priv = dev->dev_private;
691 drm_i915_setparam_t *param = data;
692
693 if (!dev_priv) {
694 DRM_ERROR("called with no initialization\n");
695 return -EINVAL;
696 }
697
698 switch (param->param) {
699 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
700 if (!IS_I965G(dev))
701 dev_priv->use_mi_batchbuffer_start = param->value;
702 break;
703 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
704 dev_priv->tex_lru_log_granularity = param->value;
705 break;
706 case I915_SETPARAM_ALLOW_BATCHBUFFER:
707 dev_priv->allow_batchbuffer = param->value;
708 break;
709 default:
710 DRM_ERROR("unknown parameter %d\n", param->param);
711 return -EINVAL;
712 }
713
714 return 0;
715}
716
717static int i915_set_status_page(struct drm_device *dev, void *data,
718 struct drm_file *file_priv)
719{
720 drm_i915_private_t *dev_priv = dev->dev_private;
721 drm_i915_hws_addr_t *hws = data;
722
723 if (!I915_NEED_GFX_HWS(dev))
724 return -EINVAL;
725
726 if (!dev_priv) {
727 DRM_ERROR("called with no initialization\n");
728 return -EINVAL;
729 }
730
731 printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
732
733 dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
734
735 dev_priv->hws_map.offset = dev->agp->base + hws->addr;
736 dev_priv->hws_map.size = 4*1024;
737 dev_priv->hws_map.type = 0;
738 dev_priv->hws_map.flags = 0;
739 dev_priv->hws_map.mtrr = 0;
740
741 drm_core_ioremap(&dev_priv->hws_map, dev);
742 if (dev_priv->hws_map.handle == NULL) {
743 i915_dma_cleanup(dev);
744 dev_priv->status_gfx_addr = 0;
745 DRM_ERROR("can not ioremap virtual address for"
746 " G33 hw status page\n");
747 return -ENOMEM;
748 }
749 dev_priv->hw_status_page = dev_priv->hws_map.handle;
750
751 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
752 I915_WRITE(0x02080, dev_priv->status_gfx_addr);
753 DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
754 dev_priv->status_gfx_addr);
755 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
756 return 0;
757}
758
759int i915_driver_load(struct drm_device *dev, unsigned long flags)
760{
761 struct drm_i915_private *dev_priv = dev->dev_private;
762 unsigned long base, size;
763 int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
764
765 /* i915 has 4 more counters */
766 dev->counters += 4;
767 dev->types[6] = _DRM_STAT_IRQ;
768 dev->types[7] = _DRM_STAT_PRIMARY;
769 dev->types[8] = _DRM_STAT_SECONDARY;
770 dev->types[9] = _DRM_STAT_DMA;
771
772 dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
773 if (dev_priv == NULL)
774 return -ENOMEM;
775
776 memset(dev_priv, 0, sizeof(drm_i915_private_t));
777
778 dev->dev_private = (void *)dev_priv;
779
780 /* Add register map (needed for suspend/resume) */
781 base = drm_get_resource_start(dev, mmio_bar);
782 size = drm_get_resource_len(dev, mmio_bar);
783
784 ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
785 _DRM_KERNEL | _DRM_DRIVER,
786 &dev_priv->mmio_map);
787 return ret;
788}
789
790int i915_driver_unload(struct drm_device *dev)
791{
792 struct drm_i915_private *dev_priv = dev->dev_private;
793
794 if (dev_priv->mmio_map)
795 drm_rmmap(dev, dev_priv->mmio_map);
796
797 drm_free(dev->dev_private, sizeof(drm_i915_private_t),
798 DRM_MEM_DRIVER);
799
800 return 0;
801}
802
803void i915_driver_lastclose(struct drm_device * dev)
804{
805 drm_i915_private_t *dev_priv = dev->dev_private;
806
807 if (!dev_priv)
808 return;
809
810 if (dev_priv->agp_heap)
811 i915_mem_takedown(&(dev_priv->agp_heap));
812
813 i915_dma_cleanup(dev);
814}
815
816void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
817{
818 drm_i915_private_t *dev_priv = dev->dev_private;
819 i915_mem_release(dev, file_priv, dev_priv->agp_heap);
820}
821
822struct drm_ioctl_desc i915_ioctls[] = {
823 DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
824 DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
825 DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
826 DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
827 DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
828 DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
829 DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
830 DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
831 DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
832 DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
833 DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
834 DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
835 DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
836 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
837 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
838 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
839 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
840};
841
842int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
843
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
new file mode 100644
index 000000000000..93aed1c38bd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -0,0 +1,605 @@
1/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drm.h"
33#include "i915_drv.h"
34
35#include "drm_pciids.h"
36
/* PCI IDs this driver binds to; the actual list is generated by the
 * i915_PCI_IDS macro in drm_pciids.h.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(pci, pciidlist) here,
 * so the module is not auto-loaded on device discovery — confirm that
 * is intentional.
 */
static struct pci_device_id pciidlist[] = {
	i915_PCI_IDS
};
40
/* Display pipe selector — this hardware has two CRTC pipes, A and B. */
enum pipe {
	PIPE_A = 0,
	PIPE_B,
};
45
46static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
47{
48 struct drm_i915_private *dev_priv = dev->dev_private;
49
50 if (pipe == PIPE_A)
51 return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
52 else
53 return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
54}
55
56static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
57{
58 struct drm_i915_private *dev_priv = dev->dev_private;
59 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
60 u32 *array;
61 int i;
62
63 if (!i915_pipe_enabled(dev, pipe))
64 return;
65
66 if (pipe == PIPE_A)
67 array = dev_priv->save_palette_a;
68 else
69 array = dev_priv->save_palette_b;
70
71 for(i = 0; i < 256; i++)
72 array[i] = I915_READ(reg + (i << 2));
73}
74
75static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
76{
77 struct drm_i915_private *dev_priv = dev->dev_private;
78 unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
79 u32 *array;
80 int i;
81
82 if (!i915_pipe_enabled(dev, pipe))
83 return;
84
85 if (pipe == PIPE_A)
86 array = dev_priv->save_palette_a;
87 else
88 array = dev_priv->save_palette_b;
89
90 for(i = 0; i < 256; i++)
91 I915_WRITE(reg + (i << 2), array[i]);
92}
93
/* Read one register of an index/data VGA register pair: select @reg via
 * @index_port, then read its value from @data_port.
 */
static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
{
	outb(reg, index_port);
	return inb(data_port);
}
99
/* Read VGA attribute-controller register @reg.  The AR unit shares one
 * port for index and data, toggled by a flip-flop: reading the status
 * port @st01 resets the flip-flop to "index" mode, then the index
 * (optionally with the palette-enable bit in @palette_enable) is
 * written and the value read back through the dedicated read port.
 */
static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
{
	inb(st01);
	outb(palette_enable | reg, VGA_AR_INDEX);
	return inb(VGA_AR_DATA_READ);
}
106
107static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
108{
109 inb(st01);
110 outb(palette_enable | reg, VGA_AR_INDEX);
111 outb(val, VGA_AR_DATA_WRITE);
112}
113
/* Write one register of an index/data VGA register pair: select @reg
 * via @index_port, then write @val to @data_port.
 */
static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
{
	outb(reg, index_port);
	outb(val, data_port);
}
119
/* Save legacy VGA state: DAC palette, MSR, CRT controller, attribute
 * controller, graphics controller and sequencer registers.  The I/O
 * ordering matters throughout — most of these units are accessed
 * through index/data pairs with stateful auto-increment or flip-flop
 * addressing.
 */
static void i915_save_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	u16 cr_index, cr_data, st01;

	/* VGA color palette registers */
	dev_priv->saveDACMASK = inb(VGA_DACMASK);
	/* DACCRX automatically increments during read */
	outb(0, VGA_DACRX);
	/* Read 3 bytes of color data from each index */
	for (i = 0; i < 256 * 3; i++)
		dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);

	/* MSR bits — bit 0 selects CGA vs MDA addressing for the CRT
	 * controller and status ports, so sample it before touching CR. */
	dev_priv->saveMSR = inb(VGA_MSR_READ);
	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* CRT controller regs: clear the CR11 lock bit (0x80) first so
	 * the protected CR0-7 group can be read/written normally. */
	i915_write_indexed(cr_index, cr_data, 0x11,
			   i915_read_indexed(cr_index, cr_data, 0x11) &
			   (~0x80));
	for (i = 0; i <= 0x24; i++)
		dev_priv->saveCR[i] =
			i915_read_indexed(cr_index, cr_data, i);
	/* Make sure we don't turn off CR group 0 writes */
	dev_priv->saveCR[0x11] &= ~0x80;

	/* Attribute controller registers: reading st01 resets the AR
	 * index/data flip-flop before each access. */
	inb(st01);
	dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
	for (i = 0; i <= 0x14; i++)
		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
	inb(st01);
	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
	inb(st01);

	/* Graphics controller registers */
	for (i = 0; i < 9; i++)
		dev_priv->saveGR[i] =
			i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);

	/* Intel-specific extended GR registers */
	dev_priv->saveGR[0x10] =
		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
	dev_priv->saveGR[0x11] =
		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
	dev_priv->saveGR[0x18] =
		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);

	/* Sequencer registers */
	for (i = 0; i < 8; i++)
		dev_priv->saveSR[i] =
			i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
}
182
/* Restore the legacy VGA state captured by i915_save_vga(), in the
 * inverse order where it matters (MSR first, since it selects the
 * CGA/MDA port addresses used below).
 */
static void i915_restore_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	u16 cr_index, cr_data, st01;

	/* MSR bits */
	outb(dev_priv->saveMSR, VGA_MSR_WRITE);
	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
		cr_index = VGA_CR_INDEX_CGA;
		cr_data = VGA_CR_DATA_CGA;
		st01 = VGA_ST01_CGA;
	} else {
		cr_index = VGA_CR_INDEX_MDA;
		cr_data = VGA_CR_DATA_MDA;
		st01 = VGA_ST01_MDA;
	}

	/* Sequencer registers, don't write SR07 */
	for (i = 0; i < 7; i++)
		i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
				   dev_priv->saveSR[i]);

	/* CRT controller regs */
	/* Enable CR group 0 writes (saved CR11 has the lock bit cleared) */
	i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
	for (i = 0; i <= 0x24; i++)
		i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);

	/* Graphics controller regs */
	for (i = 0; i < 9; i++)
		i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
				   dev_priv->saveGR[i]);

	/* Intel-specific extended GR registers */
	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
			   dev_priv->saveGR[0x10]);
	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
			   dev_priv->saveGR[0x11]);
	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
			   dev_priv->saveGR[0x18]);

	/* Attribute controller registers */
	inb(st01);
	for (i = 0; i <= 0x14; i++)
		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
	inb(st01); /* switch back to index mode */
	/* 0x20 is VGA_AR_VID_EN: re-enable video output after restore */
	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
	inb(st01);

	/* VGA color palette registers */
	outb(dev_priv->saveDACMASK, VGA_DACMASK);
	/* DACWX (the write index) automatically increments during writes */
	outb(0, VGA_DACWX);
	/* Write 3 bytes of color data for each index */
	for (i = 0; i < 256 * 3; i++)
		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);

}
241
242static int i915_suspend(struct drm_device *dev, pm_message_t state)
243{
244 struct drm_i915_private *dev_priv = dev->dev_private;
245 int i;
246
247 if (!dev || !dev_priv) {
248 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
249 printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
250 return -ENODEV;
251 }
252
253 if (state.event == PM_EVENT_PRETHAW)
254 return 0;
255
256 pci_save_state(dev->pdev);
257 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
258
259 /* Display arbitration control */
260 dev_priv->saveDSPARB = I915_READ(DSPARB);
261
262 /* Pipe & plane A info */
263 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
264 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
265 dev_priv->saveFPA0 = I915_READ(FPA0);
266 dev_priv->saveFPA1 = I915_READ(FPA1);
267 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
268 if (IS_I965G(dev))
269 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
270 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
271 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
272 dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
273 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
274 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
275 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
276 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
277
278 dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
279 dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
280 dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
281 dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
282 dev_priv->saveDSPABASE = I915_READ(DSPABASE);
283 if (IS_I965G(dev)) {
284 dev_priv->saveDSPASURF = I915_READ(DSPASURF);
285 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
286 }
287 i915_save_palette(dev, PIPE_A);
288 dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
289
290 /* Pipe & plane B info */
291 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
292 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
293 dev_priv->saveFPB0 = I915_READ(FPB0);
294 dev_priv->saveFPB1 = I915_READ(FPB1);
295 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
296 if (IS_I965G(dev))
297 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
298 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
299 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
300 dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
301 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
302 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
303 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
304 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
305
306 dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
307 dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
308 dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
309 dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
310 dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
311 if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
312 dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
313 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
314 }
315 i915_save_palette(dev, PIPE_B);
316 dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
317
318 /* CRT state */
319 dev_priv->saveADPA = I915_READ(ADPA);
320
321 /* LVDS state */
322 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
323 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
324 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
325 if (IS_I965G(dev))
326 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
327 if (IS_MOBILE(dev) && !IS_I830(dev))
328 dev_priv->saveLVDS = I915_READ(LVDS);
329 if (!IS_I830(dev) && !IS_845G(dev))
330 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
331 dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
332 dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
333 dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
334
335 /* FIXME: save TV & SDVO state */
336
337 /* FBC state */
338 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
339 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
340 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
341 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
342
343 /* Interrupt state */
344 dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
345 dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
346 dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
347
348 /* VGA state */
349 dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
350 dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
351 dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
352 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
353
354 /* Clock gating state */
355 dev_priv->saveD_STATE = I915_READ(D_STATE);
356 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
357
358 /* Cache mode state */
359 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
360
361 /* Memory Arbitration state */
362 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
363
364 /* Scratch space */
365 for (i = 0; i < 16; i++) {
366 dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
367 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
368 }
369 for (i = 0; i < 3; i++)
370 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
371
372 i915_save_vga(dev);
373
374 if (state.event == PM_EVENT_SUSPEND) {
375 /* Shut down the device */
376 pci_disable_device(dev->pdev);
377 pci_set_power_state(dev->pdev, PCI_D3hot);
378 }
379
380 return 0;
381}
382
383static int i915_resume(struct drm_device *dev)
384{
385 struct drm_i915_private *dev_priv = dev->dev_private;
386 int i;
387
388 pci_set_power_state(dev->pdev, PCI_D0);
389 pci_restore_state(dev->pdev);
390 if (pci_enable_device(dev->pdev))
391 return -1;
392 pci_set_master(dev->pdev);
393
394 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
395
396 I915_WRITE(DSPARB, dev_priv->saveDSPARB);
397
398 /* Pipe & plane A info */
399 /* Prime the clock */
400 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
401 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
402 ~DPLL_VCO_ENABLE);
403 udelay(150);
404 }
405 I915_WRITE(FPA0, dev_priv->saveFPA0);
406 I915_WRITE(FPA1, dev_priv->saveFPA1);
407 /* Actually enable it */
408 I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
409 udelay(150);
410 if (IS_I965G(dev))
411 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
412 udelay(150);
413
414 /* Restore mode */
415 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
416 I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
417 I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
418 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
419 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
420 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
421 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
422
423 /* Restore plane info */
424 I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
425 I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
426 I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
427 I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
428 I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
429 if (IS_I965G(dev)) {
430 I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
431 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
432 }
433
434 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
435
436 i915_restore_palette(dev, PIPE_A);
437 /* Enable the plane */
438 I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
439 I915_WRITE(DSPABASE, I915_READ(DSPABASE));
440
441 /* Pipe & plane B info */
442 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
443 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
444 ~DPLL_VCO_ENABLE);
445 udelay(150);
446 }
447 I915_WRITE(FPB0, dev_priv->saveFPB0);
448 I915_WRITE(FPB1, dev_priv->saveFPB1);
449 /* Actually enable it */
450 I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
451 udelay(150);
452 if (IS_I965G(dev))
453 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
454 udelay(150);
455
456 /* Restore mode */
457 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
458 I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
459 I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
460 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
461 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
462 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
463 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
464
465 /* Restore plane info */
466 I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
467 I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
468 I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
469 I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
470 I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
471 if (IS_I965G(dev)) {
472 I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
473 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
474 }
475
476 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
477
478 i915_restore_palette(dev, PIPE_B);
479 /* Enable the plane */
480 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
481 I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
482
483 /* CRT state */
484 I915_WRITE(ADPA, dev_priv->saveADPA);
485
486 /* LVDS state */
487 if (IS_I965G(dev))
488 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
489 if (IS_MOBILE(dev) && !IS_I830(dev))
490 I915_WRITE(LVDS, dev_priv->saveLVDS);
491 if (!IS_I830(dev) && !IS_845G(dev))
492 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
493
494 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
495 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
496 I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
497 I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
498 I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
499 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
500
501 /* FIXME: restore TV & SDVO state */
502
503 /* FBC info */
504 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
505 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
506 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
507 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
508
509 /* VGA state */
510 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
511 I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
512 I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
513 I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
514 udelay(150);
515
516 /* Clock gating state */
517 I915_WRITE (D_STATE, dev_priv->saveD_STATE);
518 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
519
520 /* Cache mode state */
521 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
522
523 /* Memory arbitration state */
524 I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
525
526 for (i = 0; i < 16; i++) {
527 I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
528 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
529 }
530 for (i = 0; i < 3; i++)
531 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
532
533 i915_restore_vga(dev);
534
535 return 0;
536}
537
/* DRM core driver description: feature flags, lifecycle hooks,
 * interrupt/vblank hooks, the ioctl table defined in i915_dma.c, and
 * the embedded file_operations / pci_driver used for registration.
 */
static struct drm_driver driver = {
	/* don't use mtrr's here, the Xserver or user space app should
	 * deal with them for intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
	    DRIVER_IRQ_VBL2,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.suspend = i915_suspend,
	.resume = i915_resume,
	.device_is_agp = i915_driver_device_is_agp,
	.vblank_wait = i915_driver_vblank_wait,
	.vblank_wait2 = i915_driver_vblank_wait2,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,
	.irq_handler = i915_driver_irq_handler,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = i915_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
		 .compat_ioctl = i915_compat_ioctl,
#endif
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
588
/* Module entry point: fill in the ioctl count (the table lives in
 * another file, so it cannot be a compile-time initializer here) and
 * register with the DRM core.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;
	return drm_init(&driver);
}
594
/* Module exit point: unregister the driver from the DRM core. */
static void __exit i915_exit(void)
{
	drm_exit(&driver);
}
599
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
/* "GPL and additional rights" marks the MIT-style license in the file
 * header as GPL-compatible for the kernel's license taint machinery. */
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
new file mode 100644
index 000000000000..d7326d92a237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -0,0 +1,1142 @@
1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
2 */
3/*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_
32
33/* General customization:
34 */
35
36#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
37
38#define DRIVER_NAME "i915"
39#define DRIVER_DESC "Intel Graphics"
40#define DRIVER_DATE "20060119"
41
42/* Interface history:
43 *
44 * 1.1: Original.
45 * 1.2: Add Power Management
46 * 1.3: Add vblank support
47 * 1.4: Fix cmdbuffer path, add heap destroy
48 * 1.5: Add vblank pipe configuration
49 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
50 * - Support vertical blank on secondary display pipe
51 */
52#define DRIVER_MAJOR 1
53#define DRIVER_MINOR 6
54#define DRIVER_PATCHLEVEL 0
55
/* Software bookkeeping for the hardware command ring (LP ring). */
typedef struct _drm_i915_ring_buffer {
	int tail_mask;		/* wrap mask applied to tail by OUT_RING;
				 * presumably Size-1 — confirm at init */
	unsigned long Start;
	unsigned long End;
	unsigned long Size;
	u8 *virtual_start;	/* CPU mapping used for command emission */
	int head;
	int tail;
	int space;		/* bytes believed free between head and tail */
	drm_local_map_t map;
} drm_i915_ring_buffer_t;
67
/* Node in the doubly-linked allocation list managed by i915_mem.c
 * for the AGP heap (see agp_heap in drm_i915_private). */
struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;			/* offset of this block */
	int size;			/* length of this block */
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
75
/* One queued vblank-synchronized buffer swap (see i915_vblank_swap()). */
typedef struct _drm_i915_vbl_swap {
	struct list_head head;		/* link in dev_priv->vbl_swaps */
	drm_drawable_t drw_id;
	unsigned int pipe;		/* pipe whose vblank triggers the swap */
	unsigned int sequence;		/* presumably the vblank count to swap
					 * at — confirm in i915_irq.c */
} drm_i915_vbl_swap_t;
82
/* Per-device driver state, hung off drm_device::dev_private.  The
 * large "Register state" tail is filled by i915_suspend() and consumed
 * by i915_resume(). */
typedef struct drm_i915_private {
	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;	/* register aperture; backs I915_READ/WRITE */

	drm_i915_sarea_t *sarea_priv;
	drm_i915_ring_buffer_t ring;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	unsigned long counter;
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;

	unsigned int cpp;		/* bytes per pixel */
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;
	int use_mi_batchbuffer_start;

	wait_queue_head_t irq_queue;
	atomic_t irq_received;
	atomic_t irq_emitted;

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	struct mem_block *agp_heap;	/* managed by i915_mem.c */
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int vblank_pipe;

	/* Queue of pending vblank swaps, guarded by swaps_lock */
	spinlock_t swaps_lock;
	drm_i915_vbl_swap_t vbl_swaps;
	unsigned int swaps_pending;

	/* Register state (suspend/resume snapshot) */
	u8 saveLBB;			/* PCI config byte at offset LBB */
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPABASE;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBBASE;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVCLK_DIVISOR_VGA0;
	u32 saveVCLK_DIVISOR_VGA1;
	u32 saveVCLK_POST_DIV;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 saveLVDSPP_ON;
	u32 saveLVDSPP_OFF;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_CYCLE;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveCACHE_MODE_0;
	u32 saveD_STATE;
	u32 saveDSPCLK_GATE_D;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];		/* indices 0-8 plus extended 0x10/0x11/0x18 */
	u8 saveAR_INDEX;
	u8 saveAR[21];		/* AR00-AR14 */
	u8 saveDACMASK;
	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
	u8 saveCR[37];		/* CR00-CR24 */
} drm_i915_private_t;
207
208extern struct drm_ioctl_desc i915_ioctls[];
209extern int i915_max_ioctl;
210
211 /* i915_dma.c */
212extern void i915_kernel_lost_context(struct drm_device * dev);
213extern int i915_driver_load(struct drm_device *, unsigned long flags);
214extern int i915_driver_unload(struct drm_device *);
215extern void i915_driver_lastclose(struct drm_device * dev);
216extern void i915_driver_preclose(struct drm_device *dev,
217 struct drm_file *file_priv);
218extern int i915_driver_device_is_agp(struct drm_device * dev);
219extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
220 unsigned long arg);
221
222/* i915_irq.c */
223extern int i915_irq_emit(struct drm_device *dev, void *data,
224 struct drm_file *file_priv);
225extern int i915_irq_wait(struct drm_device *dev, void *data,
226 struct drm_file *file_priv);
227
228extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
229extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
230extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
231extern void i915_driver_irq_preinstall(struct drm_device * dev);
232extern void i915_driver_irq_postinstall(struct drm_device * dev);
233extern void i915_driver_irq_uninstall(struct drm_device * dev);
234extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
235 struct drm_file *file_priv);
236extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
237 struct drm_file *file_priv);
238extern int i915_vblank_swap(struct drm_device *dev, void *data,
239 struct drm_file *file_priv);
240
241/* i915_mem.c */
242extern int i915_mem_alloc(struct drm_device *dev, void *data,
243 struct drm_file *file_priv);
244extern int i915_mem_free(struct drm_device *dev, void *data,
245 struct drm_file *file_priv);
246extern int i915_mem_init_heap(struct drm_device *dev, void *data,
247 struct drm_file *file_priv);
248extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
249 struct drm_file *file_priv);
250extern void i915_mem_takedown(struct mem_block **heap);
251extern void i915_mem_release(struct drm_device * dev,
252 struct drm_file *file_priv, struct mem_block *heap);
253
/* MMIO accessors: all register reads/writes go through the mmio_map
 * aperture; a local `dev_priv` must be in scope at the call site. */
#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))

#define I915_VERBOSE 0

/* Ring emission protocol: declare RING_LOCALS, then
 * BEGIN_LP_RING(n) / n x OUT_RING(dword) / ADVANCE_LP_RING().
 * BEGIN waits until at least n dwords are free; ADVANCE publishes the
 * new tail to the hardware. */
#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
                        volatile char *virt;

#define BEGIN_LP_RING(n) do {				\
	if (I915_VERBOSE)				\
		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
	if (dev_priv->ring.space < (n)*4)		\
		i915_wait_ring(dev, (n)*4, __func__);	\
	outcount = 0;					\
	outring = dev_priv->ring.tail;			\
	ringmask = dev_priv->ring.tail_mask;		\
	virt = dev_priv->ring.virtual_start;		\
} while (0)

#define OUT_RING(n) do {					\
	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = (n);	\
	outcount++;						\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

#define ADVANCE_LP_RING() do {						\
	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
	dev_priv->ring.tail = outring;					\
	dev_priv->ring.space -= outcount * 4;				\
	I915_WRITE(LP_RING + RING_TAIL, outring);			\
} while(0)
289
290extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
291
292/* Extended config space */
293#define LBB 0xf4
294
295/* VGA stuff */
296
297#define VGA_ST01_MDA 0x3ba
298#define VGA_ST01_CGA 0x3da
299
300#define VGA_MSR_WRITE 0x3c2
301#define VGA_MSR_READ 0x3cc
302#define VGA_MSR_MEM_EN (1<<1)
303#define VGA_MSR_CGA_MODE (1<<0)
304
305#define VGA_SR_INDEX 0x3c4
306#define VGA_SR_DATA 0x3c5
307
308#define VGA_AR_INDEX 0x3c0
309#define VGA_AR_VID_EN (1<<5)
310#define VGA_AR_DATA_WRITE 0x3c0
311#define VGA_AR_DATA_READ 0x3c1
312
313#define VGA_GR_INDEX 0x3ce
314#define VGA_GR_DATA 0x3cf
315/* GR05 */
316#define VGA_GR_MEM_READ_MODE_SHIFT 3
317#define VGA_GR_MEM_READ_MODE_PLANE 1
318/* GR06 */
319#define VGA_GR_MEM_MODE_MASK 0xc
320#define VGA_GR_MEM_MODE_SHIFT 2
321#define VGA_GR_MEM_A0000_AFFFF 0
322#define VGA_GR_MEM_A0000_BFFFF 1
323#define VGA_GR_MEM_B0000_B7FFF 2
324#define VGA_GR_MEM_B0000_BFFFF 3
325
326#define VGA_DACMASK 0x3c6
327#define VGA_DACRX 0x3c7
328#define VGA_DACWX 0x3c8
329#define VGA_DACDATA 0x3c9
330
331#define VGA_CR_INDEX_MDA 0x3b4
332#define VGA_CR_DATA_MDA 0x3b5
333#define VGA_CR_INDEX_CGA 0x3d4
334#define VGA_CR_DATA_CGA 0x3d5
335
336#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
337#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
338#define CMD_REPORT_HEAD (7<<23)
339#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
340#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
341
342#define INST_PARSER_CLIENT 0x00000000
343#define INST_OP_FLUSH 0x02000000
344#define INST_FLUSH_MAP_CACHE 0x00000001
345
346#define BB1_START_ADDR_MASK (~0x7)
347#define BB1_PROTECTED (1<<0)
348#define BB1_UNPROTECTED (0<<0)
349#define BB2_END_ADDR_MASK (~0x7)
350
351/* Framebuffer compression */
352#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
353#define FBC_LL_BASE 0x03204 /* 4k page aligned */
354#define FBC_CONTROL 0x03208
355#define FBC_CTL_EN (1<<31)
356#define FBC_CTL_PERIODIC (1<<30)
357#define FBC_CTL_INTERVAL_SHIFT (16)
358#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
359#define FBC_CTL_STRIDE_SHIFT (5)
360#define FBC_CTL_FENCENO (1<<0)
361#define FBC_COMMAND 0x0320c
362#define FBC_CMD_COMPRESS (1<<0)
363#define FBC_STATUS 0x03210
364#define FBC_STAT_COMPRESSING (1<<31)
365#define FBC_STAT_COMPRESSED (1<<30)
366#define FBC_STAT_MODIFIED (1<<29)
367#define FBC_STAT_CURRENT_LINE (1<<0)
368#define FBC_CONTROL2 0x03214
369#define FBC_CTL_FENCE_DBL (0<<4)
370#define FBC_CTL_IDLE_IMM (0<<2)
371#define FBC_CTL_IDLE_FULL (1<<2)
372#define FBC_CTL_IDLE_LINE (2<<2)
373#define FBC_CTL_IDLE_DEBUG (3<<2)
374#define FBC_CTL_CPU_FENCE (1<<1)
375#define FBC_CTL_PLANEA (0<<0)
376#define FBC_CTL_PLANEB (1<<0)
377#define FBC_FENCE_OFF 0x0321b
378
379#define FBC_LL_SIZE (1536)
380#define FBC_LL_PAD (32)
381
382/* Interrupt bits:
383 */
384#define USER_INT_FLAG (1<<1)
385#define VSYNC_PIPEB_FLAG (1<<5)
386#define VSYNC_PIPEA_FLAG (1<<7)
387#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
388
389#define I915REG_HWSTAM 0x02098
390#define I915REG_INT_IDENTITY_R 0x020a4
391#define I915REG_INT_MASK_R 0x020a8
392#define I915REG_INT_ENABLE_R 0x020a0
393
394#define I915REG_PIPEASTAT 0x70024
395#define I915REG_PIPEBSTAT 0x71024
396
397#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
398#define I915_VBLANK_CLEAR (1UL<<1)
399
400#define SRX_INDEX 0x3c4
401#define SRX_DATA 0x3c5
402#define SR01 1
403#define SR01_SCREEN_OFF (1<<5)
404
405#define PPCR 0x61204
406#define PPCR_ON (1<<0)
407
408#define DVOB 0x61140
409#define DVOB_ON (1<<31)
410#define DVOC 0x61160
411#define DVOC_ON (1<<31)
412#define LVDS 0x61180
413#define LVDS_ON (1<<31)
414
415#define ADPA 0x61100
416#define ADPA_DPMS_MASK (~(3<<10))
417#define ADPA_DPMS_ON (0<<10)
418#define ADPA_DPMS_SUSPEND (1<<10)
419#define ADPA_DPMS_STANDBY (2<<10)
420#define ADPA_DPMS_OFF (3<<10)
421
422#define NOPID 0x2094
423#define LP_RING 0x2030
424#define HP_RING 0x2040
425/* The binner has its own ring buffer:
426 */
427#define HWB_RING 0x2400
428
429#define RING_TAIL 0x00
430#define TAIL_ADDR 0x001FFFF8
431#define RING_HEAD 0x04
432#define HEAD_WRAP_COUNT 0xFFE00000
433#define HEAD_WRAP_ONE 0x00200000
434#define HEAD_ADDR 0x001FFFFC
435#define RING_START 0x08
/* Ring buffer starting graphics address mask for RING_START.
 * Fix: the literal was malformed as "0x0xFFFFF000", which is not a valid
 * C hexadecimal constant and would break any use of this macro. */
#define START_ADDR             0xFFFFF000
437#define RING_LEN 0x0C
438#define RING_NR_PAGES 0x001FF000
439#define RING_REPORT_MASK 0x00000006
440#define RING_REPORT_64K 0x00000002
441#define RING_REPORT_128K 0x00000004
442#define RING_NO_REPORT 0x00000000
443#define RING_VALID_MASK 0x00000001
444#define RING_VALID 0x00000001
445#define RING_INVALID 0x00000000
446
447/* Instruction parser error reg:
448 */
449#define IPEIR 0x2088
450
451/* Scratch pad debug 0 reg:
452 */
453#define SCPD0 0x209c
454
455/* Error status reg:
456 */
457#define ESR 0x20b8
458
459/* Secondary DMA fetch address debug reg:
460 */
461#define DMA_FADD_S 0x20d4
462
463/* Memory Interface Arbitration State
464 */
465#define MI_ARB_STATE 0x20e4
466
467/* Cache mode 0 reg.
468 * - Manipulating render cache behaviour is central
469 * to the concept of zone rendering, tuning this reg can help avoid
470 * unnecessary render cache reads and even writes (for z/stencil)
471 * at beginning and end of scene.
472 *
473 * - To change a bit, write to this reg with a mask bit set and the
474 * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
475 */
476#define Cache_Mode_0 0x2120
477#define CACHE_MODE_0 0x2120
478#define CM0_MASK_SHIFT 16
479#define CM0_IZ_OPT_DISABLE (1<<6)
480#define CM0_ZR_OPT_DISABLE (1<<5)
481#define CM0_DEPTH_EVICT_DISABLE (1<<4)
482#define CM0_COLOR_EVICT_DISABLE (1<<3)
483#define CM0_DEPTH_WRITE_DISABLE (1<<1)
484#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
485
486
487/* Graphics flush control. A CPU write flushes the GWB of all writes.
488 * The data is discarded.
489 */
490#define GFX_FLSH_CNTL 0x2170
491
492/* Binner control. Defines the location of the bin pointer list:
493 */
494#define BINCTL 0x2420
495#define BC_MASK (1 << 9)
496
497/* Binned scene info.
498 */
499#define BINSCENE 0x2428
500#define BS_OP_LOAD (1 << 8)
501#define BS_MASK (1 << 22)
502
503/* Bin command parser debug reg:
504 */
505#define BCPD 0x2480
506
507/* Bin memory control debug reg:
508 */
509#define BMCD 0x2484
510
511/* Bin data cache debug reg:
512 */
513#define BDCD 0x2488
514
515/* Binner pointer cache debug reg:
516 */
517#define BPCD 0x248c
518
519/* Binner scratch pad debug reg:
520 */
521#define BINSKPD 0x24f0
522
523/* HWB scratch pad debug reg:
524 */
525#define HWBSKPD 0x24f4
526
527/* Binner memory pool reg:
528 */
529#define BMP_BUFFER 0x2430
530#define BMP_PAGE_SIZE_4K (0 << 10)
531#define BMP_BUFFER_SIZE_SHIFT 1
532#define BMP_ENABLE (1 << 0)
533
534/* Get/put memory from the binner memory pool:
535 */
536#define BMP_GET 0x2438
537#define BMP_PUT 0x2440
538#define BMP_OFFSET_SHIFT 5
539
540/* 3D state packets:
541 */
542#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
543
544#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
545#define SC_UPDATE_SCISSOR (0x1<<1)
546#define SC_ENABLE_MASK (0x1<<0)
547#define SC_ENABLE (0x1<<0)
548
549#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
550
551#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
552#define SCI_YMIN_MASK (0xffff<<16)
553#define SCI_XMIN_MASK (0xffff<<0)
554#define SCI_YMAX_MASK (0xffff<<16)
555#define SCI_XMAX_MASK (0xffff<<0)
556
557#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
558#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
559#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
560#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
561#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
562#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
563#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
564
565#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
566
567#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
568#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
569#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
570#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
571#define XY_SRC_COPY_BLT_SRC_TILED (1<<15)
572#define XY_SRC_COPY_BLT_DST_TILED (1<<11)
573
574#define MI_BATCH_BUFFER ((0x30<<23)|1)
575#define MI_BATCH_BUFFER_START (0x31<<23)
576#define MI_BATCH_BUFFER_END (0xA<<23)
577#define MI_BATCH_NON_SECURE (1)
578#define MI_BATCH_NON_SECURE_I965 (1<<8)
579
580#define MI_WAIT_FOR_EVENT ((0x3<<23))
581#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
582#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
583#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
584
585#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
586
587#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
588#define ASYNC_FLIP (1<<22)
589#define DISPLAY_PLANE_A (0<<20)
590#define DISPLAY_PLANE_B (1<<20)
591
592/* Display regs */
593#define DSPACNTR 0x70180
594#define DSPBCNTR 0x71180
595#define DISPPLANE_SEL_PIPE_MASK (1<<24)
596
597/* Define the region of interest for the binner:
598 */
599#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
600
601#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
602
603#define CMD_MI_FLUSH (0x04 << 23)
604#define MI_NO_WRITE_FLUSH (1 << 2)
605#define MI_READ_FLUSH (1 << 0)
606#define MI_EXE_FLUSH (1 << 1)
607#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
608#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
609
610#define BREADCRUMB_BITS 31
611#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
612
613#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
614#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
615
616#define BLC_PWM_CTL 0x61254
617#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
618
619#define BLC_PWM_CTL2 0x61250
620/**
621 * This is the most significant 15 bits of the number of backlight cycles in a
622 * complete cycle of the modulated backlight control.
623 *
624 * The actual value is this field multiplied by two.
625 */
626#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
627#define BLM_LEGACY_MODE (1 << 16)
628/**
629 * This is the number of cycles out of the backlight modulation cycle for which
630 * the backlight is on.
631 *
632 * This field must be no greater than the number of cycles in the complete
633 * backlight modulation cycle.
634 */
635#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
636#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
637
638#define I915_GCFGC 0xf0
639#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
640#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
641#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
642#define I915_DISPLAY_CLOCK_MASK (7 << 4)
643
644#define I855_HPLLCC 0xc0
645#define I855_CLOCK_CONTROL_MASK (3 << 0)
646#define I855_CLOCK_133_200 (0 << 0)
647#define I855_CLOCK_100_200 (1 << 0)
648#define I855_CLOCK_100_133 (2 << 0)
649#define I855_CLOCK_166_250 (3 << 0)
650
651/* p317, 319
652 */
653#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
654#define VCLK2_VCO_N 0x600a
655#define VCLK2_VCO_DIV_SEL 0x6012
656
657#define VCLK_DIVISOR_VGA0 0x6000
658#define VCLK_DIVISOR_VGA1 0x6004
659#define VCLK_POST_DIV 0x6010
660/** Selects a post divisor of 4 instead of 2. */
661# define VGA1_PD_P2_DIV_4 (1 << 15)
662/** Overrides the p2 post divisor field */
663# define VGA1_PD_P1_DIV_2 (1 << 13)
664# define VGA1_PD_P1_SHIFT 8
665/** P1 value is 2 greater than this field */
666# define VGA1_PD_P1_MASK (0x1f << 8)
667/** Selects a post divisor of 4 instead of 2. */
668# define VGA0_PD_P2_DIV_4 (1 << 7)
669/** Overrides the p2 post divisor field */
670# define VGA0_PD_P1_DIV_2 (1 << 5)
671# define VGA0_PD_P1_SHIFT 0
672/** P1 value is 2 greater than this field */
673# define VGA0_PD_P1_MASK (0x1f << 0)
674
675/* PCI D state control register */
676#define D_STATE 0x6104
677#define DSPCLK_GATE_D 0x6200
678
679/* I830 CRTC registers */
680#define HTOTAL_A 0x60000
681#define HBLANK_A 0x60004
682#define HSYNC_A 0x60008
683#define VTOTAL_A 0x6000c
684#define VBLANK_A 0x60010
685#define VSYNC_A 0x60014
686#define PIPEASRC 0x6001c
687#define BCLRPAT_A 0x60020
688#define VSYNCSHIFT_A 0x60028
689
690#define HTOTAL_B 0x61000
691#define HBLANK_B 0x61004
692#define HSYNC_B 0x61008
693#define VTOTAL_B 0x6100c
694#define VBLANK_B 0x61010
695#define VSYNC_B 0x61014
696#define PIPEBSRC 0x6101c
697#define BCLRPAT_B 0x61020
698#define VSYNCSHIFT_B 0x61028
699
700#define PP_STATUS 0x61200
701# define PP_ON (1 << 31)
702/**
703 * Indicates that all dependencies of the panel are on:
704 *
705 * - PLL enabled
706 * - pipe enabled
707 * - LVDS/DVOB/DVOC on
708 */
709# define PP_READY (1 << 30)
710# define PP_SEQUENCE_NONE (0 << 28)
711# define PP_SEQUENCE_ON (1 << 28)
712# define PP_SEQUENCE_OFF (2 << 28)
713# define PP_SEQUENCE_MASK 0x30000000
714#define PP_CONTROL 0x61204
715# define POWER_TARGET_ON (1 << 0)
716
717#define LVDSPP_ON 0x61208
718#define LVDSPP_OFF 0x6120c
719#define PP_CYCLE 0x61210
720
721#define PFIT_CONTROL 0x61230
722# define PFIT_ENABLE (1 << 31)
723# define PFIT_PIPE_MASK (3 << 29)
724# define PFIT_PIPE_SHIFT 29
725# define VERT_INTERP_DISABLE (0 << 10)
726# define VERT_INTERP_BILINEAR (1 << 10)
727# define VERT_INTERP_MASK (3 << 10)
728# define VERT_AUTO_SCALE (1 << 9)
729# define HORIZ_INTERP_DISABLE (0 << 6)
730# define HORIZ_INTERP_BILINEAR (1 << 6)
731# define HORIZ_INTERP_MASK (3 << 6)
732# define HORIZ_AUTO_SCALE (1 << 5)
733# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
734
735#define PFIT_PGM_RATIOS 0x61234
736# define PFIT_VERT_SCALE_MASK 0xfff00000
737# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
738
739#define PFIT_AUTO_RATIOS 0x61238
740
741
742#define DPLL_A 0x06014
743#define DPLL_B 0x06018
744# define DPLL_VCO_ENABLE (1 << 31)
745# define DPLL_DVO_HIGH_SPEED (1 << 30)
746# define DPLL_SYNCLOCK_ENABLE (1 << 29)
747# define DPLL_VGA_MODE_DIS (1 << 28)
748# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
749# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
750# define DPLL_MODE_MASK (3 << 26)
751# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
752# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
753# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
754# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
755# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
756# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
757/**
758 * The i830 generation, in DAC/serial mode, defines p1 as two plus this
759 * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
760 */
761# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
762/**
763 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
764 * this field (only one bit may be set).
765 */
766# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
767# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
768# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
769# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
770# define PLL_REF_INPUT_DREFCLK (0 << 13)
771# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
772# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
773# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
774# define PLL_REF_INPUT_MASK (3 << 13)
775# define PLL_LOAD_PULSE_PHASE_SHIFT 9
776/*
777 * Parallel to Serial Load Pulse phase selection.
778 * Selects the phase for the 10X DPLL clock for the PCIe
779 * digital display port. The range is 4 to 13; 10 or more
780 * is just a flip delay. The default is 6
781 */
782# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
783# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
784
785/**
786 * SDVO multiplier for 945G/GM. Not used on 965.
787 *
788 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
789 */
790# define SDVO_MULTIPLIER_MASK 0x000000ff
791# define SDVO_MULTIPLIER_SHIFT_HIRES 4
792# define SDVO_MULTIPLIER_SHIFT_VGA 0
793
794/** @defgroup DPLL_MD
795 * @{
796 */
797/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
798#define DPLL_A_MD 0x0601c
799/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
800#define DPLL_B_MD 0x06020
801/**
802 * UDI pixel divider, controlling how many pixels are stuffed into a packet.
803 *
804 * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
805 */
806# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
807# define DPLL_MD_UDI_DIVIDER_SHIFT 24
808/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
809# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
810# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
811/**
812 * SDVO/UDI pixel multiplier.
813 *
814 * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
815 * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
816 * modes, the bus rate would be below the limits, so SDVO allows for stuffing
817 * dummy bytes in the datastream at an increased clock rate, with both sides of
818 * the link knowing how many bytes are fill.
819 *
820 * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
821 * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be
822 * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
823 * through an SDVO command.
824 *
825 * This register field has values of multiplication factor minus 1, with
826 * a maximum multiplier of 5 for SDVO.
827 */
828# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
829# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
830/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
831 * This best be set to the default value (3) or the CRT won't work. No,
832 * I don't entirely understand what this does...
833 */
834# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
835# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
836/** @} */
837
838#define DPLL_TEST 0x606c
839# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
840# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
841# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
842# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
843# define DPLLB_TEST_N_BYPASS (1 << 19)
844# define DPLLB_TEST_M_BYPASS (1 << 18)
845# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
846# define DPLLA_TEST_N_BYPASS (1 << 3)
847# define DPLLA_TEST_M_BYPASS (1 << 2)
848# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
849
850#define ADPA 0x61100
851#define ADPA_DAC_ENABLE (1<<31)
852#define ADPA_DAC_DISABLE 0
853#define ADPA_PIPE_SELECT_MASK (1<<30)
854#define ADPA_PIPE_A_SELECT 0
855#define ADPA_PIPE_B_SELECT (1<<30)
856#define ADPA_USE_VGA_HVPOLARITY (1<<15)
857#define ADPA_SETS_HVPOLARITY 0
858#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
859#define ADPA_VSYNC_CNTL_ENABLE 0
860#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
861#define ADPA_HSYNC_CNTL_ENABLE 0
862#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
863#define ADPA_VSYNC_ACTIVE_LOW 0
864#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
865#define ADPA_HSYNC_ACTIVE_LOW 0
866
867#define FPA0 0x06040
868#define FPA1 0x06044
869#define FPB0 0x06048
870#define FPB1 0x0604c
871# define FP_N_DIV_MASK 0x003f0000
872# define FP_N_DIV_SHIFT 16
873# define FP_M1_DIV_MASK 0x00003f00
874# define FP_M1_DIV_SHIFT 8
875# define FP_M2_DIV_MASK 0x0000003f
876# define FP_M2_DIV_SHIFT 0
877
878
879#define PORT_HOTPLUG_EN 0x61110
880# define SDVOB_HOTPLUG_INT_EN (1 << 26)
881# define SDVOC_HOTPLUG_INT_EN (1 << 25)
882# define TV_HOTPLUG_INT_EN (1 << 18)
883# define CRT_HOTPLUG_INT_EN (1 << 9)
884# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
885
886#define PORT_HOTPLUG_STAT 0x61114
887# define CRT_HOTPLUG_INT_STATUS (1 << 11)
888# define TV_HOTPLUG_INT_STATUS (1 << 10)
889# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
890# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
891# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
892# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
893# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
894# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
895
896#define SDVOB 0x61140
897#define SDVOC 0x61160
898#define SDVO_ENABLE (1 << 31)
899#define SDVO_PIPE_B_SELECT (1 << 30)
900#define SDVO_STALL_SELECT (1 << 29)
901#define SDVO_INTERRUPT_ENABLE (1 << 26)
902/**
903 * 915G/GM SDVO pixel multiplier.
904 *
905 * Programmed value is multiplier - 1, up to 5x.
906 *
907 * \sa DPLL_MD_UDI_MULTIPLIER_MASK
908 */
909#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
910#define SDVO_PORT_MULTIPLY_SHIFT 23
911#define SDVO_PHASE_SELECT_MASK (15 << 19)
912#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
913#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
914#define SDVOC_GANG_MODE (1 << 16)
915#define SDVO_BORDER_ENABLE (1 << 7)
916#define SDVOB_PCIE_CONCURRENCY (1 << 3)
917#define SDVO_DETECTED (1 << 2)
918/* Bits to be preserved when writing */
919#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
920#define SDVOC_PRESERVE_MASK (1 << 17)
921
922/** @defgroup LVDS
923 * @{
924 */
925/**
926 * This register controls the LVDS output enable, pipe selection, and data
927 * format selection.
928 *
929 * All of the clock/data pairs are force powered down by power sequencing.
930 */
931#define LVDS 0x61180
932/**
933 * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
934 * the DPLL semantics change when the LVDS is assigned to that pipe.
935 */
936# define LVDS_PORT_EN (1 << 31)
937/** Selects pipe B for LVDS data. Must be set on pre-965. */
938# define LVDS_PIPEB_SELECT (1 << 30)
939
940/**
941 * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
942 * pixel.
943 */
944# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
945# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
946# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
947/**
948 * Controls the A3 data pair, which contains the additional LSBs for 24 bit
949 * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
950 * on.
951 */
952# define LVDS_A3_POWER_MASK (3 << 6)
953# define LVDS_A3_POWER_DOWN (0 << 6)
954# define LVDS_A3_POWER_UP (3 << 6)
955/**
956 * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
957 * is set.
958 */
959# define LVDS_CLKB_POWER_MASK (3 << 4)
960# define LVDS_CLKB_POWER_DOWN (0 << 4)
961# define LVDS_CLKB_POWER_UP (3 << 4)
962
963/**
964 * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
965 * setting for whether we are in dual-channel mode. The B3 pair will
966 * additionally only be powered up when LVDS_A3_POWER_UP is set.
967 */
968# define LVDS_B0B3_POWER_MASK (3 << 2)
969# define LVDS_B0B3_POWER_DOWN (0 << 2)
970# define LVDS_B0B3_POWER_UP (3 << 2)
971
972#define PIPEACONF 0x70008
973#define PIPEACONF_ENABLE (1<<31)
974#define PIPEACONF_DISABLE 0
975#define PIPEACONF_DOUBLE_WIDE (1<<30)
976#define I965_PIPECONF_ACTIVE (1<<30)
977#define PIPEACONF_SINGLE_WIDE 0
978#define PIPEACONF_PIPE_UNLOCKED 0
979#define PIPEACONF_PIPE_LOCKED (1<<25)
980#define PIPEACONF_PALETTE 0
981#define PIPEACONF_GAMMA (1<<24)
982#define PIPECONF_FORCE_BORDER (1<<25)
983#define PIPECONF_PROGRESSIVE (0 << 21)
984#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
985#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
986
987#define DSPARB 0x70030
988#define DSPARB_CSTART_MASK (0x7f << 7)
989#define DSPARB_CSTART_SHIFT 7
990#define DSPARB_BSTART_MASK (0x7f)
991#define DSPARB_BSTART_SHIFT 0
992
993#define PIPEBCONF 0x71008
994#define PIPEBCONF_ENABLE (1<<31)
995#define PIPEBCONF_DISABLE 0
996#define PIPEBCONF_DOUBLE_WIDE (1<<30)
997#define PIPEBCONF_DISABLE 0
998#define PIPEBCONF_GAMMA (1<<24)
999#define PIPEBCONF_PALETTE 0
1000
1001#define PIPEBGCMAXRED 0x71010
1002#define PIPEBGCMAXGREEN 0x71014
1003#define PIPEBGCMAXBLUE 0x71018
1004#define PIPEBSTAT 0x71024
1005#define PIPEBFRAMEHIGH 0x71040
1006#define PIPEBFRAMEPIXEL 0x71044
1007
1008#define DSPACNTR 0x70180
1009#define DSPBCNTR 0x71180
1010#define DISPLAY_PLANE_ENABLE (1<<31)
1011#define DISPLAY_PLANE_DISABLE 0
1012#define DISPPLANE_GAMMA_ENABLE (1<<30)
1013#define DISPPLANE_GAMMA_DISABLE 0
1014#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
1015#define DISPPLANE_8BPP (0x2<<26)
1016#define DISPPLANE_15_16BPP (0x4<<26)
1017#define DISPPLANE_16BPP (0x5<<26)
1018#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1019#define DISPPLANE_32BPP (0x7<<26)
1020#define DISPPLANE_STEREO_ENABLE (1<<25)
1021#define DISPPLANE_STEREO_DISABLE 0
1022#define DISPPLANE_SEL_PIPE_MASK (1<<24)
1023#define DISPPLANE_SEL_PIPE_A 0
1024#define DISPPLANE_SEL_PIPE_B (1<<24)
1025#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
1026#define DISPPLANE_SRC_KEY_DISABLE 0
1027#define DISPPLANE_LINE_DOUBLE (1<<20)
1028#define DISPPLANE_NO_LINE_DOUBLE 0
1029#define DISPPLANE_STEREO_POLARITY_FIRST 0
1030#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1031/* plane B only */
1032#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
1033#define DISPPLANE_ALPHA_TRANS_DISABLE 0
1034#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
1035#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
1036
1037#define DSPABASE 0x70184
1038#define DSPASTRIDE 0x70188
1039
1040#define DSPBBASE 0x71184
1041#define DSPBADDR DSPBBASE
1042#define DSPBSTRIDE 0x71188
1043
1044#define DSPAKEYVAL 0x70194
1045#define DSPAKEYMASK 0x70198
1046
1047#define DSPAPOS 0x7018C /* reserved */
1048#define DSPASIZE 0x70190
1049#define DSPBPOS 0x7118C
1050#define DSPBSIZE 0x71190
1051
1052#define DSPASURF 0x7019C
1053#define DSPATILEOFF 0x701A4
1054
1055#define DSPBSURF 0x7119C
1056#define DSPBTILEOFF 0x711A4
1057
1058#define VGACNTRL 0x71400
1059# define VGA_DISP_DISABLE (1 << 31)
1060# define VGA_2X_MODE (1 << 30)
1061# define VGA_PIPE_B_SELECT (1 << 29)
1062
1063/*
1064 * Some BIOS scratch area registers. The 845 (and 830?) store the amount
1065 * of video memory available to the BIOS in SWF1.
1066 */
1067
1068#define SWF0 0x71410
1069
1070/*
1071 * 855 scratch registers.
1072 */
1073#define SWF10 0x70410
1074
1075#define SWF30 0x72414
1076
1077/*
1078 * Overlay registers. These are overlay registers accessed via MMIO.
1079 * Those loaded via the overlay register page are defined in i830_video.c.
1080 */
1081#define OVADD 0x30000
1082
1083#define DOVSTA 0x30008
1084#define OC_BUF (0x3<<20)
1085
1086#define OGAMC5 0x30010
1087#define OGAMC4 0x30014
1088#define OGAMC3 0x30018
1089#define OGAMC2 0x3001c
1090#define OGAMC1 0x30020
1091#define OGAMC0 0x30024
1092/*
1093 * Palette registers
1094 */
1095#define PALETTE_A 0x0a000
1096#define PALETTE_B 0x0a800
1097
1098#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1099#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1100#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
1101#define IS_I855(dev) ((dev)->pci_device == 0x3582)
1102#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1103
1104#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
1105#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1106#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1107#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
1108 (dev)->pci_device == 0x27AE)
1109#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
1110 (dev)->pci_device == 0x2982 || \
1111 (dev)->pci_device == 0x2992 || \
1112 (dev)->pci_device == 0x29A2 || \
1113 (dev)->pci_device == 0x2A02 || \
1114 (dev)->pci_device == 0x2A12 || \
1115 (dev)->pci_device == 0x2A42 || \
1116 (dev)->pci_device == 0x2E02 || \
1117 (dev)->pci_device == 0x2E12 || \
1118 (dev)->pci_device == 0x2E22)
1119
1120#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
1121
1122#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
1123
1124#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
1125 (dev)->pci_device == 0x2E12 || \
1126 (dev)->pci_device == 0x2E22)
1127
1128#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
1129 (dev)->pci_device == 0x29B2 || \
1130 (dev)->pci_device == 0x29D2)
1131
1132#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
1133 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
1134
1135#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
1136 IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
1137
1138#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev))
1139
1140#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1141
1142#endif
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
new file mode 100644
index 000000000000..1fe68a251b75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -0,0 +1,222 @@
1/**
2 * \file i915_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the i915 DRM.
5 *
6 * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Alan Hourihane 2005
11 * All Rights Reserved.
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice (including the next
21 * paragraph) shall be included in all copies or substantial portions of the
22 * Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
28 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32#include <linux/compat.h>
33
34#include "drmP.h"
35#include "drm.h"
36#include "i915_drm.h"
37
38typedef struct _drm_i915_batchbuffer32 {
39 int start; /* agp offset */
40 int used; /* nr bytes in use */
41 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
42 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
43 int num_cliprects; /* mulitpass with multiple cliprects? */
44 u32 cliprects; /* pointer to userspace cliprects */
45} drm_i915_batchbuffer32_t;
46
47static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
48 unsigned long arg)
49{
50 drm_i915_batchbuffer32_t batchbuffer32;
51 drm_i915_batchbuffer_t __user *batchbuffer;
52
53 if (copy_from_user
54 (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
55 return -EFAULT;
56
57 batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
58 if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
59 || __put_user(batchbuffer32.start, &batchbuffer->start)
60 || __put_user(batchbuffer32.used, &batchbuffer->used)
61 || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
62 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
63 || __put_user(batchbuffer32.num_cliprects,
64 &batchbuffer->num_cliprects)
65 || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
66 &batchbuffer->cliprects))
67 return -EFAULT;
68
69 return drm_ioctl(file->f_path.dentry->d_inode, file,
70 DRM_IOCTL_I915_BATCHBUFFER,
71 (unsigned long)batchbuffer);
72}
73
74typedef struct _drm_i915_cmdbuffer32 {
75 u32 buf; /* pointer to userspace command buffer */
76 int sz; /* nr bytes in buf */
77 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
78 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
79 int num_cliprects; /* mulitpass with multiple cliprects? */
80 u32 cliprects; /* pointer to userspace cliprects */
81} drm_i915_cmdbuffer32_t;
82
83static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
84 unsigned long arg)
85{
86 drm_i915_cmdbuffer32_t cmdbuffer32;
87 drm_i915_cmdbuffer_t __user *cmdbuffer;
88
89 if (copy_from_user
90 (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
91 return -EFAULT;
92
93 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
94 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
95 || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
96 &cmdbuffer->buf)
97 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
98 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
99 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
100 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
101 || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
102 &cmdbuffer->cliprects))
103 return -EFAULT;
104
105 return drm_ioctl(file->f_path.dentry->d_inode, file,
106 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
107}
108
109typedef struct drm_i915_irq_emit32 {
110 u32 irq_seq;
111} drm_i915_irq_emit32_t;
112
113static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
114 unsigned long arg)
115{
116 drm_i915_irq_emit32_t req32;
117 drm_i915_irq_emit_t __user *request;
118
119 if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
120 return -EFAULT;
121
122 request = compat_alloc_user_space(sizeof(*request));
123 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
124 || __put_user((int __user *)(unsigned long)req32.irq_seq,
125 &request->irq_seq))
126 return -EFAULT;
127
128 return drm_ioctl(file->f_path.dentry->d_inode, file,
129 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
130}
131typedef struct drm_i915_getparam32 {
132 int param;
133 u32 value;
134} drm_i915_getparam32_t;
135
136static int compat_i915_getparam(struct file *file, unsigned int cmd,
137 unsigned long arg)
138{
139 drm_i915_getparam32_t req32;
140 drm_i915_getparam_t __user *request;
141
142 if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
143 return -EFAULT;
144
145 request = compat_alloc_user_space(sizeof(*request));
146 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
147 || __put_user(req32.param, &request->param)
148 || __put_user((void __user *)(unsigned long)req32.value,
149 &request->value))
150 return -EFAULT;
151
152 return drm_ioctl(file->f_path.dentry->d_inode, file,
153 DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
154}
155
156typedef struct drm_i915_mem_alloc32 {
157 int region;
158 int alignment;
159 int size;
160 u32 region_offset; /* offset from start of fb or agp */
161} drm_i915_mem_alloc32_t;
162
163static int compat_i915_alloc(struct file *file, unsigned int cmd,
164 unsigned long arg)
165{
166 drm_i915_mem_alloc32_t req32;
167 drm_i915_mem_alloc_t __user *request;
168
169 if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
170 return -EFAULT;
171
172 request = compat_alloc_user_space(sizeof(*request));
173 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
174 || __put_user(req32.region, &request->region)
175 || __put_user(req32.alignment, &request->alignment)
176 || __put_user(req32.size, &request->size)
177 || __put_user((void __user *)(unsigned long)req32.region_offset,
178 &request->region_offset))
179 return -EFAULT;
180
181 return drm_ioctl(file->f_path.dentry->d_inode, file,
182 DRM_IOCTL_I915_ALLOC, (unsigned long)request);
183}
184
185drm_ioctl_compat_t *i915_compat_ioctls[] = {
186 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
187 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
188 [DRM_I915_GETPARAM] = compat_i915_getparam,
189 [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
190 [DRM_I915_ALLOC] = compat_i915_alloc
191};
192
193/**
194 * Called whenever a 32-bit process running under a 64-bit kernel
195 * performs an ioctl on /dev/dri/card<n>.
196 *
197 * \param filp file pointer.
198 * \param cmd command.
199 * \param arg user argument.
200 * \return zero on success or negative number on failure.
201 */
202long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
203{
204 unsigned int nr = DRM_IOCTL_NR(cmd);
205 drm_ioctl_compat_t *fn = NULL;
206 int ret;
207
208 if (nr < DRM_COMMAND_BASE)
209 return drm_compat_ioctl(filp, cmd, arg);
210
211 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
212 fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
213
214 lock_kernel(); /* XXX for now */
215 if (fn != NULL)
216 ret = (*fn) (filp, cmd, arg);
217 else
218 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
219 unlock_kernel();
220
221 return ret;
222}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
new file mode 100644
index 000000000000..df036118b8b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -0,0 +1,623 @@
1/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
/* Bits shared by the interrupt identity/enable/mask registers. */
#define USER_INT_FLAG (1<<1)	/* user interrupt (GFX_OP_USER_INTERRUPT) */
#define VSYNC_PIPEB_FLAG (1<<5)	/* vblank on pipe B */
#define VSYNC_PIPEA_FLAG (1<<7)	/* vblank on pipe A */

#define MAX_NOPID ((u32)~0)	/* all-ones u32 sentinel */
39
40/**
41 * Emit blits for scheduled buffer swaps.
42 *
43 * This function will be called with the HW lock held.
44 */
45static void i915_vblank_tasklet(struct drm_device *dev)
46{
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i;
51 unsigned counter[2] = { atomic_read(&dev->vbl_received),
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp;
56 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
57 XY_SRC_COPY_BLT_WRITE_ALPHA |
58 XY_SRC_COPY_BLT_WRITE_RGB)
59 : XY_SRC_COPY_BLT_CMD;
60 u32 src_pitch = sarea_priv->pitch * cpp;
61 u32 dst_pitch = sarea_priv->pitch * cpp;
62 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
63 RING_LOCALS;
64
65 if (IS_I965G(dev) && sarea_priv->front_tiled) {
66 cmd |= XY_SRC_COPY_BLT_DST_TILED;
67 dst_pitch >>= 2;
68 }
69 if (IS_I965G(dev) && sarea_priv->back_tiled) {
70 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
71 src_pitch >>= 2;
72 }
73
74 DRM_DEBUG("\n");
75
76 INIT_LIST_HEAD(&hits);
77
78 nhits = nrects = 0;
79
80 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
81
82 /* Find buffer swaps scheduled for this vertical blank */
83 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
84 drm_i915_vbl_swap_t *vbl_swap =
85 list_entry(list, drm_i915_vbl_swap_t, head);
86
87 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
88 continue;
89
90 list_del(list);
91 dev_priv->swaps_pending--;
92
93 spin_unlock(&dev_priv->swaps_lock);
94 spin_lock(&dev->drw_lock);
95
96 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
97
98 if (!drw) {
99 spin_unlock(&dev->drw_lock);
100 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
101 spin_lock(&dev_priv->swaps_lock);
102 continue;
103 }
104
105 list_for_each(hit, &hits) {
106 drm_i915_vbl_swap_t *swap_cmp =
107 list_entry(hit, drm_i915_vbl_swap_t, head);
108 struct drm_drawable_info *drw_cmp =
109 drm_get_drawable_info(dev, swap_cmp->drw_id);
110
111 if (drw_cmp &&
112 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
113 list_add_tail(list, hit);
114 break;
115 }
116 }
117
118 spin_unlock(&dev->drw_lock);
119
120 /* List of hits was empty, or we reached the end of it */
121 if (hit == &hits)
122 list_add_tail(list, hits.prev);
123
124 nhits++;
125
126 spin_lock(&dev_priv->swaps_lock);
127 }
128
129 if (nhits == 0) {
130 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
131 return;
132 }
133
134 spin_unlock(&dev_priv->swaps_lock);
135
136 i915_kernel_lost_context(dev);
137
138 if (IS_I965G(dev)) {
139 BEGIN_LP_RING(4);
140
141 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
142 OUT_RING(0);
143 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
144 OUT_RING(0);
145 ADVANCE_LP_RING();
146 } else {
147 BEGIN_LP_RING(6);
148
149 OUT_RING(GFX_OP_DRAWRECT_INFO);
150 OUT_RING(0);
151 OUT_RING(0);
152 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
153 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
154 OUT_RING(0);
155
156 ADVANCE_LP_RING();
157 }
158
159 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
160
161 upper[0] = upper[1] = 0;
162 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
163 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
164 lower[0] = sarea_priv->pipeA_y + slice[0];
165 lower[1] = sarea_priv->pipeB_y + slice[0];
166
167 spin_lock(&dev->drw_lock);
168
169 /* Emit blits for buffer swaps, partitioning both outputs into as many
170 * slices as there are buffer swaps scheduled in order to avoid tearing
171 * (based on the assumption that a single buffer swap would always
172 * complete before scanout starts).
173 */
174 for (i = 0; i++ < nhits;
175 upper[0] = lower[0], lower[0] += slice[0],
176 upper[1] = lower[1], lower[1] += slice[1]) {
177 if (i == nhits)
178 lower[0] = lower[1] = sarea_priv->height;
179
180 list_for_each(hit, &hits) {
181 drm_i915_vbl_swap_t *swap_hit =
182 list_entry(hit, drm_i915_vbl_swap_t, head);
183 struct drm_clip_rect *rect;
184 int num_rects, pipe;
185 unsigned short top, bottom;
186
187 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
188
189 if (!drw)
190 continue;
191
192 rect = drw->rects;
193 pipe = swap_hit->pipe;
194 top = upper[pipe];
195 bottom = lower[pipe];
196
197 for (num_rects = drw->num_rects; num_rects--; rect++) {
198 int y1 = max(rect->y1, top);
199 int y2 = min(rect->y2, bottom);
200
201 if (y1 >= y2)
202 continue;
203
204 BEGIN_LP_RING(8);
205
206 OUT_RING(cmd);
207 OUT_RING(ropcpp | dst_pitch);
208 OUT_RING((y1 << 16) | rect->x1);
209 OUT_RING((y2 << 16) | rect->x2);
210 OUT_RING(sarea_priv->front_offset);
211 OUT_RING((y1 << 16) | rect->x1);
212 OUT_RING(src_pitch);
213 OUT_RING(sarea_priv->back_offset);
214
215 ADVANCE_LP_RING();
216 }
217 }
218 }
219
220 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
221
222 list_for_each_safe(hit, tmp, &hits) {
223 drm_i915_vbl_swap_t *swap_hit =
224 list_entry(hit, drm_i915_vbl_swap_t, head);
225
226 list_del(hit);
227
228 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
229 }
230}
231
/* Primary interrupt handler; DRM_IRQ_ARGS delivers the drm_device as
 * `arg`.  Acks the user-interrupt and vblank identity bits, publishes
 * the latest breadcrumb to the sarea, wakes breadcrumb / vblank waiters,
 * and schedules the swap tasklet when swaps are pending.  Returns
 * IRQ_NONE when none of the three handled bits were set. */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;
	u32 pipea_stats, pipeb_stats;

	pipea_stats = I915_READ(I915REG_PIPEASTAT);
	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);

	temp = I915_READ16(I915REG_INT_IDENTITY_R);

	/* Only the interrupt sources we actually handle. */
	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

	if (temp == 0)
		return IRQ_NONE;

	/* Ack the bits; the read-back plus barrier ensures the write has
	 * reached the device before we act on the interrupt. */
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
	(void) I915_READ16(I915REG_INT_IDENTITY_R);
	DRM_READMEMORYBARRIER();

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	if (temp & USER_INT_FLAG)
		DRM_WAKEUP(&dev_priv->irq_queue);

	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
		int vblank_pipe = dev_priv->vblank_pipe;

		/* With both pipes enabled each pipe has its own counter;
		 * with a single pipe enabled its vblank bumps the primary
		 * counter. */
		if ((vblank_pipe &
		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
			if (temp & VSYNC_PIPEA_FLAG)
				atomic_inc(&dev->vbl_received);
			if (temp & VSYNC_PIPEB_FLAG)
				atomic_inc(&dev->vbl_received2);
		} else if (((temp & VSYNC_PIPEA_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
			   ((temp & VSYNC_PIPEB_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
			atomic_inc(&dev->vbl_received);

		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);

		if (dev_priv->swaps_pending > 0)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
		/* Write back the pipe status with the CLEAR bit set —
		 * presumably write-1-to-clear of the vblank status; confirm
		 * against the PRM. */
		I915_WRITE(I915REG_PIPEASTAT,
			   pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
			   I915_VBLANK_CLEAR);
		I915_WRITE(I915REG_PIPEBSTAT,
			   pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
			   I915_VBLANK_CLEAR);
	}

	return IRQ_HANDLED;
}
291
/* Emit a breadcrumb store plus a user-interrupt command into the ring
 * and return the new breadcrumb value.
 *
 * Increments dev_priv->counter (wrapping back to 1 before exceeding
 * 0x7FFFFFFF), mirrors it into sarea_priv->last_enqueue, then queues a
 * STORE_DWORD at index 20 — presumably the hardware status page slot
 * that READ_BREADCRUMB reads; confirm in i915_drv.h — followed by
 * GFX_OP_USER_INTERRUPT.  Touches the ring, so callers must hold the
 * HW lock (see i915_irq_emit). */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	/* Next breadcrumb; published to userspace via the sarea. */
	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(6);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(GFX_OP_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}
317
/* Sleep until the hardware breadcrumb reaches irq_nr, with a 3*DRM_HZ
 * timeout.  Returns 0 on success or the error from DRM_WAIT_ON (logged
 * as EBUSY on timeout).  Also refreshes sarea_priv->last_dispatch with
 * the breadcrumb actually observed. */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Fast path: already signalled. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		return 0;

	/* Flag the wait in the shared performance boxes. */
	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
342
/* Common helper for the vblank-wait entry points: sleep (3*DRM_HZ
 * timeout) until the given vblank counter reaches *sequence, using an
 * unsigned-wrap-safe comparison window of 2^23.  On return *sequence
 * holds the counter value actually observed. */
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
				      atomic_t *counter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1<<23)));

	*sequence = cur_vblank;

	return ret;
}
363
364
/* Wait for a vblank on the primary counter (dev->vbl_received). */
int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
369
/* Wait for a vblank on the secondary counter (dev->vbl_received2). */
int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
374
/* Needs the lock as it touches the ring.
 *
 * DRM_IOCTL_I915_IRQ_EMIT handler: emits a new breadcrumb via
 * i915_emit_irq() and copies the resulting sequence number to the
 * user-supplied emit->irq_seq pointer.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	/* Caller must hold the HW lock (the ring is touched below). */
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	result = i915_emit_irq(dev);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
400
/* Doesn't need the hardware lock.
 *
 * DRM_IOCTL_I915_IRQ_WAIT handler: blocks until the breadcrumb reaches
 * irqwait->irq_seq (see i915_wait_irq for the timeout behaviour).
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
416
417static void i915_enable_interrupt (struct drm_device *dev)
418{
419 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
420 u16 flag;
421
422 flag = 0;
423 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
424 flag |= VSYNC_PIPEA_FLAG;
425 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
426 flag |= VSYNC_PIPEB_FLAG;
427
428 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
429}
430
431/* Set the vblank monitor pipe
432 */
433int i915_vblank_pipe_set(struct drm_device *dev, void *data,
434 struct drm_file *file_priv)
435{
436 drm_i915_private_t *dev_priv = dev->dev_private;
437 drm_i915_vblank_pipe_t *pipe = data;
438
439 if (!dev_priv) {
440 DRM_ERROR("called with no initialization\n");
441 return -EINVAL;
442 }
443
444 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
445 DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
446 return -EINVAL;
447 }
448
449 dev_priv->vblank_pipe = pipe->pipe;
450
451 i915_enable_interrupt (dev);
452
453 return 0;
454}
455
/* DRM_IOCTL_I915_GET_VBLANK_PIPE handler: reads the interrupt-enable
 * register back and reports which pipes currently have vblank
 * interrupts enabled. */
int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;
	u16 flag;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* NOTE(review): 32-bit I915_READ truncated into a u16 (elsewhere
	 * this register is accessed with I915_READ16) — presumably fine
	 * for a 16-bit register, but confirm. */
	flag = I915_READ(I915REG_INT_ENABLE_R);
	pipe->pipe = 0;
	if (flag & VSYNC_PIPEA_FLAG)
		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
	if (flag & VSYNC_PIPEB_FLAG)
		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;

	return 0;
}
477
/**
 * Schedule buffer swap at given vertical blank.
 *
 * DRM_IOCTL_I915_VBLANK_SWAP handler: validates the request, resolves
 * relative sequences against the pipe's current vblank counter, then
 * queues a drm_i915_vbl_swap_t on dev_priv->vbl_swaps for
 * i915_vblank_tasklet() to emit at the requested vblank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_swap_t *swap = data;
	drm_i915_vbl_swap_t *vbl_swap;
	unsigned int pipe, seqtype, curseq;
	unsigned long irqflags;
	struct list_head *list;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __func__);
		return -EINVAL;
	}

	if (dev_priv->sarea_priv->rotation) {
		DRM_DEBUG("Rotation not supported\n");
		return -EINVAL;
	}

	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
			      _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
		return -EINVAL;
	}

	/* SECONDARY selects pipe 1, otherwise pipe 0. */
	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

	/* Pipe must be among those enabled via i915_vblank_pipe_set(). */
	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
		DRM_ERROR("Invalid pipe %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&dev->drw_lock, irqflags);

	if (!drm_get_drawable_info(dev, swap->drawable)) {
		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
		return -EINVAL;
	}

	spin_unlock_irqrestore(&dev->drw_lock, irqflags);

	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

	if (seqtype == _DRM_VBLANK_RELATIVE)
		swap->sequence += curseq;

	/* Target already passed (unsigned wrap window of 2^23)? */
	if ((curseq - swap->sequence) <= (1<<23)) {
		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
			swap->sequence = curseq + 1;
		} else {
			DRM_DEBUG("Missed target sequence\n");
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	/* Coalesce with an identical already-queued swap, if any. */
	list_for_each(list, &dev_priv->vbl_swaps.head) {
		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

		if (vbl_swap->drw_id == swap->drawable &&
		    vbl_swap->pipe == pipe &&
		    vbl_swap->sequence == swap->sequence) {
			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
			DRM_DEBUG("Already scheduled\n");
			return 0;
		}
	}

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	/* Cap the number of pending swaps to bound kernel memory use. */
	if (dev_priv->swaps_pending >= 100) {
		DRM_DEBUG("Too many swaps queued\n");
		return -EBUSY;
	}

	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

	if (!vbl_swap) {
		DRM_ERROR("Failed to allocate memory to queue swap\n");
		return -ENOMEM;
	}

	DRM_DEBUG("\n");

	vbl_swap->drw_id = swap->drawable;
	vbl_swap->pipe = pipe;
	vbl_swap->sequence = swap->sequence;

	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending++;

	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

	return 0;
}
583
/* drm_dma.h hooks
*/
/* Quiesce the chip before the IRQ line is requested: write 0xfffe to
 * HWSTAM and clear the interrupt mask/enable registers so nothing fires
 * until i915_driver_irq_postinstall(). */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
594
/* Called after the IRQ line is installed: initialize the swap queue and
 * its lock, default the monitored vblank pipe, enable interrupts and set
 * up the breadcrumb wait queue. */
void i915_driver_irq_postinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	/* Default to pipe A so vblank waits work without an explicit
	 * DRM_IOCTL_I915_SET_VBLANK_PIPE. */
	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
	i915_enable_interrupt(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
608
/* Teardown hook: mask and disable all interrupt sources, then ack any
 * identity bits still pending.  No-op if the driver never initialized. */
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;

	if (!dev_priv)
		return;

	I915_WRITE16(I915REG_HWSTAM, 0xffff);
	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

	/* Ack whatever is still latched in the identity register. */
	temp = I915_READ16(I915REG_INT_IDENTITY_R);
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
new file mode 100644
index 000000000000..6126a60dc9cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -0,0 +1,386 @@
1/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33
/* This memory manager is integrated into the global/local lru
 * mechanisms used by the clients.  Specifically, it operates by
 * setting the 'in_use' fields of the global LRU to indicate whether
 * this region is privately allocated to a client.
 *
 * This does require the client to actually respect that field.
 *
 * Currently no effort is made to allocate 'private' memory in any
 * clever way - the LRU information isn't used to determine which
 * block to allocate, and the ring is drained prior to allocations --
 * in other words allocation is expensive.
 */
/* Mark every texture-LRU region overlapped by block p as in_use (or
 * free), stamp it with a fresh age, and move it to the head of the
 * shared LRU list in the sarea so clients see the ownership change. */
static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_tex_region *list;
	unsigned shift, nr;
	unsigned start;
	unsigned end;
	unsigned i;
	int age;

	shift = dev_priv->tex_lru_log_granularity;
	nr = I915_NR_TEX_REGIONS;

	/* Region indices covered by [p->start, p->start + p->size). */
	start = p->start >> shift;
	end = (p->start + p->size - 1) >> shift;

	age = ++sarea_priv->texAge;
	list = sarea_priv->texList;

	/* Mark the regions with the new flag and update their age.  Move
	 * them to head of list to preserve LRU semantics.
	 */
	for (i = start; i <= end; i++) {
		list[i].in_use = in_use;
		list[i].age = age;

		/* remove_from_list(i)
		 */
		list[(unsigned)list[i].next].prev = list[i].prev;
		list[(unsigned)list[i].prev].next = list[i].next;

		/* insert_at_head(list, i) — entry nr is the list head. */
		list[i].prev = nr;
		list[i].next = list[nr].next;
		list[(unsigned)list[nr].next].prev = i;
		list[nr].next = i;
	}
}
86
/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */

/* Carve [start, start+size) out of free block p and assign it to
 * file_priv, splitting off a leading and/or trailing remainder block as
 * needed.  Returns the block representing the allocation.
 *
 * NOTE(review): if drm_alloc() fails for a remainder we jump to `out`
 * and hand back the whole unsplit block marked as owned — the caller may
 * then receive a block whose start is below the aligned start it asked
 * for.  Presumably intentional best-effort; confirm callers cope. */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
132
133static struct mem_block *alloc_block(struct mem_block *heap, int size,
134 int align2, struct drm_file *file_priv)
135{
136 struct mem_block *p;
137 int mask = (1 << align2) - 1;
138
139 for (p = heap->next; p != heap; p = p->next) {
140 int start = (p->start + mask) & ~mask;
141 if (p->file_priv == NULL && start + size <= p->start + p->size)
142 return split_block(p, start, size, file_priv);
143 }
144
145 return NULL;
146}
147
148static struct mem_block *find_block(struct mem_block *heap, int start)
149{
150 struct mem_block *p;
151
152 for (p = heap->next; p != heap; p = p->next)
153 if (p->start == start)
154 return p;
155
156 return NULL;
157}
158
159static void free_block(struct mem_block *p)
160{
161 p->file_priv = NULL;
162
163 /* Assumes a single contiguous range. Needs a special file_priv in
164 * 'heap' to stop it being subsumed.
165 */
166 if (p->next->file_priv == NULL) {
167 struct mem_block *q = p->next;
168 p->size += q->size;
169 p->next = q->next;
170 p->next->prev = p;
171 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
172 }
173
174 if (p->prev->file_priv == NULL) {
175 struct mem_block *q = p->prev;
176 q->size += p->size;
177 q->next = p->next;
178 q->next->prev = q;
179 drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
180 }
181}
182
/* Initialize. How to check for an uninitialized heap?
 */
/* Build a heap covering [start, start+size): a zeroed sentinel head
 * (*heap) whose file_priv is a non-NULL poison value so coalescing never
 * merges across it, plus a single free block spanning the whole range.
 * Returns 0 on success or -ENOMEM. */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);

	if (!blocks)
		return -ENOMEM;

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	/* Poison owner on the sentinel so free_block() never merges it. */
	(*heap)->file_priv = (struct drm_file *) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
208
/* Free all blocks associated with the releasing file.
 *
 * Pass 1 disowns every block held by file_priv and clears its in_use
 * marks in the shared LRU; pass 2 coalesces adjacent free blocks.
 */
void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
		      struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->file_priv == file_priv) {
			p->file_priv = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		/* Keep absorbing the next block while both are free. */
		while (p->file_priv == NULL && p->next->file_priv == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
239
240/* Shutdown.
241 */
242void i915_mem_takedown(struct mem_block **heap)
243{
244 struct mem_block *p;
245
246 if (!*heap)
247 return;
248
249 for (p = (*heap)->next; p != *heap;) {
250 struct mem_block *q = p;
251 p = p->next;
252 drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
253 }
254
255 drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
256 *heap = NULL;
257}
258
259static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
260{
261 switch (region) {
262 case I915_MEM_REGION_AGP:
263 return &dev_priv->agp_heap;
264 default:
265 return NULL;
266 }
267}
268
/* IOCTL HANDLERS */

/* DRM_IOCTL_I915_ALLOC handler: allocate a region from the requested
 * heap, mark it in_use in the shared LRU, and copy its start offset to
 * the user pointer alloc->region_offset. */
int i915_mem_alloc(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t *alloc = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, alloc->region);
	if (!heap || !*heap)
		return -EFAULT;

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.  (Alignment is log2, so 12 == 4096 bytes.)
	 */
	if (alloc->alignment < 12)
		alloc->alignment = 12;

	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);

	if (!block)
		return -ENOMEM;

	mark_block(dev, block, 1);

	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
			     sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
308
/* DRM_IOCTL_I915_FREE handler: look up the block at region_offset,
 * verify the caller owns it, clear its LRU marks and return it to the
 * free pool. */
int i915_mem_free(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t *memfree = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, memfree->region);
	if (!heap || !*heap)
		return -EFAULT;

	block = find_block(*heap, memfree->region_offset);
	if (!block)
		return -EFAULT;

	/* Only the owning file may free a block. */
	if (block->file_priv != file_priv)
		return -EPERM;

	mark_block(dev, block, 0);
	free_block(block);
	return 0;
}
336
/* DRM_IOCTL_I915_INIT_HEAP handler: create the heap for the requested
 * region.  Fails if the region is unknown or already has a heap. */
int i915_mem_init_heap(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t *initheap = data;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, initheap->region);
	if (!heap)
		return -EFAULT;

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return -EFAULT;
	}

	return init_heap(heap, initheap->start, initheap->size);
}
360
361int i915_mem_destroy_heap( struct drm_device *dev, void *data,
362 struct drm_file *file_priv )
363{
364 drm_i915_private_t *dev_priv = dev->dev_private;
365 drm_i915_mem_destroy_heap_t *destroyheap = data;
366 struct mem_block **heap;
367
368 if ( !dev_priv ) {
369 DRM_ERROR( "called with no initialization\n" );
370 return -EINVAL;
371 }
372
373 heap = get_heap( dev_priv, destroyheap->region );
374 if (!heap) {
375 DRM_ERROR("get_heap failed");
376 return -EFAULT;
377 }
378
379 if (!*heap) {
380 DRM_ERROR("heap not initialized?");
381 return -EFAULT;
382 }
383
384 i915_mem_takedown( heap );
385 return 0;
386}
diff --git a/drivers/gpu/drm/mga/Makefile b/drivers/gpu/drm/mga/Makefile
new file mode 100644
index 000000000000..60684785c203
--- /dev/null
+++ b/drivers/gpu/drm/mga/Makefile
@@ -0,0 +1,11 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
7
8mga-$(CONFIG_COMPAT) += mga_ioc32.o
9
10obj-$(CONFIG_DRM_MGA) += mga.o
11
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
new file mode 100644
index 000000000000..c1d12dbfa8d8
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -0,0 +1,1162 @@
1/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 */
27
28/**
29 * \file mga_dma.c
30 * DMA support for MGA G200 / G400.
31 *
32 * \author Rickard E. (Rik) Faith <faith@valinux.com>
33 * \author Jeff Hartmann <jhartmann@valinux.com>
34 * \author Keith Whitwell <keith@tungstengraphics.com>
35 * \author Gareth Hughes <gareth@valinux.com>
36 */
37
38#include "drmP.h"
39#include "drm.h"
40#include "drm_sarea.h"
41#include "mga_drm.h"
42#include "mga_drv.h"
43
44#define MGA_DEFAULT_USEC_TIMEOUT 10000
45#define MGA_FREELIST_DEBUG 0
46
47#define MINIMAL_CLEANUP 0
48#define FULL_CLEANUP 1
49static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
50
51/* ================================================================
52 * Engine control
53 */
54
55int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
56{
57 u32 status = 0;
58 int i;
59 DRM_DEBUG("\n");
60
61 for (i = 0; i < dev_priv->usec_timeout; i++) {
62 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
63 if (status == MGA_ENDPRDMASTS) {
64 MGA_WRITE8(MGA_CRTC_INDEX, 0);
65 return 0;
66 }
67 DRM_UDELAY(1);
68 }
69
70#if MGA_DMA_DEBUG
71 DRM_ERROR("failed!\n");
72 DRM_INFO(" status=0x%08x\n", status);
73#endif
74 return -EBUSY;
75}
76
77static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
78{
79 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
80 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
81
82 DRM_DEBUG("\n");
83
84 /* The primary DMA stream should look like new right about now.
85 */
86 primary->tail = 0;
87 primary->space = primary->size;
88 primary->last_flush = 0;
89
90 sarea_priv->last_wrap = 0;
91
92 /* FIXME: Reset counters, buffer ages etc...
93 */
94
95 /* FIXME: What else do we need to reinitialize? WARP stuff?
96 */
97
98 return 0;
99}
100
101/* ================================================================
102 * Primary DMA stream
103 */
104
105void mga_do_dma_flush(drm_mga_private_t * dev_priv)
106{
107 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
108 u32 head, tail;
109 u32 status = 0;
110 int i;
111 DMA_LOCALS;
112 DRM_DEBUG("\n");
113
114 /* We need to wait so that we can do an safe flush */
115 for (i = 0; i < dev_priv->usec_timeout; i++) {
116 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
117 if (status == MGA_ENDPRDMASTS)
118 break;
119 DRM_UDELAY(1);
120 }
121
122 if (primary->tail == primary->last_flush) {
123 DRM_DEBUG(" bailing out...\n");
124 return;
125 }
126
127 tail = primary->tail + dev_priv->primary->offset;
128
129 /* We need to pad the stream between flushes, as the card
130 * actually (partially?) reads the first of these commands.
131 * See page 4-16 in the G400 manual, middle of the page or so.
132 */
133 BEGIN_DMA(1);
134
135 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
136 MGA_DMAPAD, 0x00000000,
137 MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
138
139 ADVANCE_DMA();
140
141 primary->last_flush = primary->tail;
142
143 head = MGA_READ(MGA_PRIMADDRESS);
144
145 if (head <= tail) {
146 primary->space = primary->size - primary->tail;
147 } else {
148 primary->space = head - tail;
149 }
150
151 DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset);
152 DRM_DEBUG(" tail = 0x%06lx\n", tail - dev_priv->primary->offset);
153 DRM_DEBUG(" space = 0x%06x\n", primary->space);
154
155 mga_flush_write_combine();
156 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
157
158 DRM_DEBUG("done.\n");
159}
160
161void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
162{
163 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
164 u32 head, tail;
165 DMA_LOCALS;
166 DRM_DEBUG("\n");
167
168 BEGIN_DMA_WRAP();
169
170 DMA_BLOCK(MGA_DMAPAD, 0x00000000,
171 MGA_DMAPAD, 0x00000000,
172 MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
173
174 ADVANCE_DMA();
175
176 tail = primary->tail + dev_priv->primary->offset;
177
178 primary->tail = 0;
179 primary->last_flush = 0;
180 primary->last_wrap++;
181
182 head = MGA_READ(MGA_PRIMADDRESS);
183
184 if (head == dev_priv->primary->offset) {
185 primary->space = primary->size;
186 } else {
187 primary->space = head - dev_priv->primary->offset;
188 }
189
190 DRM_DEBUG(" head = 0x%06lx\n", head - dev_priv->primary->offset);
191 DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
192 DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
193 DRM_DEBUG(" space = 0x%06x\n", primary->space);
194
195 mga_flush_write_combine();
196 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
197
198 set_bit(0, &primary->wrapped);
199 DRM_DEBUG("done.\n");
200}
201
202void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
203{
204 drm_mga_primary_buffer_t *primary = &dev_priv->prim;
205 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
206 u32 head = dev_priv->primary->offset;
207 DRM_DEBUG("\n");
208
209 sarea_priv->last_wrap++;
210 DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap);
211
212 mga_flush_write_combine();
213 MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
214
215 clear_bit(0, &primary->wrapped);
216 DRM_DEBUG("done.\n");
217}
218
219/* ================================================================
220 * Freelist management
221 */
222
223#define MGA_BUFFER_USED ~0
224#define MGA_BUFFER_FREE 0
225
226#if MGA_FREELIST_DEBUG
227static void mga_freelist_print(struct drm_device * dev)
228{
229 drm_mga_private_t *dev_priv = dev->dev_private;
230 drm_mga_freelist_t *entry;
231
232 DRM_INFO("\n");
233 DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
234 dev_priv->sarea_priv->last_dispatch,
235 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
236 dev_priv->primary->offset));
237 DRM_INFO("current freelist:\n");
238
239 for (entry = dev_priv->head->next; entry; entry = entry->next) {
240 DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
241 entry, entry->buf->idx, entry->age.head,
242 entry->age.head - dev_priv->primary->offset);
243 }
244 DRM_INFO("\n");
245}
246#endif
247
248static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
249{
250 struct drm_device_dma *dma = dev->dma;
251 struct drm_buf *buf;
252 drm_mga_buf_priv_t *buf_priv;
253 drm_mga_freelist_t *entry;
254 int i;
255 DRM_DEBUG("count=%d\n", dma->buf_count);
256
257 dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
258 if (dev_priv->head == NULL)
259 return -ENOMEM;
260
261 memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
262 SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
263
264 for (i = 0; i < dma->buf_count; i++) {
265 buf = dma->buflist[i];
266 buf_priv = buf->dev_private;
267
268 entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
269 if (entry == NULL)
270 return -ENOMEM;
271
272 memset(entry, 0, sizeof(drm_mga_freelist_t));
273
274 entry->next = dev_priv->head->next;
275 entry->prev = dev_priv->head;
276 SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
277 entry->buf = buf;
278
279 if (dev_priv->head->next != NULL)
280 dev_priv->head->next->prev = entry;
281 if (entry->next == NULL)
282 dev_priv->tail = entry;
283
284 buf_priv->list_entry = entry;
285 buf_priv->discard = 0;
286 buf_priv->dispatched = 0;
287
288 dev_priv->head->next = entry;
289 }
290
291 return 0;
292}
293
294static void mga_freelist_cleanup(struct drm_device * dev)
295{
296 drm_mga_private_t *dev_priv = dev->dev_private;
297 drm_mga_freelist_t *entry;
298 drm_mga_freelist_t *next;
299 DRM_DEBUG("\n");
300
301 entry = dev_priv->head;
302 while (entry) {
303 next = entry->next;
304 drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
305 entry = next;
306 }
307
308 dev_priv->head = dev_priv->tail = NULL;
309}
310
311#if 0
312/* FIXME: Still needed?
313 */
314static void mga_freelist_reset(struct drm_device * dev)
315{
316 struct drm_device_dma *dma = dev->dma;
317 struct drm_buf *buf;
318 drm_mga_buf_priv_t *buf_priv;
319 int i;
320
321 for (i = 0; i < dma->buf_count; i++) {
322 buf = dma->buflist[i];
323 buf_priv = buf->dev_private;
324 SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
325 }
326}
327#endif
328
329static struct drm_buf *mga_freelist_get(struct drm_device * dev)
330{
331 drm_mga_private_t *dev_priv = dev->dev_private;
332 drm_mga_freelist_t *next;
333 drm_mga_freelist_t *prev;
334 drm_mga_freelist_t *tail = dev_priv->tail;
335 u32 head, wrap;
336 DRM_DEBUG("\n");
337
338 head = MGA_READ(MGA_PRIMADDRESS);
339 wrap = dev_priv->sarea_priv->last_wrap;
340
341 DRM_DEBUG(" tail=0x%06lx %d\n",
342 tail->age.head ?
343 tail->age.head - dev_priv->primary->offset : 0,
344 tail->age.wrap);
345 DRM_DEBUG(" head=0x%06lx %d\n",
346 head - dev_priv->primary->offset, wrap);
347
348 if (TEST_AGE(&tail->age, head, wrap)) {
349 prev = dev_priv->tail->prev;
350 next = dev_priv->tail;
351 prev->next = NULL;
352 next->prev = next->next = NULL;
353 dev_priv->tail = prev;
354 SET_AGE(&next->age, MGA_BUFFER_USED, 0);
355 return next->buf;
356 }
357
358 DRM_DEBUG("returning NULL!\n");
359 return NULL;
360}
361
362int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
363{
364 drm_mga_private_t *dev_priv = dev->dev_private;
365 drm_mga_buf_priv_t *buf_priv = buf->dev_private;
366 drm_mga_freelist_t *head, *entry, *prev;
367
368 DRM_DEBUG("age=0x%06lx wrap=%d\n",
369 buf_priv->list_entry->age.head -
370 dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
371
372 entry = buf_priv->list_entry;
373 head = dev_priv->head;
374
375 if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
376 SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
377 prev = dev_priv->tail;
378 prev->next = entry;
379 entry->prev = prev;
380 entry->next = NULL;
381 } else {
382 prev = head->next;
383 head->next = entry;
384 prev->prev = entry;
385 entry->prev = head;
386 entry->next = prev;
387 }
388
389 return 0;
390}
391
392/* ================================================================
393 * DMA initialization, cleanup
394 */
395
396int mga_driver_load(struct drm_device * dev, unsigned long flags)
397{
398 drm_mga_private_t *dev_priv;
399
400 dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
401 if (!dev_priv)
402 return -ENOMEM;
403
404 dev->dev_private = (void *)dev_priv;
405 memset(dev_priv, 0, sizeof(drm_mga_private_t));
406
407 dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
408 dev_priv->chipset = flags;
409
410 dev_priv->mmio_base = drm_get_resource_start(dev, 1);
411 dev_priv->mmio_size = drm_get_resource_len(dev, 1);
412
413 dev->counters += 3;
414 dev->types[6] = _DRM_STAT_IRQ;
415 dev->types[7] = _DRM_STAT_PRIMARY;
416 dev->types[8] = _DRM_STAT_SECONDARY;
417
418 return 0;
419}
420
421#if __OS_HAS_AGP
422/**
423 * Bootstrap the driver for AGP DMA.
424 *
425 * \todo
426 * Investigate whether there is any benifit to storing the WARP microcode in
427 * AGP memory. If not, the microcode may as well always be put in PCI
428 * memory.
429 *
430 * \todo
431 * This routine needs to set dma_bs->agp_mode to the mode actually configured
432 * in the hardware. Looking just at the Linux AGP driver code, I don't see
433 * an easy way to determine this.
434 *
435 * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
436 */
437static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
438 drm_mga_dma_bootstrap_t * dma_bs)
439{
440 drm_mga_private_t *const dev_priv =
441 (drm_mga_private_t *) dev->dev_private;
442 unsigned int warp_size = mga_warp_microcode_size(dev_priv);
443 int err;
444 unsigned offset;
445 const unsigned secondary_size = dma_bs->secondary_bin_count
446 * dma_bs->secondary_bin_size;
447 const unsigned agp_size = (dma_bs->agp_size << 20);
448 struct drm_buf_desc req;
449 struct drm_agp_mode mode;
450 struct drm_agp_info info;
451 struct drm_agp_buffer agp_req;
452 struct drm_agp_binding bind_req;
453
454 /* Acquire AGP. */
455 err = drm_agp_acquire(dev);
456 if (err) {
457 DRM_ERROR("Unable to acquire AGP: %d\n", err);
458 return err;
459 }
460
461 err = drm_agp_info(dev, &info);
462 if (err) {
463 DRM_ERROR("Unable to get AGP info: %d\n", err);
464 return err;
465 }
466
467 mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
468 err = drm_agp_enable(dev, mode);
469 if (err) {
470 DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
471 return err;
472 }
473
474 /* In addition to the usual AGP mode configuration, the G200 AGP cards
475 * need to have the AGP mode "manually" set.
476 */
477
478 if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
479 if (mode.mode & 0x02) {
480 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
481 } else {
482 MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
483 }
484 }
485
486 /* Allocate and bind AGP memory. */
487 agp_req.size = agp_size;
488 agp_req.type = 0;
489 err = drm_agp_alloc(dev, &agp_req);
490 if (err) {
491 dev_priv->agp_size = 0;
492 DRM_ERROR("Unable to allocate %uMB AGP memory\n",
493 dma_bs->agp_size);
494 return err;
495 }
496
497 dev_priv->agp_size = agp_size;
498 dev_priv->agp_handle = agp_req.handle;
499
500 bind_req.handle = agp_req.handle;
501 bind_req.offset = 0;
502 err = drm_agp_bind(dev, &bind_req);
503 if (err) {
504 DRM_ERROR("Unable to bind AGP memory: %d\n", err);
505 return err;
506 }
507
508 /* Make drm_addbufs happy by not trying to create a mapping for less
509 * than a page.
510 */
511 if (warp_size < PAGE_SIZE)
512 warp_size = PAGE_SIZE;
513
514 offset = 0;
515 err = drm_addmap(dev, offset, warp_size,
516 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
517 if (err) {
518 DRM_ERROR("Unable to map WARP microcode: %d\n", err);
519 return err;
520 }
521
522 offset += warp_size;
523 err = drm_addmap(dev, offset, dma_bs->primary_size,
524 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
525 if (err) {
526 DRM_ERROR("Unable to map primary DMA region: %d\n", err);
527 return err;
528 }
529
530 offset += dma_bs->primary_size;
531 err = drm_addmap(dev, offset, secondary_size,
532 _DRM_AGP, 0, &dev->agp_buffer_map);
533 if (err) {
534 DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
535 return err;
536 }
537
538 (void)memset(&req, 0, sizeof(req));
539 req.count = dma_bs->secondary_bin_count;
540 req.size = dma_bs->secondary_bin_size;
541 req.flags = _DRM_AGP_BUFFER;
542 req.agp_start = offset;
543
544 err = drm_addbufs_agp(dev, &req);
545 if (err) {
546 DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
547 return err;
548 }
549
550 {
551 struct drm_map_list *_entry;
552 unsigned long agp_token = 0;
553
554 list_for_each_entry(_entry, &dev->maplist, head) {
555 if (_entry->map == dev->agp_buffer_map)
556 agp_token = _entry->user_token;
557 }
558 if (!agp_token)
559 return -EFAULT;
560
561 dev->agp_buffer_token = agp_token;
562 }
563
564 offset += secondary_size;
565 err = drm_addmap(dev, offset, agp_size - offset,
566 _DRM_AGP, 0, &dev_priv->agp_textures);
567 if (err) {
568 DRM_ERROR("Unable to map AGP texture region %d\n", err);
569 return err;
570 }
571
572 drm_core_ioremap(dev_priv->warp, dev);
573 drm_core_ioremap(dev_priv->primary, dev);
574 drm_core_ioremap(dev->agp_buffer_map, dev);
575
576 if (!dev_priv->warp->handle ||
577 !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
578 DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
579 dev_priv->warp->handle, dev_priv->primary->handle,
580 dev->agp_buffer_map->handle);
581 return -ENOMEM;
582 }
583
584 dev_priv->dma_access = MGA_PAGPXFER;
585 dev_priv->wagp_enable = MGA_WAGP_ENABLE;
586
587 DRM_INFO("Initialized card for AGP DMA.\n");
588 return 0;
589}
590#else
591static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
592 drm_mga_dma_bootstrap_t * dma_bs)
593{
594 return -EINVAL;
595}
596#endif
597
598/**
599 * Bootstrap the driver for PCI DMA.
600 *
601 * \todo
602 * The algorithm for decreasing the size of the primary DMA buffer could be
603 * better. The size should be rounded up to the nearest page size, then
604 * decrease the request size by a single page each pass through the loop.
605 *
606 * \todo
607 * Determine whether the maximum address passed to drm_pci_alloc is correct.
608 * The same goes for drm_addbufs_pci.
609 *
610 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
611 */
612static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
613 drm_mga_dma_bootstrap_t * dma_bs)
614{
615 drm_mga_private_t *const dev_priv =
616 (drm_mga_private_t *) dev->dev_private;
617 unsigned int warp_size = mga_warp_microcode_size(dev_priv);
618 unsigned int primary_size;
619 unsigned int bin_count;
620 int err;
621 struct drm_buf_desc req;
622
623 if (dev->dma == NULL) {
624 DRM_ERROR("dev->dma is NULL\n");
625 return -EFAULT;
626 }
627
628 /* Make drm_addbufs happy by not trying to create a mapping for less
629 * than a page.
630 */
631 if (warp_size < PAGE_SIZE)
632 warp_size = PAGE_SIZE;
633
634 /* The proper alignment is 0x100 for this mapping */
635 err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
636 _DRM_READ_ONLY, &dev_priv->warp);
637 if (err != 0) {
638 DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
639 err);
640 return err;
641 }
642
643 /* Other than the bottom two bits being used to encode other
644 * information, there don't appear to be any restrictions on the
645 * alignment of the primary or secondary DMA buffers.
646 */
647
648 for (primary_size = dma_bs->primary_size; primary_size != 0;
649 primary_size >>= 1) {
650 /* The proper alignment for this mapping is 0x04 */
651 err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
652 _DRM_READ_ONLY, &dev_priv->primary);
653 if (!err)
654 break;
655 }
656
657 if (err != 0) {
658 DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
659 return -ENOMEM;
660 }
661
662 if (dev_priv->primary->size != dma_bs->primary_size) {
663 DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
664 dma_bs->primary_size,
665 (unsigned)dev_priv->primary->size);
666 dma_bs->primary_size = dev_priv->primary->size;
667 }
668
669 for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
670 bin_count--) {
671 (void)memset(&req, 0, sizeof(req));
672 req.count = bin_count;
673 req.size = dma_bs->secondary_bin_size;
674
675 err = drm_addbufs_pci(dev, &req);
676 if (!err) {
677 break;
678 }
679 }
680
681 if (bin_count == 0) {
682 DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
683 return err;
684 }
685
686 if (bin_count != dma_bs->secondary_bin_count) {
687 DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
688 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
689
690 dma_bs->secondary_bin_count = bin_count;
691 }
692
693 dev_priv->dma_access = 0;
694 dev_priv->wagp_enable = 0;
695
696 dma_bs->agp_mode = 0;
697
698 DRM_INFO("Initialized card for PCI DMA.\n");
699 return 0;
700}
701
702static int mga_do_dma_bootstrap(struct drm_device * dev,
703 drm_mga_dma_bootstrap_t * dma_bs)
704{
705 const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
706 int err;
707 drm_mga_private_t *const dev_priv =
708 (drm_mga_private_t *) dev->dev_private;
709
710 dev_priv->used_new_dma_init = 1;
711
712 /* The first steps are the same for both PCI and AGP based DMA. Map
713 * the cards MMIO registers and map a status page.
714 */
715 err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
716 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
717 if (err) {
718 DRM_ERROR("Unable to map MMIO region: %d\n", err);
719 return err;
720 }
721
722 err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
723 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
724 &dev_priv->status);
725 if (err) {
726 DRM_ERROR("Unable to map status region: %d\n", err);
727 return err;
728 }
729
730 /* The DMA initialization procedure is slightly different for PCI and
731 * AGP cards. AGP cards just allocate a large block of AGP memory and
732 * carve off portions of it for internal uses. The remaining memory
733 * is returned to user-mode to be used for AGP textures.
734 */
735 if (is_agp) {
736 err = mga_do_agp_dma_bootstrap(dev, dma_bs);
737 }
738
739 /* If we attempted to initialize the card for AGP DMA but failed,
740 * clean-up any mess that may have been created.
741 */
742
743 if (err) {
744 mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
745 }
746
747 /* Not only do we want to try and initialized PCI cards for PCI DMA,
748 * but we also try to initialized AGP cards that could not be
749 * initialized for AGP DMA. This covers the case where we have an AGP
750 * card in a system with an unsupported AGP chipset. In that case the
751 * card will be detected as AGP, but we won't be able to allocate any
752 * AGP memory, etc.
753 */
754
755 if (!is_agp || err) {
756 err = mga_do_pci_dma_bootstrap(dev, dma_bs);
757 }
758
759 return err;
760}
761
762int mga_dma_bootstrap(struct drm_device *dev, void *data,
763 struct drm_file *file_priv)
764{
765 drm_mga_dma_bootstrap_t *bootstrap = data;
766 int err;
767 static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
768 const drm_mga_private_t *const dev_priv =
769 (drm_mga_private_t *) dev->dev_private;
770
771 err = mga_do_dma_bootstrap(dev, bootstrap);
772 if (err) {
773 mga_do_cleanup_dma(dev, FULL_CLEANUP);
774 return err;
775 }
776
777 if (dev_priv->agp_textures != NULL) {
778 bootstrap->texture_handle = dev_priv->agp_textures->offset;
779 bootstrap->texture_size = dev_priv->agp_textures->size;
780 } else {
781 bootstrap->texture_handle = 0;
782 bootstrap->texture_size = 0;
783 }
784
785 bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
786
787 return err;
788}
789
790static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
791{
792 drm_mga_private_t *dev_priv;
793 int ret;
794 DRM_DEBUG("\n");
795
796 dev_priv = dev->dev_private;
797
798 if (init->sgram) {
799 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
800 } else {
801 dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
802 }
803 dev_priv->maccess = init->maccess;
804
805 dev_priv->fb_cpp = init->fb_cpp;
806 dev_priv->front_offset = init->front_offset;
807 dev_priv->front_pitch = init->front_pitch;
808 dev_priv->back_offset = init->back_offset;
809 dev_priv->back_pitch = init->back_pitch;
810
811 dev_priv->depth_cpp = init->depth_cpp;
812 dev_priv->depth_offset = init->depth_offset;
813 dev_priv->depth_pitch = init->depth_pitch;
814
815 /* FIXME: Need to support AGP textures...
816 */
817 dev_priv->texture_offset = init->texture_offset[0];
818 dev_priv->texture_size = init->texture_size[0];
819
820 dev_priv->sarea = drm_getsarea(dev);
821 if (!dev_priv->sarea) {
822 DRM_ERROR("failed to find sarea!\n");
823 return -EINVAL;
824 }
825
826 if (!dev_priv->used_new_dma_init) {
827
828 dev_priv->dma_access = MGA_PAGPXFER;
829 dev_priv->wagp_enable = MGA_WAGP_ENABLE;
830
831 dev_priv->status = drm_core_findmap(dev, init->status_offset);
832 if (!dev_priv->status) {
833 DRM_ERROR("failed to find status page!\n");
834 return -EINVAL;
835 }
836 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
837 if (!dev_priv->mmio) {
838 DRM_ERROR("failed to find mmio region!\n");
839 return -EINVAL;
840 }
841 dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
842 if (!dev_priv->warp) {
843 DRM_ERROR("failed to find warp microcode region!\n");
844 return -EINVAL;
845 }
846 dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
847 if (!dev_priv->primary) {
848 DRM_ERROR("failed to find primary dma region!\n");
849 return -EINVAL;
850 }
851 dev->agp_buffer_token = init->buffers_offset;
852 dev->agp_buffer_map =
853 drm_core_findmap(dev, init->buffers_offset);
854 if (!dev->agp_buffer_map) {
855 DRM_ERROR("failed to find dma buffer region!\n");
856 return -EINVAL;
857 }
858
859 drm_core_ioremap(dev_priv->warp, dev);
860 drm_core_ioremap(dev_priv->primary, dev);
861 drm_core_ioremap(dev->agp_buffer_map, dev);
862 }
863
864 dev_priv->sarea_priv =
865 (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
866 init->sarea_priv_offset);
867
868 if (!dev_priv->warp->handle ||
869 !dev_priv->primary->handle ||
870 ((dev_priv->dma_access != 0) &&
871 ((dev->agp_buffer_map == NULL) ||
872 (dev->agp_buffer_map->handle == NULL)))) {
873 DRM_ERROR("failed to ioremap agp regions!\n");
874 return -ENOMEM;
875 }
876
877 ret = mga_warp_install_microcode(dev_priv);
878 if (ret < 0) {
879 DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
880 return ret;
881 }
882
883 ret = mga_warp_init(dev_priv);
884 if (ret < 0) {
885 DRM_ERROR("failed to init WARP engine!: %d\n", ret);
886 return ret;
887 }
888
889 dev_priv->prim.status = (u32 *) dev_priv->status->handle;
890
891 mga_do_wait_for_idle(dev_priv);
892
893 /* Init the primary DMA registers.
894 */
895 MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
896#if 0
897 MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
898 MGA_PRIMPTREN1); /* DWGSYNC */
899#endif
900
901 dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
902 dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
903 + dev_priv->primary->size);
904 dev_priv->prim.size = dev_priv->primary->size;
905
906 dev_priv->prim.tail = 0;
907 dev_priv->prim.space = dev_priv->prim.size;
908 dev_priv->prim.wrapped = 0;
909
910 dev_priv->prim.last_flush = 0;
911 dev_priv->prim.last_wrap = 0;
912
913 dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
914
915 dev_priv->prim.status[0] = dev_priv->primary->offset;
916 dev_priv->prim.status[1] = 0;
917
918 dev_priv->sarea_priv->last_wrap = 0;
919 dev_priv->sarea_priv->last_frame.head = 0;
920 dev_priv->sarea_priv->last_frame.wrap = 0;
921
922 if (mga_freelist_init(dev, dev_priv) < 0) {
923 DRM_ERROR("could not initialize freelist\n");
924 return -ENOMEM;
925 }
926
927 return 0;
928}
929
930static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
931{
932 int err = 0;
933 DRM_DEBUG("\n");
934
935 /* Make sure interrupts are disabled here because the uninstall ioctl
936 * may not have been called from userspace and after dev_private
937 * is freed, it's too late.
938 */
939 if (dev->irq_enabled)
940 drm_irq_uninstall(dev);
941
942 if (dev->dev_private) {
943 drm_mga_private_t *dev_priv = dev->dev_private;
944
945 if ((dev_priv->warp != NULL)
946 && (dev_priv->warp->type != _DRM_CONSISTENT))
947 drm_core_ioremapfree(dev_priv->warp, dev);
948
949 if ((dev_priv->primary != NULL)
950 && (dev_priv->primary->type != _DRM_CONSISTENT))
951 drm_core_ioremapfree(dev_priv->primary, dev);
952
953 if (dev->agp_buffer_map != NULL)
954 drm_core_ioremapfree(dev->agp_buffer_map, dev);
955
956 if (dev_priv->used_new_dma_init) {
957#if __OS_HAS_AGP
958 if (dev_priv->agp_handle != 0) {
959 struct drm_agp_binding unbind_req;
960 struct drm_agp_buffer free_req;
961
962 unbind_req.handle = dev_priv->agp_handle;
963 drm_agp_unbind(dev, &unbind_req);
964
965 free_req.handle = dev_priv->agp_handle;
966 drm_agp_free(dev, &free_req);
967
968 dev_priv->agp_textures = NULL;
969 dev_priv->agp_size = 0;
970 dev_priv->agp_handle = 0;
971 }
972
973 if ((dev->agp != NULL) && dev->agp->acquired) {
974 err = drm_agp_release(dev);
975 }
976#endif
977 }
978
979 dev_priv->warp = NULL;
980 dev_priv->primary = NULL;
981 dev_priv->sarea = NULL;
982 dev_priv->sarea_priv = NULL;
983 dev->agp_buffer_map = NULL;
984
985 if (full_cleanup) {
986 dev_priv->mmio = NULL;
987 dev_priv->status = NULL;
988 dev_priv->used_new_dma_init = 0;
989 }
990
991 memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
992 dev_priv->warp_pipe = 0;
993 memset(dev_priv->warp_pipe_phys, 0,
994 sizeof(dev_priv->warp_pipe_phys));
995
996 if (dev_priv->head != NULL) {
997 mga_freelist_cleanup(dev);
998 }
999 }
1000
1001 return err;
1002}
1003
1004int mga_dma_init(struct drm_device *dev, void *data,
1005 struct drm_file *file_priv)
1006{
1007 drm_mga_init_t *init = data;
1008 int err;
1009
1010 LOCK_TEST_WITH_RETURN(dev, file_priv);
1011
1012 switch (init->func) {
1013 case MGA_INIT_DMA:
1014 err = mga_do_init_dma(dev, init);
1015 if (err) {
1016 (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
1017 }
1018 return err;
1019 case MGA_CLEANUP_DMA:
1020 return mga_do_cleanup_dma(dev, FULL_CLEANUP);
1021 }
1022
1023 return -EINVAL;
1024}
1025
1026/* ================================================================
1027 * Primary DMA stream management
1028 */
1029
1030int mga_dma_flush(struct drm_device *dev, void *data,
1031 struct drm_file *file_priv)
1032{
1033 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1034 struct drm_lock *lock = data;
1035
1036 LOCK_TEST_WITH_RETURN(dev, file_priv);
1037
1038 DRM_DEBUG("%s%s%s\n",
1039 (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
1040 (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
1041 (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
1042
1043 WRAP_WAIT_WITH_RETURN(dev_priv);
1044
1045 if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
1046 mga_do_dma_flush(dev_priv);
1047 }
1048
1049 if (lock->flags & _DRM_LOCK_QUIESCENT) {
1050#if MGA_DMA_DEBUG
1051 int ret = mga_do_wait_for_idle(dev_priv);
1052 if (ret < 0)
1053 DRM_INFO("-EBUSY\n");
1054 return ret;
1055#else
1056 return mga_do_wait_for_idle(dev_priv);
1057#endif
1058 } else {
1059 return 0;
1060 }
1061}
1062
1063int mga_dma_reset(struct drm_device *dev, void *data,
1064 struct drm_file *file_priv)
1065{
1066 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1067
1068 LOCK_TEST_WITH_RETURN(dev, file_priv);
1069
1070 return mga_do_dma_reset(dev_priv);
1071}
1072
1073/* ================================================================
1074 * DMA buffer management
1075 */
1076
1077static int mga_dma_get_buffers(struct drm_device * dev,
1078 struct drm_file *file_priv, struct drm_dma * d)
1079{
1080 struct drm_buf *buf;
1081 int i;
1082
1083 for (i = d->granted_count; i < d->request_count; i++) {
1084 buf = mga_freelist_get(dev);
1085 if (!buf)
1086 return -EAGAIN;
1087
1088 buf->file_priv = file_priv;
1089
1090 if (DRM_COPY_TO_USER(&d->request_indices[i],
1091 &buf->idx, sizeof(buf->idx)))
1092 return -EFAULT;
1093 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1094 &buf->total, sizeof(buf->total)))
1095 return -EFAULT;
1096
1097 d->granted_count++;
1098 }
1099 return 0;
1100}
1101
1102int mga_dma_buffers(struct drm_device *dev, void *data,
1103 struct drm_file *file_priv)
1104{
1105 struct drm_device_dma *dma = dev->dma;
1106 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
1107 struct drm_dma *d = data;
1108 int ret = 0;
1109
1110 LOCK_TEST_WITH_RETURN(dev, file_priv);
1111
1112 /* Please don't send us buffers.
1113 */
1114 if (d->send_count != 0) {
1115 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1116 DRM_CURRENTPID, d->send_count);
1117 return -EINVAL;
1118 }
1119
1120 /* We'll send you buffers.
1121 */
1122 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1123 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1124 DRM_CURRENTPID, d->request_count, dma->buf_count);
1125 return -EINVAL;
1126 }
1127
1128 WRAP_TEST_WITH_RETURN(dev_priv);
1129
1130 d->granted_count = 0;
1131
1132 if (d->request_count) {
1133 ret = mga_dma_get_buffers(dev, file_priv, d);
1134 }
1135
1136 return ret;
1137}
1138
1139/**
1140 * Called just before the module is unloaded.
1141 */
1142int mga_driver_unload(struct drm_device * dev)
1143{
1144 drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
1145 dev->dev_private = NULL;
1146
1147 return 0;
1148}
1149
1150/**
1151 * Called when the last opener of the device is closed.
1152 */
1153void mga_driver_lastclose(struct drm_device * dev)
1154{
1155 mga_do_cleanup_dma(dev, FULL_CLEANUP);
1156}
1157
1158int mga_driver_dma_quiescent(struct drm_device * dev)
1159{
1160 drm_mga_private_t *dev_priv = dev->dev_private;
1161 return mga_do_wait_for_idle(dev_priv);
1162}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
new file mode 100644
index 000000000000..5572939fc7d1
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -0,0 +1,141 @@
1/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "mga_drm.h"
35#include "mga_drv.h"
36
37#include "drm_pciids.h"
38
/* Defined below; referenced by the driver template. */
static int mga_driver_device_is_agp(struct drm_device * dev);

/* PCI device IDs claimed by this driver (expanded from drm_pciids.h). */
static struct pci_device_id pciidlist[] = {
	mga_PCI_IDS
};

/* DRM driver template: feature flags plus the per-driver callbacks
 * implemented elsewhere in the driver. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	    DRIVER_IRQ_VBL,
	/* per-buffer private data allocated by the DRM core */
	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
	.load = mga_driver_load,
	.unload = mga_driver_unload,
	.lastclose = mga_driver_lastclose,
	.dma_quiescent = mga_driver_dma_quiescent,
	.device_is_agp = mga_driver_device_is_agp,
	.vblank_wait = mga_driver_vblank_wait,
	.irq_preinstall = mga_driver_irq_preinstall,
	.irq_postinstall = mga_driver_irq_postinstall,
	.irq_uninstall = mga_driver_irq_uninstall,
	.irq_handler = mga_driver_irq_handler,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = mga_ioctls,
	.dma_ioctl = mga_dma_buffers,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
		 /* 32-bit processes on a 64-bit kernel (mga_ioc32.c) */
		 .compat_ioctl = mga_compat_ioctl,
#endif
	},
	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
90
/* Module entry point: patch the ioctl count into the template (its
 * value lives in mga_state.c alongside the table — confirm) and
 * register the driver with the DRM core. */
static int __init mga_init(void)
{
	driver.num_ioctls = mga_max_ioctl;
	return drm_init(&driver);
}
96
/* Module exit point: unregister the driver from the DRM core. */
static void __exit mga_exit(void)
{
	drm_exit(&driver);
}
101
102module_init(mga_init);
103module_exit(mga_exit);
104
105MODULE_AUTHOR(DRIVER_AUTHOR);
106MODULE_DESCRIPTION(DRIVER_DESC);
107MODULE_LICENSE("GPL and additional rights");
108
109/**
110 * Determine if the device really is AGP or not.
111 *
112 * In addition to the usual tests performed by \c drm_device_is_agp, this
113 * function detects PCI G450 cards that appear to the system exactly like
114 * AGP G450 cards.
115 *
116 * \param dev The device to be tested.
117 *
118 * \returns
119 * If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
120 */
121static int mga_driver_device_is_agp(struct drm_device * dev)
122{
123 const struct pci_dev *const pdev = dev->pdev;
124
125 /* There are PCI versions of the G450. These cards have the
126 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
127 * bridge chip. We detect these cards, which are not currently
128 * supported by this driver, by looking at the device ID of the
129 * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
130 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
131 * device.
132 */
133
134 if ((pdev->device == 0x0525) && pdev->bus->self
135 && (pdev->bus->self->vendor == 0x3388)
136 && (pdev->bus->self->device == 0x0021)) {
137 return 0;
138 }
139
140 return 2;
141}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
new file mode 100644
index 000000000000..f6ebd24bd587
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -0,0 +1,687 @@
1/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All rights reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Gareth Hughes <gareth@valinux.com>
29 */
30
31#ifndef __MGA_DRV_H__
32#define __MGA_DRV_H__
33
34/* General customization:
35 */
36
37#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
38
39#define DRIVER_NAME "mga"
40#define DRIVER_DESC "Matrox G200/G400"
41#define DRIVER_DATE "20051102"
42
43#define DRIVER_MAJOR 3
44#define DRIVER_MINOR 2
45#define DRIVER_PATCHLEVEL 1
46
/* Book-keeping for the primary DMA command stream. */
typedef struct drm_mga_primary_buffer {
	u8 *start;		/* kernel address of the first byte */
	u8 *end;		/* kernel address one past the last byte */
	int size;		/* size of the buffer in bytes */

	u32 tail;		/* offset of the next write (see BEGIN_DMA) */
	int space;		/* free bytes; compared against high_mark */
	volatile long wrapped;	/* bit 0 set while a wrap is pending (see WRAP_*) */

	volatile u32 *status;	/* hardware-visible status slot -- TODO confirm */

	u32 last_flush;
	u32 last_wrap;

	u32 high_mark;		/* low-space threshold that forces a wrap/flush */
} drm_mga_primary_buffer_t;

/* Doubly-linked list node used to age and recycle DMA buffers. */
typedef struct drm_mga_freelist {
	struct drm_mga_freelist *next;
	struct drm_mga_freelist *prev;
	drm_mga_age_t age;	/* stream position at last dispatch (see AGE_BUFFER) */
	struct drm_buf *buf;	/* the DRM buffer tracked by this node */
} drm_mga_freelist_t;

/* Per-buffer driver-private data (see dev_priv_size in mga_drv.c). */
typedef struct {
	drm_mga_freelist_t *list_entry;
	int discard;		/* presumably set when the client is done with the buffer -- verify */
	int dispatched;		/* nonzero once the buffer has been emitted (see AGE_BUFFER) */
} drm_mga_buf_priv_t;

/* Per-device driver-private state, hung off dev->dev_private. */
typedef struct drm_mga_private {
	drm_mga_primary_buffer_t prim;
	drm_mga_sarea_t *sarea_priv;

	drm_mga_freelist_t *head;	/* buffer freelist head */
	drm_mga_freelist_t *tail;	/* buffer freelist tail */

	unsigned int warp_pipe;
	unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];

	int chipset;		/* MGA_CARD_TYPE_* (compared in MGA_EMIT_STATE) */
	int usec_timeout;	/* busy-wait budget for idle polling -- TODO confirm */

	/**
	 * If set, the new DMA initialization sequence was used.  This is
	 * primarily used to select how the driver should uninitialize its
	 * internal DMA structures.
	 */
	int used_new_dma_init;

	/**
	 * If AGP memory is used for DMA buffers, this will be the value
	 * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
	 */
	u32 dma_access;

	/**
	 * If AGP memory is used for DMA buffers, this will be the value
	 * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
	 * transfer).
	 */
	u32 wagp_enable;

	/**
	 * \name MMIO region parameters.
	 *
	 * \sa drm_mga_private_t::mmio
	 */
	/*@{ */
	u32 mmio_base;		/**< Bus address of base of MMIO. */
	u32 mmio_size;		/**< Size of the MMIO region. */
	/*@} */

	u32 clear_cmd;		/* precomputed DWGCTL value for clears -- TODO confirm */
	u32 maccess;

	wait_queue_head_t fence_queue;	/* fence waiters -- see mga_driver_fence_wait() */
	atomic_t last_fence_retired;
	u32 next_fence_to_post;

	/* Framebuffer layout, handed in at DMA init time. */
	unsigned int fb_cpp;
	unsigned int front_offset;
	unsigned int front_pitch;
	unsigned int back_offset;
	unsigned int back_pitch;

	unsigned int depth_cpp;
	unsigned int depth_offset;
	unsigned int depth_pitch;

	unsigned int texture_offset;
	unsigned int texture_size;

	/* Mappings established during initialization. */
	drm_local_map_t *sarea;
	drm_local_map_t *mmio;
	drm_local_map_t *status;
	drm_local_map_t *warp;
	drm_local_map_t *primary;
	drm_local_map_t *agp_textures;

	unsigned long agp_handle;
	unsigned int agp_size;
} drm_mga_private_t;
150
151extern struct drm_ioctl_desc mga_ioctls[];
152extern int mga_max_ioctl;
153
154 /* mga_dma.c */
155extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
156 struct drm_file *file_priv);
157extern int mga_dma_init(struct drm_device *dev, void *data,
158 struct drm_file *file_priv);
159extern int mga_dma_flush(struct drm_device *dev, void *data,
160 struct drm_file *file_priv);
161extern int mga_dma_reset(struct drm_device *dev, void *data,
162 struct drm_file *file_priv);
163extern int mga_dma_buffers(struct drm_device *dev, void *data,
164 struct drm_file *file_priv);
165extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
166extern int mga_driver_unload(struct drm_device * dev);
167extern void mga_driver_lastclose(struct drm_device * dev);
168extern int mga_driver_dma_quiescent(struct drm_device * dev);
169
170extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
171
172extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
173extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
174extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
175
176extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
177
178 /* mga_warp.c */
179extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
180extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
181extern int mga_warp_init(drm_mga_private_t * dev_priv);
182
183 /* mga_irq.c */
184extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
185extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
186extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
187extern void mga_driver_irq_preinstall(struct drm_device * dev);
188extern void mga_driver_irq_postinstall(struct drm_device * dev);
189extern void mga_driver_irq_uninstall(struct drm_device * dev);
190extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
191 unsigned long arg);
192
193#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
194
195#if defined(__linux__) && defined(__alpha__)
196#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
197#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
198
199#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
200#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
201
202#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
203#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
204#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
205#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
206
207static inline u32 _MGA_READ(u32 * addr)
208{
209 DRM_MEMORYBARRIER();
210 return *(volatile u32 *)addr;
211}
212#else
213#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg))
214#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg))
215#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val))
216#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
217#endif
218
219#define DWGREG0 0x1c00
220#define DWGREG0_END 0x1dff
221#define DWGREG1 0x2c00
222#define DWGREG1_END 0x2dff
223
224#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END)
225#define DMAREG0(r) (u8)((r - DWGREG0) >> 2)
226#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80)
227#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
228
/* ================================================================
 * Helper macros...
 */
232
233#define MGA_EMIT_STATE( dev_priv, dirty ) \
234do { \
235 if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
236 if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
237 mga_g400_emit_state( dev_priv ); \
238 } else { \
239 mga_g200_emit_state( dev_priv ); \
240 } \
241 } \
242} while (0)
243
244#define WRAP_TEST_WITH_RETURN( dev_priv ) \
245do { \
246 if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
247 if ( mga_is_idle( dev_priv ) ) { \
248 mga_do_dma_wrap_end( dev_priv ); \
249 } else if ( dev_priv->prim.space < \
250 dev_priv->prim.high_mark ) { \
251 if ( MGA_DMA_DEBUG ) \
252 DRM_INFO( "wrap...\n"); \
253 return -EBUSY; \
254 } \
255 } \
256} while (0)
257
258#define WRAP_WAIT_WITH_RETURN( dev_priv ) \
259do { \
260 if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
261 if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
262 if ( MGA_DMA_DEBUG ) \
263 DRM_INFO( "wrap...\n"); \
264 return -EBUSY; \
265 } \
266 mga_do_dma_wrap_end( dev_priv ); \
267 } \
268} while (0)
269
270/* ================================================================
271 * Primary DMA command stream
272 */
273
274#define MGA_VERBOSE 0
275
276#define DMA_LOCALS unsigned int write; volatile u8 *prim;
277
278#define DMA_BLOCK_SIZE (5 * sizeof(u32))
279
280#define BEGIN_DMA( n ) \
281do { \
282 if ( MGA_VERBOSE ) { \
283 DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
284 DRM_INFO( " space=0x%x req=0x%Zx\n", \
285 dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
286 } \
287 prim = dev_priv->prim.start; \
288 write = dev_priv->prim.tail; \
289} while (0)
290
291#define BEGIN_DMA_WRAP() \
292do { \
293 if ( MGA_VERBOSE ) { \
294 DRM_INFO( "BEGIN_DMA()\n" ); \
295 DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
296 } \
297 prim = dev_priv->prim.start; \
298 write = dev_priv->prim.tail; \
299} while (0)
300
301#define ADVANCE_DMA() \
302do { \
303 dev_priv->prim.tail = write; \
304 if ( MGA_VERBOSE ) { \
305 DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
306 write, dev_priv->prim.space ); \
307 } \
308} while (0)
309
310#define FLUSH_DMA() \
311do { \
312 if ( 0 ) { \
313 DRM_INFO( "\n" ); \
314 DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
315 dev_priv->prim.tail, \
316 MGA_READ( MGA_PRIMADDRESS ) - \
317 dev_priv->primary->offset ); \
318 } \
319 if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \
320 if ( dev_priv->prim.space < \
321 dev_priv->prim.high_mark ) { \
322 mga_do_dma_wrap_start( dev_priv ); \
323 } else { \
324 mga_do_dma_flush( dev_priv ); \
325 } \
326 } \
327} while (0)
328
329/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
330 */
331#define DMA_WRITE( offset, val ) \
332do { \
333 if ( MGA_VERBOSE ) { \
334 DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
335 (u32)(val), write + (offset) * sizeof(u32) ); \
336 } \
337 *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
338} while (0)
339
340#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \
341do { \
342 DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \
343 (DMAREG( reg1 ) << 8) | \
344 (DMAREG( reg2 ) << 16) | \
345 (DMAREG( reg3 ) << 24)) ); \
346 DMA_WRITE( 1, val0 ); \
347 DMA_WRITE( 2, val1 ); \
348 DMA_WRITE( 3, val2 ); \
349 DMA_WRITE( 4, val3 ); \
350 write += DMA_BLOCK_SIZE; \
351} while (0)
352
353/* Buffer aging via primary DMA stream head pointer.
354 */
355
356#define SET_AGE( age, h, w ) \
357do { \
358 (age)->head = h; \
359 (age)->wrap = w; \
360} while (0)
361
362#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \
363 ( (age)->wrap == w && \
364 (age)->head < h ) )
365
366#define AGE_BUFFER( buf_priv ) \
367do { \
368 drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
369 if ( (buf_priv)->dispatched ) { \
370 entry->age.head = (dev_priv->prim.tail + \
371 dev_priv->primary->offset); \
372 entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
373 } else { \
374 entry->age.head = 0; \
375 entry->age.wrap = 0; \
376 } \
377} while (0)
378
379#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \
380 MGA_DWGENGSTS | \
381 MGA_ENDPRDMASTS)
382#define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \
383 MGA_ENDPRDMASTS)
384
385#define MGA_DMA_DEBUG 0
386
387/* A reduced set of the mga registers.
388 */
389#define MGA_CRTC_INDEX 0x1fd4
390#define MGA_CRTC_DATA 0x1fd5
391
392/* CRTC11 */
393#define MGA_VINTCLR (1 << 4)
394#define MGA_VINTEN (1 << 5)
395
396#define MGA_ALPHACTRL 0x2c7c
397#define MGA_AR0 0x1c60
398#define MGA_AR1 0x1c64
399#define MGA_AR2 0x1c68
400#define MGA_AR3 0x1c6c
401#define MGA_AR4 0x1c70
402#define MGA_AR5 0x1c74
403#define MGA_AR6 0x1c78
404
405#define MGA_CXBNDRY 0x1c80
406#define MGA_CXLEFT 0x1ca0
407#define MGA_CXRIGHT 0x1ca4
408
409#define MGA_DMAPAD 0x1c54
410#define MGA_DSTORG 0x2cb8
411#define MGA_DWGCTL 0x1c00
412# define MGA_OPCOD_MASK (15 << 0)
413# define MGA_OPCOD_TRAP (4 << 0)
414# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
415# define MGA_OPCOD_BITBLT (8 << 0)
416# define MGA_OPCOD_ILOAD (9 << 0)
417# define MGA_ATYPE_MASK (7 << 4)
418# define MGA_ATYPE_RPL (0 << 4)
419# define MGA_ATYPE_RSTR (1 << 4)
420# define MGA_ATYPE_ZI (3 << 4)
421# define MGA_ATYPE_BLK (4 << 4)
422# define MGA_ATYPE_I (7 << 4)
423# define MGA_LINEAR (1 << 7)
424# define MGA_ZMODE_MASK (7 << 8)
425# define MGA_ZMODE_NOZCMP (0 << 8)
426# define MGA_ZMODE_ZE (2 << 8)
427# define MGA_ZMODE_ZNE (3 << 8)
428# define MGA_ZMODE_ZLT (4 << 8)
429# define MGA_ZMODE_ZLTE (5 << 8)
430# define MGA_ZMODE_ZGT (6 << 8)
431# define MGA_ZMODE_ZGTE (7 << 8)
432# define MGA_SOLID (1 << 11)
433# define MGA_ARZERO (1 << 12)
434# define MGA_SGNZERO (1 << 13)
435# define MGA_SHIFTZERO (1 << 14)
436# define MGA_BOP_MASK (15 << 16)
437# define MGA_BOP_ZERO (0 << 16)
438# define MGA_BOP_DST (10 << 16)
439# define MGA_BOP_SRC (12 << 16)
440# define MGA_BOP_ONE (15 << 16)
441# define MGA_TRANS_SHIFT 20
442# define MGA_TRANS_MASK (15 << 20)
443# define MGA_BLTMOD_MASK (15 << 25)
444# define MGA_BLTMOD_BMONOLEF (0 << 25)
445# define MGA_BLTMOD_BMONOWF (4 << 25)
446# define MGA_BLTMOD_PLAN (1 << 25)
447# define MGA_BLTMOD_BFCOL (2 << 25)
448# define MGA_BLTMOD_BU32BGR (3 << 25)
449# define MGA_BLTMOD_BU32RGB (7 << 25)
450# define MGA_BLTMOD_BU24BGR (11 << 25)
451# define MGA_BLTMOD_BU24RGB (15 << 25)
452# define MGA_PATTERN (1 << 29)
453# define MGA_TRANSC (1 << 30)
454# define MGA_CLIPDIS (1 << 31)
455#define MGA_DWGSYNC 0x2c4c
456
457#define MGA_FCOL 0x1c24
458#define MGA_FIFOSTATUS 0x1e10
459#define MGA_FOGCOL 0x1cf4
460#define MGA_FXBNDRY 0x1c84
461#define MGA_FXLEFT 0x1ca8
462#define MGA_FXRIGHT 0x1cac
463
464#define MGA_ICLEAR 0x1e18
465# define MGA_SOFTRAPICLR (1 << 0)
466# define MGA_VLINEICLR (1 << 5)
467#define MGA_IEN 0x1e1c
468# define MGA_SOFTRAPIEN (1 << 0)
469# define MGA_VLINEIEN (1 << 5)
470
471#define MGA_LEN 0x1c5c
472
473#define MGA_MACCESS 0x1c04
474
475#define MGA_PITCH 0x1c8c
476#define MGA_PLNWT 0x1c1c
477#define MGA_PRIMADDRESS 0x1e58
478# define MGA_DMA_GENERAL (0 << 0)
479# define MGA_DMA_BLIT (1 << 0)
480# define MGA_DMA_VECTOR (2 << 0)
481# define MGA_DMA_VERTEX (3 << 0)
482#define MGA_PRIMEND 0x1e5c
483# define MGA_PRIMNOSTART (1 << 0)
484# define MGA_PAGPXFER (1 << 1)
485#define MGA_PRIMPTR 0x1e50
486# define MGA_PRIMPTREN0 (1 << 0)
487# define MGA_PRIMPTREN1 (1 << 1)
488
489#define MGA_RST 0x1e40
490# define MGA_SOFTRESET (1 << 0)
491# define MGA_SOFTEXTRST (1 << 1)
492
493#define MGA_SECADDRESS 0x2c40
494#define MGA_SECEND 0x2c44
495#define MGA_SETUPADDRESS 0x2cd0
496#define MGA_SETUPEND 0x2cd4
497#define MGA_SGN 0x1c58
498#define MGA_SOFTRAP 0x2c48
499#define MGA_SRCORG 0x2cb4
500# define MGA_SRMMAP_MASK (1 << 0)
501# define MGA_SRCMAP_FB (0 << 0)
502# define MGA_SRCMAP_SYSMEM (1 << 0)
503# define MGA_SRCACC_MASK (1 << 1)
504# define MGA_SRCACC_PCI (0 << 1)
505# define MGA_SRCACC_AGP (1 << 1)
506#define MGA_STATUS 0x1e14
507# define MGA_SOFTRAPEN (1 << 0)
508# define MGA_VSYNCPEN (1 << 4)
509# define MGA_VLINEPEN (1 << 5)
510# define MGA_DWGENGSTS (1 << 16)
511# define MGA_ENDPRDMASTS (1 << 17)
512#define MGA_STENCIL 0x2cc8
513#define MGA_STENCILCTL 0x2ccc
514
515#define MGA_TDUALSTAGE0 0x2cf8
516#define MGA_TDUALSTAGE1 0x2cfc
517#define MGA_TEXBORDERCOL 0x2c5c
518#define MGA_TEXCTL 0x2c30
519#define MGA_TEXCTL2 0x2c3c
520# define MGA_DUALTEX (1 << 7)
521# define MGA_G400_TC2_MAGIC (1 << 15)
522# define MGA_MAP1_ENABLE (1 << 31)
523#define MGA_TEXFILTER 0x2c58
524#define MGA_TEXHEIGHT 0x2c2c
525#define MGA_TEXORG 0x2c24
526# define MGA_TEXORGMAP_MASK (1 << 0)
527# define MGA_TEXORGMAP_FB (0 << 0)
528# define MGA_TEXORGMAP_SYSMEM (1 << 0)
529# define MGA_TEXORGACC_MASK (1 << 1)
530# define MGA_TEXORGACC_PCI (0 << 1)
531# define MGA_TEXORGACC_AGP (1 << 1)
532#define MGA_TEXORG1 0x2ca4
533#define MGA_TEXORG2 0x2ca8
534#define MGA_TEXORG3 0x2cac
535#define MGA_TEXORG4 0x2cb0
536#define MGA_TEXTRANS 0x2c34
537#define MGA_TEXTRANSHIGH 0x2c38
538#define MGA_TEXWIDTH 0x2c28
539
540#define MGA_WACCEPTSEQ 0x1dd4
541#define MGA_WCODEADDR 0x1e6c
542#define MGA_WFLAG 0x1dc4
543#define MGA_WFLAG1 0x1de0
544#define MGA_WFLAGNB 0x1e64
545#define MGA_WFLAGNB1 0x1e08
546#define MGA_WGETMSB 0x1dc8
547#define MGA_WIADDR 0x1dc0
548#define MGA_WIADDR2 0x1dd8
549# define MGA_WMODE_SUSPEND (0 << 0)
550# define MGA_WMODE_RESUME (1 << 0)
551# define MGA_WMODE_JUMP (2 << 0)
552# define MGA_WMODE_START (3 << 0)
553# define MGA_WAGP_ENABLE (1 << 2)
554#define MGA_WMISC 0x1e70
555# define MGA_WUCODECACHE_ENABLE (1 << 0)
556# define MGA_WMASTER_ENABLE (1 << 1)
557# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
558#define MGA_WVRTXSZ 0x1dcc
559
560#define MGA_YBOT 0x1c9c
561#define MGA_YDST 0x1c90
562#define MGA_YDSTLEN 0x1c88
563#define MGA_YDSTORG 0x1c94
564#define MGA_YTOP 0x1c98
565
566#define MGA_ZORG 0x1c0c
567
568/* This finishes the current batch of commands
569 */
570#define MGA_EXEC 0x0100
571
572/* AGP PLL encoding (for G200 only).
573 */
574#define MGA_AGP_PLL 0x1e4c
575# define MGA_AGP2XPLL_DISABLE (0 << 0)
576# define MGA_AGP2XPLL_ENABLE (1 << 0)
577
578/* Warp registers
579 */
580#define MGA_WR0 0x2d00
581#define MGA_WR1 0x2d04
582#define MGA_WR2 0x2d08
583#define MGA_WR3 0x2d0c
584#define MGA_WR4 0x2d10
585#define MGA_WR5 0x2d14
586#define MGA_WR6 0x2d18
587#define MGA_WR7 0x2d1c
588#define MGA_WR8 0x2d20
589#define MGA_WR9 0x2d24
590#define MGA_WR10 0x2d28
591#define MGA_WR11 0x2d2c
592#define MGA_WR12 0x2d30
593#define MGA_WR13 0x2d34
594#define MGA_WR14 0x2d38
595#define MGA_WR15 0x2d3c
596#define MGA_WR16 0x2d40
597#define MGA_WR17 0x2d44
598#define MGA_WR18 0x2d48
599#define MGA_WR19 0x2d4c
600#define MGA_WR20 0x2d50
601#define MGA_WR21 0x2d54
602#define MGA_WR22 0x2d58
603#define MGA_WR23 0x2d5c
604#define MGA_WR24 0x2d60
605#define MGA_WR25 0x2d64
606#define MGA_WR26 0x2d68
607#define MGA_WR27 0x2d6c
608#define MGA_WR28 0x2d70
609#define MGA_WR29 0x2d74
610#define MGA_WR30 0x2d78
611#define MGA_WR31 0x2d7c
612#define MGA_WR32 0x2d80
613#define MGA_WR33 0x2d84
614#define MGA_WR34 0x2d88
615#define MGA_WR35 0x2d8c
616#define MGA_WR36 0x2d90
617#define MGA_WR37 0x2d94
618#define MGA_WR38 0x2d98
619#define MGA_WR39 0x2d9c
620#define MGA_WR40 0x2da0
621#define MGA_WR41 0x2da4
622#define MGA_WR42 0x2da8
623#define MGA_WR43 0x2dac
624#define MGA_WR44 0x2db0
625#define MGA_WR45 0x2db4
626#define MGA_WR46 0x2db8
627#define MGA_WR47 0x2dbc
628#define MGA_WR48 0x2dc0
629#define MGA_WR49 0x2dc4
630#define MGA_WR50 0x2dc8
631#define MGA_WR51 0x2dcc
632#define MGA_WR52 0x2dd0
633#define MGA_WR53 0x2dd4
634#define MGA_WR54 0x2dd8
635#define MGA_WR55 0x2ddc
636#define MGA_WR56 0x2de0
637#define MGA_WR57 0x2de4
638#define MGA_WR58 0x2de8
639#define MGA_WR59 0x2dec
640#define MGA_WR60 0x2df0
641#define MGA_WR61 0x2df4
642#define MGA_WR62 0x2df8
643#define MGA_WR63 0x2dfc
644# define MGA_G400_WR_MAGIC (1 << 6)
645# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */
646
647#define MGA_ILOAD_ALIGN 64
648#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1)
649
650#define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \
651 MGA_ATYPE_I | \
652 MGA_ZMODE_NOZCMP | \
653 MGA_ARZERO | \
654 MGA_SGNZERO | \
655 MGA_BOP_SRC | \
656 (15 << MGA_TRANS_SHIFT))
657
658#define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \
659 MGA_ZMODE_NOZCMP | \
660 MGA_SOLID | \
661 MGA_ARZERO | \
662 MGA_SGNZERO | \
663 MGA_SHIFTZERO | \
664 MGA_BOP_SRC | \
665 (0 << MGA_TRANS_SHIFT) | \
666 MGA_BLTMOD_BMONOLEF | \
667 MGA_TRANSC | \
668 MGA_CLIPDIS)
669
670#define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \
671 MGA_ATYPE_RPL | \
672 MGA_SGNZERO | \
673 MGA_SHIFTZERO | \
674 MGA_BOP_SRC | \
675 (0 << MGA_TRANS_SHIFT) | \
676 MGA_BLTMOD_BFCOL | \
677 MGA_CLIPDIS)
678
679/* Simple idle test.
680 */
681static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
682{
683 u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
684 return (status == MGA_ENDPRDMASTS);
685}
686
687#endif
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
new file mode 100644
index 000000000000..30d00478ddee
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -0,0 +1,231 @@
1/**
2 * \file mga_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the MGA DRM.
5 *
6 * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Egbert Eich 2003,2004
11 * Copyright (C) Dave Airlie 2005
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
29 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33#include <linux/compat.h>
34
35#include "drmP.h"
36#include "drm.h"
37#include "mga_drm.h"
38
/* 32-bit userspace view of drm_mga_init_t: pointer-sized fields are
 * fixed at u32.  Field order must match the 32-bit ABI exactly. */
typedef struct drm32_mga_init {
	int func;
	u32 sarea_priv_offset;
	int chipset;
	int sgram;
	unsigned int maccess;
	unsigned int fb_cpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_cpp;
	unsigned int depth_offset, depth_pitch;
	unsigned int texture_offset[MGA_NR_TEX_HEAPS];
	unsigned int texture_size[MGA_NR_TEX_HEAPS];
	u32 fb_offset;
	u32 mmio_offset;
	u32 status_offset;
	u32 warp_offset;
	u32 primary_offset;
	u32 buffers_offset;
} drm_mga_init32_t;
59
/* 32-bit compat handler for DRM_IOCTL_MGA_INIT: copies the packed
 * 32-bit drm_mga_init32_t in from userspace, rebuilds it as a native
 * drm_mga_init_t in compat-allocated user space, and forwards it to
 * the regular ioctl path.
 */
static int compat_mga_init(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	drm_mga_init32_t init32;	/* 32-bit layout, copied from userspace */
	drm_mga_init_t __user *init;	/* native layout, lives in user space */
	int err = 0, i;

	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
		return -EFAULT;

	/* Field-by-field widening copy; any fault aborts with -EFAULT. */
	init = compat_alloc_user_space(sizeof(*init));
	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
	    || __put_user(init32.func, &init->func)
	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
	    || __put_user(init32.chipset, &init->chipset)
	    || __put_user(init32.sgram, &init->sgram)
	    || __put_user(init32.maccess, &init->maccess)
	    || __put_user(init32.fb_cpp, &init->fb_cpp)
	    || __put_user(init32.front_offset, &init->front_offset)
	    || __put_user(init32.front_pitch, &init->front_pitch)
	    || __put_user(init32.back_offset, &init->back_offset)
	    || __put_user(init32.back_pitch, &init->back_pitch)
	    || __put_user(init32.depth_cpp, &init->depth_cpp)
	    || __put_user(init32.depth_offset, &init->depth_offset)
	    || __put_user(init32.depth_pitch, &init->depth_pitch)
	    || __put_user(init32.fb_offset, &init->fb_offset)
	    || __put_user(init32.mmio_offset, &init->mmio_offset)
	    || __put_user(init32.status_offset, &init->status_offset)
	    || __put_user(init32.warp_offset, &init->warp_offset)
	    || __put_user(init32.primary_offset, &init->primary_offset)
	    || __put_user(init32.buffers_offset, &init->buffers_offset))
		return -EFAULT;

	/* The texture heap arrays accumulate faults in err instead of
	 * short-circuiting; any failure still maps to -EFAULT below. */
	for (i = 0; i < MGA_NR_TEX_HEAPS; i++) {
		err |=
		    __put_user(init32.texture_offset[i],
			       &init->texture_offset[i]);
		err |=
		    __put_user(init32.texture_size[i], &init->texture_size[i]);
	}
	if (err)
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_MGA_INIT, (unsigned long)init);
}
106
/* 32-bit userspace view of drm_mga_getparam_t: the result pointer is
 * stored as a u32 and must be widened before use. */
typedef struct drm_mga_getparam32 {
	int param;	/* which parameter to query */
	u32 value;	/* 32-bit user pointer that receives the result */
} drm_mga_getparam32_t;
111
/* 32-bit compat handler for DRM_IOCTL_MGA_GETPARAM: the 32-bit struct
 * stores its value pointer as a u32, which is widened into a real user
 * pointer before forwarding to the native ioctl.
 */
static int compat_mga_getparam(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_mga_getparam32_t getparam32;
	drm_mga_getparam_t __user *getparam;

	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
		return -EFAULT;

	getparam = compat_alloc_user_space(sizeof(*getparam));
	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
	    || __put_user(getparam32.param, &getparam->param)
	    /* widen the 32-bit value handle back into a user pointer */
	    || __put_user((void __user *)(unsigned long)getparam32.value,
			  &getparam->value))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
131
/* 32-bit userspace view of drm_mga_dma_bootstrap_t.
 * NOTE(review): the struct tag reads "drm_bootstrap32" while the
 * typedef reads "dma_bootstrap32_t" — likely a typo in the tag;
 * harmless as long as nothing references the tag, but verify. */
typedef struct drm_mga_drm_bootstrap32 {
	u32 texture_handle;
	u32 texture_size;
	u32 primary_size;
	u32 secondary_bin_count;
	u32 secondary_bin_size;
	u32 agp_mode;
	u8 agp_size;
} drm_mga_dma_bootstrap32_t;
141
/* 32-bit compat handler for DRM_IOCTL_MGA_DMA_BOOTSTRAP.  Unlike the
 * other handlers this ioctl is bidirectional: the 32-bit struct is
 * widened and forwarded, and the kernel's updated values are then
 * copied back out to the 32-bit caller.
 */
static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	drm_mga_dma_bootstrap32_t dma_bootstrap32;
	drm_mga_dma_bootstrap_t __user *dma_bootstrap;
	int err;

	if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
			   sizeof(dma_bootstrap32)))
		return -EFAULT;

	/* in: widen every field into the native compat-space copy */
	dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
	if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
	    || __put_user(dma_bootstrap32.texture_handle,
			  &dma_bootstrap->texture_handle)
	    || __put_user(dma_bootstrap32.texture_size,
			  &dma_bootstrap->texture_size)
	    || __put_user(dma_bootstrap32.primary_size,
			  &dma_bootstrap->primary_size)
	    || __put_user(dma_bootstrap32.secondary_bin_count,
			  &dma_bootstrap->secondary_bin_count)
	    || __put_user(dma_bootstrap32.secondary_bin_size,
			  &dma_bootstrap->secondary_bin_size)
	    || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
	    || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
		return -EFAULT;

	err = drm_ioctl(file->f_path.dentry->d_inode, file,
			DRM_IOCTL_MGA_DMA_BOOTSTRAP,
			(unsigned long)dma_bootstrap);
	if (err)
		return err;

	/* out: read the fields the kernel may have updated */
	if (__get_user(dma_bootstrap32.texture_handle,
		       &dma_bootstrap->texture_handle)
	    || __get_user(dma_bootstrap32.texture_size,
			  &dma_bootstrap->texture_size)
	    || __get_user(dma_bootstrap32.primary_size,
			  &dma_bootstrap->primary_size)
	    || __get_user(dma_bootstrap32.secondary_bin_count,
			  &dma_bootstrap->secondary_bin_count)
	    || __get_user(dma_bootstrap32.secondary_bin_size,
			  &dma_bootstrap->secondary_bin_size)
	    || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
	    || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
		return -EFAULT;

	if (copy_to_user((void __user *)arg, &dma_bootstrap32,
			 sizeof(dma_bootstrap32)))
		return -EFAULT;

	return 0;
}
195
196drm_ioctl_compat_t *mga_compat_ioctls[] = {
197 [DRM_MGA_INIT] = compat_mga_init,
198 [DRM_MGA_GETPARAM] = compat_mga_getparam,
199 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
200};
201
202/**
203 * Called whenever a 32-bit process running under a 64-bit kernel
204 * performs an ioctl on /dev/dri/card<n>.
205 *
206 * \param filp file pointer.
207 * \param cmd command.
208 * \param arg user argument.
209 * \return zero on success or negative number on failure.
210 */
211long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
212{
213 unsigned int nr = DRM_IOCTL_NR(cmd);
214 drm_ioctl_compat_t *fn = NULL;
215 int ret;
216
217 if (nr < DRM_COMMAND_BASE)
218 return drm_compat_ioctl(filp, cmd, arg);
219
220 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
221 fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
222
223 lock_kernel(); /* XXX for now */
224 if (fn != NULL)
225 ret = (*fn) (filp, cmd, arg);
226 else
227 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
228 unlock_kernel();
229
230 return ret;
231}
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
new file mode 100644
index 000000000000..9302cb8f0f83
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -0,0 +1,148 @@
1/* mga_irq.c -- IRQ handling for radeon -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Eric Anholt <anholt@FreeBSD.org>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "mga_drm.h"
36#include "mga_drv.h"
37
38irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39{
40 struct drm_device *dev = (struct drm_device *) arg;
41 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
42 int status;
43 int handled = 0;
44
45 status = MGA_READ(MGA_STATUS);
46
47 /* VBLANK interrupt */
48 if (status & MGA_VLINEPEN) {
49 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
50 atomic_inc(&dev->vbl_received);
51 DRM_WAKEUP(&dev->vbl_queue);
52 drm_vbl_send_signals(dev);
53 handled = 1;
54 }
55
56 /* SOFTRAP interrupt */
57 if (status & MGA_SOFTRAPEN) {
58 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
59 const u32 prim_end = MGA_READ(MGA_PRIMEND);
60
61 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
62
63 /* In addition to clearing the interrupt-pending bit, we
64 * have to write to MGA_PRIMEND to re-start the DMA operation.
65 */
66 if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
67 MGA_WRITE(MGA_PRIMEND, prim_end);
68 }
69
70 atomic_inc(&dev_priv->last_fence_retired);
71 DRM_WAKEUP(&dev_priv->fence_queue);
72 handled = 1;
73 }
74
75 if (handled) {
76 return IRQ_HANDLED;
77 }
78 return IRQ_NONE;
79}
80
81int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
82{
83 unsigned int cur_vblank;
84 int ret = 0;
85
86 /* Assume that the user has missed the current sequence number
87 * by about a day rather than she wants to wait for years
88 * using vertical blanks...
89 */
90 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
91 (((cur_vblank = atomic_read(&dev->vbl_received))
92 - *sequence) <= (1 << 23)));
93
94 *sequence = cur_vblank;
95
96 return ret;
97}
98
99int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
100{
101 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
102 unsigned int cur_fence;
103 int ret = 0;
104
105 /* Assume that the user has missed the current sequence number
106 * by about a day rather than she wants to wait for years
107 * using fences.
108 */
109 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
110 (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
111 - *sequence) <= (1 << 23)));
112
113 *sequence = cur_fence;
114
115 return ret;
116}
117
118void mga_driver_irq_preinstall(struct drm_device * dev)
119{
120 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
121
122 /* Disable *all* interrupts */
123 MGA_WRITE(MGA_IEN, 0);
124 /* Clear bits if they're already high */
125 MGA_WRITE(MGA_ICLEAR, ~0);
126}
127
128void mga_driver_irq_postinstall(struct drm_device * dev)
129{
130 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
131
132 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
133
134 /* Turn on vertical blank interrupt and soft trap interrupt. */
135 MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
136}
137
138void mga_driver_irq_uninstall(struct drm_device * dev)
139{
140 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
141 if (!dev_priv)
142 return;
143
144 /* Disable *all* interrupts */
145 MGA_WRITE(MGA_IEN, 0);
146
147 dev->irq_enabled = 0;
148}
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
new file mode 100644
index 000000000000..d3f8aade07b3
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -0,0 +1,1104 @@
1/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
2 * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 * Rewritten by:
32 * Gareth Hughes <gareth@valinux.com>
33 */
34
35#include "drmP.h"
36#include "drm.h"
37#include "mga_drm.h"
38#include "mga_drv.h"
39
40/* ================================================================
41 * DMA hardware state programming functions
42 */
43
/* Emit a clip rectangle into the primary DMA stream.
 *
 * Translates @box into CXBNDRY/YTOP/YBOT register writes so that
 * subsequent drawing commands are clipped to it.  Y offsets are scaled
 * by the front buffer pitch.
 */
static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
			       struct drm_clip_rect * box)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	unsigned int pitch = dev_priv->front_pitch;
	DMA_LOCALS;

	BEGIN_DMA(2);

	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
	 */
	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000,
			  MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000);
	}
	/* CXBNDRY packs the right edge in the high half-word and the left
	 * edge in the low one; x2/y2 are exclusive, hence the "- 1".
	 */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
		  MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);

	ADVANCE_DMA();
}
68
/* Upload the G200 drawing-context registers (destination, plane mask,
 * draw control, alpha/fog and depth origin) from the SAREA copy.
 */
static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	DMA_LOCALS;

	BEGIN_DMA(3);

	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
		  MGA_MACCESS, ctx->maccess,
		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);

	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
		  MGA_FOGCOL, ctx->fogcolor,
		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);

	/* DMAPAD entries pad the last block out to four register writes. */
	DMA_BLOCK(MGA_FCOL, ctx->fcol,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}
91
/* Upload the G400 drawing-context registers from the SAREA copy.
 * Compared to the G200 variant this additionally programs the dual
 * texture stages and the stencil registers.
 */
static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	DMA_LOCALS;

	BEGIN_DMA(4);

	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
		  MGA_MACCESS, ctx->maccess,
		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);

	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
		  MGA_FOGCOL, ctx->fogcolor,
		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);

	DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
		  MGA_TDUALSTAGE0, ctx->tdualstage0,
		  MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol);

	/* DMAPAD entries pad the last block out to four register writes. */
	DMA_BLOCK(MGA_STENCIL, ctx->stencil,
		  MGA_STENCILCTL, ctx->stencilctl,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}
118
/* Upload texture unit 0 state for the G200 from the SAREA copy,
 * including the WARP registers (WR24/WR34) that mirror the texture
 * width and height.
 */
static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
	DMA_LOCALS;

	BEGIN_DMA(4);

	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth);

	DMA_BLOCK(MGA_WR34, tex->texheight,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}
146
/* Upload texture unit 0 state for the G400 from the SAREA copy.
 * Several WARP registers (WR49..WR62) are loaded alongside; the
 * MGA_G400_* magic constants are G400-specific required values.
 */
static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
	DMA_LOCALS;

	BEGIN_DMA(6);

	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);

	DMA_BLOCK(MGA_WR57, 0x00000000,
		  MGA_WR53, 0x00000000,
		  MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC);

	DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
		  MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
		  MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
		  MGA_DMAPAD, 0x00000000);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff);

	ADVANCE_DMA();
}
186
/* Upload texture unit 1 state for the G400 from the SAREA copy.
 * Only meaningful when a dual-texture (MGA_T2) WARP pipe is active;
 * the final block restores TEXCTL2 for unit 0.
 */
static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
	DMA_LOCALS;

	BEGIN_DMA(5);

	DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
				MGA_MAP1_ENABLE |
				MGA_G400_TC2_MAGIC),
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);

	DMA_BLOCK(MGA_WR57, 0x00000000,
		  MGA_WR53, 0x00000000,
		  MGA_WR61, 0x00000000,
		  MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);

	DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff,
		  MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);

	ADVANCE_DMA();
}
225
/* Select and start the G200 WARP vertex-setup pipe named in the SAREA:
 * suspend the WARP engine, program the vertex layout, then restart it
 * at the microcode address for the requested pipe.
 */
static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->warp_pipe;
	DMA_LOCALS;

	BEGIN_DMA(3);

	DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
		  MGA_WVRTXSZ, 0x00000007,
		  MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000);

	DMA_BLOCK(MGA_WR25, 0x00000100,
		  MGA_WR34, 0x00000000,
		  MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);

	/* Padding required due to hardware bug.
	 */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
			       MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}
252
/* Select and start the G400 WARP vertex-setup pipe named in the SAREA.
 *
 * Dual-texture (MGA_T2) pipes use a larger vertex layout.  When
 * switching *away* from a dual-texture pipe, a dummy draw is emitted
 * first to flush the old pipe before the new layout is programmed.
 */
static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->warp_pipe;
	DMA_LOCALS;

	BEGIN_DMA(10);

	DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	if (pipe & MGA_T2) {
		DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x1e000000);
	} else {
		if (dev_priv->warp_pipe & MGA_T2) {
			/* Flush the WARP pipe */
			DMA_BLOCK(MGA_YDST, 0x00000000,
				  MGA_FXLEFT, 0x00000000,
				  MGA_FXRIGHT, 0x00000001,
				  MGA_DWGCTL, MGA_DWGCTL_FLUSH);

			DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
				  MGA_DWGSYNC, 0x00007000,
				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
				  MGA_LEN + MGA_EXEC, 0x00000000);

			DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
						MGA_G400_TC2_MAGIC),
				  MGA_LEN + MGA_EXEC, 0x00000000,
				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
				  MGA_DMAPAD, 0x00000000);
		}

		DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x18000000);
	}

	DMA_BLOCK(MGA_WFLAG, 0x00000000,
		  MGA_WFLAG1, 0x00000000,
		  MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000);

	DMA_BLOCK(MGA_WR49, 0x00000000,	/* tex0 */
		  MGA_WR57, 0x00000000,	/* tex0 */
		  MGA_WR53, 0x00000000,	/* tex1 */
		  MGA_WR61, 0x00000000);	/* tex1 */

	DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,	/* tex0 width */
		  MGA_WR62, MGA_G400_WR_MAGIC,	/* tex0 height */
		  MGA_WR52, MGA_G400_WR_MAGIC,	/* tex1 width */
		  MGA_WR60, MGA_G400_WR_MAGIC);	/* tex1 height */

	/* Padding required due to hardware bug */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
				MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}
329
330static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
331{
332 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
333 unsigned int dirty = sarea_priv->dirty;
334
335 if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
336 mga_g200_emit_pipe(dev_priv);
337 dev_priv->warp_pipe = sarea_priv->warp_pipe;
338 }
339
340 if (dirty & MGA_UPLOAD_CONTEXT) {
341 mga_g200_emit_context(dev_priv);
342 sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
343 }
344
345 if (dirty & MGA_UPLOAD_TEX0) {
346 mga_g200_emit_tex0(dev_priv);
347 sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
348 }
349}
350
351static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
352{
353 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
354 unsigned int dirty = sarea_priv->dirty;
355 int multitex = sarea_priv->warp_pipe & MGA_T2;
356
357 if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
358 mga_g400_emit_pipe(dev_priv);
359 dev_priv->warp_pipe = sarea_priv->warp_pipe;
360 }
361
362 if (dirty & MGA_UPLOAD_CONTEXT) {
363 mga_g400_emit_context(dev_priv);
364 sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
365 }
366
367 if (dirty & MGA_UPLOAD_TEX0) {
368 mga_g400_emit_tex0(dev_priv);
369 sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
370 }
371
372 if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
373 mga_g400_emit_tex1(dev_priv);
374 sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
375 }
376}
377
378/* ================================================================
379 * SAREA state verification
380 */
381
382/* Disallow all write destinations except the front and backbuffer.
383 */
384static int mga_verify_context(drm_mga_private_t * dev_priv)
385{
386 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
387 drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
388
389 if (ctx->dstorg != dev_priv->front_offset &&
390 ctx->dstorg != dev_priv->back_offset) {
391 DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
392 ctx->dstorg, dev_priv->front_offset,
393 dev_priv->back_offset);
394 ctx->dstorg = 0;
395 return -EINVAL;
396 }
397
398 return 0;
399}
400
401/* Disallow texture reads from PCI space.
402 */
403static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
404{
405 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
406 drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
407 unsigned int org;
408
409 org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
410
411 if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
412 DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
413 tex->texorg = 0;
414 return -EINVAL;
415 }
416
417 return 0;
418}
419
420static int mga_verify_state(drm_mga_private_t * dev_priv)
421{
422 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
423 unsigned int dirty = sarea_priv->dirty;
424 int ret = 0;
425
426 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
427 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
428
429 if (dirty & MGA_UPLOAD_CONTEXT)
430 ret |= mga_verify_context(dev_priv);
431
432 if (dirty & MGA_UPLOAD_TEX0)
433 ret |= mga_verify_tex(dev_priv, 0);
434
435 if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
436 if (dirty & MGA_UPLOAD_TEX1)
437 ret |= mga_verify_tex(dev_priv, 1);
438
439 if (dirty & MGA_UPLOAD_PIPE)
440 ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES);
441 } else {
442 if (dirty & MGA_UPLOAD_PIPE)
443 ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES);
444 }
445
446 return (ret == 0);
447}
448
449static int mga_verify_iload(drm_mga_private_t * dev_priv,
450 unsigned int dstorg, unsigned int length)
451{
452 if (dstorg < dev_priv->texture_offset ||
453 dstorg + length > (dev_priv->texture_offset +
454 dev_priv->texture_size)) {
455 DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
456 return -EINVAL;
457 }
458
459 if (length & MGA_ILOAD_MASK) {
460 DRM_ERROR("*** bad iload length: 0x%x\n",
461 length & MGA_ILOAD_MASK);
462 return -EINVAL;
463 }
464
465 return 0;
466}
467
468static int mga_verify_blit(drm_mga_private_t * dev_priv,
469 unsigned int srcorg, unsigned int dstorg)
470{
471 if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
472 (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
473 DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
474 return -EINVAL;
475 }
476 return 0;
477}
478
479/* ================================================================
480 *
481 */
482
/* Emit the DMA commands for a buffer clear.
 *
 * For every clip rect in the SAREA, a fill is emitted for each of the
 * front/back/depth buffers selected in clear->flags, then DWGCTL and
 * PLNWT are restored from the saved context state.
 */
static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	int i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_DMA(1);

	/* Drain outstanding drawing before touching PLNWT/FCOL. */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	ADVANCE_DMA();

	for (i = 0; i < nbox; i++) {
		struct drm_clip_rect *box = &pbox[i];
		u32 height = box->y2 - box->y1;

		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);

		if (clear->flags & MGA_FRONT) {
			BEGIN_DMA(2);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_PLNWT, clear->color_mask,
				  MGA_YDSTLEN, (box->y1 << 16) | height,
				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_FCOL, clear->clear_color,
				  MGA_DSTORG, dev_priv->front_offset,
				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);

			ADVANCE_DMA();
		}

		if (clear->flags & MGA_BACK) {
			BEGIN_DMA(2);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_PLNWT, clear->color_mask,
				  MGA_YDSTLEN, (box->y1 << 16) | height,
				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_FCOL, clear->clear_color,
				  MGA_DSTORG, dev_priv->back_offset,
				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);

			ADVANCE_DMA();
		}

		if (clear->flags & MGA_DEPTH) {
			BEGIN_DMA(2);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_PLNWT, clear->depth_mask,
				  MGA_YDSTLEN, (box->y1 << 16) | height,
				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_FCOL, clear->clear_depth,
				  MGA_DSTORG, dev_priv->depth_offset,
				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);

			ADVANCE_DMA();
		}

	}

	BEGIN_DMA(1);

	/* Force reset of DWGCTL */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);

	ADVANCE_DMA();

	FLUSH_DMA();
}
570
/* Emit the DMA commands for a back-to-front buffer swap (screen-space
 * copy of every clip rect), recording where the swap sits in the
 * primary stream so clients can track frame completion.
 */
static void mga_dma_dispatch_swap(struct drm_device * dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	int i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	/* Publish the stream position of this swap in the SAREA. */
	sarea_priv->last_frame.head = dev_priv->prim.tail;
	sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;

	BEGIN_DMA(4 + nbox);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
		  MGA_MACCESS, dev_priv->maccess,
		  MGA_SRCORG, dev_priv->back_offset,
		  MGA_AR5, dev_priv->front_pitch);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);

	/* One copy rectangle per clip rect. */
	for (i = 0; i < nbox; i++) {
		struct drm_clip_rect *box = &pbox[i];
		u32 height = box->y2 - box->y1;
		u32 start = box->y1 * dev_priv->front_pitch;

		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);

		DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
			  MGA_AR3, start + box->x1,
			  MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
			  MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
	}

	/* Restore PLNWT/SRCORG/DWGCTL from the saved context state. */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, ctx->plnwt,
		  MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl);

	ADVANCE_DMA();

	FLUSH_DMA();

	DRM_DEBUG("... done.\n");
}
624
/* Execute a client vertex buffer through the primary DMA stream.
 *
 * Dirty state is emitted first, then the buffer is executed once per
 * clip rect.  Note the do/while: with nbox == 0 the buffer is still
 * dispatched exactly once, unclipped.  Buffers flagged for discard are
 * aged and returned to the freelist afterwards.
 */
static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 address = (u32) buf->bus_address;
	u32 length = (u32) buf->used;
	int i = 0;
	DMA_LOCALS;
	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);

	if (buf->used) {
		buf_priv->dispatched = 1;

		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);

		do {
			if (i < sarea_priv->nbox) {
				mga_emit_clip_rect(dev_priv,
						   &sarea_priv->boxes[i]);
			}

			BEGIN_DMA(1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_DMAPAD, 0x00000000,
				  MGA_SECADDRESS, (address |
						   MGA_DMA_VERTEX),
				  MGA_SECEND, ((address + length) |
					       dev_priv->dma_access));

			ADVANCE_DMA();
		} while (++i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		AGE_BUFFER(buf_priv);
		buf->pending = 0;
		buf->used = 0;
		buf_priv->dispatched = 0;

		mga_freelist_put(dev, buf);
	}

	FLUSH_DMA();
}
671
/* Execute the [start, end) byte range of a client index buffer via the
 * SETUP engine, once per clip rect (or once unclipped when nbox == 0 —
 * see the do/while).  Buffers flagged for discard are aged and returned
 * to the freelist afterwards.
 */
static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
				     unsigned int start, unsigned int end)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 address = (u32) buf->bus_address;
	int i = 0;
	DMA_LOCALS;
	DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);

	if (start != end) {
		buf_priv->dispatched = 1;

		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);

		do {
			if (i < sarea_priv->nbox) {
				mga_emit_clip_rect(dev_priv,
						   &sarea_priv->boxes[i]);
			}

			BEGIN_DMA(1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_DMAPAD, 0x00000000,
				  MGA_SETUPADDRESS, address + start,
				  MGA_SETUPEND, ((address + end) |
						 dev_priv->dma_access));

			ADVANCE_DMA();
		} while (++i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		AGE_BUFFER(buf_priv);
		buf->pending = 0;
		buf->used = 0;
		buf_priv->dispatched = 0;

		mga_freelist_put(dev, buf);
	}

	FLUSH_DMA();
}
717
/* This copies a 64 byte aligned agp region to the framebuffer with a
 * standard blit; the ioctl needs to do checking (mga_verify_iload())
 * before calling in.  The buffer is treated as a 64-pixel-wide image
 * of length/64 rows, copied with the 2D engine, then aged and returned
 * to the freelist.
 */
static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
				   unsigned int dstorg, unsigned int length)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
	u32 srcorg =
	    buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
	u32 y2;
	DMA_LOCALS;
	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);

	/* Row count for a 64-byte-wide copy. */
	y2 = length / 64;

	BEGIN_DMA(5);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	DMA_BLOCK(MGA_DSTORG, dstorg,
		  MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64);

	DMA_BLOCK(MGA_PITCH, 64,
		  MGA_PLNWT, 0xffffffff,
		  MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY);

	DMA_BLOCK(MGA_AR0, 63,
		  MGA_AR3, 0,
		  MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2);

	/* Restore SRCORG/PITCH and resynchronize. */
	DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
		  MGA_SRCORG, dev_priv->front_offset,
		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000);

	ADVANCE_DMA();

	AGE_BUFFER(buf_priv);

	buf->pending = 0;
	buf->used = 0;
	buf_priv->dispatched = 0;

	mga_freelist_put(dev, buf);

	FLUSH_DMA();
}
768
/* Emit the DMA commands for a screen-to-screen blit described by
 * @blit, clipped to every rect in the SAREA.  DWGCTL/PLNWT/PITCH are
 * restored from the saved context state afterwards.
 */
static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	u32 scandir = 0, i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_DMA(4 + nbox);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
		  MGA_PLNWT, blit->planemask,
		  MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg);

	DMA_BLOCK(MGA_SGN, scandir,
		  MGA_MACCESS, dev_priv->maccess,
		  MGA_AR5, blit->ydir * blit->src_pitch,
		  MGA_PITCH, blit->dst_pitch);

	for (i = 0; i < nbox; i++) {
		int srcx = pbox[i].x1 + blit->delta_sx;
		int srcy = pbox[i].y1 + blit->delta_sy;
		int dstx = pbox[i].x1 + blit->delta_dx;
		int dsty = pbox[i].y1 + blit->delta_dy;
		int h = pbox[i].y2 - pbox[i].y1;
		int w = pbox[i].x2 - pbox[i].x1 - 1;
		int start;

		/* A bottom-up (ydir == -1) copy reads rows in reverse. */
		if (blit->ydir == -1) {
			srcy = blit->height - srcy - 1;
		}

		start = srcy * blit->src_pitch + srcx;

		DMA_BLOCK(MGA_AR0, start + w,
			  MGA_AR3, start,
			  MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
			  MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
	}

	/* Do something to flush AGP?
	 */

	/* Force reset of DWGCTL */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, ctx->plnwt,
		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl);

	ADVANCE_DMA();
}
826
827/* ================================================================
828 *
829 */
830
831static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
832{
833 drm_mga_private_t *dev_priv = dev->dev_private;
834 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
835 drm_mga_clear_t *clear = data;
836
837 LOCK_TEST_WITH_RETURN(dev, file_priv);
838
839 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
840 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
841
842 WRAP_TEST_WITH_RETURN(dev_priv);
843
844 mga_dma_dispatch_clear(dev, clear);
845
846 /* Make sure we restore the 3D state next time.
847 */
848 dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
849
850 return 0;
851}
852
853static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
854{
855 drm_mga_private_t *dev_priv = dev->dev_private;
856 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
857
858 LOCK_TEST_WITH_RETURN(dev, file_priv);
859
860 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
861 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
862
863 WRAP_TEST_WITH_RETURN(dev_priv);
864
865 mga_dma_dispatch_swap(dev);
866
867 /* Make sure we restore the 3D state next time.
868 */
869 dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
870
871 return 0;
872}
873
874static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
875{
876 drm_mga_private_t *dev_priv = dev->dev_private;
877 struct drm_device_dma *dma = dev->dma;
878 struct drm_buf *buf;
879 drm_mga_buf_priv_t *buf_priv;
880 drm_mga_vertex_t *vertex = data;
881
882 LOCK_TEST_WITH_RETURN(dev, file_priv);
883
884 if (vertex->idx < 0 || vertex->idx > dma->buf_count)
885 return -EINVAL;
886 buf = dma->buflist[vertex->idx];
887 buf_priv = buf->dev_private;
888
889 buf->used = vertex->used;
890 buf_priv->discard = vertex->discard;
891
892 if (!mga_verify_state(dev_priv)) {
893 if (vertex->discard) {
894 if (buf_priv->dispatched == 1)
895 AGE_BUFFER(buf_priv);
896 buf_priv->dispatched = 0;
897 mga_freelist_put(dev, buf);
898 }
899 return -EINVAL;
900 }
901
902 WRAP_TEST_WITH_RETURN(dev_priv);
903
904 mga_dma_dispatch_vertex(dev, buf);
905
906 return 0;
907}
908
909static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
910{
911 drm_mga_private_t *dev_priv = dev->dev_private;
912 struct drm_device_dma *dma = dev->dma;
913 struct drm_buf *buf;
914 drm_mga_buf_priv_t *buf_priv;
915 drm_mga_indices_t *indices = data;
916
917 LOCK_TEST_WITH_RETURN(dev, file_priv);
918
919 if (indices->idx < 0 || indices->idx > dma->buf_count)
920 return -EINVAL;
921
922 buf = dma->buflist[indices->idx];
923 buf_priv = buf->dev_private;
924
925 buf_priv->discard = indices->discard;
926
927 if (!mga_verify_state(dev_priv)) {
928 if (indices->discard) {
929 if (buf_priv->dispatched == 1)
930 AGE_BUFFER(buf_priv);
931 buf_priv->dispatched = 0;
932 mga_freelist_put(dev, buf);
933 }
934 return -EINVAL;
935 }
936
937 WRAP_TEST_WITH_RETURN(dev_priv);
938
939 mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
940
941 return 0;
942}
943
944static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
945{
946 struct drm_device_dma *dma = dev->dma;
947 drm_mga_private_t *dev_priv = dev->dev_private;
948 struct drm_buf *buf;
949 drm_mga_buf_priv_t *buf_priv;
950 drm_mga_iload_t *iload = data;
951 DRM_DEBUG("\n");
952
953 LOCK_TEST_WITH_RETURN(dev, file_priv);
954
955#if 0
956 if (mga_do_wait_for_idle(dev_priv) < 0) {
957 if (MGA_DMA_DEBUG)
958 DRM_INFO("-EBUSY\n");
959 return -EBUSY;
960 }
961#endif
962 if (iload->idx < 0 || iload->idx > dma->buf_count)
963 return -EINVAL;
964
965 buf = dma->buflist[iload->idx];
966 buf_priv = buf->dev_private;
967
968 if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
969 mga_freelist_put(dev, buf);
970 return -EINVAL;
971 }
972
973 WRAP_TEST_WITH_RETURN(dev_priv);
974
975 mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
976
977 /* Make sure we restore the 3D state next time.
978 */
979 dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
980
981 return 0;
982}
983
984static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
985{
986 drm_mga_private_t *dev_priv = dev->dev_private;
987 drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
988 drm_mga_blit_t *blit = data;
989 DRM_DEBUG("\n");
990
991 LOCK_TEST_WITH_RETURN(dev, file_priv);
992
993 if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
994 sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
995
996 if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
997 return -EINVAL;
998
999 WRAP_TEST_WITH_RETURN(dev_priv);
1000
1001 mga_dma_dispatch_blit(dev, blit);
1002
1003 /* Make sure we restore the 3D state next time.
1004 */
1005 dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
1006
1007 return 0;
1008}
1009
1010static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1011{
1012 drm_mga_private_t *dev_priv = dev->dev_private;
1013 drm_mga_getparam_t *param = data;
1014 int value;
1015
1016 if (!dev_priv) {
1017 DRM_ERROR("called with no initialization\n");
1018 return -EINVAL;
1019 }
1020
1021 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1022
1023 switch (param->param) {
1024 case MGA_PARAM_IRQ_NR:
1025 value = dev->irq;
1026 break;
1027 case MGA_PARAM_CARD_TYPE:
1028 value = dev_priv->chipset;
1029 break;
1030 default:
1031 return -EINVAL;
1032 }
1033
1034 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1035 DRM_ERROR("copy_to_user\n");
1036 return -EFAULT;
1037 }
1038
1039 return 0;
1040}
1041
/* DRM_MGA_SET_FENCE ioctl: allocate the next fence value, return it to
 * the caller through *data, and emit a SOFTRAP into the DMA stream so
 * the IRQ handler can signal when the fence has been reached.
 *
 * Note: DMA_LOCALS/BEGIN_DMA/DMA_BLOCK/ADVANCE_DMA are project macros
 * that implicitly use the local `dev_priv`.
 */
static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	u32 *fence = data;
	DMA_LOCALS;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	/* The fence value cannot be read in fence's declaration because
	 * dev_priv may be NULL until the check above has passed.
	 */

	*fence = dev_priv->next_fence_to_post;
	dev_priv->next_fence_to_post++;

	/* One DMA block: three pad writes plus the SOFTRAP that raises
	 * the fence interrupt.
	 */
	BEGIN_DMA(1);
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
	ADVANCE_DMA();

	return 0;
}
1070
1071static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *
1072file_priv)
1073{
1074 drm_mga_private_t *dev_priv = dev->dev_private;
1075 u32 *fence = data;
1076
1077 if (!dev_priv) {
1078 DRM_ERROR("called with no initialization\n");
1079 return -EINVAL;
1080 }
1081
1082 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1083
1084 mga_driver_fence_wait(dev, fence);
1085 return 0;
1086}
1087
/* Ioctl dispatch table for the MGA driver.  All entries require DRM
 * authentication; INIT and DMA_BOOTSTRAP additionally require master
 * and root privileges since they configure the hardware.
 */
struct drm_ioctl_desc mga_ioctls[] = {
	DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};

/* Number of entries in mga_ioctls[], exported to the DRM core. */
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/gpu/drm/mga/mga_ucode.h b/drivers/gpu/drm/mga/mga_ucode.h
new file mode 100644
index 000000000000..b611e27470e1
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_ucode.h
@@ -0,0 +1,11645 @@
1/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*-
2 * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com
3 *
4 * Copyright 1999 Matrox Graphics Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included
15 * in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
23 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Kernel-based WARP engine management:
26 * Gareth Hughes <gareth@valinux.com>
27 */
28
29/*
30 * WARP pipes are named according to the functions they perform, where:
31 *
32 * - T stands for computation of texture stage 0
33 * - T2 stands for computation of both texture stage 0 and texture stage 1
34 * - G stands for computation of triangle intensity (Gouraud interpolation)
35 * - Z stands for computation of Z buffer interpolation
36 * - S stands for computation of specular highlight
37 * - A stands for computation of the alpha channel
38 * - F stands for computation of vertex fog interpolation
39 */
40
41static unsigned char warp_g200_tgz[] = {
42
43 0x00, 0x80, 0x00, 0xE8,
44 0x00, 0x80, 0x00, 0xE8,
45
46 0x00, 0x80, 0x00, 0xE8,
47 0x00, 0x80, 0x00, 0xE8,
48
49 0x00, 0x80, 0x00, 0xE8,
50 0x00, 0x80, 0x00, 0xE8,
51
52 0x00, 0x80, 0x00, 0xE8,
53 0x00, 0x80, 0x00, 0xE8,
54
55 0x00, 0x80, 0x00, 0xE8,
56 0x00, 0x80, 0x00, 0xE8,
57
58 0x00, 0x98, 0xA0, 0xE9,
59 0x40, 0x40, 0xD8, 0xEC,
60
61 0xFF, 0x80, 0xC0, 0xE9,
62 0x00, 0x80, 0x00, 0xE8,
63
64 0x1F, 0xD7, 0x18, 0xBD,
65 0x3F, 0xD7, 0x22, 0xBD,
66
67 0x81, 0x04,
68 0x89, 0x04,
69 0x01, 0x04,
70 0x09, 0x04,
71
72 0xC9, 0x41, 0xC0, 0xEC,
73 0x11, 0x04,
74 0x00, 0xE0,
75
76 0x41, 0xCC, 0x41, 0xCD,
77 0x49, 0xCC, 0x49, 0xCD,
78
79 0xD1, 0x41, 0xC0, 0xEC,
80 0x51, 0xCC, 0x51, 0xCD,
81
82 0x80, 0x04,
83 0x10, 0x04,
84 0x08, 0x04,
85 0x00, 0xE0,
86
87 0x00, 0xCC, 0xC0, 0xCD,
88 0xD1, 0x49, 0xC0, 0xEC,
89
90 0x8A, 0x1F, 0x20, 0xE9,
91 0x8B, 0x3F, 0x20, 0xE9,
92
93 0x41, 0x3C, 0x41, 0xAD,
94 0x49, 0x3C, 0x49, 0xAD,
95
96 0x10, 0xCC, 0x10, 0xCD,
97 0x08, 0xCC, 0x08, 0xCD,
98
99 0xB9, 0x41, 0x49, 0xBB,
100 0x1F, 0xF0, 0x41, 0xCD,
101
102 0x51, 0x3C, 0x51, 0xAD,
103 0x00, 0x98, 0x80, 0xE9,
104
105 0x72, 0x80, 0x07, 0xEA,
106 0x24, 0x1F, 0x20, 0xE9,
107
108 0x15, 0x41, 0x49, 0xBD,
109 0x1D, 0x41, 0x51, 0xBD,
110
111 0x2E, 0x41, 0x2A, 0xB8,
112 0x34, 0x53, 0xA0, 0xE8,
113
114 0x15, 0x30,
115 0x1D, 0x30,
116 0x58, 0xE3,
117 0x00, 0xE0,
118
119 0xB5, 0x40, 0x48, 0xBD,
120 0x3D, 0x40, 0x50, 0xBD,
121
122 0x24, 0x43, 0xA0, 0xE8,
123 0x2C, 0x4B, 0xA0, 0xE8,
124
125 0x15, 0x72,
126 0x09, 0xE3,
127 0x00, 0xE0,
128 0x1D, 0x72,
129
130 0x35, 0x30,
131 0xB5, 0x30,
132 0xBD, 0x30,
133 0x3D, 0x30,
134
135 0x9C, 0x97, 0x57, 0x9F,
136 0x00, 0x80, 0x00, 0xE8,
137
138 0x6C, 0x64, 0xC8, 0xEC,
139 0x98, 0xE1,
140 0xB5, 0x05,
141
142 0xBD, 0x05,
143 0x2E, 0x30,
144 0x32, 0xC0, 0xA0, 0xE8,
145
146 0x33, 0xC0, 0xA0, 0xE8,
147 0x74, 0x64, 0xC8, 0xEC,
148
149 0x40, 0x3C, 0x40, 0xAD,
150 0x32, 0x6A,
151 0x2A, 0x30,
152
153 0x20, 0x73,
154 0x33, 0x6A,
155 0x00, 0xE0,
156 0x28, 0x73,
157
158 0x1C, 0x72,
159 0x83, 0xE2,
160 0x60, 0x80, 0x15, 0xEA,
161
162 0xB8, 0x3D, 0x28, 0xDF,
163 0x30, 0x35, 0x20, 0xDF,
164
165 0x40, 0x30,
166 0x00, 0xE0,
167 0xCC, 0xE2,
168 0x64, 0x72,
169
170 0x25, 0x42, 0x52, 0xBF,
171 0x2D, 0x42, 0x4A, 0xBF,
172
173 0x30, 0x2E, 0x30, 0xDF,
174 0x38, 0x2E, 0x38, 0xDF,
175
176 0x18, 0x1D, 0x45, 0xE9,
177 0x1E, 0x15, 0x45, 0xE9,
178
179 0x2B, 0x49, 0x51, 0xBD,
180 0x00, 0xE0,
181 0x1F, 0x73,
182
183 0x38, 0x38, 0x40, 0xAF,
184 0x30, 0x30, 0x40, 0xAF,
185
186 0x24, 0x1F, 0x24, 0xDF,
187 0x1D, 0x32, 0x20, 0xE9,
188
189 0x2C, 0x1F, 0x2C, 0xDF,
190 0x1A, 0x33, 0x20, 0xE9,
191
192 0xB0, 0x10,
193 0x08, 0xE3,
194 0x40, 0x10,
195 0xB8, 0x10,
196
197 0x26, 0xF0, 0x30, 0xCD,
198 0x2F, 0xF0, 0x38, 0xCD,
199
200 0x2B, 0x80, 0x20, 0xE9,
201 0x2A, 0x80, 0x20, 0xE9,
202
203 0xA6, 0x20,
204 0x88, 0xE2,
205 0x00, 0xE0,
206 0xAF, 0x20,
207
208 0x28, 0x2A, 0x26, 0xAF,
209 0x20, 0x2A, 0xC0, 0xAF,
210
211 0x34, 0x1F, 0x34, 0xDF,
212 0x46, 0x24, 0x46, 0xDF,
213
214 0x28, 0x30, 0x80, 0xBF,
215 0x20, 0x38, 0x80, 0xBF,
216
217 0x47, 0x24, 0x47, 0xDF,
218 0x4E, 0x2C, 0x4E, 0xDF,
219
220 0x4F, 0x2C, 0x4F, 0xDF,
221 0x56, 0x34, 0x56, 0xDF,
222
223 0x28, 0x15, 0x28, 0xDF,
224 0x20, 0x1D, 0x20, 0xDF,
225
226 0x57, 0x34, 0x57, 0xDF,
227 0x00, 0xE0,
228 0x1D, 0x05,
229
230 0x04, 0x80, 0x10, 0xEA,
231 0x89, 0xE2,
232 0x2B, 0x30,
233
234 0x3F, 0xC1, 0x1D, 0xBD,
235 0x00, 0x80, 0x00, 0xE8,
236
237 0x00, 0x80, 0x00, 0xE8,
238 0x00, 0x80, 0x00, 0xE8,
239
240 0xA0, 0x68,
241 0xBF, 0x25,
242 0x00, 0x80, 0x00, 0xE8,
243
244 0x20, 0xC0, 0x20, 0xAF,
245 0x28, 0x05,
246 0x97, 0x74,
247
248 0x00, 0xE0,
249 0x2A, 0x10,
250 0x16, 0xC0, 0x20, 0xE9,
251
252 0x04, 0x80, 0x10, 0xEA,
253 0x8C, 0xE2,
254 0x95, 0x05,
255
256 0x28, 0xC1, 0x28, 0xAD,
257 0x1F, 0xC1, 0x15, 0xBD,
258
259 0x00, 0x80, 0x00, 0xE8,
260 0x00, 0x80, 0x00, 0xE8,
261
262 0xA8, 0x67,
263 0x9F, 0x6B,
264 0x00, 0x80, 0x00, 0xE8,
265
266 0x28, 0xC0, 0x28, 0xAD,
267 0x1D, 0x25,
268 0x20, 0x05,
269
270 0x28, 0x32, 0x80, 0xAD,
271 0x40, 0x2A, 0x40, 0xBD,
272
273 0x1C, 0x80, 0x20, 0xE9,
274 0x20, 0x33, 0x20, 0xAD,
275
276 0x20, 0x73,
277 0x00, 0xE0,
278 0xB6, 0x49, 0x51, 0xBB,
279
280 0x26, 0x2F, 0xB0, 0xE8,
281 0x19, 0x20, 0x20, 0xE9,
282
283 0x35, 0x20, 0x35, 0xDF,
284 0x3D, 0x20, 0x3D, 0xDF,
285
286 0x15, 0x20, 0x15, 0xDF,
287 0x1D, 0x20, 0x1D, 0xDF,
288
289 0x26, 0xD0, 0x26, 0xCD,
290 0x29, 0x49, 0x2A, 0xB8,
291
292 0x26, 0x40, 0x80, 0xBD,
293 0x3B, 0x48, 0x50, 0xBD,
294
295 0x3E, 0x54, 0x57, 0x9F,
296 0x00, 0xE0,
297 0x82, 0xE1,
298
299 0x1E, 0xAF, 0x59, 0x9F,
300 0x00, 0x80, 0x00, 0xE8,
301
302 0x26, 0x30,
303 0x29, 0x30,
304 0x48, 0x3C, 0x48, 0xAD,
305
306 0x2B, 0x72,
307 0xC2, 0xE1,
308 0x2C, 0xC0, 0x44, 0xC2,
309
310 0x05, 0x24, 0x34, 0xBF,
311 0x0D, 0x24, 0x2C, 0xBF,
312
313 0x2D, 0x46, 0x4E, 0xBF,
314 0x25, 0x46, 0x56, 0xBF,
315
316 0x20, 0x1D, 0x6F, 0x8F,
317 0x32, 0x3E, 0x5F, 0xE9,
318
319 0x3E, 0x50, 0x56, 0x9F,
320 0x00, 0xE0,
321 0x3B, 0x30,
322
323 0x1E, 0x8F, 0x51, 0x9F,
324 0x33, 0x1E, 0x5F, 0xE9,
325
326 0x05, 0x44, 0x54, 0xB2,
327 0x0D, 0x44, 0x4C, 0xB2,
328
329 0x19, 0xC0, 0xB0, 0xE8,
330 0x34, 0xC0, 0x44, 0xC4,
331
332 0x33, 0x73,
333 0x00, 0xE0,
334 0x3E, 0x62, 0x57, 0x9F,
335
336 0x1E, 0xAF, 0x59, 0x9F,
337 0x00, 0xE0,
338 0x0D, 0x20,
339
340 0x84, 0x3E, 0x58, 0xE9,
341 0x28, 0x1D, 0x6F, 0x8F,
342
343 0x05, 0x20,
344 0x00, 0xE0,
345 0x85, 0x1E, 0x58, 0xE9,
346
347 0x9B, 0x3B, 0x33, 0xDF,
348 0x20, 0x20, 0x42, 0xAF,
349
350 0x30, 0x42, 0x56, 0x9F,
351 0x80, 0x3E, 0x57, 0xE9,
352
353 0x3F, 0x8F, 0x51, 0x9F,
354 0x30, 0x80, 0x5F, 0xE9,
355
356 0x28, 0x28, 0x24, 0xAF,
357 0x81, 0x1E, 0x57, 0xE9,
358
359 0x05, 0x47, 0x57, 0xBF,
360 0x0D, 0x47, 0x4F, 0xBF,
361
362 0x88, 0x80, 0x58, 0xE9,
363 0x1B, 0x29, 0x1B, 0xDF,
364
365 0x30, 0x1D, 0x6F, 0x8F,
366 0x3A, 0x30, 0x4F, 0xE9,
367
368 0x1C, 0x30, 0x26, 0xDF,
369 0x09, 0xE3,
370 0x3B, 0x05,
371
372 0x3E, 0x50, 0x56, 0x9F,
373 0x3B, 0x3F, 0x4F, 0xE9,
374
375 0x1E, 0x8F, 0x51, 0x9F,
376 0x00, 0xE0,
377 0xAC, 0x20,
378
379 0x2D, 0x44, 0x4C, 0xB4,
380 0x2C, 0x1C, 0xC0, 0xAF,
381
382 0x25, 0x44, 0x54, 0xB4,
383 0x00, 0xE0,
384 0xC8, 0x30,
385
386 0x30, 0x46, 0x30, 0xAF,
387 0x1B, 0x1B, 0x48, 0xAF,
388
389 0x00, 0xE0,
390 0x25, 0x20,
391 0x38, 0x2C, 0x4F, 0xE9,
392
393 0x86, 0x80, 0x57, 0xE9,
394 0x38, 0x1D, 0x6F, 0x8F,
395
396 0x28, 0x74,
397 0x00, 0xE0,
398 0x0D, 0x44, 0x4C, 0xB0,
399
400 0x05, 0x44, 0x54, 0xB0,
401 0x2D, 0x20,
402 0x9B, 0x10,
403
404 0x82, 0x3E, 0x57, 0xE9,
405 0x32, 0xF0, 0x1B, 0xCD,
406
407 0x1E, 0xBD, 0x59, 0x9F,
408 0x83, 0x1E, 0x57, 0xE9,
409
410 0x38, 0x47, 0x38, 0xAF,
411 0x34, 0x20,
412 0x2A, 0x30,
413
414 0x00, 0xE0,
415 0x0D, 0x20,
416 0x32, 0x20,
417 0x05, 0x20,
418
419 0x87, 0x80, 0x57, 0xE9,
420 0x1F, 0x54, 0x57, 0x9F,
421
422 0x17, 0x42, 0x56, 0x9F,
423 0x00, 0xE0,
424 0x3B, 0x6A,
425
426 0x3F, 0x8F, 0x51, 0x9F,
427 0x37, 0x1E, 0x4F, 0xE9,
428
429 0x37, 0x32, 0x2A, 0xAF,
430 0x00, 0xE0,
431 0x32, 0x00,
432
433 0x00, 0x80, 0x00, 0xE8,
434 0x27, 0xC0, 0x44, 0xC0,
435
436 0x36, 0x1F, 0x4F, 0xE9,
437 0x1F, 0x1F, 0x26, 0xDF,
438
439 0x37, 0x1B, 0x37, 0xBF,
440 0x17, 0x26, 0x17, 0xDF,
441
442 0x3E, 0x17, 0x4F, 0xE9,
443 0x3F, 0x3F, 0x4F, 0xE9,
444
445 0x34, 0x1F, 0x34, 0xAF,
446 0x2B, 0x05,
447 0xA7, 0x20,
448
449 0x33, 0x2B, 0x37, 0xDF,
450 0x27, 0x17, 0xC0, 0xAF,
451
452 0x34, 0x80, 0x4F, 0xE9,
453 0x00, 0x80, 0x00, 0xE8,
454
455 0x03, 0x80, 0x0A, 0xEA,
456 0x17, 0xC1, 0x2B, 0xBD,
457
458 0x00, 0x80, 0x00, 0xE8,
459 0x00, 0x80, 0x00, 0xE8,
460
461 0xB3, 0x68,
462 0x97, 0x25,
463 0x00, 0x80, 0x00, 0xE8,
464
465 0x33, 0xC0, 0x33, 0xAF,
466 0x3C, 0x27, 0x4F, 0xE9,
467
468 0x57, 0x39, 0x20, 0xE9,
469 0x28, 0x19, 0x60, 0xEC,
470
471 0x2B, 0x32, 0x20, 0xE9,
472 0x1D, 0x3B, 0x20, 0xE9,
473
474 0xB3, 0x05,
475 0x00, 0xE0,
476 0x16, 0x28, 0x20, 0xE9,
477
478 0x23, 0x3B, 0x33, 0xAD,
479 0x1E, 0x2B, 0x20, 0xE9,
480
481 0x1C, 0x80, 0x20, 0xE9,
482 0x57, 0x36, 0x20, 0xE9,
483
484 0x00, 0x80, 0xA0, 0xE9,
485 0x40, 0x40, 0xD8, 0xEC,
486
487 0xFF, 0x80, 0xC0, 0xE9,
488 0x90, 0xE2,
489 0x00, 0xE0,
490
491 0x85, 0xFF, 0x20, 0xEA,
492 0x19, 0xC8, 0xC1, 0xCD,
493
494 0x1F, 0xD7, 0x18, 0xBD,
495 0x3F, 0xD7, 0x22, 0xBD,
496
497 0x9F, 0x41, 0x49, 0xBD,
498 0x00, 0x80, 0x00, 0xE8,
499
500 0x25, 0x41, 0x49, 0xBD,
501 0x2D, 0x41, 0x51, 0xBD,
502
503 0x0D, 0x80, 0x07, 0xEA,
504 0x00, 0x80, 0x00, 0xE8,
505
506 0x35, 0x40, 0x48, 0xBD,
507 0x3D, 0x40, 0x50, 0xBD,
508
509 0x00, 0x80, 0x00, 0xE8,
510 0x25, 0x30,
511 0x2D, 0x30,
512
513 0x35, 0x30,
514 0xB5, 0x30,
515 0xBD, 0x30,
516 0x3D, 0x30,
517
518 0x9C, 0xA7, 0x5B, 0x9F,
519 0x00, 0x80, 0x00, 0xE8,
520
521 0x00, 0x80, 0x00, 0xE8,
522 0x00, 0x80, 0x00, 0xE8,
523
524 0x00, 0x80, 0x00, 0xE8,
525 0x00, 0x80, 0x00, 0xE8,
526
527 0x00, 0x80, 0x00, 0xE8,
528 0x00, 0x80, 0x00, 0xE8,
529
530 0x00, 0x80, 0x00, 0xE8,
531 0x00, 0x80, 0x00, 0xE8,
532
533 0x84, 0xFF, 0x0A, 0xEA,
534 0x00, 0x80, 0x00, 0xE8,
535
536 0xC9, 0x41, 0xC8, 0xEC,
537 0x42, 0xE1,
538 0x00, 0xE0,
539
540 0x82, 0xFF, 0x20, 0xEA,
541 0x00, 0x80, 0x00, 0xE8,
542
543 0x00, 0x80, 0x00, 0xE8,
544 0x00, 0x80, 0x00, 0xE8,
545
546 0xC8, 0x40, 0xC0, 0xEC,
547 0x00, 0x80, 0x00, 0xE8,
548
549 0x7F, 0xFF, 0x20, 0xEA,
550 0x00, 0x80, 0x00, 0xE8,
551
552 0x00, 0x80, 0x00, 0xE8,
553 0x00, 0x80, 0x00, 0xE8,
554
555};
556
557static unsigned char warp_g200_tgza[] = {
558
559 0x00, 0x98, 0xA0, 0xE9,
560 0x40, 0x40, 0xD8, 0xEC,
561
562 0xFF, 0x80, 0xC0, 0xE9,
563 0x00, 0x80, 0x00, 0xE8,
564
565 0x1F, 0xD7, 0x18, 0xBD,
566 0x3F, 0xD7, 0x22, 0xBD,
567
568 0x81, 0x04,
569 0x89, 0x04,
570 0x01, 0x04,
571 0x09, 0x04,
572
573 0xC9, 0x41, 0xC0, 0xEC,
574 0x11, 0x04,
575 0x00, 0xE0,
576
577 0x41, 0xCC, 0x41, 0xCD,
578 0x49, 0xCC, 0x49, 0xCD,
579
580 0xD1, 0x41, 0xC0, 0xEC,
581 0x51, 0xCC, 0x51, 0xCD,
582
583 0x80, 0x04,
584 0x10, 0x04,
585 0x08, 0x04,
586 0x00, 0xE0,
587
588 0x00, 0xCC, 0xC0, 0xCD,
589 0xD1, 0x49, 0xC0, 0xEC,
590
591 0x8A, 0x1F, 0x20, 0xE9,
592 0x8B, 0x3F, 0x20, 0xE9,
593
594 0x41, 0x3C, 0x41, 0xAD,
595 0x49, 0x3C, 0x49, 0xAD,
596
597 0x10, 0xCC, 0x10, 0xCD,
598 0x08, 0xCC, 0x08, 0xCD,
599
600 0xB9, 0x41, 0x49, 0xBB,
601 0x1F, 0xF0, 0x41, 0xCD,
602
603 0x51, 0x3C, 0x51, 0xAD,
604 0x00, 0x98, 0x80, 0xE9,
605
606 0x7D, 0x80, 0x07, 0xEA,
607 0x24, 0x1F, 0x20, 0xE9,
608
609 0x15, 0x41, 0x49, 0xBD,
610 0x1D, 0x41, 0x51, 0xBD,
611
612 0x2E, 0x41, 0x2A, 0xB8,
613 0x34, 0x53, 0xA0, 0xE8,
614
615 0x15, 0x30,
616 0x1D, 0x30,
617 0x58, 0xE3,
618 0x00, 0xE0,
619
620 0xB5, 0x40, 0x48, 0xBD,
621 0x3D, 0x40, 0x50, 0xBD,
622
623 0x24, 0x43, 0xA0, 0xE8,
624 0x2C, 0x4B, 0xA0, 0xE8,
625
626 0x15, 0x72,
627 0x09, 0xE3,
628 0x00, 0xE0,
629 0x1D, 0x72,
630
631 0x35, 0x30,
632 0xB5, 0x30,
633 0xBD, 0x30,
634 0x3D, 0x30,
635
636 0x9C, 0x97, 0x57, 0x9F,
637 0x00, 0x80, 0x00, 0xE8,
638
639 0x6C, 0x64, 0xC8, 0xEC,
640 0x98, 0xE1,
641 0xB5, 0x05,
642
643 0xBD, 0x05,
644 0x2E, 0x30,
645 0x32, 0xC0, 0xA0, 0xE8,
646
647 0x33, 0xC0, 0xA0, 0xE8,
648 0x74, 0x64, 0xC8, 0xEC,
649
650 0x40, 0x3C, 0x40, 0xAD,
651 0x32, 0x6A,
652 0x2A, 0x30,
653
654 0x20, 0x73,
655 0x33, 0x6A,
656 0x00, 0xE0,
657 0x28, 0x73,
658
659 0x1C, 0x72,
660 0x83, 0xE2,
661 0x6B, 0x80, 0x15, 0xEA,
662
663 0xB8, 0x3D, 0x28, 0xDF,
664 0x30, 0x35, 0x20, 0xDF,
665
666 0x40, 0x30,
667 0x00, 0xE0,
668 0xCC, 0xE2,
669 0x64, 0x72,
670
671 0x25, 0x42, 0x52, 0xBF,
672 0x2D, 0x42, 0x4A, 0xBF,
673
674 0x30, 0x2E, 0x30, 0xDF,
675 0x38, 0x2E, 0x38, 0xDF,
676
677 0x18, 0x1D, 0x45, 0xE9,
678 0x1E, 0x15, 0x45, 0xE9,
679
680 0x2B, 0x49, 0x51, 0xBD,
681 0x00, 0xE0,
682 0x1F, 0x73,
683
684 0x38, 0x38, 0x40, 0xAF,
685 0x30, 0x30, 0x40, 0xAF,
686
687 0x24, 0x1F, 0x24, 0xDF,
688 0x1D, 0x32, 0x20, 0xE9,
689
690 0x2C, 0x1F, 0x2C, 0xDF,
691 0x1A, 0x33, 0x20, 0xE9,
692
693 0xB0, 0x10,
694 0x08, 0xE3,
695 0x40, 0x10,
696 0xB8, 0x10,
697
698 0x26, 0xF0, 0x30, 0xCD,
699 0x2F, 0xF0, 0x38, 0xCD,
700
701 0x2B, 0x80, 0x20, 0xE9,
702 0x2A, 0x80, 0x20, 0xE9,
703
704 0xA6, 0x20,
705 0x88, 0xE2,
706 0x00, 0xE0,
707 0xAF, 0x20,
708
709 0x28, 0x2A, 0x26, 0xAF,
710 0x20, 0x2A, 0xC0, 0xAF,
711
712 0x34, 0x1F, 0x34, 0xDF,
713 0x46, 0x24, 0x46, 0xDF,
714
715 0x28, 0x30, 0x80, 0xBF,
716 0x20, 0x38, 0x80, 0xBF,
717
718 0x47, 0x24, 0x47, 0xDF,
719 0x4E, 0x2C, 0x4E, 0xDF,
720
721 0x4F, 0x2C, 0x4F, 0xDF,
722 0x56, 0x34, 0x56, 0xDF,
723
724 0x28, 0x15, 0x28, 0xDF,
725 0x20, 0x1D, 0x20, 0xDF,
726
727 0x57, 0x34, 0x57, 0xDF,
728 0x00, 0xE0,
729 0x1D, 0x05,
730
731 0x04, 0x80, 0x10, 0xEA,
732 0x89, 0xE2,
733 0x2B, 0x30,
734
735 0x3F, 0xC1, 0x1D, 0xBD,
736 0x00, 0x80, 0x00, 0xE8,
737
738 0x00, 0x80, 0x00, 0xE8,
739 0x00, 0x80, 0x00, 0xE8,
740
741 0xA0, 0x68,
742 0xBF, 0x25,
743 0x00, 0x80, 0x00, 0xE8,
744
745 0x20, 0xC0, 0x20, 0xAF,
746 0x28, 0x05,
747 0x97, 0x74,
748
749 0x00, 0xE0,
750 0x2A, 0x10,
751 0x16, 0xC0, 0x20, 0xE9,
752
753 0x04, 0x80, 0x10, 0xEA,
754 0x8C, 0xE2,
755 0x95, 0x05,
756
757 0x28, 0xC1, 0x28, 0xAD,
758 0x1F, 0xC1, 0x15, 0xBD,
759
760 0x00, 0x80, 0x00, 0xE8,
761 0x00, 0x80, 0x00, 0xE8,
762
763 0xA8, 0x67,
764 0x9F, 0x6B,
765 0x00, 0x80, 0x00, 0xE8,
766
767 0x28, 0xC0, 0x28, 0xAD,
768 0x1D, 0x25,
769 0x20, 0x05,
770
771 0x28, 0x32, 0x80, 0xAD,
772 0x40, 0x2A, 0x40, 0xBD,
773
774 0x1C, 0x80, 0x20, 0xE9,
775 0x20, 0x33, 0x20, 0xAD,
776
777 0x20, 0x73,
778 0x00, 0xE0,
779 0xB6, 0x49, 0x51, 0xBB,
780
781 0x26, 0x2F, 0xB0, 0xE8,
782 0x19, 0x20, 0x20, 0xE9,
783
784 0x35, 0x20, 0x35, 0xDF,
785 0x3D, 0x20, 0x3D, 0xDF,
786
787 0x15, 0x20, 0x15, 0xDF,
788 0x1D, 0x20, 0x1D, 0xDF,
789
790 0x26, 0xD0, 0x26, 0xCD,
791 0x29, 0x49, 0x2A, 0xB8,
792
793 0x26, 0x40, 0x80, 0xBD,
794 0x3B, 0x48, 0x50, 0xBD,
795
796 0x3E, 0x54, 0x57, 0x9F,
797 0x00, 0xE0,
798 0x82, 0xE1,
799
800 0x1E, 0xAF, 0x59, 0x9F,
801 0x00, 0x80, 0x00, 0xE8,
802
803 0x26, 0x30,
804 0x29, 0x30,
805 0x48, 0x3C, 0x48, 0xAD,
806
807 0x2B, 0x72,
808 0xC2, 0xE1,
809 0x2C, 0xC0, 0x44, 0xC2,
810
811 0x05, 0x24, 0x34, 0xBF,
812 0x0D, 0x24, 0x2C, 0xBF,
813
814 0x2D, 0x46, 0x4E, 0xBF,
815 0x25, 0x46, 0x56, 0xBF,
816
817 0x20, 0x1D, 0x6F, 0x8F,
818 0x32, 0x3E, 0x5F, 0xE9,
819
820 0x3E, 0x50, 0x56, 0x9F,
821 0x00, 0xE0,
822 0x3B, 0x30,
823
824 0x1E, 0x8F, 0x51, 0x9F,
825 0x33, 0x1E, 0x5F, 0xE9,
826
827 0x05, 0x44, 0x54, 0xB2,
828 0x0D, 0x44, 0x4C, 0xB2,
829
830 0x19, 0xC0, 0xB0, 0xE8,
831 0x34, 0xC0, 0x44, 0xC4,
832
833 0x33, 0x73,
834 0x00, 0xE0,
835 0x3E, 0x62, 0x57, 0x9F,
836
837 0x1E, 0xAF, 0x59, 0x9F,
838 0x00, 0xE0,
839 0x0D, 0x20,
840
841 0x84, 0x3E, 0x58, 0xE9,
842 0x28, 0x1D, 0x6F, 0x8F,
843
844 0x05, 0x20,
845 0x00, 0xE0,
846 0x85, 0x1E, 0x58, 0xE9,
847
848 0x9B, 0x3B, 0x33, 0xDF,
849 0x20, 0x20, 0x42, 0xAF,
850
851 0x30, 0x42, 0x56, 0x9F,
852 0x80, 0x3E, 0x57, 0xE9,
853
854 0x3F, 0x8F, 0x51, 0x9F,
855 0x30, 0x80, 0x5F, 0xE9,
856
857 0x28, 0x28, 0x24, 0xAF,
858 0x81, 0x1E, 0x57, 0xE9,
859
860 0x05, 0x47, 0x57, 0xBF,
861 0x0D, 0x47, 0x4F, 0xBF,
862
863 0x88, 0x80, 0x58, 0xE9,
864 0x1B, 0x29, 0x1B, 0xDF,
865
866 0x30, 0x1D, 0x6F, 0x8F,
867 0x3A, 0x30, 0x4F, 0xE9,
868
869 0x1C, 0x30, 0x26, 0xDF,
870 0x09, 0xE3,
871 0x3B, 0x05,
872
873 0x3E, 0x50, 0x56, 0x9F,
874 0x3B, 0x3F, 0x4F, 0xE9,
875
876 0x1E, 0x8F, 0x51, 0x9F,
877 0x00, 0xE0,
878 0xAC, 0x20,
879
880 0x2D, 0x44, 0x4C, 0xB4,
881 0x2C, 0x1C, 0xC0, 0xAF,
882
883 0x25, 0x44, 0x54, 0xB4,
884 0x00, 0xE0,
885 0xC8, 0x30,
886
887 0x30, 0x46, 0x30, 0xAF,
888 0x1B, 0x1B, 0x48, 0xAF,
889
890 0x00, 0xE0,
891 0x25, 0x20,
892 0x38, 0x2C, 0x4F, 0xE9,
893
894 0x86, 0x80, 0x57, 0xE9,
895 0x38, 0x1D, 0x6F, 0x8F,
896
897 0x28, 0x74,
898 0x00, 0xE0,
899 0x0D, 0x44, 0x4C, 0xB0,
900
901 0x05, 0x44, 0x54, 0xB0,
902 0x2D, 0x20,
903 0x9B, 0x10,
904
905 0x82, 0x3E, 0x57, 0xE9,
906 0x32, 0xF0, 0x1B, 0xCD,
907
908 0x1E, 0xBD, 0x59, 0x9F,
909 0x83, 0x1E, 0x57, 0xE9,
910
911 0x38, 0x47, 0x38, 0xAF,
912 0x34, 0x20,
913 0x2A, 0x30,
914
915 0x00, 0xE0,
916 0x0D, 0x20,
917 0x32, 0x20,
918 0x05, 0x20,
919
920 0x87, 0x80, 0x57, 0xE9,
921 0x1F, 0x54, 0x57, 0x9F,
922
923 0x17, 0x42, 0x56, 0x9F,
924 0x00, 0xE0,
925 0x3B, 0x6A,
926
927 0x3F, 0x8F, 0x51, 0x9F,
928 0x37, 0x1E, 0x4F, 0xE9,
929
930 0x37, 0x32, 0x2A, 0xAF,
931 0x00, 0xE0,
932 0x32, 0x00,
933
934 0x00, 0x80, 0x00, 0xE8,
935 0x27, 0xC0, 0x44, 0xC0,
936
937 0x36, 0x1F, 0x4F, 0xE9,
938 0x1F, 0x1F, 0x26, 0xDF,
939
940 0x37, 0x1B, 0x37, 0xBF,
941 0x17, 0x26, 0x17, 0xDF,
942
943 0x3E, 0x17, 0x4F, 0xE9,
944 0x3F, 0x3F, 0x4F, 0xE9,
945
946 0x34, 0x1F, 0x34, 0xAF,
947 0x2B, 0x05,
948 0xA7, 0x20,
949
950 0x33, 0x2B, 0x37, 0xDF,
951 0x27, 0x17, 0xC0, 0xAF,
952
953 0x34, 0x80, 0x4F, 0xE9,
954 0x00, 0x80, 0x00, 0xE8,
955
956 0x2D, 0x44, 0x4C, 0xB6,
957 0x25, 0x44, 0x54, 0xB6,
958
959 0x03, 0x80, 0x2A, 0xEA,
960 0x17, 0xC1, 0x2B, 0xBD,
961
962 0x2D, 0x20,
963 0x25, 0x20,
964 0x07, 0xC0, 0x44, 0xC6,
965
966 0xB3, 0x68,
967 0x97, 0x25,
968 0x00, 0x80, 0x00, 0xE8,
969
970 0x33, 0xC0, 0x33, 0xAF,
971 0x3C, 0x27, 0x4F, 0xE9,
972
973 0x1F, 0x62, 0x57, 0x9F,
974 0x00, 0x80, 0x00, 0xE8,
975
976 0x3F, 0x3D, 0x5D, 0x9F,
977 0x00, 0xE0,
978 0x07, 0x20,
979
980 0x00, 0x80, 0x00, 0xE8,
981 0x28, 0x19, 0x60, 0xEC,
982
983 0xB3, 0x05,
984 0x00, 0xE0,
985 0x00, 0x80, 0x00, 0xE8,
986
987 0x23, 0x3B, 0x33, 0xAD,
988 0x00, 0x80, 0x00, 0xE8,
989
990 0x1F, 0x26, 0x1F, 0xDF,
991 0x9D, 0x1F, 0x4F, 0xE9,
992
993 0x00, 0x80, 0x00, 0xE8,
994 0x00, 0x80, 0x00, 0xE8,
995
996 0x00, 0x80, 0x00, 0xE8,
997 0x9E, 0x3F, 0x4F, 0xE9,
998
999 0x07, 0x07, 0x1F, 0xAF,
1000 0x00, 0x80, 0x00, 0xE8,
1001
1002 0x00, 0x80, 0x00, 0xE8,
1003 0x00, 0x80, 0x00, 0xE8,
1004
1005 0x9C, 0x80, 0x4F, 0xE9,
1006 0x00, 0x80, 0x00, 0xE8,
1007
1008 0x00, 0x80, 0x00, 0xE8,
1009 0x57, 0x39, 0x20, 0xE9,
1010
1011 0x16, 0x28, 0x20, 0xE9,
1012 0x1D, 0x3B, 0x20, 0xE9,
1013
1014 0x1E, 0x2B, 0x20, 0xE9,
1015 0x2B, 0x32, 0x20, 0xE9,
1016
1017 0x1C, 0x23, 0x20, 0xE9,
1018 0x57, 0x36, 0x20, 0xE9,
1019
1020 0x00, 0x80, 0xA0, 0xE9,
1021 0x40, 0x40, 0xD8, 0xEC,
1022
1023 0xFF, 0x80, 0xC0, 0xE9,
1024 0x90, 0xE2,
1025 0x00, 0xE0,
1026
1027 0x7A, 0xFF, 0x20, 0xEA,
1028 0x19, 0xC8, 0xC1, 0xCD,
1029
1030 0x1F, 0xD7, 0x18, 0xBD,
1031 0x3F, 0xD7, 0x22, 0xBD,
1032
1033 0x9F, 0x41, 0x49, 0xBD,
1034 0x00, 0x80, 0x00, 0xE8,
1035
1036 0x25, 0x41, 0x49, 0xBD,
1037 0x2D, 0x41, 0x51, 0xBD,
1038
1039 0x0D, 0x80, 0x07, 0xEA,
1040 0x00, 0x80, 0x00, 0xE8,
1041
1042 0x35, 0x40, 0x48, 0xBD,
1043 0x3D, 0x40, 0x50, 0xBD,
1044
1045 0x00, 0x80, 0x00, 0xE8,
1046 0x25, 0x30,
1047 0x2D, 0x30,
1048
1049 0x35, 0x30,
1050 0xB5, 0x30,
1051 0xBD, 0x30,
1052 0x3D, 0x30,
1053
1054 0x9C, 0xA7, 0x5B, 0x9F,
1055 0x00, 0x80, 0x00, 0xE8,
1056
1057 0x00, 0x80, 0x00, 0xE8,
1058 0x00, 0x80, 0x00, 0xE8,
1059
1060 0x00, 0x80, 0x00, 0xE8,
1061 0x00, 0x80, 0x00, 0xE8,
1062
1063 0x00, 0x80, 0x00, 0xE8,
1064 0x00, 0x80, 0x00, 0xE8,
1065
1066 0x00, 0x80, 0x00, 0xE8,
1067 0x00, 0x80, 0x00, 0xE8,
1068
1069 0x79, 0xFF, 0x0A, 0xEA,
1070 0x00, 0x80, 0x00, 0xE8,
1071
1072 0xC9, 0x41, 0xC8, 0xEC,
1073 0x42, 0xE1,
1074 0x00, 0xE0,
1075
1076 0x77, 0xFF, 0x20, 0xEA,
1077 0x00, 0x80, 0x00, 0xE8,
1078
1079 0x00, 0x80, 0x00, 0xE8,
1080 0x00, 0x80, 0x00, 0xE8,
1081
1082 0xC8, 0x40, 0xC0, 0xEC,
1083 0x00, 0x80, 0x00, 0xE8,
1084
1085 0x74, 0xFF, 0x20, 0xEA,
1086 0x00, 0x80, 0x00, 0xE8,
1087
1088 0x00, 0x80, 0x00, 0xE8,
1089 0x00, 0x80, 0x00, 0xE8,
1090
1091};
1092
1093static unsigned char warp_g200_tgzaf[] = {
1094
1095 0x00, 0x80, 0x00, 0xE8,
1096 0x00, 0x80, 0x00, 0xE8,
1097
1098 0x00, 0x80, 0x00, 0xE8,
1099 0x00, 0x80, 0x00, 0xE8,
1100
1101 0x00, 0x80, 0x00, 0xE8,
1102 0x00, 0x80, 0x00, 0xE8,
1103
1104 0x00, 0x80, 0x00, 0xE8,
1105 0x00, 0x80, 0x00, 0xE8,
1106
1107 0x00, 0x80, 0x00, 0xE8,
1108 0x00, 0x80, 0x00, 0xE8,
1109
1110 0x00, 0x80, 0x00, 0xE8,
1111 0x00, 0x80, 0x00, 0xE8,
1112
1113 0x00, 0x80, 0x00, 0xE8,
1114 0x00, 0x80, 0x00, 0xE8,
1115
1116 0x00, 0x80, 0x00, 0xE8,
1117 0x00, 0x80, 0x00, 0xE8,
1118
1119 0x00, 0x80, 0x00, 0xE8,
1120 0x00, 0x80, 0x00, 0xE8,
1121
1122 0x00, 0x80, 0x00, 0xE8,
1123 0x00, 0x80, 0x00, 0xE8,
1124
1125 0x00, 0x98, 0xA0, 0xE9,
1126 0x40, 0x40, 0xD8, 0xEC,
1127
1128 0xFF, 0x80, 0xC0, 0xE9,
1129 0x00, 0x80, 0x00, 0xE8,
1130
1131 0x1F, 0xD7, 0x18, 0xBD,
1132 0x3F, 0xD7, 0x22, 0xBD,
1133
1134 0x81, 0x04,
1135 0x89, 0x04,
1136 0x01, 0x04,
1137 0x09, 0x04,
1138
1139 0xC9, 0x41, 0xC0, 0xEC,
1140 0x11, 0x04,
1141 0x00, 0xE0,
1142
1143 0x41, 0xCC, 0x41, 0xCD,
1144 0x49, 0xCC, 0x49, 0xCD,
1145
1146 0xD1, 0x41, 0xC0, 0xEC,
1147 0x51, 0xCC, 0x51, 0xCD,
1148
1149 0x80, 0x04,
1150 0x10, 0x04,
1151 0x08, 0x04,
1152 0x00, 0xE0,
1153
1154 0x00, 0xCC, 0xC0, 0xCD,
1155 0xD1, 0x49, 0xC0, 0xEC,
1156
1157 0x8A, 0x1F, 0x20, 0xE9,
1158 0x8B, 0x3F, 0x20, 0xE9,
1159
1160 0x41, 0x3C, 0x41, 0xAD,
1161 0x49, 0x3C, 0x49, 0xAD,
1162
1163 0x10, 0xCC, 0x10, 0xCD,
1164 0x08, 0xCC, 0x08, 0xCD,
1165
1166 0xB9, 0x41, 0x49, 0xBB,
1167 0x1F, 0xF0, 0x41, 0xCD,
1168
1169 0x51, 0x3C, 0x51, 0xAD,
1170 0x00, 0x98, 0x80, 0xE9,
1171
1172 0x83, 0x80, 0x07, 0xEA,
1173 0x24, 0x1F, 0x20, 0xE9,
1174
1175 0x21, 0x45, 0x80, 0xE8,
1176 0x1A, 0x4D, 0x80, 0xE8,
1177
1178 0x31, 0x55, 0x80, 0xE8,
1179 0x00, 0x80, 0x00, 0xE8,
1180
1181 0x15, 0x41, 0x49, 0xBD,
1182 0x1D, 0x41, 0x51, 0xBD,
1183
1184 0x2E, 0x41, 0x2A, 0xB8,
1185 0x34, 0x53, 0xA0, 0xE8,
1186
1187 0x15, 0x30,
1188 0x1D, 0x30,
1189 0x58, 0xE3,
1190 0x00, 0xE0,
1191
1192 0xB5, 0x40, 0x48, 0xBD,
1193 0x3D, 0x40, 0x50, 0xBD,
1194
1195 0x24, 0x43, 0xA0, 0xE8,
1196 0x2C, 0x4B, 0xA0, 0xE8,
1197
1198 0x15, 0x72,
1199 0x09, 0xE3,
1200 0x00, 0xE0,
1201 0x1D, 0x72,
1202
1203 0x35, 0x30,
1204 0xB5, 0x30,
1205 0xBD, 0x30,
1206 0x3D, 0x30,
1207
1208 0x9C, 0x97, 0x57, 0x9F,
1209 0x00, 0x80, 0x00, 0xE8,
1210
1211 0x6C, 0x64, 0xC8, 0xEC,
1212 0x98, 0xE1,
1213 0xB5, 0x05,
1214
1215 0xBD, 0x05,
1216 0x2E, 0x30,
1217 0x32, 0xC0, 0xA0, 0xE8,
1218
1219 0x33, 0xC0, 0xA0, 0xE8,
1220 0x74, 0x64, 0xC8, 0xEC,
1221
1222 0x40, 0x3C, 0x40, 0xAD,
1223 0x32, 0x6A,
1224 0x2A, 0x30,
1225
1226 0x20, 0x73,
1227 0x33, 0x6A,
1228 0x00, 0xE0,
1229 0x28, 0x73,
1230
1231 0x1C, 0x72,
1232 0x83, 0xE2,
1233 0x6F, 0x80, 0x15, 0xEA,
1234
1235 0xB8, 0x3D, 0x28, 0xDF,
1236 0x30, 0x35, 0x20, 0xDF,
1237
1238 0x40, 0x30,
1239 0x00, 0xE0,
1240 0xCC, 0xE2,
1241 0x64, 0x72,
1242
1243 0x25, 0x42, 0x52, 0xBF,
1244 0x2D, 0x42, 0x4A, 0xBF,
1245
1246 0x30, 0x2E, 0x30, 0xDF,
1247 0x38, 0x2E, 0x38, 0xDF,
1248
1249 0x18, 0x1D, 0x45, 0xE9,
1250 0x1E, 0x15, 0x45, 0xE9,
1251
1252 0x2B, 0x49, 0x51, 0xBD,
1253 0x00, 0xE0,
1254 0x1F, 0x73,
1255
1256 0x38, 0x38, 0x40, 0xAF,
1257 0x30, 0x30, 0x40, 0xAF,
1258
1259 0x24, 0x1F, 0x24, 0xDF,
1260 0x1D, 0x32, 0x20, 0xE9,
1261
1262 0x2C, 0x1F, 0x2C, 0xDF,
1263 0x1A, 0x33, 0x20, 0xE9,
1264
1265 0xB0, 0x10,
1266 0x08, 0xE3,
1267 0x40, 0x10,
1268 0xB8, 0x10,
1269
1270 0x26, 0xF0, 0x30, 0xCD,
1271 0x2F, 0xF0, 0x38, 0xCD,
1272
1273 0x2B, 0x80, 0x20, 0xE9,
1274 0x2A, 0x80, 0x20, 0xE9,
1275
1276 0xA6, 0x20,
1277 0x88, 0xE2,
1278 0x00, 0xE0,
1279 0xAF, 0x20,
1280
1281 0x28, 0x2A, 0x26, 0xAF,
1282 0x20, 0x2A, 0xC0, 0xAF,
1283
1284 0x34, 0x1F, 0x34, 0xDF,
1285 0x46, 0x24, 0x46, 0xDF,
1286
1287 0x28, 0x30, 0x80, 0xBF,
1288 0x20, 0x38, 0x80, 0xBF,
1289
1290 0x47, 0x24, 0x47, 0xDF,
1291 0x4E, 0x2C, 0x4E, 0xDF,
1292
1293 0x4F, 0x2C, 0x4F, 0xDF,
1294 0x56, 0x34, 0x56, 0xDF,
1295
1296 0x28, 0x15, 0x28, 0xDF,
1297 0x20, 0x1D, 0x20, 0xDF,
1298
1299 0x57, 0x34, 0x57, 0xDF,
1300 0x00, 0xE0,
1301 0x1D, 0x05,
1302
1303 0x04, 0x80, 0x10, 0xEA,
1304 0x89, 0xE2,
1305 0x2B, 0x30,
1306
1307 0x3F, 0xC1, 0x1D, 0xBD,
1308 0x00, 0x80, 0x00, 0xE8,
1309
1310 0x00, 0x80, 0x00, 0xE8,
1311 0x00, 0x80, 0x00, 0xE8,
1312
1313 0xA0, 0x68,
1314 0xBF, 0x25,
1315 0x00, 0x80, 0x00, 0xE8,
1316
1317 0x20, 0xC0, 0x20, 0xAF,
1318 0x28, 0x05,
1319 0x97, 0x74,
1320
1321 0x00, 0xE0,
1322 0x2A, 0x10,
1323 0x16, 0xC0, 0x20, 0xE9,
1324
1325 0x04, 0x80, 0x10, 0xEA,
1326 0x8C, 0xE2,
1327 0x95, 0x05,
1328
1329 0x28, 0xC1, 0x28, 0xAD,
1330 0x1F, 0xC1, 0x15, 0xBD,
1331
1332 0x00, 0x80, 0x00, 0xE8,
1333 0x00, 0x80, 0x00, 0xE8,
1334
1335 0xA8, 0x67,
1336 0x9F, 0x6B,
1337 0x00, 0x80, 0x00, 0xE8,
1338
1339 0x28, 0xC0, 0x28, 0xAD,
1340 0x1D, 0x25,
1341 0x20, 0x05,
1342
1343 0x28, 0x32, 0x80, 0xAD,
1344 0x40, 0x2A, 0x40, 0xBD,
1345
1346 0x1C, 0x80, 0x20, 0xE9,
1347 0x20, 0x33, 0x20, 0xAD,
1348
1349 0x20, 0x73,
1350 0x00, 0xE0,
1351 0xB6, 0x49, 0x51, 0xBB,
1352
1353 0x26, 0x2F, 0xB0, 0xE8,
1354 0x19, 0x20, 0x20, 0xE9,
1355
1356 0x35, 0x20, 0x35, 0xDF,
1357 0x3D, 0x20, 0x3D, 0xDF,
1358
1359 0x15, 0x20, 0x15, 0xDF,
1360 0x1D, 0x20, 0x1D, 0xDF,
1361
1362 0x26, 0xD0, 0x26, 0xCD,
1363 0x29, 0x49, 0x2A, 0xB8,
1364
1365 0x26, 0x40, 0x80, 0xBD,
1366 0x3B, 0x48, 0x50, 0xBD,
1367
1368 0x3E, 0x54, 0x57, 0x9F,
1369 0x00, 0xE0,
1370 0x82, 0xE1,
1371
1372 0x1E, 0xAF, 0x59, 0x9F,
1373 0x00, 0x80, 0x00, 0xE8,
1374
1375 0x26, 0x30,
1376 0x29, 0x30,
1377 0x48, 0x3C, 0x48, 0xAD,
1378
1379 0x2B, 0x72,
1380 0xC2, 0xE1,
1381 0x2C, 0xC0, 0x44, 0xC2,
1382
1383 0x05, 0x24, 0x34, 0xBF,
1384 0x0D, 0x24, 0x2C, 0xBF,
1385
1386 0x2D, 0x46, 0x4E, 0xBF,
1387 0x25, 0x46, 0x56, 0xBF,
1388
1389 0x20, 0x1D, 0x6F, 0x8F,
1390 0x32, 0x3E, 0x5F, 0xE9,
1391
1392 0x3E, 0x50, 0x56, 0x9F,
1393 0x00, 0xE0,
1394 0x3B, 0x30,
1395
1396 0x1E, 0x8F, 0x51, 0x9F,
1397 0x33, 0x1E, 0x5F, 0xE9,
1398
1399 0x05, 0x44, 0x54, 0xB2,
1400 0x0D, 0x44, 0x4C, 0xB2,
1401
1402 0x19, 0xC0, 0xB0, 0xE8,
1403 0x34, 0xC0, 0x44, 0xC4,
1404
1405 0x33, 0x73,
1406 0x00, 0xE0,
1407 0x3E, 0x62, 0x57, 0x9F,
1408
1409 0x1E, 0xAF, 0x59, 0x9F,
1410 0x00, 0xE0,
1411 0x0D, 0x20,
1412
1413 0x84, 0x3E, 0x58, 0xE9,
1414 0x28, 0x1D, 0x6F, 0x8F,
1415
1416 0x05, 0x20,
1417 0x00, 0xE0,
1418 0x85, 0x1E, 0x58, 0xE9,
1419
1420 0x9B, 0x3B, 0x33, 0xDF,
1421 0x20, 0x20, 0x42, 0xAF,
1422
1423 0x30, 0x42, 0x56, 0x9F,
1424 0x80, 0x3E, 0x57, 0xE9,
1425
1426 0x3F, 0x8F, 0x51, 0x9F,
1427 0x30, 0x80, 0x5F, 0xE9,
1428
1429 0x28, 0x28, 0x24, 0xAF,
1430 0x81, 0x1E, 0x57, 0xE9,
1431
1432 0x05, 0x47, 0x57, 0xBF,
1433 0x0D, 0x47, 0x4F, 0xBF,
1434
1435 0x88, 0x80, 0x58, 0xE9,
1436 0x1B, 0x29, 0x1B, 0xDF,
1437
1438 0x30, 0x1D, 0x6F, 0x8F,
1439 0x3A, 0x30, 0x4F, 0xE9,
1440
1441 0x1C, 0x30, 0x26, 0xDF,
1442 0x09, 0xE3,
1443 0x3B, 0x05,
1444
1445 0x3E, 0x50, 0x56, 0x9F,
1446 0x3B, 0x3F, 0x4F, 0xE9,
1447
1448 0x1E, 0x8F, 0x51, 0x9F,
1449 0x00, 0xE0,
1450 0xAC, 0x20,
1451
1452 0x2D, 0x44, 0x4C, 0xB4,
1453 0x2C, 0x1C, 0xC0, 0xAF,
1454
1455 0x25, 0x44, 0x54, 0xB4,
1456 0x00, 0xE0,
1457 0xC8, 0x30,
1458
1459 0x30, 0x46, 0x30, 0xAF,
1460 0x1B, 0x1B, 0x48, 0xAF,
1461
1462 0x00, 0xE0,
1463 0x25, 0x20,
1464 0x38, 0x2C, 0x4F, 0xE9,
1465
1466 0x86, 0x80, 0x57, 0xE9,
1467 0x38, 0x1D, 0x6F, 0x8F,
1468
1469 0x28, 0x74,
1470 0x00, 0xE0,
1471 0x0D, 0x44, 0x4C, 0xB0,
1472
1473 0x05, 0x44, 0x54, 0xB0,
1474 0x2D, 0x20,
1475 0x9B, 0x10,
1476
1477 0x82, 0x3E, 0x57, 0xE9,
1478 0x32, 0xF0, 0x1B, 0xCD,
1479
1480 0x1E, 0xBD, 0x59, 0x9F,
1481 0x83, 0x1E, 0x57, 0xE9,
1482
1483 0x38, 0x47, 0x38, 0xAF,
1484 0x34, 0x20,
1485 0x2A, 0x30,
1486
1487 0x00, 0xE0,
1488 0x0D, 0x20,
1489 0x32, 0x20,
1490 0x05, 0x20,
1491
1492 0x87, 0x80, 0x57, 0xE9,
1493 0x1F, 0x54, 0x57, 0x9F,
1494
1495 0x17, 0x42, 0x56, 0x9F,
1496 0x00, 0xE0,
1497 0x3B, 0x6A,
1498
1499 0x3F, 0x8F, 0x51, 0x9F,
1500 0x37, 0x1E, 0x4F, 0xE9,
1501
1502 0x37, 0x32, 0x2A, 0xAF,
1503 0x00, 0xE0,
1504 0x32, 0x00,
1505
1506 0x00, 0x80, 0x00, 0xE8,
1507 0x27, 0xC0, 0x44, 0xC0,
1508
1509 0x36, 0x1F, 0x4F, 0xE9,
1510 0x1F, 0x1F, 0x26, 0xDF,
1511
1512 0x37, 0x1B, 0x37, 0xBF,
1513 0x17, 0x26, 0x17, 0xDF,
1514
1515 0x3E, 0x17, 0x4F, 0xE9,
1516 0x3F, 0x3F, 0x4F, 0xE9,
1517
1518 0x34, 0x1F, 0x34, 0xAF,
1519 0x2B, 0x05,
1520 0xA7, 0x20,
1521
1522 0x33, 0x2B, 0x37, 0xDF,
1523 0x27, 0x17, 0xC0, 0xAF,
1524
1525 0x34, 0x80, 0x4F, 0xE9,
1526 0x00, 0x80, 0x00, 0xE8,
1527
1528 0x0D, 0x21, 0x1A, 0xB6,
1529 0x05, 0x21, 0x31, 0xB6,
1530
1531 0x2D, 0x44, 0x4C, 0xB6,
1532 0x25, 0x44, 0x54, 0xB6,
1533
1534 0x03, 0x80, 0x2A, 0xEA,
1535 0x17, 0xC1, 0x2B, 0xBD,
1536
1537 0x0D, 0x20,
1538 0x05, 0x20,
1539 0x2F, 0xC0, 0x21, 0xC6,
1540
1541 0xB3, 0x68,
1542 0x97, 0x25,
1543 0x00, 0x80, 0x00, 0xE8,
1544
1545 0x33, 0xC0, 0x33, 0xAF,
1546 0x3C, 0x27, 0x4F, 0xE9,
1547
1548 0x00, 0xE0,
1549 0x25, 0x20,
1550 0x07, 0xC0, 0x44, 0xC6,
1551
1552 0x17, 0x50, 0x56, 0x9F,
1553 0x00, 0xE0,
1554 0x2D, 0x20,
1555
1556 0x37, 0x0F, 0x5C, 0x9F,
1557 0x00, 0xE0,
1558 0x2F, 0x20,
1559
1560 0x1F, 0x62, 0x57, 0x9F,
1561 0x00, 0xE0,
1562 0x07, 0x20,
1563
1564 0x3F, 0x3D, 0x5D, 0x9F,
1565 0x00, 0x80, 0x00, 0xE8,
1566
1567 0x00, 0x80, 0x00, 0xE8,
1568 0x28, 0x19, 0x60, 0xEC,
1569
1570 0xB3, 0x05,
1571 0x00, 0xE0,
1572 0x17, 0x26, 0x17, 0xDF,
1573
1574 0x23, 0x3B, 0x33, 0xAD,
1575 0x35, 0x17, 0x4F, 0xE9,
1576
1577 0x1F, 0x26, 0x1F, 0xDF,
1578 0x9D, 0x1F, 0x4F, 0xE9,
1579
1580 0x9E, 0x3F, 0x4F, 0xE9,
1581 0x39, 0x37, 0x4F, 0xE9,
1582
1583 0x2F, 0x2F, 0x17, 0xAF,
1584 0x00, 0x80, 0x00, 0xE8,
1585
1586 0x07, 0x07, 0x1F, 0xAF,
1587 0x00, 0x80, 0x00, 0xE8,
1588
1589 0x31, 0x80, 0x4F, 0xE9,
1590 0x00, 0x80, 0x00, 0xE8,
1591
1592 0x9C, 0x80, 0x4F, 0xE9,
1593 0x00, 0x80, 0x00, 0xE8,
1594
1595 0x00, 0x80, 0x00, 0xE8,
1596 0x57, 0x39, 0x20, 0xE9,
1597
1598 0x16, 0x28, 0x20, 0xE9,
1599 0x1D, 0x3B, 0x20, 0xE9,
1600
1601 0x1E, 0x2B, 0x20, 0xE9,
1602 0x2B, 0x32, 0x20, 0xE9,
1603
1604 0x1C, 0x23, 0x20, 0xE9,
1605 0x57, 0x36, 0x20, 0xE9,
1606
1607 0x00, 0x80, 0xA0, 0xE9,
1608 0x40, 0x40, 0xD8, 0xEC,
1609
1610 0xFF, 0x80, 0xC0, 0xE9,
1611 0x90, 0xE2,
1612 0x00, 0xE0,
1613
1614 0x74, 0xFF, 0x20, 0xEA,
1615 0x19, 0xC8, 0xC1, 0xCD,
1616
1617 0x1F, 0xD7, 0x18, 0xBD,
1618 0x3F, 0xD7, 0x22, 0xBD,
1619
1620 0x9F, 0x41, 0x49, 0xBD,
1621 0x00, 0x80, 0x00, 0xE8,
1622
1623 0x25, 0x41, 0x49, 0xBD,
1624 0x2D, 0x41, 0x51, 0xBD,
1625
1626 0x0D, 0x80, 0x07, 0xEA,
1627 0x00, 0x80, 0x00, 0xE8,
1628
1629 0x35, 0x40, 0x48, 0xBD,
1630 0x3D, 0x40, 0x50, 0xBD,
1631
1632 0x00, 0x80, 0x00, 0xE8,
1633 0x25, 0x30,
1634 0x2D, 0x30,
1635
1636 0x35, 0x30,
1637 0xB5, 0x30,
1638 0xBD, 0x30,
1639 0x3D, 0x30,
1640
1641 0x9C, 0xA7, 0x5B, 0x9F,
1642 0x00, 0x80, 0x00, 0xE8,
1643
1644 0x00, 0x80, 0x00, 0xE8,
1645 0x00, 0x80, 0x00, 0xE8,
1646
1647 0x00, 0x80, 0x00, 0xE8,
1648 0x00, 0x80, 0x00, 0xE8,
1649
1650 0x00, 0x80, 0x00, 0xE8,
1651 0x00, 0x80, 0x00, 0xE8,
1652
1653 0x00, 0x80, 0x00, 0xE8,
1654 0x00, 0x80, 0x00, 0xE8,
1655
1656 0x73, 0xFF, 0x0A, 0xEA,
1657 0x00, 0x80, 0x00, 0xE8,
1658
1659 0xC9, 0x41, 0xC8, 0xEC,
1660 0x42, 0xE1,
1661 0x00, 0xE0,
1662
1663 0x71, 0xFF, 0x20, 0xEA,
1664 0x00, 0x80, 0x00, 0xE8,
1665
1666 0x00, 0x80, 0x00, 0xE8,
1667 0x00, 0x80, 0x00, 0xE8,
1668
1669 0xC8, 0x40, 0xC0, 0xEC,
1670 0x00, 0x80, 0x00, 0xE8,
1671
1672 0x6E, 0xFF, 0x20, 0xEA,
1673 0x00, 0x80, 0x00, 0xE8,
1674
1675 0x00, 0x80, 0x00, 0xE8,
1676 0x00, 0x80, 0x00, 0xE8,
1677
1678};
1679
1680static unsigned char warp_g200_tgzf[] = {
1681
1682 0x00, 0x80, 0x00, 0xE8,
1683 0x00, 0x80, 0x00, 0xE8,
1684
1685 0x00, 0x80, 0x00, 0xE8,
1686 0x00, 0x80, 0x00, 0xE8,
1687
1688 0x00, 0x80, 0x00, 0xE8,
1689 0x00, 0x80, 0x00, 0xE8,
1690
1691 0x00, 0x80, 0x00, 0xE8,
1692 0x00, 0x80, 0x00, 0xE8,
1693
1694 0x00, 0x80, 0x00, 0xE8,
1695 0x00, 0x80, 0x00, 0xE8,
1696
1697 0x00, 0x80, 0x00, 0xE8,
1698 0x00, 0x80, 0x00, 0xE8,
1699
1700 0x00, 0x80, 0x00, 0xE8,
1701 0x00, 0x80, 0x00, 0xE8,
1702
1703 0x00, 0x80, 0x00, 0xE8,
1704 0x00, 0x80, 0x00, 0xE8,
1705
1706 0x00, 0x80, 0x00, 0xE8,
1707 0x00, 0x80, 0x00, 0xE8,
1708
1709 0x00, 0x80, 0x00, 0xE8,
1710 0x00, 0x80, 0x00, 0xE8,
1711
1712 0x00, 0x98, 0xA0, 0xE9,
1713 0x40, 0x40, 0xD8, 0xEC,
1714
1715 0xFF, 0x80, 0xC0, 0xE9,
1716 0x00, 0x80, 0x00, 0xE8,
1717
1718 0x1F, 0xD7, 0x18, 0xBD,
1719 0x3F, 0xD7, 0x22, 0xBD,
1720
1721 0x81, 0x04,
1722 0x89, 0x04,
1723 0x01, 0x04,
1724 0x09, 0x04,
1725
1726 0xC9, 0x41, 0xC0, 0xEC,
1727 0x11, 0x04,
1728 0x00, 0xE0,
1729
1730 0x41, 0xCC, 0x41, 0xCD,
1731 0x49, 0xCC, 0x49, 0xCD,
1732
1733 0xD1, 0x41, 0xC0, 0xEC,
1734 0x51, 0xCC, 0x51, 0xCD,
1735
1736 0x80, 0x04,
1737 0x10, 0x04,
1738 0x08, 0x04,
1739 0x00, 0xE0,
1740
1741 0x00, 0xCC, 0xC0, 0xCD,
1742 0xD1, 0x49, 0xC0, 0xEC,
1743
1744 0x8A, 0x1F, 0x20, 0xE9,
1745 0x8B, 0x3F, 0x20, 0xE9,
1746
1747 0x41, 0x3C, 0x41, 0xAD,
1748 0x49, 0x3C, 0x49, 0xAD,
1749
1750 0x10, 0xCC, 0x10, 0xCD,
1751 0x08, 0xCC, 0x08, 0xCD,
1752
1753 0xB9, 0x41, 0x49, 0xBB,
1754 0x1F, 0xF0, 0x41, 0xCD,
1755
1756 0x51, 0x3C, 0x51, 0xAD,
1757 0x00, 0x98, 0x80, 0xE9,
1758
1759 0x7F, 0x80, 0x07, 0xEA,
1760 0x24, 0x1F, 0x20, 0xE9,
1761
1762 0x21, 0x45, 0x80, 0xE8,
1763 0x1A, 0x4D, 0x80, 0xE8,
1764
1765 0x31, 0x55, 0x80, 0xE8,
1766 0x00, 0x80, 0x00, 0xE8,
1767
1768 0x15, 0x41, 0x49, 0xBD,
1769 0x1D, 0x41, 0x51, 0xBD,
1770
1771 0x2E, 0x41, 0x2A, 0xB8,
1772 0x34, 0x53, 0xA0, 0xE8,
1773
1774 0x15, 0x30,
1775 0x1D, 0x30,
1776 0x58, 0xE3,
1777 0x00, 0xE0,
1778
1779 0xB5, 0x40, 0x48, 0xBD,
1780 0x3D, 0x40, 0x50, 0xBD,
1781
1782 0x24, 0x43, 0xA0, 0xE8,
1783 0x2C, 0x4B, 0xA0, 0xE8,
1784
1785 0x15, 0x72,
1786 0x09, 0xE3,
1787 0x00, 0xE0,
1788 0x1D, 0x72,
1789
1790 0x35, 0x30,
1791 0xB5, 0x30,
1792 0xBD, 0x30,
1793 0x3D, 0x30,
1794
1795 0x9C, 0x97, 0x57, 0x9F,
1796 0x00, 0x80, 0x00, 0xE8,
1797
1798 0x6C, 0x64, 0xC8, 0xEC,
1799 0x98, 0xE1,
1800 0xB5, 0x05,
1801
1802 0xBD, 0x05,
1803 0x2E, 0x30,
1804 0x32, 0xC0, 0xA0, 0xE8,
1805
1806 0x33, 0xC0, 0xA0, 0xE8,
1807 0x74, 0x64, 0xC8, 0xEC,
1808
1809 0x40, 0x3C, 0x40, 0xAD,
1810 0x32, 0x6A,
1811 0x2A, 0x30,
1812
1813 0x20, 0x73,
1814 0x33, 0x6A,
1815 0x00, 0xE0,
1816 0x28, 0x73,
1817
1818 0x1C, 0x72,
1819 0x83, 0xE2,
1820 0x6B, 0x80, 0x15, 0xEA,
1821
1822 0xB8, 0x3D, 0x28, 0xDF,
1823 0x30, 0x35, 0x20, 0xDF,
1824
1825 0x40, 0x30,
1826 0x00, 0xE0,
1827 0xCC, 0xE2,
1828 0x64, 0x72,
1829
1830 0x25, 0x42, 0x52, 0xBF,
1831 0x2D, 0x42, 0x4A, 0xBF,
1832
1833 0x30, 0x2E, 0x30, 0xDF,
1834 0x38, 0x2E, 0x38, 0xDF,
1835
1836 0x18, 0x1D, 0x45, 0xE9,
1837 0x1E, 0x15, 0x45, 0xE9,
1838
1839 0x2B, 0x49, 0x51, 0xBD,
1840 0x00, 0xE0,
1841 0x1F, 0x73,
1842
1843 0x38, 0x38, 0x40, 0xAF,
1844 0x30, 0x30, 0x40, 0xAF,
1845
1846 0x24, 0x1F, 0x24, 0xDF,
1847 0x1D, 0x32, 0x20, 0xE9,
1848
1849 0x2C, 0x1F, 0x2C, 0xDF,
1850 0x1A, 0x33, 0x20, 0xE9,
1851
1852 0xB0, 0x10,
1853 0x08, 0xE3,
1854 0x40, 0x10,
1855 0xB8, 0x10,
1856
1857 0x26, 0xF0, 0x30, 0xCD,
1858 0x2F, 0xF0, 0x38, 0xCD,
1859
1860 0x2B, 0x80, 0x20, 0xE9,
1861 0x2A, 0x80, 0x20, 0xE9,
1862
1863 0xA6, 0x20,
1864 0x88, 0xE2,
1865 0x00, 0xE0,
1866 0xAF, 0x20,
1867
1868 0x28, 0x2A, 0x26, 0xAF,
1869 0x20, 0x2A, 0xC0, 0xAF,
1870
1871 0x34, 0x1F, 0x34, 0xDF,
1872 0x46, 0x24, 0x46, 0xDF,
1873
1874 0x28, 0x30, 0x80, 0xBF,
1875 0x20, 0x38, 0x80, 0xBF,
1876
1877 0x47, 0x24, 0x47, 0xDF,
1878 0x4E, 0x2C, 0x4E, 0xDF,
1879
1880 0x4F, 0x2C, 0x4F, 0xDF,
1881 0x56, 0x34, 0x56, 0xDF,
1882
1883 0x28, 0x15, 0x28, 0xDF,
1884 0x20, 0x1D, 0x20, 0xDF,
1885
1886 0x57, 0x34, 0x57, 0xDF,
1887 0x00, 0xE0,
1888 0x1D, 0x05,
1889
1890 0x04, 0x80, 0x10, 0xEA,
1891 0x89, 0xE2,
1892 0x2B, 0x30,
1893
1894 0x3F, 0xC1, 0x1D, 0xBD,
1895 0x00, 0x80, 0x00, 0xE8,
1896
1897 0x00, 0x80, 0x00, 0xE8,
1898 0x00, 0x80, 0x00, 0xE8,
1899
1900 0xA0, 0x68,
1901 0xBF, 0x25,
1902 0x00, 0x80, 0x00, 0xE8,
1903
1904 0x20, 0xC0, 0x20, 0xAF,
1905 0x28, 0x05,
1906 0x97, 0x74,
1907
1908 0x00, 0xE0,
1909 0x2A, 0x10,
1910 0x16, 0xC0, 0x20, 0xE9,
1911
1912 0x04, 0x80, 0x10, 0xEA,
1913 0x8C, 0xE2,
1914 0x95, 0x05,
1915
1916 0x28, 0xC1, 0x28, 0xAD,
1917 0x1F, 0xC1, 0x15, 0xBD,
1918
1919 0x00, 0x80, 0x00, 0xE8,
1920 0x00, 0x80, 0x00, 0xE8,
1921
1922 0xA8, 0x67,
1923 0x9F, 0x6B,
1924 0x00, 0x80, 0x00, 0xE8,
1925
1926 0x28, 0xC0, 0x28, 0xAD,
1927 0x1D, 0x25,
1928 0x20, 0x05,
1929
1930 0x28, 0x32, 0x80, 0xAD,
1931 0x40, 0x2A, 0x40, 0xBD,
1932
1933 0x1C, 0x80, 0x20, 0xE9,
1934 0x20, 0x33, 0x20, 0xAD,
1935
1936 0x20, 0x73,
1937 0x00, 0xE0,
1938 0xB6, 0x49, 0x51, 0xBB,
1939
1940 0x26, 0x2F, 0xB0, 0xE8,
1941 0x19, 0x20, 0x20, 0xE9,
1942
1943 0x35, 0x20, 0x35, 0xDF,
1944 0x3D, 0x20, 0x3D, 0xDF,
1945
1946 0x15, 0x20, 0x15, 0xDF,
1947 0x1D, 0x20, 0x1D, 0xDF,
1948
1949 0x26, 0xD0, 0x26, 0xCD,
1950 0x29, 0x49, 0x2A, 0xB8,
1951
1952 0x26, 0x40, 0x80, 0xBD,
1953 0x3B, 0x48, 0x50, 0xBD,
1954
1955 0x3E, 0x54, 0x57, 0x9F,
1956 0x00, 0xE0,
1957 0x82, 0xE1,
1958
1959 0x1E, 0xAF, 0x59, 0x9F,
1960 0x00, 0x80, 0x00, 0xE8,
1961
1962 0x26, 0x30,
1963 0x29, 0x30,
1964 0x48, 0x3C, 0x48, 0xAD,
1965
1966 0x2B, 0x72,
1967 0xC2, 0xE1,
1968 0x2C, 0xC0, 0x44, 0xC2,
1969
1970 0x05, 0x24, 0x34, 0xBF,
1971 0x0D, 0x24, 0x2C, 0xBF,
1972
1973 0x2D, 0x46, 0x4E, 0xBF,
1974 0x25, 0x46, 0x56, 0xBF,
1975
1976 0x20, 0x1D, 0x6F, 0x8F,
1977 0x32, 0x3E, 0x5F, 0xE9,
1978
1979 0x3E, 0x50, 0x56, 0x9F,
1980 0x00, 0xE0,
1981 0x3B, 0x30,
1982
1983 0x1E, 0x8F, 0x51, 0x9F,
1984 0x33, 0x1E, 0x5F, 0xE9,
1985
1986 0x05, 0x44, 0x54, 0xB2,
1987 0x0D, 0x44, 0x4C, 0xB2,
1988
1989 0x19, 0xC0, 0xB0, 0xE8,
1990 0x34, 0xC0, 0x44, 0xC4,
1991
1992 0x33, 0x73,
1993 0x00, 0xE0,
1994 0x3E, 0x62, 0x57, 0x9F,
1995
1996 0x1E, 0xAF, 0x59, 0x9F,
1997 0x00, 0xE0,
1998 0x0D, 0x20,
1999
2000 0x84, 0x3E, 0x58, 0xE9,
2001 0x28, 0x1D, 0x6F, 0x8F,
2002
2003 0x05, 0x20,
2004 0x00, 0xE0,
2005 0x85, 0x1E, 0x58, 0xE9,
2006
2007 0x9B, 0x3B, 0x33, 0xDF,
2008 0x20, 0x20, 0x42, 0xAF,
2009
2010 0x30, 0x42, 0x56, 0x9F,
2011 0x80, 0x3E, 0x57, 0xE9,
2012
2013 0x3F, 0x8F, 0x51, 0x9F,
2014 0x30, 0x80, 0x5F, 0xE9,
2015
2016 0x28, 0x28, 0x24, 0xAF,
2017 0x81, 0x1E, 0x57, 0xE9,
2018
2019 0x05, 0x47, 0x57, 0xBF,
2020 0x0D, 0x47, 0x4F, 0xBF,
2021
2022 0x88, 0x80, 0x58, 0xE9,
2023 0x1B, 0x29, 0x1B, 0xDF,
2024
2025 0x30, 0x1D, 0x6F, 0x8F,
2026 0x3A, 0x30, 0x4F, 0xE9,
2027
2028 0x1C, 0x30, 0x26, 0xDF,
2029 0x09, 0xE3,
2030 0x3B, 0x05,
2031
2032 0x3E, 0x50, 0x56, 0x9F,
2033 0x3B, 0x3F, 0x4F, 0xE9,
2034
2035 0x1E, 0x8F, 0x51, 0x9F,
2036 0x00, 0xE0,
2037 0xAC, 0x20,
2038
2039 0x2D, 0x44, 0x4C, 0xB4,
2040 0x2C, 0x1C, 0xC0, 0xAF,
2041
2042 0x25, 0x44, 0x54, 0xB4,
2043 0x00, 0xE0,
2044 0xC8, 0x30,
2045
2046 0x30, 0x46, 0x30, 0xAF,
2047 0x1B, 0x1B, 0x48, 0xAF,
2048
2049 0x00, 0xE0,
2050 0x25, 0x20,
2051 0x38, 0x2C, 0x4F, 0xE9,
2052
2053 0x86, 0x80, 0x57, 0xE9,
2054 0x38, 0x1D, 0x6F, 0x8F,
2055
2056 0x28, 0x74,
2057 0x00, 0xE0,
2058 0x0D, 0x44, 0x4C, 0xB0,
2059
2060 0x05, 0x44, 0x54, 0xB0,
2061 0x2D, 0x20,
2062 0x9B, 0x10,
2063
2064 0x82, 0x3E, 0x57, 0xE9,
2065 0x32, 0xF0, 0x1B, 0xCD,
2066
2067 0x1E, 0xBD, 0x59, 0x9F,
2068 0x83, 0x1E, 0x57, 0xE9,
2069
2070 0x38, 0x47, 0x38, 0xAF,
2071 0x34, 0x20,
2072 0x2A, 0x30,
2073
2074 0x00, 0xE0,
2075 0x0D, 0x20,
2076 0x32, 0x20,
2077 0x05, 0x20,
2078
2079 0x87, 0x80, 0x57, 0xE9,
2080 0x1F, 0x54, 0x57, 0x9F,
2081
2082 0x17, 0x42, 0x56, 0x9F,
2083 0x00, 0xE0,
2084 0x3B, 0x6A,
2085
2086 0x3F, 0x8F, 0x51, 0x9F,
2087 0x37, 0x1E, 0x4F, 0xE9,
2088
2089 0x37, 0x32, 0x2A, 0xAF,
2090 0x00, 0xE0,
2091 0x32, 0x00,
2092
2093 0x00, 0x80, 0x00, 0xE8,
2094 0x27, 0xC0, 0x44, 0xC0,
2095
2096 0x36, 0x1F, 0x4F, 0xE9,
2097 0x1F, 0x1F, 0x26, 0xDF,
2098
2099 0x37, 0x1B, 0x37, 0xBF,
2100 0x17, 0x26, 0x17, 0xDF,
2101
2102 0x3E, 0x17, 0x4F, 0xE9,
2103 0x3F, 0x3F, 0x4F, 0xE9,
2104
2105 0x34, 0x1F, 0x34, 0xAF,
2106 0x2B, 0x05,
2107 0xA7, 0x20,
2108
2109 0x33, 0x2B, 0x37, 0xDF,
2110 0x27, 0x17, 0xC0, 0xAF,
2111
2112 0x34, 0x80, 0x4F, 0xE9,
2113 0x00, 0x80, 0x00, 0xE8,
2114
2115 0x0D, 0x21, 0x1A, 0xB6,
2116 0x05, 0x21, 0x31, 0xB6,
2117
2118 0x03, 0x80, 0x2A, 0xEA,
2119 0x17, 0xC1, 0x2B, 0xBD,
2120
2121 0x0D, 0x20,
2122 0x05, 0x20,
2123 0x2F, 0xC0, 0x21, 0xC6,
2124
2125 0xB3, 0x68,
2126 0x97, 0x25,
2127 0x00, 0x80, 0x00, 0xE8,
2128
2129 0x33, 0xC0, 0x33, 0xAF,
2130 0x3C, 0x27, 0x4F, 0xE9,
2131
2132 0x17, 0x50, 0x56, 0x9F,
2133 0x00, 0x80, 0x00, 0xE8,
2134
2135 0x37, 0x0F, 0x5C, 0x9F,
2136 0x00, 0xE0,
2137 0x2F, 0x20,
2138
2139 0x00, 0x80, 0x00, 0xE8,
2140 0x28, 0x19, 0x60, 0xEC,
2141
2142 0xB3, 0x05,
2143 0x00, 0xE0,
2144 0x00, 0x80, 0x00, 0xE8,
2145
2146 0x23, 0x3B, 0x33, 0xAD,
2147 0x00, 0x80, 0x00, 0xE8,
2148
2149 0x17, 0x26, 0x17, 0xDF,
2150 0x35, 0x17, 0x4F, 0xE9,
2151
2152 0x00, 0x80, 0x00, 0xE8,
2153 0x00, 0x80, 0x00, 0xE8,
2154
2155 0x00, 0x80, 0x00, 0xE8,
2156 0x39, 0x37, 0x4F, 0xE9,
2157
2158 0x2F, 0x2F, 0x17, 0xAF,
2159 0x00, 0x80, 0x00, 0xE8,
2160
2161 0x00, 0x80, 0x00, 0xE8,
2162 0x00, 0x80, 0x00, 0xE8,
2163
2164 0x31, 0x80, 0x4F, 0xE9,
2165 0x00, 0x80, 0x00, 0xE8,
2166
2167 0x00, 0x80, 0x00, 0xE8,
2168 0x57, 0x39, 0x20, 0xE9,
2169
2170 0x16, 0x28, 0x20, 0xE9,
2171 0x1D, 0x3B, 0x20, 0xE9,
2172
2173 0x1E, 0x2B, 0x20, 0xE9,
2174 0x2B, 0x32, 0x20, 0xE9,
2175
2176 0x1C, 0x23, 0x20, 0xE9,
2177 0x57, 0x36, 0x20, 0xE9,
2178
2179 0x00, 0x80, 0xA0, 0xE9,
2180 0x40, 0x40, 0xD8, 0xEC,
2181
2182 0xFF, 0x80, 0xC0, 0xE9,
2183 0x90, 0xE2,
2184 0x00, 0xE0,
2185
2186 0x78, 0xFF, 0x20, 0xEA,
2187 0x19, 0xC8, 0xC1, 0xCD,
2188
2189 0x1F, 0xD7, 0x18, 0xBD,
2190 0x3F, 0xD7, 0x22, 0xBD,
2191
2192 0x9F, 0x41, 0x49, 0xBD,
2193 0x00, 0x80, 0x00, 0xE8,
2194
2195 0x25, 0x41, 0x49, 0xBD,
2196 0x2D, 0x41, 0x51, 0xBD,
2197
2198 0x0D, 0x80, 0x07, 0xEA,
2199 0x00, 0x80, 0x00, 0xE8,
2200
2201 0x35, 0x40, 0x48, 0xBD,
2202 0x3D, 0x40, 0x50, 0xBD,
2203
2204 0x00, 0x80, 0x00, 0xE8,
2205 0x25, 0x30,
2206 0x2D, 0x30,
2207
2208 0x35, 0x30,
2209 0xB5, 0x30,
2210 0xBD, 0x30,
2211 0x3D, 0x30,
2212
2213 0x9C, 0xA7, 0x5B, 0x9F,
2214 0x00, 0x80, 0x00, 0xE8,
2215
2216 0x00, 0x80, 0x00, 0xE8,
2217 0x00, 0x80, 0x00, 0xE8,
2218
2219 0x00, 0x80, 0x00, 0xE8,
2220 0x00, 0x80, 0x00, 0xE8,
2221
2222 0x00, 0x80, 0x00, 0xE8,
2223 0x00, 0x80, 0x00, 0xE8,
2224
2225 0x00, 0x80, 0x00, 0xE8,
2226 0x00, 0x80, 0x00, 0xE8,
2227
2228 0x77, 0xFF, 0x0A, 0xEA,
2229 0x00, 0x80, 0x00, 0xE8,
2230
2231 0xC9, 0x41, 0xC8, 0xEC,
2232 0x42, 0xE1,
2233 0x00, 0xE0,
2234
2235 0x75, 0xFF, 0x20, 0xEA,
2236 0x00, 0x80, 0x00, 0xE8,
2237
2238 0x00, 0x80, 0x00, 0xE8,
2239 0x00, 0x80, 0x00, 0xE8,
2240
2241 0xC8, 0x40, 0xC0, 0xEC,
2242 0x00, 0x80, 0x00, 0xE8,
2243
2244 0x72, 0xFF, 0x20, 0xEA,
2245 0x00, 0x80, 0x00, 0xE8,
2246
2247 0x00, 0x80, 0x00, 0xE8,
2248 0x00, 0x80, 0x00, 0xE8,
2249
2250};
2251
2252static unsigned char warp_g200_tgzs[] = {
2253
2254 0x00, 0x80, 0x00, 0xE8,
2255 0x00, 0x80, 0x00, 0xE8,
2256
2257 0x00, 0x80, 0x00, 0xE8,
2258 0x00, 0x80, 0x00, 0xE8,
2259
2260 0x00, 0x80, 0x00, 0xE8,
2261 0x00, 0x80, 0x00, 0xE8,
2262
2263 0x00, 0x80, 0x00, 0xE8,
2264 0x00, 0x80, 0x00, 0xE8,
2265
2266 0x00, 0x80, 0x00, 0xE8,
2267 0x00, 0x80, 0x00, 0xE8,
2268
2269 0x00, 0x80, 0x00, 0xE8,
2270 0x00, 0x80, 0x00, 0xE8,
2271
2272 0x00, 0x80, 0x00, 0xE8,
2273 0x00, 0x80, 0x00, 0xE8,
2274
2275 0x00, 0x80, 0x00, 0xE8,
2276 0x00, 0x80, 0x00, 0xE8,
2277
2278 0x00, 0x80, 0x00, 0xE8,
2279 0x00, 0x80, 0x00, 0xE8,
2280
2281 0x00, 0x80, 0x00, 0xE8,
2282 0x00, 0x80, 0x00, 0xE8,
2283
2284 0x00, 0x80, 0x00, 0xE8,
2285 0x00, 0x80, 0x00, 0xE8,
2286
2287 0x00, 0x80, 0x00, 0xE8,
2288 0x00, 0x80, 0x00, 0xE8,
2289
2290 0x00, 0x80, 0x00, 0xE8,
2291 0x00, 0x80, 0x00, 0xE8,
2292
2293 0x00, 0x98, 0xA0, 0xE9,
2294 0x40, 0x40, 0xD8, 0xEC,
2295
2296 0xFF, 0x80, 0xC0, 0xE9,
2297 0x00, 0x80, 0x00, 0xE8,
2298
2299 0x1F, 0xD7, 0x18, 0xBD,
2300 0x3F, 0xD7, 0x22, 0xBD,
2301
2302 0x81, 0x04,
2303 0x89, 0x04,
2304 0x01, 0x04,
2305 0x09, 0x04,
2306
2307 0xC9, 0x41, 0xC0, 0xEC,
2308 0x11, 0x04,
2309 0x00, 0xE0,
2310
2311 0x41, 0xCC, 0x41, 0xCD,
2312 0x49, 0xCC, 0x49, 0xCD,
2313
2314 0xD1, 0x41, 0xC0, 0xEC,
2315 0x51, 0xCC, 0x51, 0xCD,
2316
2317 0x80, 0x04,
2318 0x10, 0x04,
2319 0x08, 0x04,
2320 0x00, 0xE0,
2321
2322 0x00, 0xCC, 0xC0, 0xCD,
2323 0xD1, 0x49, 0xC0, 0xEC,
2324
2325 0x8A, 0x1F, 0x20, 0xE9,
2326 0x8B, 0x3F, 0x20, 0xE9,
2327
2328 0x41, 0x3C, 0x41, 0xAD,
2329 0x49, 0x3C, 0x49, 0xAD,
2330
2331 0x10, 0xCC, 0x10, 0xCD,
2332 0x08, 0xCC, 0x08, 0xCD,
2333
2334 0xB9, 0x41, 0x49, 0xBB,
2335 0x1F, 0xF0, 0x41, 0xCD,
2336
2337 0x51, 0x3C, 0x51, 0xAD,
2338 0x00, 0x98, 0x80, 0xE9,
2339
2340 0x8B, 0x80, 0x07, 0xEA,
2341 0x24, 0x1F, 0x20, 0xE9,
2342
2343 0x21, 0x45, 0x80, 0xE8,
2344 0x1A, 0x4D, 0x80, 0xE8,
2345
2346 0x31, 0x55, 0x80, 0xE8,
2347 0x00, 0x80, 0x00, 0xE8,
2348
2349 0x15, 0x41, 0x49, 0xBD,
2350 0x1D, 0x41, 0x51, 0xBD,
2351
2352 0x2E, 0x41, 0x2A, 0xB8,
2353 0x34, 0x53, 0xA0, 0xE8,
2354
2355 0x15, 0x30,
2356 0x1D, 0x30,
2357 0x58, 0xE3,
2358 0x00, 0xE0,
2359
2360 0xB5, 0x40, 0x48, 0xBD,
2361 0x3D, 0x40, 0x50, 0xBD,
2362
2363 0x24, 0x43, 0xA0, 0xE8,
2364 0x2C, 0x4B, 0xA0, 0xE8,
2365
2366 0x15, 0x72,
2367 0x09, 0xE3,
2368 0x00, 0xE0,
2369 0x1D, 0x72,
2370
2371 0x35, 0x30,
2372 0xB5, 0x30,
2373 0xBD, 0x30,
2374 0x3D, 0x30,
2375
2376 0x9C, 0x97, 0x57, 0x9F,
2377 0x00, 0x80, 0x00, 0xE8,
2378
2379 0x6C, 0x64, 0xC8, 0xEC,
2380 0x98, 0xE1,
2381 0xB5, 0x05,
2382
2383 0xBD, 0x05,
2384 0x2E, 0x30,
2385 0x32, 0xC0, 0xA0, 0xE8,
2386
2387 0x33, 0xC0, 0xA0, 0xE8,
2388 0x74, 0x64, 0xC8, 0xEC,
2389
2390 0x40, 0x3C, 0x40, 0xAD,
2391 0x32, 0x6A,
2392 0x2A, 0x30,
2393
2394 0x20, 0x73,
2395 0x33, 0x6A,
2396 0x00, 0xE0,
2397 0x28, 0x73,
2398
2399 0x1C, 0x72,
2400 0x83, 0xE2,
2401 0x77, 0x80, 0x15, 0xEA,
2402
2403 0xB8, 0x3D, 0x28, 0xDF,
2404 0x30, 0x35, 0x20, 0xDF,
2405
2406 0x40, 0x30,
2407 0x00, 0xE0,
2408 0xCC, 0xE2,
2409 0x64, 0x72,
2410
2411 0x25, 0x42, 0x52, 0xBF,
2412 0x2D, 0x42, 0x4A, 0xBF,
2413
2414 0x30, 0x2E, 0x30, 0xDF,
2415 0x38, 0x2E, 0x38, 0xDF,
2416
2417 0x18, 0x1D, 0x45, 0xE9,
2418 0x1E, 0x15, 0x45, 0xE9,
2419
2420 0x2B, 0x49, 0x51, 0xBD,
2421 0x00, 0xE0,
2422 0x1F, 0x73,
2423
2424 0x38, 0x38, 0x40, 0xAF,
2425 0x30, 0x30, 0x40, 0xAF,
2426
2427 0x24, 0x1F, 0x24, 0xDF,
2428 0x1D, 0x32, 0x20, 0xE9,
2429
2430 0x2C, 0x1F, 0x2C, 0xDF,
2431 0x1A, 0x33, 0x20, 0xE9,
2432
2433 0xB0, 0x10,
2434 0x08, 0xE3,
2435 0x40, 0x10,
2436 0xB8, 0x10,
2437
2438 0x26, 0xF0, 0x30, 0xCD,
2439 0x2F, 0xF0, 0x38, 0xCD,
2440
2441 0x2B, 0x80, 0x20, 0xE9,
2442 0x2A, 0x80, 0x20, 0xE9,
2443
2444 0xA6, 0x20,
2445 0x88, 0xE2,
2446 0x00, 0xE0,
2447 0xAF, 0x20,
2448
2449 0x28, 0x2A, 0x26, 0xAF,
2450 0x20, 0x2A, 0xC0, 0xAF,
2451
2452 0x34, 0x1F, 0x34, 0xDF,
2453 0x46, 0x24, 0x46, 0xDF,
2454
2455 0x28, 0x30, 0x80, 0xBF,
2456 0x20, 0x38, 0x80, 0xBF,
2457
2458 0x47, 0x24, 0x47, 0xDF,
2459 0x4E, 0x2C, 0x4E, 0xDF,
2460
2461 0x4F, 0x2C, 0x4F, 0xDF,
2462 0x56, 0x34, 0x56, 0xDF,
2463
2464 0x28, 0x15, 0x28, 0xDF,
2465 0x20, 0x1D, 0x20, 0xDF,
2466
2467 0x57, 0x34, 0x57, 0xDF,
2468 0x00, 0xE0,
2469 0x1D, 0x05,
2470
2471 0x04, 0x80, 0x10, 0xEA,
2472 0x89, 0xE2,
2473 0x2B, 0x30,
2474
2475 0x3F, 0xC1, 0x1D, 0xBD,
2476 0x00, 0x80, 0x00, 0xE8,
2477
2478 0x00, 0x80, 0x00, 0xE8,
2479 0x00, 0x80, 0x00, 0xE8,
2480
2481 0xA0, 0x68,
2482 0xBF, 0x25,
2483 0x00, 0x80, 0x00, 0xE8,
2484
2485 0x20, 0xC0, 0x20, 0xAF,
2486 0x28, 0x05,
2487 0x97, 0x74,
2488
2489 0x00, 0xE0,
2490 0x2A, 0x10,
2491 0x16, 0xC0, 0x20, 0xE9,
2492
2493 0x04, 0x80, 0x10, 0xEA,
2494 0x8C, 0xE2,
2495 0x95, 0x05,
2496
2497 0x28, 0xC1, 0x28, 0xAD,
2498 0x1F, 0xC1, 0x15, 0xBD,
2499
2500 0x00, 0x80, 0x00, 0xE8,
2501 0x00, 0x80, 0x00, 0xE8,
2502
2503 0xA8, 0x67,
2504 0x9F, 0x6B,
2505 0x00, 0x80, 0x00, 0xE8,
2506
2507 0x28, 0xC0, 0x28, 0xAD,
2508 0x1D, 0x25,
2509 0x20, 0x05,
2510
2511 0x28, 0x32, 0x80, 0xAD,
2512 0x40, 0x2A, 0x40, 0xBD,
2513
2514 0x1C, 0x80, 0x20, 0xE9,
2515 0x20, 0x33, 0x20, 0xAD,
2516
2517 0x20, 0x73,
2518 0x00, 0xE0,
2519 0xB6, 0x49, 0x51, 0xBB,
2520
2521 0x26, 0x2F, 0xB0, 0xE8,
2522 0x19, 0x20, 0x20, 0xE9,
2523
2524 0x35, 0x20, 0x35, 0xDF,
2525 0x3D, 0x20, 0x3D, 0xDF,
2526
2527 0x15, 0x20, 0x15, 0xDF,
2528 0x1D, 0x20, 0x1D, 0xDF,
2529
2530 0x26, 0xD0, 0x26, 0xCD,
2531 0x29, 0x49, 0x2A, 0xB8,
2532
2533 0x26, 0x40, 0x80, 0xBD,
2534 0x3B, 0x48, 0x50, 0xBD,
2535
2536 0x3E, 0x54, 0x57, 0x9F,
2537 0x00, 0xE0,
2538 0x82, 0xE1,
2539
2540 0x1E, 0xAF, 0x59, 0x9F,
2541 0x00, 0x80, 0x00, 0xE8,
2542
2543 0x26, 0x30,
2544 0x29, 0x30,
2545 0x48, 0x3C, 0x48, 0xAD,
2546
2547 0x2B, 0x72,
2548 0xC2, 0xE1,
2549 0x2C, 0xC0, 0x44, 0xC2,
2550
2551 0x05, 0x24, 0x34, 0xBF,
2552 0x0D, 0x24, 0x2C, 0xBF,
2553
2554 0x2D, 0x46, 0x4E, 0xBF,
2555 0x25, 0x46, 0x56, 0xBF,
2556
2557 0x20, 0x1D, 0x6F, 0x8F,
2558 0x32, 0x3E, 0x5F, 0xE9,
2559
2560 0x3E, 0x50, 0x56, 0x9F,
2561 0x00, 0xE0,
2562 0x3B, 0x30,
2563
2564 0x1E, 0x8F, 0x51, 0x9F,
2565 0x33, 0x1E, 0x5F, 0xE9,
2566
2567 0x05, 0x44, 0x54, 0xB2,
2568 0x0D, 0x44, 0x4C, 0xB2,
2569
2570 0x19, 0xC0, 0xB0, 0xE8,
2571 0x34, 0xC0, 0x44, 0xC4,
2572
2573 0x33, 0x73,
2574 0x00, 0xE0,
2575 0x3E, 0x62, 0x57, 0x9F,
2576
2577 0x1E, 0xAF, 0x59, 0x9F,
2578 0x00, 0xE0,
2579 0x0D, 0x20,
2580
2581 0x84, 0x3E, 0x58, 0xE9,
2582 0x28, 0x1D, 0x6F, 0x8F,
2583
2584 0x05, 0x20,
2585 0x00, 0xE0,
2586 0x85, 0x1E, 0x58, 0xE9,
2587
2588 0x9B, 0x3B, 0x33, 0xDF,
2589 0x20, 0x20, 0x42, 0xAF,
2590
2591 0x30, 0x42, 0x56, 0x9F,
2592 0x80, 0x3E, 0x57, 0xE9,
2593
2594 0x3F, 0x8F, 0x51, 0x9F,
2595 0x30, 0x80, 0x5F, 0xE9,
2596
2597 0x28, 0x28, 0x24, 0xAF,
2598 0x81, 0x1E, 0x57, 0xE9,
2599
2600 0x05, 0x47, 0x57, 0xBF,
2601 0x0D, 0x47, 0x4F, 0xBF,
2602
2603 0x88, 0x80, 0x58, 0xE9,
2604 0x1B, 0x29, 0x1B, 0xDF,
2605
2606 0x30, 0x1D, 0x6F, 0x8F,
2607 0x3A, 0x30, 0x4F, 0xE9,
2608
2609 0x1C, 0x30, 0x26, 0xDF,
2610 0x09, 0xE3,
2611 0x3B, 0x05,
2612
2613 0x3E, 0x50, 0x56, 0x9F,
2614 0x3B, 0x3F, 0x4F, 0xE9,
2615
2616 0x1E, 0x8F, 0x51, 0x9F,
2617 0x00, 0xE0,
2618 0xAC, 0x20,
2619
2620 0x2D, 0x44, 0x4C, 0xB4,
2621 0x2C, 0x1C, 0xC0, 0xAF,
2622
2623 0x25, 0x44, 0x54, 0xB4,
2624 0x00, 0xE0,
2625 0xC8, 0x30,
2626
2627 0x30, 0x46, 0x30, 0xAF,
2628 0x1B, 0x1B, 0x48, 0xAF,
2629
2630 0x00, 0xE0,
2631 0x25, 0x20,
2632 0x38, 0x2C, 0x4F, 0xE9,
2633
2634 0x86, 0x80, 0x57, 0xE9,
2635 0x38, 0x1D, 0x6F, 0x8F,
2636
2637 0x28, 0x74,
2638 0x00, 0xE0,
2639 0x0D, 0x44, 0x4C, 0xB0,
2640
2641 0x05, 0x44, 0x54, 0xB0,
2642 0x2D, 0x20,
2643 0x9B, 0x10,
2644
2645 0x82, 0x3E, 0x57, 0xE9,
2646 0x32, 0xF0, 0x1B, 0xCD,
2647
2648 0x1E, 0xBD, 0x59, 0x9F,
2649 0x83, 0x1E, 0x57, 0xE9,
2650
2651 0x38, 0x47, 0x38, 0xAF,
2652 0x34, 0x20,
2653 0x2A, 0x30,
2654
2655 0x00, 0xE0,
2656 0x0D, 0x20,
2657 0x32, 0x20,
2658 0x05, 0x20,
2659
2660 0x87, 0x80, 0x57, 0xE9,
2661 0x1F, 0x54, 0x57, 0x9F,
2662
2663 0x17, 0x42, 0x56, 0x9F,
2664 0x00, 0xE0,
2665 0x3B, 0x6A,
2666
2667 0x3F, 0x8F, 0x51, 0x9F,
2668 0x37, 0x1E, 0x4F, 0xE9,
2669
2670 0x37, 0x32, 0x2A, 0xAF,
2671 0x00, 0xE0,
2672 0x32, 0x00,
2673
2674 0x00, 0x80, 0x00, 0xE8,
2675 0x27, 0xC0, 0x44, 0xC0,
2676
2677 0x36, 0x1F, 0x4F, 0xE9,
2678 0x1F, 0x1F, 0x26, 0xDF,
2679
2680 0x37, 0x1B, 0x37, 0xBF,
2681 0x17, 0x26, 0x17, 0xDF,
2682
2683 0x3E, 0x17, 0x4F, 0xE9,
2684 0x3F, 0x3F, 0x4F, 0xE9,
2685
2686 0x34, 0x1F, 0x34, 0xAF,
2687 0x2B, 0x05,
2688 0xA7, 0x20,
2689
2690 0x33, 0x2B, 0x37, 0xDF,
2691 0x27, 0x17, 0xC0, 0xAF,
2692
2693 0x34, 0x80, 0x4F, 0xE9,
2694 0x00, 0x80, 0x00, 0xE8,
2695
2696 0x2D, 0x21, 0x1A, 0xB0,
2697 0x25, 0x21, 0x31, 0xB0,
2698
2699 0x0D, 0x21, 0x1A, 0xB2,
2700 0x05, 0x21, 0x31, 0xB2,
2701
2702 0x03, 0x80, 0x2A, 0xEA,
2703 0x17, 0xC1, 0x2B, 0xBD,
2704
2705 0x2D, 0x20,
2706 0x25, 0x20,
2707 0x05, 0x20,
2708 0x0D, 0x20,
2709
2710 0xB3, 0x68,
2711 0x97, 0x25,
2712 0x00, 0x80, 0x00, 0xE8,
2713
2714 0x33, 0xC0, 0x33, 0xAF,
2715 0x2F, 0xC0, 0x21, 0xC0,
2716
2717 0x16, 0x42, 0x56, 0x9F,
2718 0x3C, 0x27, 0x4F, 0xE9,
2719
2720 0x1E, 0x62, 0x57, 0x9F,
2721 0x00, 0x80, 0x00, 0xE8,
2722
2723 0x25, 0x21, 0x31, 0xB4,
2724 0x2D, 0x21, 0x1A, 0xB4,
2725
2726 0x3F, 0x2F, 0x5D, 0x9F,
2727 0x00, 0x80, 0x00, 0xE8,
2728
2729 0x33, 0x05,
2730 0x00, 0xE0,
2731 0x28, 0x19, 0x60, 0xEC,
2732
2733 0x37, 0x0F, 0x5C, 0x9F,
2734 0x00, 0xE0,
2735 0x2F, 0x20,
2736
2737 0x23, 0x3B, 0x33, 0xAD,
2738 0x1E, 0x26, 0x1E, 0xDF,
2739
2740 0xA7, 0x1E, 0x4F, 0xE9,
2741 0x17, 0x26, 0x16, 0xDF,
2742
2743 0x2D, 0x20,
2744 0x00, 0xE0,
2745 0xA8, 0x3F, 0x4F, 0xE9,
2746
2747 0x2F, 0x2F, 0x1E, 0xAF,
2748 0x25, 0x20,
2749 0x00, 0xE0,
2750
2751 0xA4, 0x16, 0x4F, 0xE9,
2752 0x0F, 0xC0, 0x21, 0xC2,
2753
2754 0xA6, 0x80, 0x4F, 0xE9,
2755 0x1F, 0x62, 0x57, 0x9F,
2756
2757 0x3F, 0x2F, 0x5D, 0x9F,
2758 0x00, 0xE0,
2759 0x8F, 0x20,
2760
2761 0xA5, 0x37, 0x4F, 0xE9,
2762 0x0F, 0x17, 0x0F, 0xAF,
2763
2764 0x06, 0xC0, 0x21, 0xC4,
2765 0x00, 0x80, 0x00, 0xE8,
2766
2767 0x00, 0x80, 0x00, 0xE8,
2768 0xA3, 0x80, 0x4F, 0xE9,
2769
2770 0x06, 0x20,
2771 0x00, 0xE0,
2772 0x1F, 0x26, 0x1F, 0xDF,
2773
2774 0xA1, 0x1F, 0x4F, 0xE9,
2775 0xA2, 0x3F, 0x4F, 0xE9,
2776
2777 0x00, 0x80, 0x00, 0xE8,
2778 0x00, 0x80, 0x00, 0xE8,
2779
2780 0x06, 0x06, 0x1F, 0xAF,
2781 0x00, 0x80, 0x00, 0xE8,
2782
2783 0x00, 0x80, 0x00, 0xE8,
2784 0x00, 0x80, 0x00, 0xE8,
2785
2786 0xA0, 0x80, 0x4F, 0xE9,
2787 0x00, 0x80, 0x00, 0xE8,
2788
2789 0x00, 0x80, 0x00, 0xE8,
2790 0x57, 0x39, 0x20, 0xE9,
2791
2792 0x16, 0x28, 0x20, 0xE9,
2793 0x1D, 0x3B, 0x20, 0xE9,
2794
2795 0x1E, 0x2B, 0x20, 0xE9,
2796 0x2B, 0x32, 0x20, 0xE9,
2797
2798 0x1C, 0x23, 0x20, 0xE9,
2799 0x57, 0x36, 0x20, 0xE9,
2800
2801 0x00, 0x80, 0xA0, 0xE9,
2802 0x40, 0x40, 0xD8, 0xEC,
2803
2804 0xFF, 0x80, 0xC0, 0xE9,
2805 0x90, 0xE2,
2806 0x00, 0xE0,
2807
2808 0x6C, 0xFF, 0x20, 0xEA,
2809 0x19, 0xC8, 0xC1, 0xCD,
2810
2811 0x1F, 0xD7, 0x18, 0xBD,
2812 0x3F, 0xD7, 0x22, 0xBD,
2813
2814 0x9F, 0x41, 0x49, 0xBD,
2815 0x00, 0x80, 0x00, 0xE8,
2816
2817 0x25, 0x41, 0x49, 0xBD,
2818 0x2D, 0x41, 0x51, 0xBD,
2819
2820 0x0D, 0x80, 0x07, 0xEA,
2821 0x00, 0x80, 0x00, 0xE8,
2822
2823 0x35, 0x40, 0x48, 0xBD,
2824 0x3D, 0x40, 0x50, 0xBD,
2825
2826 0x00, 0x80, 0x00, 0xE8,
2827 0x25, 0x30,
2828 0x2D, 0x30,
2829
2830 0x35, 0x30,
2831 0xB5, 0x30,
2832 0xBD, 0x30,
2833 0x3D, 0x30,
2834
2835 0x9C, 0xA7, 0x5B, 0x9F,
2836 0x00, 0x80, 0x00, 0xE8,
2837
2838 0x00, 0x80, 0x00, 0xE8,
2839 0x00, 0x80, 0x00, 0xE8,
2840
2841 0x00, 0x80, 0x00, 0xE8,
2842 0x00, 0x80, 0x00, 0xE8,
2843
2844 0x00, 0x80, 0x00, 0xE8,
2845 0x00, 0x80, 0x00, 0xE8,
2846
2847 0x00, 0x80, 0x00, 0xE8,
2848 0x00, 0x80, 0x00, 0xE8,
2849
2850 0x6B, 0xFF, 0x0A, 0xEA,
2851 0x00, 0x80, 0x00, 0xE8,
2852
2853 0xC9, 0x41, 0xC8, 0xEC,
2854 0x42, 0xE1,
2855 0x00, 0xE0,
2856
2857 0x69, 0xFF, 0x20, 0xEA,
2858 0x00, 0x80, 0x00, 0xE8,
2859
2860 0x00, 0x80, 0x00, 0xE8,
2861 0x00, 0x80, 0x00, 0xE8,
2862
2863 0xC8, 0x40, 0xC0, 0xEC,
2864 0x00, 0x80, 0x00, 0xE8,
2865
2866 0x66, 0xFF, 0x20, 0xEA,
2867 0x00, 0x80, 0x00, 0xE8,
2868
2869 0x00, 0x80, 0x00, 0xE8,
2870 0x00, 0x80, 0x00, 0xE8,
2871
2872};
2873
2874static unsigned char warp_g200_tgzsa[] = {
2875
2876 0x00, 0x80, 0x00, 0xE8,
2877 0x00, 0x80, 0x00, 0xE8,
2878
2879 0x00, 0x80, 0x00, 0xE8,
2880 0x00, 0x80, 0x00, 0xE8,
2881
2882 0x00, 0x80, 0x00, 0xE8,
2883 0x00, 0x80, 0x00, 0xE8,
2884
2885 0x00, 0x80, 0x00, 0xE8,
2886 0x00, 0x80, 0x00, 0xE8,
2887
2888 0x00, 0x80, 0x00, 0xE8,
2889 0x00, 0x80, 0x00, 0xE8,
2890
2891 0x00, 0x80, 0x00, 0xE8,
2892 0x00, 0x80, 0x00, 0xE8,
2893
2894 0x00, 0x80, 0x00, 0xE8,
2895 0x00, 0x80, 0x00, 0xE8,
2896
2897 0x00, 0x80, 0x00, 0xE8,
2898 0x00, 0x80, 0x00, 0xE8,
2899
2900 0x00, 0x80, 0x00, 0xE8,
2901 0x00, 0x80, 0x00, 0xE8,
2902
2903 0x00, 0x80, 0x00, 0xE8,
2904 0x00, 0x80, 0x00, 0xE8,
2905
2906 0x00, 0x80, 0x00, 0xE8,
2907 0x00, 0x80, 0x00, 0xE8,
2908
2909 0x00, 0x80, 0x00, 0xE8,
2910 0x00, 0x80, 0x00, 0xE8,
2911
2912 0x00, 0x80, 0x00, 0xE8,
2913 0x00, 0x80, 0x00, 0xE8,
2914
2915 0x00, 0x98, 0xA0, 0xE9,
2916 0x40, 0x40, 0xD8, 0xEC,
2917
2918 0xFF, 0x80, 0xC0, 0xE9,
2919 0x00, 0x80, 0x00, 0xE8,
2920
2921 0x1F, 0xD7, 0x18, 0xBD,
2922 0x3F, 0xD7, 0x22, 0xBD,
2923
2924 0x81, 0x04,
2925 0x89, 0x04,
2926 0x01, 0x04,
2927 0x09, 0x04,
2928
2929 0xC9, 0x41, 0xC0, 0xEC,
2930 0x11, 0x04,
2931 0x00, 0xE0,
2932
2933 0x41, 0xCC, 0x41, 0xCD,
2934 0x49, 0xCC, 0x49, 0xCD,
2935
2936 0xD1, 0x41, 0xC0, 0xEC,
2937 0x51, 0xCC, 0x51, 0xCD,
2938
2939 0x80, 0x04,
2940 0x10, 0x04,
2941 0x08, 0x04,
2942 0x00, 0xE0,
2943
2944 0x00, 0xCC, 0xC0, 0xCD,
2945 0xD1, 0x49, 0xC0, 0xEC,
2946
2947 0x8A, 0x1F, 0x20, 0xE9,
2948 0x8B, 0x3F, 0x20, 0xE9,
2949
2950 0x41, 0x3C, 0x41, 0xAD,
2951 0x49, 0x3C, 0x49, 0xAD,
2952
2953 0x10, 0xCC, 0x10, 0xCD,
2954 0x08, 0xCC, 0x08, 0xCD,
2955
2956 0xB9, 0x41, 0x49, 0xBB,
2957 0x1F, 0xF0, 0x41, 0xCD,
2958
2959 0x51, 0x3C, 0x51, 0xAD,
2960 0x00, 0x98, 0x80, 0xE9,
2961
2962 0x8F, 0x80, 0x07, 0xEA,
2963 0x24, 0x1F, 0x20, 0xE9,
2964
2965 0x21, 0x45, 0x80, 0xE8,
2966 0x1A, 0x4D, 0x80, 0xE8,
2967
2968 0x31, 0x55, 0x80, 0xE8,
2969 0x00, 0x80, 0x00, 0xE8,
2970
2971 0x15, 0x41, 0x49, 0xBD,
2972 0x1D, 0x41, 0x51, 0xBD,
2973
2974 0x2E, 0x41, 0x2A, 0xB8,
2975 0x34, 0x53, 0xA0, 0xE8,
2976
2977 0x15, 0x30,
2978 0x1D, 0x30,
2979 0x58, 0xE3,
2980 0x00, 0xE0,
2981
2982 0xB5, 0x40, 0x48, 0xBD,
2983 0x3D, 0x40, 0x50, 0xBD,
2984
2985 0x24, 0x43, 0xA0, 0xE8,
2986 0x2C, 0x4B, 0xA0, 0xE8,
2987
2988 0x15, 0x72,
2989 0x09, 0xE3,
2990 0x00, 0xE0,
2991 0x1D, 0x72,
2992
2993 0x35, 0x30,
2994 0xB5, 0x30,
2995 0xBD, 0x30,
2996 0x3D, 0x30,
2997
2998 0x9C, 0x97, 0x57, 0x9F,
2999 0x00, 0x80, 0x00, 0xE8,
3000
3001 0x6C, 0x64, 0xC8, 0xEC,
3002 0x98, 0xE1,
3003 0xB5, 0x05,
3004
3005 0xBD, 0x05,
3006 0x2E, 0x30,
3007 0x32, 0xC0, 0xA0, 0xE8,
3008
3009 0x33, 0xC0, 0xA0, 0xE8,
3010 0x74, 0x64, 0xC8, 0xEC,
3011
3012 0x40, 0x3C, 0x40, 0xAD,
3013 0x32, 0x6A,
3014 0x2A, 0x30,
3015
3016 0x20, 0x73,
3017 0x33, 0x6A,
3018 0x00, 0xE0,
3019 0x28, 0x73,
3020
3021 0x1C, 0x72,
3022 0x83, 0xE2,
3023 0x7B, 0x80, 0x15, 0xEA,
3024
3025 0xB8, 0x3D, 0x28, 0xDF,
3026 0x30, 0x35, 0x20, 0xDF,
3027
3028 0x40, 0x30,
3029 0x00, 0xE0,
3030 0xCC, 0xE2,
3031 0x64, 0x72,
3032
3033 0x25, 0x42, 0x52, 0xBF,
3034 0x2D, 0x42, 0x4A, 0xBF,
3035
3036 0x30, 0x2E, 0x30, 0xDF,
3037 0x38, 0x2E, 0x38, 0xDF,
3038
3039 0x18, 0x1D, 0x45, 0xE9,
3040 0x1E, 0x15, 0x45, 0xE9,
3041
3042 0x2B, 0x49, 0x51, 0xBD,
3043 0x00, 0xE0,
3044 0x1F, 0x73,
3045
3046 0x38, 0x38, 0x40, 0xAF,
3047 0x30, 0x30, 0x40, 0xAF,
3048
3049 0x24, 0x1F, 0x24, 0xDF,
3050 0x1D, 0x32, 0x20, 0xE9,
3051
3052 0x2C, 0x1F, 0x2C, 0xDF,
3053 0x1A, 0x33, 0x20, 0xE9,
3054
3055 0xB0, 0x10,
3056 0x08, 0xE3,
3057 0x40, 0x10,
3058 0xB8, 0x10,
3059
3060 0x26, 0xF0, 0x30, 0xCD,
3061 0x2F, 0xF0, 0x38, 0xCD,
3062
3063 0x2B, 0x80, 0x20, 0xE9,
3064 0x2A, 0x80, 0x20, 0xE9,
3065
3066 0xA6, 0x20,
3067 0x88, 0xE2,
3068 0x00, 0xE0,
3069 0xAF, 0x20,
3070
3071 0x28, 0x2A, 0x26, 0xAF,
3072 0x20, 0x2A, 0xC0, 0xAF,
3073
3074 0x34, 0x1F, 0x34, 0xDF,
3075 0x46, 0x24, 0x46, 0xDF,
3076
3077 0x28, 0x30, 0x80, 0xBF,
3078 0x20, 0x38, 0x80, 0xBF,
3079
3080 0x47, 0x24, 0x47, 0xDF,
3081 0x4E, 0x2C, 0x4E, 0xDF,
3082
3083 0x4F, 0x2C, 0x4F, 0xDF,
3084 0x56, 0x34, 0x56, 0xDF,
3085
3086 0x28, 0x15, 0x28, 0xDF,
3087 0x20, 0x1D, 0x20, 0xDF,
3088
3089 0x57, 0x34, 0x57, 0xDF,
3090 0x00, 0xE0,
3091 0x1D, 0x05,
3092
3093 0x04, 0x80, 0x10, 0xEA,
3094 0x89, 0xE2,
3095 0x2B, 0x30,
3096
3097 0x3F, 0xC1, 0x1D, 0xBD,
3098 0x00, 0x80, 0x00, 0xE8,
3099
3100 0x00, 0x80, 0x00, 0xE8,
3101 0x00, 0x80, 0x00, 0xE8,
3102
3103 0xA0, 0x68,
3104 0xBF, 0x25,
3105 0x00, 0x80, 0x00, 0xE8,
3106
3107 0x20, 0xC0, 0x20, 0xAF,
3108 0x28, 0x05,
3109 0x97, 0x74,
3110
3111 0x00, 0xE0,
3112 0x2A, 0x10,
3113 0x16, 0xC0, 0x20, 0xE9,
3114
3115 0x04, 0x80, 0x10, 0xEA,
3116 0x8C, 0xE2,
3117 0x95, 0x05,
3118
3119 0x28, 0xC1, 0x28, 0xAD,
3120 0x1F, 0xC1, 0x15, 0xBD,
3121
3122 0x00, 0x80, 0x00, 0xE8,
3123 0x00, 0x80, 0x00, 0xE8,
3124
3125 0xA8, 0x67,
3126 0x9F, 0x6B,
3127 0x00, 0x80, 0x00, 0xE8,
3128
3129 0x28, 0xC0, 0x28, 0xAD,
3130 0x1D, 0x25,
3131 0x20, 0x05,
3132
3133 0x28, 0x32, 0x80, 0xAD,
3134 0x40, 0x2A, 0x40, 0xBD,
3135
3136 0x1C, 0x80, 0x20, 0xE9,
3137 0x20, 0x33, 0x20, 0xAD,
3138
3139 0x20, 0x73,
3140 0x00, 0xE0,
3141 0xB6, 0x49, 0x51, 0xBB,
3142
3143 0x26, 0x2F, 0xB0, 0xE8,
3144 0x19, 0x20, 0x20, 0xE9,
3145
3146 0x35, 0x20, 0x35, 0xDF,
3147 0x3D, 0x20, 0x3D, 0xDF,
3148
3149 0x15, 0x20, 0x15, 0xDF,
3150 0x1D, 0x20, 0x1D, 0xDF,
3151
3152 0x26, 0xD0, 0x26, 0xCD,
3153 0x29, 0x49, 0x2A, 0xB8,
3154
3155 0x26, 0x40, 0x80, 0xBD,
3156 0x3B, 0x48, 0x50, 0xBD,
3157
3158 0x3E, 0x54, 0x57, 0x9F,
3159 0x00, 0xE0,
3160 0x82, 0xE1,
3161
3162 0x1E, 0xAF, 0x59, 0x9F,
3163 0x00, 0x80, 0x00, 0xE8,
3164
3165 0x26, 0x30,
3166 0x29, 0x30,
3167 0x48, 0x3C, 0x48, 0xAD,
3168
3169 0x2B, 0x72,
3170 0xC2, 0xE1,
3171 0x2C, 0xC0, 0x44, 0xC2,
3172
3173 0x05, 0x24, 0x34, 0xBF,
3174 0x0D, 0x24, 0x2C, 0xBF,
3175
3176 0x2D, 0x46, 0x4E, 0xBF,
3177 0x25, 0x46, 0x56, 0xBF,
3178
3179 0x20, 0x1D, 0x6F, 0x8F,
3180 0x32, 0x3E, 0x5F, 0xE9,
3181
3182 0x3E, 0x50, 0x56, 0x9F,
3183 0x00, 0xE0,
3184 0x3B, 0x30,
3185
3186 0x1E, 0x8F, 0x51, 0x9F,
3187 0x33, 0x1E, 0x5F, 0xE9,
3188
3189 0x05, 0x44, 0x54, 0xB2,
3190 0x0D, 0x44, 0x4C, 0xB2,
3191
3192 0x19, 0xC0, 0xB0, 0xE8,
3193 0x34, 0xC0, 0x44, 0xC4,
3194
3195 0x33, 0x73,
3196 0x00, 0xE0,
3197 0x3E, 0x62, 0x57, 0x9F,
3198
3199 0x1E, 0xAF, 0x59, 0x9F,
3200 0x00, 0xE0,
3201 0x0D, 0x20,
3202
3203 0x84, 0x3E, 0x58, 0xE9,
3204 0x28, 0x1D, 0x6F, 0x8F,
3205
3206 0x05, 0x20,
3207 0x00, 0xE0,
3208 0x85, 0x1E, 0x58, 0xE9,
3209
3210 0x9B, 0x3B, 0x33, 0xDF,
3211 0x20, 0x20, 0x42, 0xAF,
3212
3213 0x30, 0x42, 0x56, 0x9F,
3214 0x80, 0x3E, 0x57, 0xE9,
3215
3216 0x3F, 0x8F, 0x51, 0x9F,
3217 0x30, 0x80, 0x5F, 0xE9,
3218
3219 0x28, 0x28, 0x24, 0xAF,
3220 0x81, 0x1E, 0x57, 0xE9,
3221
3222 0x05, 0x47, 0x57, 0xBF,
3223 0x0D, 0x47, 0x4F, 0xBF,
3224
3225 0x88, 0x80, 0x58, 0xE9,
3226 0x1B, 0x29, 0x1B, 0xDF,
3227
3228 0x30, 0x1D, 0x6F, 0x8F,
3229 0x3A, 0x30, 0x4F, 0xE9,
3230
3231 0x1C, 0x30, 0x26, 0xDF,
3232 0x09, 0xE3,
3233 0x3B, 0x05,
3234
3235 0x3E, 0x50, 0x56, 0x9F,
3236 0x3B, 0x3F, 0x4F, 0xE9,
3237
3238 0x1E, 0x8F, 0x51, 0x9F,
3239 0x00, 0xE0,
3240 0xAC, 0x20,
3241
3242 0x2D, 0x44, 0x4C, 0xB4,
3243 0x2C, 0x1C, 0xC0, 0xAF,
3244
3245 0x25, 0x44, 0x54, 0xB4,
3246 0x00, 0xE0,
3247 0xC8, 0x30,
3248
3249 0x30, 0x46, 0x30, 0xAF,
3250 0x1B, 0x1B, 0x48, 0xAF,
3251
3252 0x00, 0xE0,
3253 0x25, 0x20,
3254 0x38, 0x2C, 0x4F, 0xE9,
3255
3256 0x86, 0x80, 0x57, 0xE9,
3257 0x38, 0x1D, 0x6F, 0x8F,
3258
3259 0x28, 0x74,
3260 0x00, 0xE0,
3261 0x0D, 0x44, 0x4C, 0xB0,
3262
3263 0x05, 0x44, 0x54, 0xB0,
3264 0x2D, 0x20,
3265 0x9B, 0x10,
3266
3267 0x82, 0x3E, 0x57, 0xE9,
3268 0x32, 0xF0, 0x1B, 0xCD,
3269
3270 0x1E, 0xBD, 0x59, 0x9F,
3271 0x83, 0x1E, 0x57, 0xE9,
3272
3273 0x38, 0x47, 0x38, 0xAF,
3274 0x34, 0x20,
3275 0x2A, 0x30,
3276
3277 0x00, 0xE0,
3278 0x0D, 0x20,
3279 0x32, 0x20,
3280 0x05, 0x20,
3281
3282 0x87, 0x80, 0x57, 0xE9,
3283 0x1F, 0x54, 0x57, 0x9F,
3284
3285 0x17, 0x42, 0x56, 0x9F,
3286 0x00, 0xE0,
3287 0x3B, 0x6A,
3288
3289 0x3F, 0x8F, 0x51, 0x9F,
3290 0x37, 0x1E, 0x4F, 0xE9,
3291
3292 0x37, 0x32, 0x2A, 0xAF,
3293 0x00, 0xE0,
3294 0x32, 0x00,
3295
3296 0x00, 0x80, 0x00, 0xE8,
3297 0x27, 0xC0, 0x44, 0xC0,
3298
3299 0x36, 0x1F, 0x4F, 0xE9,
3300 0x1F, 0x1F, 0x26, 0xDF,
3301
3302 0x37, 0x1B, 0x37, 0xBF,
3303 0x17, 0x26, 0x17, 0xDF,
3304
3305 0x3E, 0x17, 0x4F, 0xE9,
3306 0x3F, 0x3F, 0x4F, 0xE9,
3307
3308 0x34, 0x1F, 0x34, 0xAF,
3309 0x2B, 0x05,
3310 0xA7, 0x20,
3311
3312 0x33, 0x2B, 0x37, 0xDF,
3313 0x27, 0x17, 0xC0, 0xAF,
3314
3315 0x34, 0x80, 0x4F, 0xE9,
3316 0x00, 0x80, 0x00, 0xE8,
3317
3318 0x2D, 0x21, 0x1A, 0xB0,
3319 0x25, 0x21, 0x31, 0xB0,
3320
3321 0x0D, 0x21, 0x1A, 0xB2,
3322 0x05, 0x21, 0x31, 0xB2,
3323
3324 0x03, 0x80, 0x2A, 0xEA,
3325 0x17, 0xC1, 0x2B, 0xBD,
3326
3327 0x2D, 0x20,
3328 0x25, 0x20,
3329 0x05, 0x20,
3330 0x0D, 0x20,
3331
3332 0xB3, 0x68,
3333 0x97, 0x25,
3334 0x00, 0x80, 0x00, 0xE8,
3335
3336 0x33, 0xC0, 0x33, 0xAF,
3337 0x2F, 0xC0, 0x21, 0xC0,
3338
3339 0x16, 0x42, 0x56, 0x9F,
3340 0x3C, 0x27, 0x4F, 0xE9,
3341
3342 0x1E, 0x62, 0x57, 0x9F,
3343 0x00, 0x80, 0x00, 0xE8,
3344
3345 0x25, 0x21, 0x31, 0xB4,
3346 0x2D, 0x21, 0x1A, 0xB4,
3347
3348 0x3F, 0x2F, 0x5D, 0x9F,
3349 0x00, 0x80, 0x00, 0xE8,
3350
3351 0x33, 0x05,
3352 0x00, 0xE0,
3353 0x28, 0x19, 0x60, 0xEC,
3354
3355 0x0D, 0x44, 0x4C, 0xB6,
3356 0x05, 0x44, 0x54, 0xB6,
3357
3358 0x37, 0x0F, 0x5C, 0x9F,
3359 0x00, 0xE0,
3360 0x2F, 0x20,
3361
3362 0x23, 0x3B, 0x33, 0xAD,
3363 0x1E, 0x26, 0x1E, 0xDF,
3364
3365 0xA7, 0x1E, 0x4F, 0xE9,
3366 0x17, 0x26, 0x16, 0xDF,
3367
3368 0x2D, 0x20,
3369 0x00, 0xE0,
3370 0xA8, 0x3F, 0x4F, 0xE9,
3371
3372 0x2F, 0x2F, 0x1E, 0xAF,
3373 0x25, 0x20,
3374 0x00, 0xE0,
3375
3376 0xA4, 0x16, 0x4F, 0xE9,
3377 0x0F, 0xC0, 0x21, 0xC2,
3378
3379 0xA6, 0x80, 0x4F, 0xE9,
3380 0x1F, 0x62, 0x57, 0x9F,
3381
3382 0x0D, 0x20,
3383 0x05, 0x20,
3384 0x00, 0x80, 0x00, 0xE8,
3385
3386 0x3F, 0x2F, 0x5D, 0x9F,
3387 0x00, 0xE0,
3388 0x0F, 0x20,
3389
3390 0x17, 0x50, 0x56, 0x9F,
3391 0xA5, 0x37, 0x4F, 0xE9,
3392
3393 0x06, 0xC0, 0x21, 0xC4,
3394 0x0F, 0x17, 0x0F, 0xAF,
3395
3396 0x37, 0x0F, 0x5C, 0x9F,
3397 0x00, 0x80, 0x00, 0xE8,
3398
3399 0x2F, 0xC0, 0x44, 0xC6,
3400 0xA3, 0x80, 0x4F, 0xE9,
3401
3402 0x06, 0x20,
3403 0x00, 0xE0,
3404 0x1F, 0x26, 0x1F, 0xDF,
3405
3406 0x17, 0x26, 0x17, 0xDF,
3407 0x9D, 0x17, 0x4F, 0xE9,
3408
3409 0xA1, 0x1F, 0x4F, 0xE9,
3410 0xA2, 0x3F, 0x4F, 0xE9,
3411
3412 0x06, 0x06, 0x1F, 0xAF,
3413 0x00, 0xE0,
3414 0xAF, 0x20,
3415
3416 0x9E, 0x37, 0x4F, 0xE9,
3417 0x2F, 0x17, 0x2F, 0xAF,
3418
3419 0xA0, 0x80, 0x4F, 0xE9,
3420 0x00, 0x80, 0x00, 0xE8,
3421
3422 0x00, 0x80, 0x00, 0xE8,
3423 0x9C, 0x80, 0x4F, 0xE9,
3424
3425 0x00, 0x80, 0x00, 0xE8,
3426 0x57, 0x39, 0x20, 0xE9,
3427
3428 0x16, 0x28, 0x20, 0xE9,
3429 0x1D, 0x3B, 0x20, 0xE9,
3430
3431 0x1E, 0x2B, 0x20, 0xE9,
3432 0x2B, 0x32, 0x20, 0xE9,
3433
3434 0x1C, 0x23, 0x20, 0xE9,
3435 0x57, 0x36, 0x20, 0xE9,
3436
3437 0x00, 0x80, 0xA0, 0xE9,
3438 0x40, 0x40, 0xD8, 0xEC,
3439
3440 0xFF, 0x80, 0xC0, 0xE9,
3441 0x90, 0xE2,
3442 0x00, 0xE0,
3443
3444 0x68, 0xFF, 0x20, 0xEA,
3445 0x19, 0xC8, 0xC1, 0xCD,
3446
3447 0x1F, 0xD7, 0x18, 0xBD,
3448 0x3F, 0xD7, 0x22, 0xBD,
3449
3450 0x9F, 0x41, 0x49, 0xBD,
3451 0x00, 0x80, 0x00, 0xE8,
3452
3453 0x25, 0x41, 0x49, 0xBD,
3454 0x2D, 0x41, 0x51, 0xBD,
3455
3456 0x0D, 0x80, 0x07, 0xEA,
3457 0x00, 0x80, 0x00, 0xE8,
3458
3459 0x35, 0x40, 0x48, 0xBD,
3460 0x3D, 0x40, 0x50, 0xBD,
3461
3462 0x00, 0x80, 0x00, 0xE8,
3463 0x25, 0x30,
3464 0x2D, 0x30,
3465
3466 0x35, 0x30,
3467 0xB5, 0x30,
3468 0xBD, 0x30,
3469 0x3D, 0x30,
3470
3471 0x9C, 0xA7, 0x5B, 0x9F,
3472 0x00, 0x80, 0x00, 0xE8,
3473
3474 0x00, 0x80, 0x00, 0xE8,
3475 0x00, 0x80, 0x00, 0xE8,
3476
3477 0x00, 0x80, 0x00, 0xE8,
3478 0x00, 0x80, 0x00, 0xE8,
3479
3480 0x00, 0x80, 0x00, 0xE8,
3481 0x00, 0x80, 0x00, 0xE8,
3482
3483 0x00, 0x80, 0x00, 0xE8,
3484 0x00, 0x80, 0x00, 0xE8,
3485
3486 0x67, 0xFF, 0x0A, 0xEA,
3487 0x00, 0x80, 0x00, 0xE8,
3488
3489 0xC9, 0x41, 0xC8, 0xEC,
3490 0x42, 0xE1,
3491 0x00, 0xE0,
3492
3493 0x65, 0xFF, 0x20, 0xEA,
3494 0x00, 0x80, 0x00, 0xE8,
3495
3496 0x00, 0x80, 0x00, 0xE8,
3497 0x00, 0x80, 0x00, 0xE8,
3498
3499 0xC8, 0x40, 0xC0, 0xEC,
3500 0x00, 0x80, 0x00, 0xE8,
3501
3502 0x62, 0xFF, 0x20, 0xEA,
3503 0x00, 0x80, 0x00, 0xE8,
3504
3505 0x00, 0x80, 0x00, 0xE8,
3506 0x00, 0x80, 0x00, 0xE8,
3507
3508};
3509
3510static unsigned char warp_g200_tgzsaf[] = {
3511
3512 0x00, 0x80, 0x00, 0xE8,
3513 0x00, 0x80, 0x00, 0xE8,
3514
3515 0x00, 0x80, 0x00, 0xE8,
3516 0x00, 0x80, 0x00, 0xE8,
3517
3518 0x00, 0x80, 0x00, 0xE8,
3519 0x00, 0x80, 0x00, 0xE8,
3520
3521 0x00, 0x98, 0xA0, 0xE9,
3522 0x40, 0x40, 0xD8, 0xEC,
3523
3524 0xFF, 0x80, 0xC0, 0xE9,
3525 0x00, 0x80, 0x00, 0xE8,
3526
3527 0x1F, 0xD7, 0x18, 0xBD,
3528 0x3F, 0xD7, 0x22, 0xBD,
3529
3530 0x81, 0x04,
3531 0x89, 0x04,
3532 0x01, 0x04,
3533 0x09, 0x04,
3534
3535 0xC9, 0x41, 0xC0, 0xEC,
3536 0x11, 0x04,
3537 0x00, 0xE0,
3538
3539 0x41, 0xCC, 0x41, 0xCD,
3540 0x49, 0xCC, 0x49, 0xCD,
3541
3542 0xD1, 0x41, 0xC0, 0xEC,
3543 0x51, 0xCC, 0x51, 0xCD,
3544
3545 0x80, 0x04,
3546 0x10, 0x04,
3547 0x08, 0x04,
3548 0x00, 0xE0,
3549
3550 0x00, 0xCC, 0xC0, 0xCD,
3551 0xD1, 0x49, 0xC0, 0xEC,
3552
3553 0x8A, 0x1F, 0x20, 0xE9,
3554 0x8B, 0x3F, 0x20, 0xE9,
3555
3556 0x41, 0x3C, 0x41, 0xAD,
3557 0x49, 0x3C, 0x49, 0xAD,
3558
3559 0x10, 0xCC, 0x10, 0xCD,
3560 0x08, 0xCC, 0x08, 0xCD,
3561
3562 0xB9, 0x41, 0x49, 0xBB,
3563 0x1F, 0xF0, 0x41, 0xCD,
3564
3565 0x51, 0x3C, 0x51, 0xAD,
3566 0x00, 0x98, 0x80, 0xE9,
3567
3568 0x94, 0x80, 0x07, 0xEA,
3569 0x24, 0x1F, 0x20, 0xE9,
3570
3571 0x21, 0x45, 0x80, 0xE8,
3572 0x1A, 0x4D, 0x80, 0xE8,
3573
3574 0x31, 0x55, 0x80, 0xE8,
3575 0x00, 0x80, 0x00, 0xE8,
3576
3577 0x15, 0x41, 0x49, 0xBD,
3578 0x1D, 0x41, 0x51, 0xBD,
3579
3580 0x2E, 0x41, 0x2A, 0xB8,
3581 0x34, 0x53, 0xA0, 0xE8,
3582
3583 0x15, 0x30,
3584 0x1D, 0x30,
3585 0x58, 0xE3,
3586 0x00, 0xE0,
3587
3588 0xB5, 0x40, 0x48, 0xBD,
3589 0x3D, 0x40, 0x50, 0xBD,
3590
3591 0x24, 0x43, 0xA0, 0xE8,
3592 0x2C, 0x4B, 0xA0, 0xE8,
3593
3594 0x15, 0x72,
3595 0x09, 0xE3,
3596 0x00, 0xE0,
3597 0x1D, 0x72,
3598
3599 0x35, 0x30,
3600 0xB5, 0x30,
3601 0xBD, 0x30,
3602 0x3D, 0x30,
3603
3604 0x9C, 0x97, 0x57, 0x9F,
3605 0x00, 0x80, 0x00, 0xE8,
3606
3607 0x6C, 0x64, 0xC8, 0xEC,
3608 0x98, 0xE1,
3609 0xB5, 0x05,
3610
3611 0xBD, 0x05,
3612 0x2E, 0x30,
3613 0x32, 0xC0, 0xA0, 0xE8,
3614
3615 0x33, 0xC0, 0xA0, 0xE8,
3616 0x74, 0x64, 0xC8, 0xEC,
3617
3618 0x40, 0x3C, 0x40, 0xAD,
3619 0x32, 0x6A,
3620 0x2A, 0x30,
3621
3622 0x20, 0x73,
3623 0x33, 0x6A,
3624 0x00, 0xE0,
3625 0x28, 0x73,
3626
3627 0x1C, 0x72,
3628 0x83, 0xE2,
3629 0x80, 0x80, 0x15, 0xEA,
3630
3631 0xB8, 0x3D, 0x28, 0xDF,
3632 0x30, 0x35, 0x20, 0xDF,
3633
3634 0x40, 0x30,
3635 0x00, 0xE0,
3636 0xCC, 0xE2,
3637 0x64, 0x72,
3638
3639 0x25, 0x42, 0x52, 0xBF,
3640 0x2D, 0x42, 0x4A, 0xBF,
3641
3642 0x30, 0x2E, 0x30, 0xDF,
3643 0x38, 0x2E, 0x38, 0xDF,
3644
3645 0x18, 0x1D, 0x45, 0xE9,
3646 0x1E, 0x15, 0x45, 0xE9,
3647
3648 0x2B, 0x49, 0x51, 0xBD,
3649 0x00, 0xE0,
3650 0x1F, 0x73,
3651
3652 0x38, 0x38, 0x40, 0xAF,
3653 0x30, 0x30, 0x40, 0xAF,
3654
3655 0x24, 0x1F, 0x24, 0xDF,
3656 0x1D, 0x32, 0x20, 0xE9,
3657
3658 0x2C, 0x1F, 0x2C, 0xDF,
3659 0x1A, 0x33, 0x20, 0xE9,
3660
3661 0xB0, 0x10,
3662 0x08, 0xE3,
3663 0x40, 0x10,
3664 0xB8, 0x10,
3665
3666 0x26, 0xF0, 0x30, 0xCD,
3667 0x2F, 0xF0, 0x38, 0xCD,
3668
3669 0x2B, 0x80, 0x20, 0xE9,
3670 0x2A, 0x80, 0x20, 0xE9,
3671
3672 0xA6, 0x20,
3673 0x88, 0xE2,
3674 0x00, 0xE0,
3675 0xAF, 0x20,
3676
3677 0x28, 0x2A, 0x26, 0xAF,
3678 0x20, 0x2A, 0xC0, 0xAF,
3679
3680 0x34, 0x1F, 0x34, 0xDF,
3681 0x46, 0x24, 0x46, 0xDF,
3682
3683 0x28, 0x30, 0x80, 0xBF,
3684 0x20, 0x38, 0x80, 0xBF,
3685
3686 0x47, 0x24, 0x47, 0xDF,
3687 0x4E, 0x2C, 0x4E, 0xDF,
3688
3689 0x4F, 0x2C, 0x4F, 0xDF,
3690 0x56, 0x34, 0x56, 0xDF,
3691
3692 0x28, 0x15, 0x28, 0xDF,
3693 0x20, 0x1D, 0x20, 0xDF,
3694
3695 0x57, 0x34, 0x57, 0xDF,
3696 0x00, 0xE0,
3697 0x1D, 0x05,
3698
3699 0x04, 0x80, 0x10, 0xEA,
3700 0x89, 0xE2,
3701 0x2B, 0x30,
3702
3703 0x3F, 0xC1, 0x1D, 0xBD,
3704 0x00, 0x80, 0x00, 0xE8,
3705
3706 0x00, 0x80, 0x00, 0xE8,
3707 0x00, 0x80, 0x00, 0xE8,
3708
3709 0xA0, 0x68,
3710 0xBF, 0x25,
3711 0x00, 0x80, 0x00, 0xE8,
3712
3713 0x20, 0xC0, 0x20, 0xAF,
3714 0x28, 0x05,
3715 0x97, 0x74,
3716
3717 0x00, 0xE0,
3718 0x2A, 0x10,
3719 0x16, 0xC0, 0x20, 0xE9,
3720
3721 0x04, 0x80, 0x10, 0xEA,
3722 0x8C, 0xE2,
3723 0x95, 0x05,
3724
3725 0x28, 0xC1, 0x28, 0xAD,
3726 0x1F, 0xC1, 0x15, 0xBD,
3727
3728 0x00, 0x80, 0x00, 0xE8,
3729 0x00, 0x80, 0x00, 0xE8,
3730
3731 0xA8, 0x67,
3732 0x9F, 0x6B,
3733 0x00, 0x80, 0x00, 0xE8,
3734
3735 0x28, 0xC0, 0x28, 0xAD,
3736 0x1D, 0x25,
3737 0x20, 0x05,
3738
3739 0x28, 0x32, 0x80, 0xAD,
3740 0x40, 0x2A, 0x40, 0xBD,
3741
3742 0x1C, 0x80, 0x20, 0xE9,
3743 0x20, 0x33, 0x20, 0xAD,
3744
3745 0x20, 0x73,
3746 0x00, 0xE0,
3747 0xB6, 0x49, 0x51, 0xBB,
3748
3749 0x26, 0x2F, 0xB0, 0xE8,
3750 0x19, 0x20, 0x20, 0xE9,
3751
3752 0x35, 0x20, 0x35, 0xDF,
3753 0x3D, 0x20, 0x3D, 0xDF,
3754
3755 0x15, 0x20, 0x15, 0xDF,
3756 0x1D, 0x20, 0x1D, 0xDF,
3757
3758 0x26, 0xD0, 0x26, 0xCD,
3759 0x29, 0x49, 0x2A, 0xB8,
3760
3761 0x26, 0x40, 0x80, 0xBD,
3762 0x3B, 0x48, 0x50, 0xBD,
3763
3764 0x3E, 0x54, 0x57, 0x9F,
3765 0x00, 0xE0,
3766 0x82, 0xE1,
3767
3768 0x1E, 0xAF, 0x59, 0x9F,
3769 0x00, 0x80, 0x00, 0xE8,
3770
3771 0x26, 0x30,
3772 0x29, 0x30,
3773 0x48, 0x3C, 0x48, 0xAD,
3774
3775 0x2B, 0x72,
3776 0xC2, 0xE1,
3777 0x2C, 0xC0, 0x44, 0xC2,
3778
3779 0x05, 0x24, 0x34, 0xBF,
3780 0x0D, 0x24, 0x2C, 0xBF,
3781
3782 0x2D, 0x46, 0x4E, 0xBF,
3783 0x25, 0x46, 0x56, 0xBF,
3784
3785 0x20, 0x1D, 0x6F, 0x8F,
3786 0x32, 0x3E, 0x5F, 0xE9,
3787
3788 0x3E, 0x50, 0x56, 0x9F,
3789 0x00, 0xE0,
3790 0x3B, 0x30,
3791
3792 0x1E, 0x8F, 0x51, 0x9F,
3793 0x33, 0x1E, 0x5F, 0xE9,
3794
3795 0x05, 0x44, 0x54, 0xB2,
3796 0x0D, 0x44, 0x4C, 0xB2,
3797
3798 0x19, 0xC0, 0xB0, 0xE8,
3799 0x34, 0xC0, 0x44, 0xC4,
3800
3801 0x33, 0x73,
3802 0x00, 0xE0,
3803 0x3E, 0x62, 0x57, 0x9F,
3804
3805 0x1E, 0xAF, 0x59, 0x9F,
3806 0x00, 0xE0,
3807 0x0D, 0x20,
3808
3809 0x84, 0x3E, 0x58, 0xE9,
3810 0x28, 0x1D, 0x6F, 0x8F,
3811
3812 0x05, 0x20,
3813 0x00, 0xE0,
3814 0x85, 0x1E, 0x58, 0xE9,
3815
3816 0x9B, 0x3B, 0x33, 0xDF,
3817 0x20, 0x20, 0x42, 0xAF,
3818
3819 0x30, 0x42, 0x56, 0x9F,
3820 0x80, 0x3E, 0x57, 0xE9,
3821
3822 0x3F, 0x8F, 0x51, 0x9F,
3823 0x30, 0x80, 0x5F, 0xE9,
3824
3825 0x28, 0x28, 0x24, 0xAF,
3826 0x81, 0x1E, 0x57, 0xE9,
3827
3828 0x05, 0x47, 0x57, 0xBF,
3829 0x0D, 0x47, 0x4F, 0xBF,
3830
3831 0x88, 0x80, 0x58, 0xE9,
3832 0x1B, 0x29, 0x1B, 0xDF,
3833
3834 0x30, 0x1D, 0x6F, 0x8F,
3835 0x3A, 0x30, 0x4F, 0xE9,
3836
3837 0x1C, 0x30, 0x26, 0xDF,
3838 0x09, 0xE3,
3839 0x3B, 0x05,
3840
3841 0x3E, 0x50, 0x56, 0x9F,
3842 0x3B, 0x3F, 0x4F, 0xE9,
3843
3844 0x1E, 0x8F, 0x51, 0x9F,
3845 0x00, 0xE0,
3846 0xAC, 0x20,
3847
3848 0x2D, 0x44, 0x4C, 0xB4,
3849 0x2C, 0x1C, 0xC0, 0xAF,
3850
3851 0x25, 0x44, 0x54, 0xB4,
3852 0x00, 0xE0,
3853 0xC8, 0x30,
3854
3855 0x30, 0x46, 0x30, 0xAF,
3856 0x1B, 0x1B, 0x48, 0xAF,
3857
3858 0x00, 0xE0,
3859 0x25, 0x20,
3860 0x38, 0x2C, 0x4F, 0xE9,
3861
3862 0x86, 0x80, 0x57, 0xE9,
3863 0x38, 0x1D, 0x6F, 0x8F,
3864
3865 0x28, 0x74,
3866 0x00, 0xE0,
3867 0x0D, 0x44, 0x4C, 0xB0,
3868
3869 0x05, 0x44, 0x54, 0xB0,
3870 0x2D, 0x20,
3871 0x9B, 0x10,
3872
3873 0x82, 0x3E, 0x57, 0xE9,
3874 0x32, 0xF0, 0x1B, 0xCD,
3875
3876 0x1E, 0xBD, 0x59, 0x9F,
3877 0x83, 0x1E, 0x57, 0xE9,
3878
3879 0x38, 0x47, 0x38, 0xAF,
3880 0x34, 0x20,
3881 0x2A, 0x30,
3882
3883 0x00, 0xE0,
3884 0x0D, 0x20,
3885 0x32, 0x20,
3886 0x05, 0x20,
3887
3888 0x87, 0x80, 0x57, 0xE9,
3889 0x1F, 0x54, 0x57, 0x9F,
3890
3891 0x17, 0x42, 0x56, 0x9F,
3892 0x00, 0xE0,
3893 0x3B, 0x6A,
3894
3895 0x3F, 0x8F, 0x51, 0x9F,
3896 0x37, 0x1E, 0x4F, 0xE9,
3897
3898 0x37, 0x32, 0x2A, 0xAF,
3899 0x00, 0xE0,
3900 0x32, 0x00,
3901
3902 0x00, 0x80, 0x00, 0xE8,
3903 0x27, 0xC0, 0x44, 0xC0,
3904
3905 0x36, 0x1F, 0x4F, 0xE9,
3906 0x1F, 0x1F, 0x26, 0xDF,
3907
3908 0x37, 0x1B, 0x37, 0xBF,
3909 0x17, 0x26, 0x17, 0xDF,
3910
3911 0x3E, 0x17, 0x4F, 0xE9,
3912 0x3F, 0x3F, 0x4F, 0xE9,
3913
3914 0x34, 0x1F, 0x34, 0xAF,
3915 0x2B, 0x05,
3916 0xA7, 0x20,
3917
3918 0x33, 0x2B, 0x37, 0xDF,
3919 0x27, 0x17, 0xC0, 0xAF,
3920
3921 0x34, 0x80, 0x4F, 0xE9,
3922 0x00, 0x80, 0x00, 0xE8,
3923
3924 0x2D, 0x21, 0x1A, 0xB0,
3925 0x25, 0x21, 0x31, 0xB0,
3926
3927 0x0D, 0x21, 0x1A, 0xB2,
3928 0x05, 0x21, 0x31, 0xB2,
3929
3930 0x03, 0x80, 0x2A, 0xEA,
3931 0x17, 0xC1, 0x2B, 0xBD,
3932
3933 0x2D, 0x20,
3934 0x25, 0x20,
3935 0x05, 0x20,
3936 0x0D, 0x20,
3937
3938 0xB3, 0x68,
3939 0x97, 0x25,
3940 0x00, 0x80, 0x00, 0xE8,
3941
3942 0x33, 0xC0, 0x33, 0xAF,
3943 0x2F, 0xC0, 0x21, 0xC0,
3944
3945 0x16, 0x42, 0x56, 0x9F,
3946 0x3C, 0x27, 0x4F, 0xE9,
3947
3948 0x1E, 0x62, 0x57, 0x9F,
3949 0x00, 0x80, 0x00, 0xE8,
3950
3951 0x25, 0x21, 0x31, 0xB4,
3952 0x2D, 0x21, 0x1A, 0xB4,
3953
3954 0x3F, 0x2F, 0x5D, 0x9F,
3955 0x00, 0x80, 0x00, 0xE8,
3956
3957 0x33, 0x05,
3958 0x00, 0xE0,
3959 0x28, 0x19, 0x60, 0xEC,
3960
3961 0x0D, 0x21, 0x1A, 0xB6,
3962 0x05, 0x21, 0x31, 0xB6,
3963
3964 0x37, 0x0F, 0x5C, 0x9F,
3965 0x00, 0xE0,
3966 0x2F, 0x20,
3967
3968 0x23, 0x3B, 0x33, 0xAD,
3969 0x1E, 0x26, 0x1E, 0xDF,
3970
3971 0xA7, 0x1E, 0x4F, 0xE9,
3972 0x17, 0x26, 0x16, 0xDF,
3973
3974 0x2D, 0x20,
3975 0x00, 0xE0,
3976 0xA8, 0x3F, 0x4F, 0xE9,
3977
3978 0x2F, 0x2F, 0x1E, 0xAF,
3979 0x25, 0x20,
3980 0x00, 0xE0,
3981
3982 0xA4, 0x16, 0x4F, 0xE9,
3983 0x0F, 0xC0, 0x21, 0xC2,
3984
3985 0xA6, 0x80, 0x4F, 0xE9,
3986 0x1F, 0x62, 0x57, 0x9F,
3987
3988 0x0D, 0x20,
3989 0x05, 0x20,
3990 0x2F, 0xC0, 0x21, 0xC6,
3991
3992 0x2D, 0x44, 0x4C, 0xB6,
3993 0x25, 0x44, 0x54, 0xB6,
3994
3995 0x3F, 0x2F, 0x5D, 0x9F,
3996 0x00, 0xE0,
3997 0x0F, 0x20,
3998
3999 0x2D, 0x20,
4000 0x25, 0x20,
4001 0x07, 0xC0, 0x44, 0xC6,
4002
4003 0x17, 0x50, 0x56, 0x9F,
4004 0xA5, 0x37, 0x4F, 0xE9,
4005
4006 0x06, 0xC0, 0x21, 0xC4,
4007 0x0F, 0x17, 0x0F, 0xAF,
4008
4009 0x37, 0x0F, 0x5C, 0x9F,
4010 0x00, 0x80, 0x00, 0xE8,
4011
4012 0x1E, 0x62, 0x57, 0x9F,
4013 0x00, 0x80, 0x00, 0xE8,
4014
4015 0x3E, 0x3D, 0x5D, 0x9F,
4016 0x00, 0xE0,
4017 0x07, 0x20,
4018
4019 0x2F, 0x20,
4020 0x00, 0xE0,
4021 0xA3, 0x0F, 0x4F, 0xE9,
4022
4023 0x06, 0x20,
4024 0x00, 0xE0,
4025 0x1F, 0x26, 0x1F, 0xDF,
4026
4027 0x17, 0x26, 0x17, 0xDF,
4028 0xA1, 0x1F, 0x4F, 0xE9,
4029
4030 0x1E, 0x26, 0x1E, 0xDF,
4031 0x9D, 0x1E, 0x4F, 0xE9,
4032
4033 0x35, 0x17, 0x4F, 0xE9,
4034 0xA2, 0x3F, 0x4F, 0xE9,
4035
4036 0x06, 0x06, 0x1F, 0xAF,
4037 0x39, 0x37, 0x4F, 0xE9,
4038
4039 0x2F, 0x2F, 0x17, 0xAF,
4040 0x07, 0x07, 0x1E, 0xAF,
4041
4042 0xA0, 0x80, 0x4F, 0xE9,
4043 0x9E, 0x3E, 0x4F, 0xE9,
4044
4045 0x31, 0x80, 0x4F, 0xE9,
4046 0x9C, 0x80, 0x4F, 0xE9,
4047
4048 0x00, 0x80, 0x00, 0xE8,
4049 0x57, 0x39, 0x20, 0xE9,
4050
4051 0x16, 0x28, 0x20, 0xE9,
4052 0x1D, 0x3B, 0x20, 0xE9,
4053
4054 0x1E, 0x2B, 0x20, 0xE9,
4055 0x2B, 0x32, 0x20, 0xE9,
4056
4057 0x1C, 0x23, 0x20, 0xE9,
4058 0x57, 0x36, 0x20, 0xE9,
4059
4060 0x00, 0x80, 0xA0, 0xE9,
4061 0x40, 0x40, 0xD8, 0xEC,
4062
4063 0xFF, 0x80, 0xC0, 0xE9,
4064 0x90, 0xE2,
4065 0x00, 0xE0,
4066
4067 0x63, 0xFF, 0x20, 0xEA,
4068 0x19, 0xC8, 0xC1, 0xCD,
4069
4070 0x1F, 0xD7, 0x18, 0xBD,
4071 0x3F, 0xD7, 0x22, 0xBD,
4072
4073 0x9F, 0x41, 0x49, 0xBD,
4074 0x00, 0x80, 0x00, 0xE8,
4075
4076 0x25, 0x41, 0x49, 0xBD,
4077 0x2D, 0x41, 0x51, 0xBD,
4078
4079 0x0D, 0x80, 0x07, 0xEA,
4080 0x00, 0x80, 0x00, 0xE8,
4081
4082 0x35, 0x40, 0x48, 0xBD,
4083 0x3D, 0x40, 0x50, 0xBD,
4084
4085 0x00, 0x80, 0x00, 0xE8,
4086 0x25, 0x30,
4087 0x2D, 0x30,
4088
4089 0x35, 0x30,
4090 0xB5, 0x30,
4091 0xBD, 0x30,
4092 0x3D, 0x30,
4093
4094 0x9C, 0xA7, 0x5B, 0x9F,
4095 0x00, 0x80, 0x00, 0xE8,
4096
4097 0x00, 0x80, 0x00, 0xE8,
4098 0x00, 0x80, 0x00, 0xE8,
4099
4100 0x00, 0x80, 0x00, 0xE8,
4101 0x00, 0x80, 0x00, 0xE8,
4102
4103 0x00, 0x80, 0x00, 0xE8,
4104 0x00, 0x80, 0x00, 0xE8,
4105
4106 0x00, 0x80, 0x00, 0xE8,
4107 0x00, 0x80, 0x00, 0xE8,
4108
4109 0x62, 0xFF, 0x0A, 0xEA,
4110 0x00, 0x80, 0x00, 0xE8,
4111
4112 0xC9, 0x41, 0xC8, 0xEC,
4113 0x42, 0xE1,
4114 0x00, 0xE0,
4115
4116 0x60, 0xFF, 0x20, 0xEA,
4117 0x00, 0x80, 0x00, 0xE8,
4118
4119 0x00, 0x80, 0x00, 0xE8,
4120 0x00, 0x80, 0x00, 0xE8,
4121
4122 0xC8, 0x40, 0xC0, 0xEC,
4123 0x00, 0x80, 0x00, 0xE8,
4124
4125 0x5D, 0xFF, 0x20, 0xEA,
4126 0x00, 0x80, 0x00, 0xE8,
4127
4128 0x00, 0x80, 0x00, 0xE8,
4129 0x00, 0x80, 0x00, 0xE8,
4130
4131};
4132
4133static unsigned char warp_g200_tgzsf[] = {
4134
4135 0x00, 0x80, 0x00, 0xE8,
4136 0x00, 0x80, 0x00, 0xE8,
4137
4138 0x00, 0x80, 0x00, 0xE8,
4139 0x00, 0x80, 0x00, 0xE8,
4140
4141 0x00, 0x80, 0x00, 0xE8,
4142 0x00, 0x80, 0x00, 0xE8,
4143
4144 0x00, 0x80, 0x00, 0xE8,
4145 0x00, 0x80, 0x00, 0xE8,
4146
4147 0x00, 0x80, 0x00, 0xE8,
4148 0x00, 0x80, 0x00, 0xE8,
4149
4150 0x00, 0x80, 0x00, 0xE8,
4151 0x00, 0x80, 0x00, 0xE8,
4152
4153 0x00, 0x80, 0x00, 0xE8,
4154 0x00, 0x80, 0x00, 0xE8,
4155
4156 0x00, 0x80, 0x00, 0xE8,
4157 0x00, 0x80, 0x00, 0xE8,
4158
4159 0x00, 0x80, 0x00, 0xE8,
4160 0x00, 0x80, 0x00, 0xE8,
4161
4162 0x00, 0x80, 0x00, 0xE8,
4163 0x00, 0x80, 0x00, 0xE8,
4164
4165 0x00, 0x80, 0x00, 0xE8,
4166 0x00, 0x80, 0x00, 0xE8,
4167
4168 0x00, 0x80, 0x00, 0xE8,
4169 0x00, 0x80, 0x00, 0xE8,
4170
4171 0x00, 0x80, 0x00, 0xE8,
4172 0x00, 0x80, 0x00, 0xE8,
4173
4174 0x00, 0x98, 0xA0, 0xE9,
4175 0x40, 0x40, 0xD8, 0xEC,
4176
4177 0xFF, 0x80, 0xC0, 0xE9,
4178 0x00, 0x80, 0x00, 0xE8,
4179
4180 0x1F, 0xD7, 0x18, 0xBD,
4181 0x3F, 0xD7, 0x22, 0xBD,
4182
4183 0x81, 0x04,
4184 0x89, 0x04,
4185 0x01, 0x04,
4186 0x09, 0x04,
4187
4188 0xC9, 0x41, 0xC0, 0xEC,
4189 0x11, 0x04,
4190 0x00, 0xE0,
4191
4192 0x41, 0xCC, 0x41, 0xCD,
4193 0x49, 0xCC, 0x49, 0xCD,
4194
4195 0xD1, 0x41, 0xC0, 0xEC,
4196 0x51, 0xCC, 0x51, 0xCD,
4197
4198 0x80, 0x04,
4199 0x10, 0x04,
4200 0x08, 0x04,
4201 0x00, 0xE0,
4202
4203 0x00, 0xCC, 0xC0, 0xCD,
4204 0xD1, 0x49, 0xC0, 0xEC,
4205
4206 0x8A, 0x1F, 0x20, 0xE9,
4207 0x8B, 0x3F, 0x20, 0xE9,
4208
4209 0x41, 0x3C, 0x41, 0xAD,
4210 0x49, 0x3C, 0x49, 0xAD,
4211
4212 0x10, 0xCC, 0x10, 0xCD,
4213 0x08, 0xCC, 0x08, 0xCD,
4214
4215 0xB9, 0x41, 0x49, 0xBB,
4216 0x1F, 0xF0, 0x41, 0xCD,
4217
4218 0x51, 0x3C, 0x51, 0xAD,
4219 0x00, 0x98, 0x80, 0xE9,
4220
4221 0x8F, 0x80, 0x07, 0xEA,
4222 0x24, 0x1F, 0x20, 0xE9,
4223
4224 0x21, 0x45, 0x80, 0xE8,
4225 0x1A, 0x4D, 0x80, 0xE8,
4226
4227 0x31, 0x55, 0x80, 0xE8,
4228 0x00, 0x80, 0x00, 0xE8,
4229
4230 0x15, 0x41, 0x49, 0xBD,
4231 0x1D, 0x41, 0x51, 0xBD,
4232
4233 0x2E, 0x41, 0x2A, 0xB8,
4234 0x34, 0x53, 0xA0, 0xE8,
4235
4236 0x15, 0x30,
4237 0x1D, 0x30,
4238 0x58, 0xE3,
4239 0x00, 0xE0,
4240
4241 0xB5, 0x40, 0x48, 0xBD,
4242 0x3D, 0x40, 0x50, 0xBD,
4243
4244 0x24, 0x43, 0xA0, 0xE8,
4245 0x2C, 0x4B, 0xA0, 0xE8,
4246
4247 0x15, 0x72,
4248 0x09, 0xE3,
4249 0x00, 0xE0,
4250 0x1D, 0x72,
4251
4252 0x35, 0x30,
4253 0xB5, 0x30,
4254 0xBD, 0x30,
4255 0x3D, 0x30,
4256
4257 0x9C, 0x97, 0x57, 0x9F,
4258 0x00, 0x80, 0x00, 0xE8,
4259
4260 0x6C, 0x64, 0xC8, 0xEC,
4261 0x98, 0xE1,
4262 0xB5, 0x05,
4263
4264 0xBD, 0x05,
4265 0x2E, 0x30,
4266 0x32, 0xC0, 0xA0, 0xE8,
4267
4268 0x33, 0xC0, 0xA0, 0xE8,
4269 0x74, 0x64, 0xC8, 0xEC,
4270
4271 0x40, 0x3C, 0x40, 0xAD,
4272 0x32, 0x6A,
4273 0x2A, 0x30,
4274
4275 0x20, 0x73,
4276 0x33, 0x6A,
4277 0x00, 0xE0,
4278 0x28, 0x73,
4279
4280 0x1C, 0x72,
4281 0x83, 0xE2,
4282 0x7B, 0x80, 0x15, 0xEA,
4283
4284 0xB8, 0x3D, 0x28, 0xDF,
4285 0x30, 0x35, 0x20, 0xDF,
4286
4287 0x40, 0x30,
4288 0x00, 0xE0,
4289 0xCC, 0xE2,
4290 0x64, 0x72,
4291
4292 0x25, 0x42, 0x52, 0xBF,
4293 0x2D, 0x42, 0x4A, 0xBF,
4294
4295 0x30, 0x2E, 0x30, 0xDF,
4296 0x38, 0x2E, 0x38, 0xDF,
4297
4298 0x18, 0x1D, 0x45, 0xE9,
4299 0x1E, 0x15, 0x45, 0xE9,
4300
4301 0x2B, 0x49, 0x51, 0xBD,
4302 0x00, 0xE0,
4303 0x1F, 0x73,
4304
4305 0x38, 0x38, 0x40, 0xAF,
4306 0x30, 0x30, 0x40, 0xAF,
4307
4308 0x24, 0x1F, 0x24, 0xDF,
4309 0x1D, 0x32, 0x20, 0xE9,
4310
4311 0x2C, 0x1F, 0x2C, 0xDF,
4312 0x1A, 0x33, 0x20, 0xE9,
4313
4314 0xB0, 0x10,
4315 0x08, 0xE3,
4316 0x40, 0x10,
4317 0xB8, 0x10,
4318
4319 0x26, 0xF0, 0x30, 0xCD,
4320 0x2F, 0xF0, 0x38, 0xCD,
4321
4322 0x2B, 0x80, 0x20, 0xE9,
4323 0x2A, 0x80, 0x20, 0xE9,
4324
4325 0xA6, 0x20,
4326 0x88, 0xE2,
4327 0x00, 0xE0,
4328 0xAF, 0x20,
4329
4330 0x28, 0x2A, 0x26, 0xAF,
4331 0x20, 0x2A, 0xC0, 0xAF,
4332
4333 0x34, 0x1F, 0x34, 0xDF,
4334 0x46, 0x24, 0x46, 0xDF,
4335
4336 0x28, 0x30, 0x80, 0xBF,
4337 0x20, 0x38, 0x80, 0xBF,
4338
4339 0x47, 0x24, 0x47, 0xDF,
4340 0x4E, 0x2C, 0x4E, 0xDF,
4341
4342 0x4F, 0x2C, 0x4F, 0xDF,
4343 0x56, 0x34, 0x56, 0xDF,
4344
4345 0x28, 0x15, 0x28, 0xDF,
4346 0x20, 0x1D, 0x20, 0xDF,
4347
4348 0x57, 0x34, 0x57, 0xDF,
4349 0x00, 0xE0,
4350 0x1D, 0x05,
4351
4352 0x04, 0x80, 0x10, 0xEA,
4353 0x89, 0xE2,
4354 0x2B, 0x30,
4355
4356 0x3F, 0xC1, 0x1D, 0xBD,
4357 0x00, 0x80, 0x00, 0xE8,
4358
4359 0x00, 0x80, 0x00, 0xE8,
4360 0x00, 0x80, 0x00, 0xE8,
4361
4362 0xA0, 0x68,
4363 0xBF, 0x25,
4364 0x00, 0x80, 0x00, 0xE8,
4365
4366 0x20, 0xC0, 0x20, 0xAF,
4367 0x28, 0x05,
4368 0x97, 0x74,
4369
4370 0x00, 0xE0,
4371 0x2A, 0x10,
4372 0x16, 0xC0, 0x20, 0xE9,
4373
4374 0x04, 0x80, 0x10, 0xEA,
4375 0x8C, 0xE2,
4376 0x95, 0x05,
4377
4378 0x28, 0xC1, 0x28, 0xAD,
4379 0x1F, 0xC1, 0x15, 0xBD,
4380
4381 0x00, 0x80, 0x00, 0xE8,
4382 0x00, 0x80, 0x00, 0xE8,
4383
4384 0xA8, 0x67,
4385 0x9F, 0x6B,
4386 0x00, 0x80, 0x00, 0xE8,
4387
4388 0x28, 0xC0, 0x28, 0xAD,
4389 0x1D, 0x25,
4390 0x20, 0x05,
4391
4392 0x28, 0x32, 0x80, 0xAD,
4393 0x40, 0x2A, 0x40, 0xBD,
4394
4395 0x1C, 0x80, 0x20, 0xE9,
4396 0x20, 0x33, 0x20, 0xAD,
4397
4398 0x20, 0x73,
4399 0x00, 0xE0,
4400 0xB6, 0x49, 0x51, 0xBB,
4401
4402 0x26, 0x2F, 0xB0, 0xE8,
4403 0x19, 0x20, 0x20, 0xE9,
4404
4405 0x35, 0x20, 0x35, 0xDF,
4406 0x3D, 0x20, 0x3D, 0xDF,
4407
4408 0x15, 0x20, 0x15, 0xDF,
4409 0x1D, 0x20, 0x1D, 0xDF,
4410
4411 0x26, 0xD0, 0x26, 0xCD,
4412 0x29, 0x49, 0x2A, 0xB8,
4413
4414 0x26, 0x40, 0x80, 0xBD,
4415 0x3B, 0x48, 0x50, 0xBD,
4416
4417 0x3E, 0x54, 0x57, 0x9F,
4418 0x00, 0xE0,
4419 0x82, 0xE1,
4420
4421 0x1E, 0xAF, 0x59, 0x9F,
4422 0x00, 0x80, 0x00, 0xE8,
4423
4424 0x26, 0x30,
4425 0x29, 0x30,
4426 0x48, 0x3C, 0x48, 0xAD,
4427
4428 0x2B, 0x72,
4429 0xC2, 0xE1,
4430 0x2C, 0xC0, 0x44, 0xC2,
4431
4432 0x05, 0x24, 0x34, 0xBF,
4433 0x0D, 0x24, 0x2C, 0xBF,
4434
4435 0x2D, 0x46, 0x4E, 0xBF,
4436 0x25, 0x46, 0x56, 0xBF,
4437
4438 0x20, 0x1D, 0x6F, 0x8F,
4439 0x32, 0x3E, 0x5F, 0xE9,
4440
4441 0x3E, 0x50, 0x56, 0x9F,
4442 0x00, 0xE0,
4443 0x3B, 0x30,
4444
4445 0x1E, 0x8F, 0x51, 0x9F,
4446 0x33, 0x1E, 0x5F, 0xE9,
4447
4448 0x05, 0x44, 0x54, 0xB2,
4449 0x0D, 0x44, 0x4C, 0xB2,
4450
4451 0x19, 0xC0, 0xB0, 0xE8,
4452 0x34, 0xC0, 0x44, 0xC4,
4453
4454 0x33, 0x73,
4455 0x00, 0xE0,
4456 0x3E, 0x62, 0x57, 0x9F,
4457
4458 0x1E, 0xAF, 0x59, 0x9F,
4459 0x00, 0xE0,
4460 0x0D, 0x20,
4461
4462 0x84, 0x3E, 0x58, 0xE9,
4463 0x28, 0x1D, 0x6F, 0x8F,
4464
4465 0x05, 0x20,
4466 0x00, 0xE0,
4467 0x85, 0x1E, 0x58, 0xE9,
4468
4469 0x9B, 0x3B, 0x33, 0xDF,
4470 0x20, 0x20, 0x42, 0xAF,
4471
4472 0x30, 0x42, 0x56, 0x9F,
4473 0x80, 0x3E, 0x57, 0xE9,
4474
4475 0x3F, 0x8F, 0x51, 0x9F,
4476 0x30, 0x80, 0x5F, 0xE9,
4477
4478 0x28, 0x28, 0x24, 0xAF,
4479 0x81, 0x1E, 0x57, 0xE9,
4480
4481 0x05, 0x47, 0x57, 0xBF,
4482 0x0D, 0x47, 0x4F, 0xBF,
4483
4484 0x88, 0x80, 0x58, 0xE9,
4485 0x1B, 0x29, 0x1B, 0xDF,
4486
4487 0x30, 0x1D, 0x6F, 0x8F,
4488 0x3A, 0x30, 0x4F, 0xE9,
4489
4490 0x1C, 0x30, 0x26, 0xDF,
4491 0x09, 0xE3,
4492 0x3B, 0x05,
4493
4494 0x3E, 0x50, 0x56, 0x9F,
4495 0x3B, 0x3F, 0x4F, 0xE9,
4496
4497 0x1E, 0x8F, 0x51, 0x9F,
4498 0x00, 0xE0,
4499 0xAC, 0x20,
4500
4501 0x2D, 0x44, 0x4C, 0xB4,
4502 0x2C, 0x1C, 0xC0, 0xAF,
4503
4504 0x25, 0x44, 0x54, 0xB4,
4505 0x00, 0xE0,
4506 0xC8, 0x30,
4507
4508 0x30, 0x46, 0x30, 0xAF,
4509 0x1B, 0x1B, 0x48, 0xAF,
4510
4511 0x00, 0xE0,
4512 0x25, 0x20,
4513 0x38, 0x2C, 0x4F, 0xE9,
4514
4515 0x86, 0x80, 0x57, 0xE9,
4516 0x38, 0x1D, 0x6F, 0x8F,
4517
4518 0x28, 0x74,
4519 0x00, 0xE0,
4520 0x0D, 0x44, 0x4C, 0xB0,
4521
4522 0x05, 0x44, 0x54, 0xB0,
4523 0x2D, 0x20,
4524 0x9B, 0x10,
4525
4526 0x82, 0x3E, 0x57, 0xE9,
4527 0x32, 0xF0, 0x1B, 0xCD,
4528
4529 0x1E, 0xBD, 0x59, 0x9F,
4530 0x83, 0x1E, 0x57, 0xE9,
4531
4532 0x38, 0x47, 0x38, 0xAF,
4533 0x34, 0x20,
4534 0x2A, 0x30,
4535
4536 0x00, 0xE0,
4537 0x0D, 0x20,
4538 0x32, 0x20,
4539 0x05, 0x20,
4540
4541 0x87, 0x80, 0x57, 0xE9,
4542 0x1F, 0x54, 0x57, 0x9F,
4543
4544 0x17, 0x42, 0x56, 0x9F,
4545 0x00, 0xE0,
4546 0x3B, 0x6A,
4547
4548 0x3F, 0x8F, 0x51, 0x9F,
4549 0x37, 0x1E, 0x4F, 0xE9,
4550
4551 0x37, 0x32, 0x2A, 0xAF,
4552 0x00, 0xE0,
4553 0x32, 0x00,
4554
4555 0x00, 0x80, 0x00, 0xE8,
4556 0x27, 0xC0, 0x44, 0xC0,
4557
4558 0x36, 0x1F, 0x4F, 0xE9,
4559 0x1F, 0x1F, 0x26, 0xDF,
4560
4561 0x37, 0x1B, 0x37, 0xBF,
4562 0x17, 0x26, 0x17, 0xDF,
4563
4564 0x3E, 0x17, 0x4F, 0xE9,
4565 0x3F, 0x3F, 0x4F, 0xE9,
4566
4567 0x34, 0x1F, 0x34, 0xAF,
4568 0x2B, 0x05,
4569 0xA7, 0x20,
4570
4571 0x33, 0x2B, 0x37, 0xDF,
4572 0x27, 0x17, 0xC0, 0xAF,
4573
4574 0x34, 0x80, 0x4F, 0xE9,
4575 0x00, 0x80, 0x00, 0xE8,
4576
4577 0x2D, 0x21, 0x1A, 0xB0,
4578 0x25, 0x21, 0x31, 0xB0,
4579
4580 0x0D, 0x21, 0x1A, 0xB2,
4581 0x05, 0x21, 0x31, 0xB2,
4582
4583 0x03, 0x80, 0x2A, 0xEA,
4584 0x17, 0xC1, 0x2B, 0xBD,
4585
4586 0x2D, 0x20,
4587 0x25, 0x20,
4588 0x05, 0x20,
4589 0x0D, 0x20,
4590
4591 0xB3, 0x68,
4592 0x97, 0x25,
4593 0x00, 0x80, 0x00, 0xE8,
4594
4595 0x33, 0xC0, 0x33, 0xAF,
4596 0x2F, 0xC0, 0x21, 0xC0,
4597
4598 0x16, 0x42, 0x56, 0x9F,
4599 0x3C, 0x27, 0x4F, 0xE9,
4600
4601 0x1E, 0x62, 0x57, 0x9F,
4602 0x00, 0x80, 0x00, 0xE8,
4603
4604 0x25, 0x21, 0x31, 0xB4,
4605 0x2D, 0x21, 0x1A, 0xB4,
4606
4607 0x3F, 0x2F, 0x5D, 0x9F,
4608 0x00, 0x80, 0x00, 0xE8,
4609
4610 0x33, 0x05,
4611 0x00, 0xE0,
4612 0x28, 0x19, 0x60, 0xEC,
4613
4614 0x0D, 0x21, 0x1A, 0xB6,
4615 0x05, 0x21, 0x31, 0xB6,
4616
4617 0x37, 0x0F, 0x5C, 0x9F,
4618 0x00, 0xE0,
4619 0x2F, 0x20,
4620
4621 0x23, 0x3B, 0x33, 0xAD,
4622 0x1E, 0x26, 0x1E, 0xDF,
4623
4624 0xA7, 0x1E, 0x4F, 0xE9,
4625 0x17, 0x26, 0x16, 0xDF,
4626
4627 0x2D, 0x20,
4628 0x00, 0xE0,
4629 0xA8, 0x3F, 0x4F, 0xE9,
4630
4631 0x2F, 0x2F, 0x1E, 0xAF,
4632 0x25, 0x20,
4633 0x00, 0xE0,
4634
4635 0xA4, 0x16, 0x4F, 0xE9,
4636 0x0F, 0xC0, 0x21, 0xC2,
4637
4638 0xA6, 0x80, 0x4F, 0xE9,
4639 0x1F, 0x62, 0x57, 0x9F,
4640
4641 0x0D, 0x20,
4642 0x05, 0x20,
4643 0x2F, 0xC0, 0x21, 0xC6,
4644
4645 0x3F, 0x2F, 0x5D, 0x9F,
4646 0x00, 0xE0,
4647 0x0F, 0x20,
4648
4649 0x17, 0x50, 0x56, 0x9F,
4650 0xA5, 0x37, 0x4F, 0xE9,
4651
4652 0x06, 0xC0, 0x21, 0xC4,
4653 0x0F, 0x17, 0x0F, 0xAF,
4654
4655 0x37, 0x0F, 0x5C, 0x9F,
4656 0x00, 0x80, 0x00, 0xE8,
4657
4658 0x2F, 0x20,
4659 0x00, 0xE0,
4660 0xA3, 0x80, 0x4F, 0xE9,
4661
4662 0x06, 0x20,
4663 0x00, 0xE0,
4664 0x1F, 0x26, 0x1F, 0xDF,
4665
4666 0x17, 0x26, 0x17, 0xDF,
4667 0x35, 0x17, 0x4F, 0xE9,
4668
4669 0xA1, 0x1F, 0x4F, 0xE9,
4670 0xA2, 0x3F, 0x4F, 0xE9,
4671
4672 0x06, 0x06, 0x1F, 0xAF,
4673 0x39, 0x37, 0x4F, 0xE9,
4674
4675 0x2F, 0x2F, 0x17, 0xAF,
4676 0x00, 0x80, 0x00, 0xE8,
4677
4678 0xA0, 0x80, 0x4F, 0xE9,
4679 0x00, 0x80, 0x00, 0xE8,
4680
4681 0x31, 0x80, 0x4F, 0xE9,
4682 0x00, 0x80, 0x00, 0xE8,
4683
4684 0x00, 0x80, 0x00, 0xE8,
4685 0x57, 0x39, 0x20, 0xE9,
4686
4687 0x16, 0x28, 0x20, 0xE9,
4688 0x1D, 0x3B, 0x20, 0xE9,
4689
4690 0x1E, 0x2B, 0x20, 0xE9,
4691 0x2B, 0x32, 0x20, 0xE9,
4692
4693 0x1C, 0x23, 0x20, 0xE9,
4694 0x57, 0x36, 0x20, 0xE9,
4695
4696 0x00, 0x80, 0xA0, 0xE9,
4697 0x40, 0x40, 0xD8, 0xEC,
4698
4699 0xFF, 0x80, 0xC0, 0xE9,
4700 0x90, 0xE2,
4701 0x00, 0xE0,
4702
4703 0x68, 0xFF, 0x20, 0xEA,
4704 0x19, 0xC8, 0xC1, 0xCD,
4705
4706 0x1F, 0xD7, 0x18, 0xBD,
4707 0x3F, 0xD7, 0x22, 0xBD,
4708
4709 0x9F, 0x41, 0x49, 0xBD,
4710 0x00, 0x80, 0x00, 0xE8,
4711
4712 0x25, 0x41, 0x49, 0xBD,
4713 0x2D, 0x41, 0x51, 0xBD,
4714
4715 0x0D, 0x80, 0x07, 0xEA,
4716 0x00, 0x80, 0x00, 0xE8,
4717
4718 0x35, 0x40, 0x48, 0xBD,
4719 0x3D, 0x40, 0x50, 0xBD,
4720
4721 0x00, 0x80, 0x00, 0xE8,
4722 0x25, 0x30,
4723 0x2D, 0x30,
4724
4725 0x35, 0x30,
4726 0xB5, 0x30,
4727 0xBD, 0x30,
4728 0x3D, 0x30,
4729
4730 0x9C, 0xA7, 0x5B, 0x9F,
4731 0x00, 0x80, 0x00, 0xE8,
4732
4733 0x00, 0x80, 0x00, 0xE8,
4734 0x00, 0x80, 0x00, 0xE8,
4735
4736 0x00, 0x80, 0x00, 0xE8,
4737 0x00, 0x80, 0x00, 0xE8,
4738
4739 0x00, 0x80, 0x00, 0xE8,
4740 0x00, 0x80, 0x00, 0xE8,
4741
4742 0x00, 0x80, 0x00, 0xE8,
4743 0x00, 0x80, 0x00, 0xE8,
4744
4745 0x67, 0xFF, 0x0A, 0xEA,
4746 0x00, 0x80, 0x00, 0xE8,
4747
4748 0xC9, 0x41, 0xC8, 0xEC,
4749 0x42, 0xE1,
4750 0x00, 0xE0,
4751
4752 0x65, 0xFF, 0x20, 0xEA,
4753 0x00, 0x80, 0x00, 0xE8,
4754
4755 0x00, 0x80, 0x00, 0xE8,
4756 0x00, 0x80, 0x00, 0xE8,
4757
4758 0xC8, 0x40, 0xC0, 0xEC,
4759 0x00, 0x80, 0x00, 0xE8,
4760
4761 0x62, 0xFF, 0x20, 0xEA,
4762 0x00, 0x80, 0x00, 0xE8,
4763
4764 0x00, 0x80, 0x00, 0xE8,
4765 0x00, 0x80, 0x00, 0xE8,
4766
4767};
4768
4769static unsigned char warp_g400_t2gz[] = {
4770
4771 0x00, 0x8A, 0x98, 0xE9,
4772 0x00, 0x80, 0x00, 0xE8,
4773
4774 0x00, 0x80, 0xA0, 0xE9,
4775 0x00, 0x00, 0xD8, 0xEC,
4776
4777 0xFF, 0x80, 0xC0, 0xE9,
4778 0x00, 0x80, 0x00, 0xE8,
4779
4780 0x0A, 0x40, 0x50, 0xBF,
4781 0x2A, 0x40, 0x60, 0xBF,
4782
4783 0x32, 0x41, 0x51, 0xBF,
4784 0x3A, 0x41, 0x61, 0xBF,
4785
4786 0xC3, 0x6B,
4787 0xD3, 0x6B,
4788 0x00, 0x8A, 0x98, 0xE9,
4789
4790 0x73, 0x7B, 0xC8, 0xEC,
4791 0x96, 0xE2,
4792 0x41, 0x04,
4793
4794 0x7B, 0x43, 0xA0, 0xE8,
4795 0x73, 0x53, 0xA0, 0xE8,
4796
4797 0xAD, 0xEE, 0x23, 0x9F,
4798 0x00, 0xE0,
4799 0x51, 0x04,
4800
4801 0x90, 0xE2,
4802 0x61, 0x04,
4803 0x31, 0x46, 0xB1, 0xE8,
4804
4805 0x51, 0x41, 0xE0, 0xEC,
4806 0x39, 0x67, 0xB1, 0xE8,
4807
4808 0x00, 0x04,
4809 0x46, 0xE2,
4810 0x73, 0x63, 0xA0, 0xE8,
4811
4812 0x61, 0x41, 0xE0, 0xEC,
4813 0x31, 0x00,
4814 0x39, 0x00,
4815
4816 0x78, 0x80, 0x15, 0xEA,
4817 0x10, 0x04,
4818 0x20, 0x04,
4819
4820 0x61, 0x51, 0xE0, 0xEC,
4821 0x2F, 0x41, 0x60, 0xEA,
4822
4823 0x31, 0x20,
4824 0x39, 0x20,
4825 0x1F, 0x42, 0xA0, 0xE8,
4826
4827 0x2A, 0x42, 0x52, 0xBF,
4828 0x0F, 0x52, 0xA0, 0xE8,
4829
4830 0x1A, 0x42, 0x62, 0xBF,
4831 0x1E, 0x51, 0x60, 0xEA,
4832
4833 0x73, 0x7B, 0xC8, 0xEC,
4834 0x0E, 0x61, 0x60, 0xEA,
4835
4836 0x32, 0x40, 0x50, 0xBD,
4837 0x22, 0x40, 0x60, 0xBD,
4838
4839 0x12, 0x41, 0x51, 0xBD,
4840 0x3A, 0x41, 0x61, 0xBD,
4841
4842 0xBF, 0x2F, 0x0E, 0xBD,
4843 0x97, 0xE2,
4844 0x7B, 0x72,
4845
4846 0x32, 0x20,
4847 0x22, 0x20,
4848 0x12, 0x20,
4849 0x3A, 0x20,
4850
4851 0x35, 0x48, 0xB1, 0xE8,
4852 0x3D, 0x59, 0xB1, 0xE8,
4853
4854 0x46, 0x31, 0x46, 0xBF,
4855 0x56, 0x31, 0x56, 0xBF,
4856
4857 0xB3, 0xE2, 0x2D, 0x9F,
4858 0x00, 0x80, 0x00, 0xE8,
4859
4860 0x66, 0x31, 0x66, 0xBF,
4861 0x47, 0x39, 0x47, 0xBF,
4862
4863 0x57, 0x39, 0x57, 0xBF,
4864 0x67, 0x39, 0x67, 0xBF,
4865
4866 0x69, 0x80, 0x07, 0xEA,
4867 0x24, 0x41, 0x20, 0xE9,
4868
4869 0x35, 0x00,
4870 0x3D, 0x00,
4871 0x00, 0xE0,
4872 0x2D, 0x73,
4873
4874 0x33, 0x72,
4875 0x0C, 0xE3,
4876 0x8D, 0x2F, 0x1E, 0xBD,
4877
4878 0x43, 0x75, 0xF8, 0xEC,
4879 0x35, 0x20,
4880 0x3D, 0x20,
4881
4882 0x43, 0x43, 0x2D, 0xDF,
4883 0x53, 0x53, 0x2D, 0xDF,
4884
4885 0xAE, 0x1E, 0x0E, 0xBD,
4886 0x58, 0xE3,
4887 0x33, 0x66,
4888
4889 0x48, 0x35, 0x48, 0xBF,
4890 0x58, 0x35, 0x58, 0xBF,
4891
4892 0x68, 0x35, 0x68, 0xBF,
4893 0x49, 0x3D, 0x49, 0xBF,
4894
4895 0x59, 0x3D, 0x59, 0xBF,
4896 0x69, 0x3D, 0x69, 0xBF,
4897
4898 0x63, 0x63, 0x2D, 0xDF,
4899 0x4D, 0x7D, 0xF8, 0xEC,
4900
4901 0x59, 0xE3,
4902 0x00, 0xE0,
4903 0xB8, 0x38, 0x33, 0xBF,
4904
4905 0x2D, 0x73,
4906 0x30, 0x76,
4907 0x18, 0x3A, 0x41, 0xE9,
4908
4909 0x3F, 0x53, 0xA0, 0xE8,
4910 0x05, 0x80, 0x3D, 0xEA,
4911
4912 0x37, 0x43, 0xA0, 0xE8,
4913 0x3D, 0x63, 0xA0, 0xE8,
4914
4915 0x50, 0x70, 0xF8, 0xEC,
4916 0x2B, 0x50, 0x3C, 0xE9,
4917
4918 0x1F, 0x0F, 0xBC, 0xE8,
4919 0x00, 0x80, 0x00, 0xE8,
4920
4921 0x59, 0x78, 0xF8, 0xEC,
4922 0x00, 0x80, 0x00, 0xE8,
4923
4924 0x15, 0xC0, 0x20, 0xE9,
4925 0x15, 0xC0, 0x20, 0xE9,
4926
4927 0x15, 0xC0, 0x20, 0xE9,
4928 0x15, 0xC0, 0x20, 0xE9,
4929
4930 0x1E, 0x12, 0x41, 0xE9,
4931 0x1A, 0x22, 0x41, 0xE9,
4932
4933 0x46, 0x37, 0x46, 0xDF,
4934 0x56, 0x3F, 0x56, 0xDF,
4935
4936 0x2B, 0x40, 0x3D, 0xE9,
4937 0x66, 0x3D, 0x66, 0xDF,
4938
4939 0x1D, 0x32, 0x41, 0xE9,
4940 0x67, 0x3D, 0x67, 0xDF,
4941
4942 0x47, 0x37, 0x47, 0xDF,
4943 0x57, 0x3F, 0x57, 0xDF,
4944
4945 0x2A, 0x40, 0x20, 0xE9,
4946 0x59, 0x3F, 0x59, 0xDF,
4947
4948 0x16, 0x30, 0x20, 0xE9,
4949 0x69, 0x3D, 0x69, 0xDF,
4950
4951 0x48, 0x37, 0x48, 0xDF,
4952 0x58, 0x3F, 0x58, 0xDF,
4953
4954 0x12, 0x12, 0x2D, 0xDF,
4955 0x22, 0x22, 0x2D, 0xDF,
4956
4957 0x32, 0x32, 0x2D, 0xDF,
4958 0x3A, 0x3A, 0x2D, 0xDF,
4959
4960 0x68, 0x3D, 0x68, 0xDF,
4961 0x49, 0x37, 0x49, 0xDF,
4962
4963 0x3D, 0xCF, 0x74, 0xC0,
4964 0x37, 0xCF, 0x74, 0xC4,
4965
4966 0x31, 0x53, 0x2F, 0x9F,
4967 0x34, 0x80, 0x20, 0xE9,
4968
4969 0x39, 0xE5, 0x2C, 0x9F,
4970 0x3C, 0x3D, 0x20, 0xE9,
4971
4972 0x0A, 0x44, 0x54, 0xB0,
4973 0x02, 0x44, 0x64, 0xB0,
4974
4975 0x2A, 0x44, 0x54, 0xB2,
4976 0x1A, 0x44, 0x64, 0xB2,
4977
4978 0x25, 0x80, 0x3A, 0xEA,
4979 0x0A, 0x20,
4980 0x02, 0x20,
4981
4982 0x3D, 0xCF, 0x74, 0xC2,
4983 0x2A, 0x20,
4984 0x1A, 0x20,
4985
4986 0x30, 0x50, 0x2E, 0x9F,
4987 0x32, 0x31, 0x5F, 0xE9,
4988
4989 0x38, 0x21, 0x2C, 0x9F,
4990 0x33, 0x39, 0x5F, 0xE9,
4991
4992 0x31, 0x53, 0x2F, 0x9F,
4993 0x00, 0x80, 0x00, 0xE8,
4994
4995 0x2A, 0x44, 0x54, 0xB4,
4996 0x1A, 0x44, 0x64, 0xB4,
4997
4998 0x39, 0xE5, 0x2C, 0x9F,
4999 0x38, 0x3D, 0x20, 0xE9,
5000
5001 0x88, 0x73, 0x5E, 0xE9,
5002 0x2A, 0x20,
5003 0x1A, 0x20,
5004
5005 0x2A, 0x46, 0x56, 0xBF,
5006 0x1A, 0x46, 0x66, 0xBF,
5007
5008 0x31, 0x53, 0x2F, 0x9F,
5009 0x3E, 0x30, 0x4F, 0xE9,
5010
5011 0x39, 0xE5, 0x2C, 0x9F,
5012 0x3F, 0x38, 0x4F, 0xE9,
5013
5014 0x0A, 0x47, 0x57, 0xBF,
5015 0x02, 0x47, 0x67, 0xBF,
5016
5017 0x31, 0x53, 0x2F, 0x9F,
5018 0x3A, 0x31, 0x4F, 0xE9,
5019
5020 0x39, 0xE5, 0x2C, 0x9F,
5021 0x3B, 0x39, 0x4F, 0xE9,
5022
5023 0x2A, 0x43, 0x53, 0xBF,
5024 0x1A, 0x43, 0x63, 0xBF,
5025
5026 0x30, 0x50, 0x2E, 0x9F,
5027 0x36, 0x31, 0x4F, 0xE9,
5028
5029 0x38, 0x21, 0x2C, 0x9F,
5030 0x37, 0x39, 0x4F, 0xE9,
5031
5032 0x0A, 0x48, 0x58, 0xBF,
5033 0x02, 0x48, 0x68, 0xBF,
5034
5035 0x31, 0x53, 0x2F, 0x9F,
5036 0x80, 0x31, 0x57, 0xE9,
5037
5038 0x39, 0xE5, 0x2C, 0x9F,
5039 0x81, 0x39, 0x57, 0xE9,
5040
5041 0x2A, 0x49, 0x59, 0xBF,
5042 0x1A, 0x49, 0x69, 0xBF,
5043
5044 0x30, 0x50, 0x2E, 0x9F,
5045 0x82, 0x30, 0x57, 0xE9,
5046
5047 0x38, 0x21, 0x2C, 0x9F,
5048 0x83, 0x38, 0x57, 0xE9,
5049
5050 0x31, 0x53, 0x2F, 0x9F,
5051 0x84, 0x31, 0x5E, 0xE9,
5052
5053 0x39, 0xE5, 0x2C, 0x9F,
5054 0x85, 0x39, 0x5E, 0xE9,
5055
5056 0x86, 0x76, 0x57, 0xE9,
5057 0x8A, 0x36, 0x20, 0xE9,
5058
5059 0x87, 0x77, 0x57, 0xE9,
5060 0x8B, 0x3E, 0xBF, 0xEA,
5061
5062 0x80, 0x30, 0x57, 0xE9,
5063 0x81, 0x38, 0x57, 0xE9,
5064
5065 0x82, 0x31, 0x57, 0xE9,
5066 0x86, 0x78, 0x57, 0xE9,
5067
5068 0x83, 0x39, 0x57, 0xE9,
5069 0x87, 0x79, 0x57, 0xE9,
5070
5071 0x30, 0x1F, 0x5F, 0xE9,
5072 0x8A, 0x34, 0x20, 0xE9,
5073
5074 0x8B, 0x3C, 0x20, 0xE9,
5075 0x37, 0x50, 0x60, 0xBD,
5076
5077 0x57, 0x0D, 0x20, 0xE9,
5078 0x35, 0x51, 0x61, 0xBD,
5079
5080 0x2B, 0x50, 0x20, 0xE9,
5081 0x1D, 0x37, 0xE1, 0xEA,
5082
5083 0x1E, 0x35, 0xE1, 0xEA,
5084 0x00, 0xE0,
5085 0x0E, 0x77,
5086
5087 0x24, 0x51, 0x20, 0xE9,
5088 0x9F, 0xFF, 0x20, 0xEA,
5089
5090 0x16, 0x0E, 0x20, 0xE9,
5091 0x57, 0x2E, 0xBF, 0xEA,
5092
5093 0x0B, 0x46, 0xA0, 0xE8,
5094 0x1B, 0x56, 0xA0, 0xE8,
5095
5096 0x2B, 0x66, 0xA0, 0xE8,
5097 0x0C, 0x47, 0xA0, 0xE8,
5098
5099 0x1C, 0x57, 0xA0, 0xE8,
5100 0x2C, 0x67, 0xA0, 0xE8,
5101
5102 0x0B, 0x00,
5103 0x1B, 0x00,
5104 0x2B, 0x00,
5105 0x00, 0xE0,
5106
5107 0x0C, 0x00,
5108 0x1C, 0x00,
5109 0x2C, 0x00,
5110 0x00, 0xE0,
5111
5112 0x0B, 0x65,
5113 0x1B, 0x65,
5114 0x2B, 0x65,
5115 0x00, 0xE0,
5116
5117 0x0C, 0x65,
5118 0x1C, 0x65,
5119 0x2C, 0x65,
5120 0x00, 0xE0,
5121
5122 0x0B, 0x1B, 0x60, 0xEC,
5123 0x36, 0xD7, 0x36, 0xAD,
5124
5125 0x2B, 0x80, 0x60, 0xEC,
5126 0x0C, 0x1C, 0x60, 0xEC,
5127
5128 0x3E, 0xD7, 0x3E, 0xAD,
5129 0x2C, 0x80, 0x60, 0xEC,
5130
5131 0x0B, 0x2B, 0xDE, 0xE8,
5132 0x1B, 0x80, 0xDE, 0xE8,
5133
5134 0x36, 0x80, 0x36, 0xBD,
5135 0x3E, 0x80, 0x3E, 0xBD,
5136
5137 0x33, 0xD7, 0x0B, 0xBD,
5138 0x3B, 0xD7, 0x1B, 0xBD,
5139
5140 0x46, 0x80, 0x46, 0xCF,
5141 0x57, 0x80, 0x57, 0xCF,
5142
5143 0x66, 0x33, 0x66, 0xCF,
5144 0x47, 0x3B, 0x47, 0xCF,
5145
5146 0x56, 0x33, 0x56, 0xCF,
5147 0x67, 0x3B, 0x67, 0xCF,
5148
5149 0x0B, 0x48, 0xA0, 0xE8,
5150 0x1B, 0x58, 0xA0, 0xE8,
5151
5152 0x2B, 0x68, 0xA0, 0xE8,
5153 0x0C, 0x49, 0xA0, 0xE8,
5154
5155 0x1C, 0x59, 0xA0, 0xE8,
5156 0x2C, 0x69, 0xA0, 0xE8,
5157
5158 0x0B, 0x00,
5159 0x1B, 0x00,
5160 0x2B, 0x00,
5161 0x00, 0xE0,
5162
5163 0x0C, 0x00,
5164 0x1C, 0x00,
5165 0x2C, 0x00,
5166 0x00, 0xE0,
5167
5168 0x0B, 0x65,
5169 0x1B, 0x65,
5170 0x2B, 0x65,
5171 0x00, 0xE0,
5172
5173 0x0C, 0x65,
5174 0x1C, 0x65,
5175 0x2C, 0x65,
5176 0x00, 0xE0,
5177
5178 0x0B, 0x1B, 0x60, 0xEC,
5179 0x34, 0xD7, 0x34, 0xAD,
5180
5181 0x2B, 0x80, 0x60, 0xEC,
5182 0x0C, 0x1C, 0x60, 0xEC,
5183
5184 0x3C, 0xD7, 0x3C, 0xAD,
5185 0x2C, 0x80, 0x60, 0xEC,
5186
5187 0x0B, 0x2B, 0xDE, 0xE8,
5188 0x1B, 0x80, 0xDE, 0xE8,
5189
5190 0x34, 0x80, 0x34, 0xBD,
5191 0x3C, 0x80, 0x3C, 0xBD,
5192
5193 0x33, 0xD7, 0x0B, 0xBD,
5194 0x3B, 0xD7, 0x1B, 0xBD,
5195
5196 0x48, 0x80, 0x48, 0xCF,
5197 0x59, 0x80, 0x59, 0xCF,
5198
5199 0x68, 0x33, 0x68, 0xCF,
5200 0x49, 0x3B, 0x49, 0xCF,
5201
5202 0xBE, 0xFF, 0x20, 0xEA,
5203 0x00, 0x80, 0x00, 0xE8,
5204
5205 0x58, 0x33, 0x58, 0xCF,
5206 0x69, 0x3B, 0x69, 0xCF,
5207
5208 0x7D, 0xFF, 0x20, 0xEA,
5209 0x57, 0xC0, 0xBF, 0xEA,
5210
5211 0x00, 0x80, 0xA0, 0xE9,
5212 0x00, 0x00, 0xD8, 0xEC,
5213
5214};
5215
5216static unsigned char warp_g400_t2gza[] = {
5217
5218 0x00, 0x8A, 0x98, 0xE9,
5219 0x00, 0x80, 0x00, 0xE8,
5220
5221 0x00, 0x80, 0xA0, 0xE9,
5222 0x00, 0x00, 0xD8, 0xEC,
5223
5224 0xFF, 0x80, 0xC0, 0xE9,
5225 0x00, 0x80, 0x00, 0xE8,
5226
5227 0x0A, 0x40, 0x50, 0xBF,
5228 0x2A, 0x40, 0x60, 0xBF,
5229
5230 0x32, 0x41, 0x51, 0xBF,
5231 0x3A, 0x41, 0x61, 0xBF,
5232
5233 0xC3, 0x6B,
5234 0xD3, 0x6B,
5235 0x00, 0x8A, 0x98, 0xE9,
5236
5237 0x73, 0x7B, 0xC8, 0xEC,
5238 0x96, 0xE2,
5239 0x41, 0x04,
5240
5241 0x7B, 0x43, 0xA0, 0xE8,
5242 0x73, 0x53, 0xA0, 0xE8,
5243
5244 0xAD, 0xEE, 0x23, 0x9F,
5245 0x00, 0xE0,
5246 0x51, 0x04,
5247
5248 0x90, 0xE2,
5249 0x61, 0x04,
5250 0x31, 0x46, 0xB1, 0xE8,
5251
5252 0x51, 0x41, 0xE0, 0xEC,
5253 0x39, 0x67, 0xB1, 0xE8,
5254
5255 0x00, 0x04,
5256 0x46, 0xE2,
5257 0x73, 0x63, 0xA0, 0xE8,
5258
5259 0x61, 0x41, 0xE0, 0xEC,
5260 0x31, 0x00,
5261 0x39, 0x00,
5262
5263 0x7C, 0x80, 0x15, 0xEA,
5264 0x10, 0x04,
5265 0x20, 0x04,
5266
5267 0x61, 0x51, 0xE0, 0xEC,
5268 0x2F, 0x41, 0x60, 0xEA,
5269
5270 0x31, 0x20,
5271 0x39, 0x20,
5272 0x1F, 0x42, 0xA0, 0xE8,
5273
5274 0x2A, 0x42, 0x52, 0xBF,
5275 0x0F, 0x52, 0xA0, 0xE8,
5276
5277 0x1A, 0x42, 0x62, 0xBF,
5278 0x1E, 0x51, 0x60, 0xEA,
5279
5280 0x73, 0x7B, 0xC8, 0xEC,
5281 0x0E, 0x61, 0x60, 0xEA,
5282
5283 0x32, 0x40, 0x50, 0xBD,
5284 0x22, 0x40, 0x60, 0xBD,
5285
5286 0x12, 0x41, 0x51, 0xBD,
5287 0x3A, 0x41, 0x61, 0xBD,
5288
5289 0xBF, 0x2F, 0x0E, 0xBD,
5290 0x97, 0xE2,
5291 0x7B, 0x72,
5292
5293 0x32, 0x20,
5294 0x22, 0x20,
5295 0x12, 0x20,
5296 0x3A, 0x20,
5297
5298 0x35, 0x48, 0xB1, 0xE8,
5299 0x3D, 0x59, 0xB1, 0xE8,
5300
5301 0x46, 0x31, 0x46, 0xBF,
5302 0x56, 0x31, 0x56, 0xBF,
5303
5304 0xB3, 0xE2, 0x2D, 0x9F,
5305 0x00, 0x80, 0x00, 0xE8,
5306
5307 0x66, 0x31, 0x66, 0xBF,
5308 0x47, 0x39, 0x47, 0xBF,
5309
5310 0x57, 0x39, 0x57, 0xBF,
5311 0x67, 0x39, 0x67, 0xBF,
5312
5313 0x6D, 0x80, 0x07, 0xEA,
5314 0x24, 0x41, 0x20, 0xE9,
5315
5316 0x35, 0x00,
5317 0x3D, 0x00,
5318 0x00, 0xE0,
5319 0x2D, 0x73,
5320
5321 0x33, 0x72,
5322 0x0C, 0xE3,
5323 0x8D, 0x2F, 0x1E, 0xBD,
5324
5325 0x43, 0x75, 0xF8, 0xEC,
5326 0x35, 0x20,
5327 0x3D, 0x20,
5328
5329 0x43, 0x43, 0x2D, 0xDF,
5330 0x53, 0x53, 0x2D, 0xDF,
5331
5332 0xAE, 0x1E, 0x0E, 0xBD,
5333 0x58, 0xE3,
5334 0x33, 0x66,
5335
5336 0x48, 0x35, 0x48, 0xBF,
5337 0x58, 0x35, 0x58, 0xBF,
5338
5339 0x68, 0x35, 0x68, 0xBF,
5340 0x49, 0x3D, 0x49, 0xBF,
5341
5342 0x59, 0x3D, 0x59, 0xBF,
5343 0x69, 0x3D, 0x69, 0xBF,
5344
5345 0x63, 0x63, 0x2D, 0xDF,
5346 0x4D, 0x7D, 0xF8, 0xEC,
5347
5348 0x59, 0xE3,
5349 0x00, 0xE0,
5350 0xB8, 0x38, 0x33, 0xBF,
5351
5352 0x2D, 0x73,
5353 0x30, 0x76,
5354 0x18, 0x3A, 0x41, 0xE9,
5355
5356 0x3F, 0x53, 0xA0, 0xE8,
5357 0x05, 0x80, 0x3D, 0xEA,
5358
5359 0x37, 0x43, 0xA0, 0xE8,
5360 0x3D, 0x63, 0xA0, 0xE8,
5361
5362 0x50, 0x70, 0xF8, 0xEC,
5363 0x2B, 0x50, 0x3C, 0xE9,
5364
5365 0x1F, 0x0F, 0xBC, 0xE8,
5366 0x00, 0x80, 0x00, 0xE8,
5367
5368 0x59, 0x78, 0xF8, 0xEC,
5369 0x00, 0x80, 0x00, 0xE8,
5370
5371 0x15, 0xC0, 0x20, 0xE9,
5372 0x15, 0xC0, 0x20, 0xE9,
5373
5374 0x15, 0xC0, 0x20, 0xE9,
5375 0x15, 0xC0, 0x20, 0xE9,
5376
5377 0x1E, 0x12, 0x41, 0xE9,
5378 0x1A, 0x22, 0x41, 0xE9,
5379
5380 0x46, 0x37, 0x46, 0xDF,
5381 0x56, 0x3F, 0x56, 0xDF,
5382
5383 0x2B, 0x40, 0x3D, 0xE9,
5384 0x66, 0x3D, 0x66, 0xDF,
5385
5386 0x1D, 0x32, 0x41, 0xE9,
5387 0x67, 0x3D, 0x67, 0xDF,
5388
5389 0x47, 0x37, 0x47, 0xDF,
5390 0x57, 0x3F, 0x57, 0xDF,
5391
5392 0x2A, 0x40, 0x20, 0xE9,
5393 0x59, 0x3F, 0x59, 0xDF,
5394
5395 0x16, 0x30, 0x20, 0xE9,
5396 0x69, 0x3D, 0x69, 0xDF,
5397
5398 0x48, 0x37, 0x48, 0xDF,
5399 0x58, 0x3F, 0x58, 0xDF,
5400
5401 0x12, 0x12, 0x2D, 0xDF,
5402 0x22, 0x22, 0x2D, 0xDF,
5403
5404 0x32, 0x32, 0x2D, 0xDF,
5405 0x3A, 0x3A, 0x2D, 0xDF,
5406
5407 0x68, 0x3D, 0x68, 0xDF,
5408 0x49, 0x37, 0x49, 0xDF,
5409
5410 0x3D, 0xCF, 0x74, 0xC0,
5411 0x37, 0xCF, 0x74, 0xC4,
5412
5413 0x31, 0x53, 0x2F, 0x9F,
5414 0x34, 0x80, 0x20, 0xE9,
5415
5416 0x39, 0xE5, 0x2C, 0x9F,
5417 0x3C, 0x3D, 0x20, 0xE9,
5418
5419 0x0A, 0x44, 0x54, 0xB0,
5420 0x02, 0x44, 0x64, 0xB0,
5421
5422 0x2A, 0x44, 0x54, 0xB2,
5423 0x1A, 0x44, 0x64, 0xB2,
5424
5425 0x29, 0x80, 0x3A, 0xEA,
5426 0x0A, 0x20,
5427 0x02, 0x20,
5428
5429 0x0F, 0xCF, 0x74, 0xC6,
5430 0x3D, 0xCF, 0x74, 0xC2,
5431
5432 0x88, 0x73, 0x5E, 0xE9,
5433 0x2A, 0x20,
5434 0x1A, 0x20,
5435
5436 0x30, 0x50, 0x2E, 0x9F,
5437 0x32, 0x31, 0x5F, 0xE9,
5438
5439 0x38, 0x21, 0x2C, 0x9F,
5440 0x33, 0x39, 0x5F, 0xE9,
5441
5442 0x31, 0x53, 0x2F, 0x9F,
5443 0x9C, 0x0F, 0x20, 0xE9,
5444
5445 0x0A, 0x44, 0x54, 0xB4,
5446 0x02, 0x44, 0x64, 0xB4,
5447
5448 0x2A, 0x44, 0x54, 0xB6,
5449 0x1A, 0x44, 0x64, 0xB6,
5450
5451 0x39, 0xE5, 0x2C, 0x9F,
5452 0x38, 0x3D, 0x20, 0xE9,
5453
5454 0x0A, 0x20,
5455 0x02, 0x20,
5456 0x2A, 0x20,
5457 0x1A, 0x20,
5458
5459 0x0A, 0x47, 0x57, 0xBF,
5460 0x02, 0x47, 0x67, 0xBF,
5461
5462 0x30, 0x50, 0x2E, 0x9F,
5463 0x3E, 0x30, 0x4F, 0xE9,
5464
5465 0x38, 0x21, 0x2C, 0x9F,
5466 0x3F, 0x38, 0x4F, 0xE9,
5467
5468 0x2A, 0x46, 0x56, 0xBF,
5469 0x1A, 0x46, 0x66, 0xBF,
5470
5471 0x31, 0x53, 0x2F, 0x9F,
5472 0x3A, 0x31, 0x4F, 0xE9,
5473
5474 0x39, 0xE5, 0x2C, 0x9F,
5475 0x3B, 0x39, 0x4F, 0xE9,
5476
5477 0x31, 0x53, 0x2F, 0x9F,
5478 0x36, 0x30, 0x4F, 0xE9,
5479
5480 0x39, 0xE5, 0x2C, 0x9F,
5481 0x37, 0x38, 0x4F, 0xE9,
5482
5483 0x2A, 0x43, 0x53, 0xBF,
5484 0x1A, 0x43, 0x63, 0xBF,
5485
5486 0x30, 0x50, 0x2E, 0x9F,
5487 0x9D, 0x31, 0x4F, 0xE9,
5488
5489 0x38, 0x21, 0x2C, 0x9F,
5490 0x9E, 0x39, 0x4F, 0xE9,
5491
5492 0x0A, 0x48, 0x58, 0xBF,
5493 0x02, 0x48, 0x68, 0xBF,
5494
5495 0x31, 0x53, 0x2F, 0x9F,
5496 0x80, 0x31, 0x57, 0xE9,
5497
5498 0x39, 0xE5, 0x2C, 0x9F,
5499 0x81, 0x39, 0x57, 0xE9,
5500
5501 0x2A, 0x49, 0x59, 0xBF,
5502 0x1A, 0x49, 0x69, 0xBF,
5503
5504 0x30, 0x50, 0x2E, 0x9F,
5505 0x82, 0x30, 0x57, 0xE9,
5506
5507 0x38, 0x21, 0x2C, 0x9F,
5508 0x83, 0x38, 0x57, 0xE9,
5509
5510 0x31, 0x53, 0x2F, 0x9F,
5511 0x84, 0x31, 0x5E, 0xE9,
5512
5513 0x39, 0xE5, 0x2C, 0x9F,
5514 0x85, 0x39, 0x5E, 0xE9,
5515
5516 0x86, 0x76, 0x57, 0xE9,
5517 0x8A, 0x36, 0x20, 0xE9,
5518
5519 0x87, 0x77, 0x57, 0xE9,
5520 0x8B, 0x3E, 0xBF, 0xEA,
5521
5522 0x80, 0x30, 0x57, 0xE9,
5523 0x81, 0x38, 0x57, 0xE9,
5524
5525 0x82, 0x31, 0x57, 0xE9,
5526 0x86, 0x78, 0x57, 0xE9,
5527
5528 0x83, 0x39, 0x57, 0xE9,
5529 0x87, 0x79, 0x57, 0xE9,
5530
5531 0x30, 0x1F, 0x5F, 0xE9,
5532 0x8A, 0x34, 0x20, 0xE9,
5533
5534 0x8B, 0x3C, 0x20, 0xE9,
5535 0x37, 0x50, 0x60, 0xBD,
5536
5537 0x57, 0x0D, 0x20, 0xE9,
5538 0x35, 0x51, 0x61, 0xBD,
5539
5540 0x2B, 0x50, 0x20, 0xE9,
5541 0x1D, 0x37, 0xE1, 0xEA,
5542
5543 0x1E, 0x35, 0xE1, 0xEA,
5544 0x00, 0xE0,
5545 0x0E, 0x77,
5546
5547 0x24, 0x51, 0x20, 0xE9,
5548 0x9B, 0xFF, 0x20, 0xEA,
5549
5550 0x16, 0x0E, 0x20, 0xE9,
5551 0x57, 0x2E, 0xBF, 0xEA,
5552
5553 0x0B, 0x46, 0xA0, 0xE8,
5554 0x1B, 0x56, 0xA0, 0xE8,
5555
5556 0x2B, 0x66, 0xA0, 0xE8,
5557 0x0C, 0x47, 0xA0, 0xE8,
5558
5559 0x1C, 0x57, 0xA0, 0xE8,
5560 0x2C, 0x67, 0xA0, 0xE8,
5561
5562 0x0B, 0x00,
5563 0x1B, 0x00,
5564 0x2B, 0x00,
5565 0x00, 0xE0,
5566
5567 0x0C, 0x00,
5568 0x1C, 0x00,
5569 0x2C, 0x00,
5570 0x00, 0xE0,
5571
5572 0x0B, 0x65,
5573 0x1B, 0x65,
5574 0x2B, 0x65,
5575 0x00, 0xE0,
5576
5577 0x0C, 0x65,
5578 0x1C, 0x65,
5579 0x2C, 0x65,
5580 0x00, 0xE0,
5581
5582 0x0B, 0x1B, 0x60, 0xEC,
5583 0x36, 0xD7, 0x36, 0xAD,
5584
5585 0x2B, 0x80, 0x60, 0xEC,
5586 0x0C, 0x1C, 0x60, 0xEC,
5587
5588 0x3E, 0xD7, 0x3E, 0xAD,
5589 0x2C, 0x80, 0x60, 0xEC,
5590
5591 0x0B, 0x2B, 0xDE, 0xE8,
5592 0x1B, 0x80, 0xDE, 0xE8,
5593
5594 0x36, 0x80, 0x36, 0xBD,
5595 0x3E, 0x80, 0x3E, 0xBD,
5596
5597 0x33, 0xD7, 0x0B, 0xBD,
5598 0x3B, 0xD7, 0x1B, 0xBD,
5599
5600 0x46, 0x80, 0x46, 0xCF,
5601 0x57, 0x80, 0x57, 0xCF,
5602
5603 0x66, 0x33, 0x66, 0xCF,
5604 0x47, 0x3B, 0x47, 0xCF,
5605
5606 0x56, 0x33, 0x56, 0xCF,
5607 0x67, 0x3B, 0x67, 0xCF,
5608
5609 0x0B, 0x48, 0xA0, 0xE8,
5610 0x1B, 0x58, 0xA0, 0xE8,
5611
5612 0x2B, 0x68, 0xA0, 0xE8,
5613 0x0C, 0x49, 0xA0, 0xE8,
5614
5615 0x1C, 0x59, 0xA0, 0xE8,
5616 0x2C, 0x69, 0xA0, 0xE8,
5617
5618 0x0B, 0x00,
5619 0x1B, 0x00,
5620 0x2B, 0x00,
5621 0x00, 0xE0,
5622
5623 0x0C, 0x00,
5624 0x1C, 0x00,
5625 0x2C, 0x00,
5626 0x00, 0xE0,
5627
5628 0x0B, 0x65,
5629 0x1B, 0x65,
5630 0x2B, 0x65,
5631 0x00, 0xE0,
5632
5633 0x0C, 0x65,
5634 0x1C, 0x65,
5635 0x2C, 0x65,
5636 0x00, 0xE0,
5637
5638 0x0B, 0x1B, 0x60, 0xEC,
5639 0x34, 0xD7, 0x34, 0xAD,
5640
5641 0x2B, 0x80, 0x60, 0xEC,
5642 0x0C, 0x1C, 0x60, 0xEC,
5643
5644 0x3C, 0xD7, 0x3C, 0xAD,
5645 0x2C, 0x80, 0x60, 0xEC,
5646
5647 0x0B, 0x2B, 0xDE, 0xE8,
5648 0x1B, 0x80, 0xDE, 0xE8,
5649
5650 0x34, 0x80, 0x34, 0xBD,
5651 0x3C, 0x80, 0x3C, 0xBD,
5652
5653 0x33, 0xD7, 0x0B, 0xBD,
5654 0x3B, 0xD7, 0x1B, 0xBD,
5655
5656 0x48, 0x80, 0x48, 0xCF,
5657 0x59, 0x80, 0x59, 0xCF,
5658
5659 0x68, 0x33, 0x68, 0xCF,
5660 0x49, 0x3B, 0x49, 0xCF,
5661
5662 0xBA, 0xFF, 0x20, 0xEA,
5663 0x00, 0x80, 0x00, 0xE8,
5664
5665 0x58, 0x33, 0x58, 0xCF,
5666 0x69, 0x3B, 0x69, 0xCF,
5667
5668 0x79, 0xFF, 0x20, 0xEA,
5669 0x57, 0xC0, 0xBF, 0xEA,
5670
5671 0x00, 0x80, 0xA0, 0xE9,
5672 0x00, 0x00, 0xD8, 0xEC,
5673
5674};
5675
5676static unsigned char warp_g400_t2gzaf[] = {
5677
5678 0x00, 0x8A, 0x98, 0xE9,
5679 0x00, 0x80, 0x00, 0xE8,
5680
5681 0x00, 0x80, 0xA0, 0xE9,
5682 0x00, 0x00, 0xD8, 0xEC,
5683
5684 0xFF, 0x80, 0xC0, 0xE9,
5685 0x00, 0x80, 0x00, 0xE8,
5686
5687 0x0A, 0x40, 0x50, 0xBF,
5688 0x2A, 0x40, 0x60, 0xBF,
5689
5690 0x32, 0x41, 0x51, 0xBF,
5691 0x3A, 0x41, 0x61, 0xBF,
5692
5693 0xC3, 0x6B,
5694 0xD3, 0x6B,
5695 0x00, 0x8A, 0x98, 0xE9,
5696
5697 0x73, 0x7B, 0xC8, 0xEC,
5698 0x96, 0xE2,
5699 0x41, 0x04,
5700
5701 0x7B, 0x43, 0xA0, 0xE8,
5702 0x73, 0x53, 0xA0, 0xE8,
5703
5704 0xAD, 0xEE, 0x23, 0x9F,
5705 0x00, 0xE0,
5706 0x51, 0x04,
5707
5708 0x90, 0xE2,
5709 0x61, 0x04,
5710 0x31, 0x46, 0xB1, 0xE8,
5711
5712 0x51, 0x41, 0xE0, 0xEC,
5713 0x39, 0x67, 0xB1, 0xE8,
5714
5715 0x00, 0x04,
5716 0x46, 0xE2,
5717 0x73, 0x63, 0xA0, 0xE8,
5718
5719 0x61, 0x41, 0xE0, 0xEC,
5720 0x31, 0x00,
5721 0x39, 0x00,
5722
5723 0x81, 0x80, 0x15, 0xEA,
5724 0x10, 0x04,
5725 0x20, 0x04,
5726
5727 0x61, 0x51, 0xE0, 0xEC,
5728 0x2F, 0x41, 0x60, 0xEA,
5729
5730 0x31, 0x20,
5731 0x39, 0x20,
5732 0x1F, 0x42, 0xA0, 0xE8,
5733
5734 0x2A, 0x42, 0x52, 0xBF,
5735 0x0F, 0x52, 0xA0, 0xE8,
5736
5737 0x1A, 0x42, 0x62, 0xBF,
5738 0x1E, 0x51, 0x60, 0xEA,
5739
5740 0x73, 0x7B, 0xC8, 0xEC,
5741 0x0E, 0x61, 0x60, 0xEA,
5742
5743 0x32, 0x40, 0x50, 0xBD,
5744 0x22, 0x40, 0x60, 0xBD,
5745
5746 0x12, 0x41, 0x51, 0xBD,
5747 0x3A, 0x41, 0x61, 0xBD,
5748
5749 0xBF, 0x2F, 0x0E, 0xBD,
5750 0x97, 0xE2,
5751 0x7B, 0x72,
5752
5753 0x32, 0x20,
5754 0x22, 0x20,
5755 0x12, 0x20,
5756 0x3A, 0x20,
5757
5758 0x35, 0x48, 0xB1, 0xE8,
5759 0x3D, 0x59, 0xB1, 0xE8,
5760
5761 0x46, 0x31, 0x46, 0xBF,
5762 0x56, 0x31, 0x56, 0xBF,
5763
5764 0xB3, 0xE2, 0x2D, 0x9F,
5765 0x00, 0x80, 0x00, 0xE8,
5766
5767 0x66, 0x31, 0x66, 0xBF,
5768 0x47, 0x39, 0x47, 0xBF,
5769
5770 0x57, 0x39, 0x57, 0xBF,
5771 0x67, 0x39, 0x67, 0xBF,
5772
5773 0x72, 0x80, 0x07, 0xEA,
5774 0x24, 0x41, 0x20, 0xE9,
5775
5776 0x35, 0x00,
5777 0x3D, 0x00,
5778 0x00, 0xE0,
5779 0x2D, 0x73,
5780
5781 0x33, 0x72,
5782 0x0C, 0xE3,
5783 0x8D, 0x2F, 0x1E, 0xBD,
5784
5785 0x43, 0x75, 0xF8, 0xEC,
5786 0x35, 0x20,
5787 0x3D, 0x20,
5788
5789 0x43, 0x43, 0x2D, 0xDF,
5790 0x53, 0x53, 0x2D, 0xDF,
5791
5792 0xAE, 0x1E, 0x0E, 0xBD,
5793 0x58, 0xE3,
5794 0x33, 0x66,
5795
5796 0x48, 0x35, 0x48, 0xBF,
5797 0x58, 0x35, 0x58, 0xBF,
5798
5799 0x68, 0x35, 0x68, 0xBF,
5800 0x49, 0x3D, 0x49, 0xBF,
5801
5802 0x59, 0x3D, 0x59, 0xBF,
5803 0x69, 0x3D, 0x69, 0xBF,
5804
5805 0x63, 0x63, 0x2D, 0xDF,
5806 0x4D, 0x7D, 0xF8, 0xEC,
5807
5808 0x59, 0xE3,
5809 0x00, 0xE0,
5810 0xB8, 0x38, 0x33, 0xBF,
5811
5812 0x2D, 0x73,
5813 0x30, 0x76,
5814 0x18, 0x3A, 0x41, 0xE9,
5815
5816 0x3F, 0x53, 0xA0, 0xE8,
5817 0x05, 0x80, 0x3D, 0xEA,
5818
5819 0x37, 0x43, 0xA0, 0xE8,
5820 0x3D, 0x63, 0xA0, 0xE8,
5821
5822 0x50, 0x70, 0xF8, 0xEC,
5823 0x2B, 0x50, 0x3C, 0xE9,
5824
5825 0x1F, 0x0F, 0xBC, 0xE8,
5826 0x00, 0x80, 0x00, 0xE8,
5827
5828 0x59, 0x78, 0xF8, 0xEC,
5829 0x00, 0x80, 0x00, 0xE8,
5830
5831 0x15, 0xC0, 0x20, 0xE9,
5832 0x15, 0xC0, 0x20, 0xE9,
5833
5834 0x15, 0xC0, 0x20, 0xE9,
5835 0x15, 0xC0, 0x20, 0xE9,
5836
5837 0x1E, 0x12, 0x41, 0xE9,
5838 0x1A, 0x22, 0x41, 0xE9,
5839
5840 0x46, 0x37, 0x46, 0xDF,
5841 0x56, 0x3F, 0x56, 0xDF,
5842
5843 0x2B, 0x40, 0x3D, 0xE9,
5844 0x66, 0x3D, 0x66, 0xDF,
5845
5846 0x1D, 0x32, 0x41, 0xE9,
5847 0x67, 0x3D, 0x67, 0xDF,
5848
5849 0x47, 0x37, 0x47, 0xDF,
5850 0x57, 0x3F, 0x57, 0xDF,
5851
5852 0x2A, 0x40, 0x20, 0xE9,
5853 0x59, 0x3F, 0x59, 0xDF,
5854
5855 0x16, 0x30, 0x20, 0xE9,
5856 0x69, 0x3D, 0x69, 0xDF,
5857
5858 0x48, 0x37, 0x48, 0xDF,
5859 0x58, 0x3F, 0x58, 0xDF,
5860
5861 0x12, 0x12, 0x2D, 0xDF,
5862 0x22, 0x22, 0x2D, 0xDF,
5863
5864 0x32, 0x32, 0x2D, 0xDF,
5865 0x3A, 0x3A, 0x2D, 0xDF,
5866
5867 0x68, 0x3D, 0x68, 0xDF,
5868 0x49, 0x37, 0x49, 0xDF,
5869
5870 0x3D, 0xCF, 0x74, 0xC0,
5871 0x37, 0xCF, 0x74, 0xC4,
5872
5873 0x0A, 0x44, 0x54, 0xB0,
5874 0x02, 0x44, 0x64, 0xB0,
5875
5876 0x31, 0x53, 0x2F, 0x9F,
5877 0x34, 0x37, 0x20, 0xE9,
5878
5879 0x39, 0xE5, 0x2C, 0x9F,
5880 0x3C, 0x3D, 0x20, 0xE9,
5881
5882 0x2A, 0x44, 0x54, 0xB2,
5883 0x1A, 0x44, 0x64, 0xB2,
5884
5885 0x2E, 0x80, 0x3A, 0xEA,
5886 0x0A, 0x20,
5887 0x02, 0x20,
5888
5889 0x88, 0x73, 0x5E, 0xE9,
5890 0x2A, 0x20,
5891 0x1A, 0x20,
5892
5893 0x3D, 0xCF, 0x74, 0xC2,
5894 0x0F, 0xCF, 0x74, 0xC6,
5895
5896 0x30, 0x50, 0x2E, 0x9F,
5897 0x32, 0x31, 0x5F, 0xE9,
5898
5899 0x38, 0x21, 0x2C, 0x9F,
5900 0x33, 0x39, 0x5F, 0xE9,
5901
5902 0x31, 0x53, 0x2F, 0x9F,
5903 0x9C, 0x0F, 0x20, 0xE9,
5904
5905 0x0A, 0x44, 0x54, 0xB4,
5906 0x02, 0x44, 0x64, 0xB4,
5907
5908 0x2A, 0x44, 0x54, 0xB6,
5909 0x1A, 0x44, 0x64, 0xB6,
5910
5911 0x39, 0xE5, 0x2C, 0x9F,
5912 0x38, 0x3D, 0x20, 0xE9,
5913
5914 0x0A, 0x20,
5915 0x02, 0x20,
5916 0x2A, 0x20,
5917 0x1A, 0x20,
5918
5919 0x3D, 0xCF, 0x75, 0xC6,
5920 0x00, 0x80, 0x00, 0xE8,
5921
5922 0x30, 0x50, 0x2E, 0x9F,
5923 0x3E, 0x30, 0x4F, 0xE9,
5924
5925 0x38, 0x21, 0x2C, 0x9F,
5926 0x3F, 0x38, 0x4F, 0xE9,
5927
5928 0x0A, 0x45, 0x55, 0xB6,
5929 0x02, 0x45, 0x65, 0xB6,
5930
5931 0x31, 0x53, 0x2F, 0x9F,
5932 0x3A, 0x31, 0x4F, 0xE9,
5933
5934 0x39, 0xE5, 0x2C, 0x9F,
5935 0x3B, 0x39, 0x4F, 0xE9,
5936
5937 0x31, 0x3D, 0x20, 0xE9,
5938 0x0A, 0x20,
5939 0x02, 0x20,
5940
5941 0x2A, 0x46, 0x56, 0xBF,
5942 0x1A, 0x46, 0x66, 0xBF,
5943
5944 0x0A, 0x47, 0x57, 0xBF,
5945 0x02, 0x47, 0x67, 0xBF,
5946
5947 0x30, 0x50, 0x2E, 0x9F,
5948 0x36, 0x30, 0x4F, 0xE9,
5949
5950 0x38, 0x21, 0x2C, 0x9F,
5951 0x37, 0x38, 0x4F, 0xE9,
5952
5953 0x31, 0x53, 0x2F, 0x9F,
5954 0x9D, 0x31, 0x4F, 0xE9,
5955
5956 0x39, 0xE5, 0x2C, 0x9F,
5957 0x9E, 0x39, 0x4F, 0xE9,
5958
5959 0x2A, 0x43, 0x53, 0xBF,
5960 0x1A, 0x43, 0x63, 0xBF,
5961
5962 0x30, 0x50, 0x2E, 0x9F,
5963 0x35, 0x30, 0x4F, 0xE9,
5964
5965 0x38, 0x21, 0x2C, 0x9F,
5966 0x39, 0x38, 0x4F, 0xE9,
5967
5968 0x0A, 0x48, 0x58, 0xBF,
5969 0x02, 0x48, 0x68, 0xBF,
5970
5971 0x31, 0x53, 0x2F, 0x9F,
5972 0x80, 0x31, 0x57, 0xE9,
5973
5974 0x39, 0xE5, 0x2C, 0x9F,
5975 0x81, 0x39, 0x57, 0xE9,
5976
5977 0x2A, 0x49, 0x59, 0xBF,
5978 0x1A, 0x49, 0x69, 0xBF,
5979
5980 0x30, 0x50, 0x2E, 0x9F,
5981 0x82, 0x30, 0x57, 0xE9,
5982
5983 0x38, 0x21, 0x2C, 0x9F,
5984 0x83, 0x38, 0x57, 0xE9,
5985
5986 0x31, 0x53, 0x2F, 0x9F,
5987 0x84, 0x31, 0x5E, 0xE9,
5988
5989 0x39, 0xE5, 0x2C, 0x9F,
5990 0x85, 0x39, 0x5E, 0xE9,
5991
5992 0x86, 0x76, 0x57, 0xE9,
5993 0x8A, 0x36, 0x20, 0xE9,
5994
5995 0x87, 0x77, 0x57, 0xE9,
5996 0x8B, 0x3E, 0xBF, 0xEA,
5997
5998 0x80, 0x30, 0x57, 0xE9,
5999 0x81, 0x38, 0x57, 0xE9,
6000
6001 0x82, 0x31, 0x57, 0xE9,
6002 0x86, 0x78, 0x57, 0xE9,
6003
6004 0x83, 0x39, 0x57, 0xE9,
6005 0x87, 0x79, 0x57, 0xE9,
6006
6007 0x30, 0x1F, 0x5F, 0xE9,
6008 0x8A, 0x34, 0x20, 0xE9,
6009
6010 0x8B, 0x3C, 0x20, 0xE9,
6011 0x37, 0x50, 0x60, 0xBD,
6012
6013 0x57, 0x0D, 0x20, 0xE9,
6014 0x35, 0x51, 0x61, 0xBD,
6015
6016 0x2B, 0x50, 0x20, 0xE9,
6017 0x1D, 0x37, 0xE1, 0xEA,
6018
6019 0x1E, 0x35, 0xE1, 0xEA,
6020 0x00, 0xE0,
6021 0x0E, 0x77,
6022
6023 0x24, 0x51, 0x20, 0xE9,
6024 0x96, 0xFF, 0x20, 0xEA,
6025
6026 0x16, 0x0E, 0x20, 0xE9,
6027 0x57, 0x2E, 0xBF, 0xEA,
6028
6029 0x0B, 0x46, 0xA0, 0xE8,
6030 0x1B, 0x56, 0xA0, 0xE8,
6031
6032 0x2B, 0x66, 0xA0, 0xE8,
6033 0x0C, 0x47, 0xA0, 0xE8,
6034
6035 0x1C, 0x57, 0xA0, 0xE8,
6036 0x2C, 0x67, 0xA0, 0xE8,
6037
6038 0x0B, 0x00,
6039 0x1B, 0x00,
6040 0x2B, 0x00,
6041 0x00, 0xE0,
6042
6043 0x0C, 0x00,
6044 0x1C, 0x00,
6045 0x2C, 0x00,
6046 0x00, 0xE0,
6047
6048 0x0B, 0x65,
6049 0x1B, 0x65,
6050 0x2B, 0x65,
6051 0x00, 0xE0,
6052
6053 0x0C, 0x65,
6054 0x1C, 0x65,
6055 0x2C, 0x65,
6056 0x00, 0xE0,
6057
6058 0x0B, 0x1B, 0x60, 0xEC,
6059 0x36, 0xD7, 0x36, 0xAD,
6060
6061 0x2B, 0x80, 0x60, 0xEC,
6062 0x0C, 0x1C, 0x60, 0xEC,
6063
6064 0x3E, 0xD7, 0x3E, 0xAD,
6065 0x2C, 0x80, 0x60, 0xEC,
6066
6067 0x0B, 0x2B, 0xDE, 0xE8,
6068 0x1B, 0x80, 0xDE, 0xE8,
6069
6070 0x36, 0x80, 0x36, 0xBD,
6071 0x3E, 0x80, 0x3E, 0xBD,
6072
6073 0x33, 0xD7, 0x0B, 0xBD,
6074 0x3B, 0xD7, 0x1B, 0xBD,
6075
6076 0x46, 0x80, 0x46, 0xCF,
6077 0x57, 0x80, 0x57, 0xCF,
6078
6079 0x66, 0x33, 0x66, 0xCF,
6080 0x47, 0x3B, 0x47, 0xCF,
6081
6082 0x56, 0x33, 0x56, 0xCF,
6083 0x67, 0x3B, 0x67, 0xCF,
6084
6085 0x0B, 0x48, 0xA0, 0xE8,
6086 0x1B, 0x58, 0xA0, 0xE8,
6087
6088 0x2B, 0x68, 0xA0, 0xE8,
6089 0x0C, 0x49, 0xA0, 0xE8,
6090
6091 0x1C, 0x59, 0xA0, 0xE8,
6092 0x2C, 0x69, 0xA0, 0xE8,
6093
6094 0x0B, 0x00,
6095 0x1B, 0x00,
6096 0x2B, 0x00,
6097 0x00, 0xE0,
6098
6099 0x0C, 0x00,
6100 0x1C, 0x00,
6101 0x2C, 0x00,
6102 0x00, 0xE0,
6103
6104 0x0B, 0x65,
6105 0x1B, 0x65,
6106 0x2B, 0x65,
6107 0x00, 0xE0,
6108
6109 0x0C, 0x65,
6110 0x1C, 0x65,
6111 0x2C, 0x65,
6112 0x00, 0xE0,
6113
6114 0x0B, 0x1B, 0x60, 0xEC,
6115 0x34, 0xD7, 0x34, 0xAD,
6116
6117 0x2B, 0x80, 0x60, 0xEC,
6118 0x0C, 0x1C, 0x60, 0xEC,
6119
6120 0x3C, 0xD7, 0x3C, 0xAD,
6121 0x2C, 0x80, 0x60, 0xEC,
6122
6123 0x0B, 0x2B, 0xDE, 0xE8,
6124 0x1B, 0x80, 0xDE, 0xE8,
6125
6126 0x34, 0x80, 0x34, 0xBD,
6127 0x3C, 0x80, 0x3C, 0xBD,
6128
6129 0x33, 0xD7, 0x0B, 0xBD,
6130 0x3B, 0xD7, 0x1B, 0xBD,
6131
6132 0x48, 0x80, 0x48, 0xCF,
6133 0x59, 0x80, 0x59, 0xCF,
6134
6135 0x68, 0x33, 0x68, 0xCF,
6136 0x49, 0x3B, 0x49, 0xCF,
6137
6138 0xB5, 0xFF, 0x20, 0xEA,
6139 0x00, 0x80, 0x00, 0xE8,
6140
6141 0x58, 0x33, 0x58, 0xCF,
6142 0x69, 0x3B, 0x69, 0xCF,
6143
6144 0x74, 0xFF, 0x20, 0xEA,
6145 0x57, 0xC0, 0xBF, 0xEA,
6146
6147 0x00, 0x80, 0xA0, 0xE9,
6148 0x00, 0x00, 0xD8, 0xEC,
6149
6150};
6151
6152static unsigned char warp_g400_t2gzf[] = {
6153
6154 0x00, 0x8A, 0x98, 0xE9,
6155 0x00, 0x80, 0x00, 0xE8,
6156
6157 0x00, 0x80, 0xA0, 0xE9,
6158 0x00, 0x00, 0xD8, 0xEC,
6159
6160 0xFF, 0x80, 0xC0, 0xE9,
6161 0x00, 0x80, 0x00, 0xE8,
6162
6163 0x0A, 0x40, 0x50, 0xBF,
6164 0x2A, 0x40, 0x60, 0xBF,
6165
6166 0x32, 0x41, 0x51, 0xBF,
6167 0x3A, 0x41, 0x61, 0xBF,
6168
6169 0xC3, 0x6B,
6170 0xD3, 0x6B,
6171 0x00, 0x8A, 0x98, 0xE9,
6172
6173 0x73, 0x7B, 0xC8, 0xEC,
6174 0x96, 0xE2,
6175 0x41, 0x04,
6176
6177 0x7B, 0x43, 0xA0, 0xE8,
6178 0x73, 0x53, 0xA0, 0xE8,
6179
6180 0xAD, 0xEE, 0x23, 0x9F,
6181 0x00, 0xE0,
6182 0x51, 0x04,
6183
6184 0x90, 0xE2,
6185 0x61, 0x04,
6186 0x31, 0x46, 0xB1, 0xE8,
6187
6188 0x51, 0x41, 0xE0, 0xEC,
6189 0x39, 0x67, 0xB1, 0xE8,
6190
6191 0x00, 0x04,
6192 0x46, 0xE2,
6193 0x73, 0x63, 0xA0, 0xE8,
6194
6195 0x61, 0x41, 0xE0, 0xEC,
6196 0x31, 0x00,
6197 0x39, 0x00,
6198
6199 0x7D, 0x80, 0x15, 0xEA,
6200 0x10, 0x04,
6201 0x20, 0x04,
6202
6203 0x61, 0x51, 0xE0, 0xEC,
6204 0x2F, 0x41, 0x60, 0xEA,
6205
6206 0x31, 0x20,
6207 0x39, 0x20,
6208 0x1F, 0x42, 0xA0, 0xE8,
6209
6210 0x2A, 0x42, 0x52, 0xBF,
6211 0x0F, 0x52, 0xA0, 0xE8,
6212
6213 0x1A, 0x42, 0x62, 0xBF,
6214 0x1E, 0x51, 0x60, 0xEA,
6215
6216 0x73, 0x7B, 0xC8, 0xEC,
6217 0x0E, 0x61, 0x60, 0xEA,
6218
6219 0x32, 0x40, 0x50, 0xBD,
6220 0x22, 0x40, 0x60, 0xBD,
6221
6222 0x12, 0x41, 0x51, 0xBD,
6223 0x3A, 0x41, 0x61, 0xBD,
6224
6225 0xBF, 0x2F, 0x0E, 0xBD,
6226 0x97, 0xE2,
6227 0x7B, 0x72,
6228
6229 0x32, 0x20,
6230 0x22, 0x20,
6231 0x12, 0x20,
6232 0x3A, 0x20,
6233
6234 0x35, 0x48, 0xB1, 0xE8,
6235 0x3D, 0x59, 0xB1, 0xE8,
6236
6237 0x46, 0x31, 0x46, 0xBF,
6238 0x56, 0x31, 0x56, 0xBF,
6239
6240 0xB3, 0xE2, 0x2D, 0x9F,
6241 0x00, 0x80, 0x00, 0xE8,
6242
6243 0x66, 0x31, 0x66, 0xBF,
6244 0x47, 0x39, 0x47, 0xBF,
6245
6246 0x57, 0x39, 0x57, 0xBF,
6247 0x67, 0x39, 0x67, 0xBF,
6248
6249 0x6E, 0x80, 0x07, 0xEA,
6250 0x24, 0x41, 0x20, 0xE9,
6251
6252 0x35, 0x00,
6253 0x3D, 0x00,
6254 0x00, 0xE0,
6255 0x2D, 0x73,
6256
6257 0x33, 0x72,
6258 0x0C, 0xE3,
6259 0x8D, 0x2F, 0x1E, 0xBD,
6260
6261 0x43, 0x75, 0xF8, 0xEC,
6262 0x35, 0x20,
6263 0x3D, 0x20,
6264
6265 0x43, 0x43, 0x2D, 0xDF,
6266 0x53, 0x53, 0x2D, 0xDF,
6267
6268 0xAE, 0x1E, 0x0E, 0xBD,
6269 0x58, 0xE3,
6270 0x33, 0x66,
6271
6272 0x48, 0x35, 0x48, 0xBF,
6273 0x58, 0x35, 0x58, 0xBF,
6274
6275 0x68, 0x35, 0x68, 0xBF,
6276 0x49, 0x3D, 0x49, 0xBF,
6277
6278 0x59, 0x3D, 0x59, 0xBF,
6279 0x69, 0x3D, 0x69, 0xBF,
6280
6281 0x63, 0x63, 0x2D, 0xDF,
6282 0x4D, 0x7D, 0xF8, 0xEC,
6283
6284 0x59, 0xE3,
6285 0x00, 0xE0,
6286 0xB8, 0x38, 0x33, 0xBF,
6287
6288 0x2D, 0x73,
6289 0x30, 0x76,
6290 0x18, 0x3A, 0x41, 0xE9,
6291
6292 0x3F, 0x53, 0xA0, 0xE8,
6293 0x05, 0x80, 0x3D, 0xEA,
6294
6295 0x37, 0x43, 0xA0, 0xE8,
6296 0x3D, 0x63, 0xA0, 0xE8,
6297
6298 0x50, 0x70, 0xF8, 0xEC,
6299 0x2B, 0x50, 0x3C, 0xE9,
6300
6301 0x1F, 0x0F, 0xBC, 0xE8,
6302 0x00, 0x80, 0x00, 0xE8,
6303
6304 0x59, 0x78, 0xF8, 0xEC,
6305 0x00, 0x80, 0x00, 0xE8,
6306
6307 0x15, 0xC0, 0x20, 0xE9,
6308 0x15, 0xC0, 0x20, 0xE9,
6309
6310 0x15, 0xC0, 0x20, 0xE9,
6311 0x15, 0xC0, 0x20, 0xE9,
6312
6313 0x1E, 0x12, 0x41, 0xE9,
6314 0x1A, 0x22, 0x41, 0xE9,
6315
6316 0x46, 0x37, 0x46, 0xDF,
6317 0x56, 0x3F, 0x56, 0xDF,
6318
6319 0x2B, 0x40, 0x3D, 0xE9,
6320 0x66, 0x3D, 0x66, 0xDF,
6321
6322 0x1D, 0x32, 0x41, 0xE9,
6323 0x67, 0x3D, 0x67, 0xDF,
6324
6325 0x47, 0x37, 0x47, 0xDF,
6326 0x57, 0x3F, 0x57, 0xDF,
6327
6328 0x2A, 0x40, 0x20, 0xE9,
6329 0x59, 0x3F, 0x59, 0xDF,
6330
6331 0x16, 0x30, 0x20, 0xE9,
6332 0x69, 0x3D, 0x69, 0xDF,
6333
6334 0x48, 0x37, 0x48, 0xDF,
6335 0x58, 0x3F, 0x58, 0xDF,
6336
6337 0x12, 0x12, 0x2D, 0xDF,
6338 0x22, 0x22, 0x2D, 0xDF,
6339
6340 0x32, 0x32, 0x2D, 0xDF,
6341 0x3A, 0x3A, 0x2D, 0xDF,
6342
6343 0x68, 0x3D, 0x68, 0xDF,
6344 0x49, 0x37, 0x49, 0xDF,
6345
6346 0x3D, 0xCF, 0x74, 0xC0,
6347 0x37, 0xCF, 0x74, 0xC4,
6348
6349 0x39, 0xE5, 0x2C, 0x9F,
6350 0x34, 0x80, 0x20, 0xE9,
6351
6352 0x31, 0x53, 0x2F, 0x9F,
6353 0x00, 0x80, 0x00, 0xE8,
6354
6355 0x88, 0x73, 0x5E, 0xE9,
6356 0x00, 0x80, 0x00, 0xE8,
6357
6358 0x0F, 0xCF, 0x75, 0xC6,
6359 0x3C, 0x3D, 0x20, 0xE9,
6360
6361 0x0A, 0x44, 0x54, 0xB0,
6362 0x02, 0x44, 0x64, 0xB0,
6363
6364 0x2A, 0x44, 0x54, 0xB2,
6365 0x1A, 0x44, 0x64, 0xB2,
6366
6367 0x28, 0x80, 0x3A, 0xEA,
6368 0x0A, 0x20,
6369 0x02, 0x20,
6370
6371 0x3D, 0xCF, 0x74, 0xC2,
6372 0x2A, 0x20,
6373 0x1A, 0x20,
6374
6375 0x30, 0x50, 0x2E, 0x9F,
6376 0x32, 0x31, 0x5F, 0xE9,
6377
6378 0x38, 0x21, 0x2C, 0x9F,
6379 0x33, 0x39, 0x5F, 0xE9,
6380
6381 0x31, 0x53, 0x2F, 0x9F,
6382 0x31, 0x0F, 0x20, 0xE9,
6383
6384 0x0A, 0x44, 0x54, 0xB4,
6385 0x02, 0x44, 0x64, 0xB4,
6386
6387 0x2A, 0x45, 0x55, 0xB6,
6388 0x1A, 0x45, 0x65, 0xB6,
6389
6390 0x39, 0xE5, 0x2C, 0x9F,
6391 0x38, 0x3D, 0x20, 0xE9,
6392
6393 0x0A, 0x20,
6394 0x02, 0x20,
6395 0x2A, 0x20,
6396 0x1A, 0x20,
6397
6398 0x0A, 0x47, 0x57, 0xBF,
6399 0x02, 0x47, 0x67, 0xBF,
6400
6401 0x30, 0x50, 0x2E, 0x9F,
6402 0x3E, 0x30, 0x4F, 0xE9,
6403
6404 0x38, 0x21, 0x2C, 0x9F,
6405 0x3F, 0x38, 0x4F, 0xE9,
6406
6407 0x2A, 0x46, 0x56, 0xBF,
6408 0x1A, 0x46, 0x66, 0xBF,
6409
6410 0x31, 0x53, 0x2F, 0x9F,
6411 0x3A, 0x31, 0x4F, 0xE9,
6412
6413 0x39, 0xE5, 0x2C, 0x9F,
6414 0x3B, 0x39, 0x4F, 0xE9,
6415
6416 0x31, 0x53, 0x2F, 0x9F,
6417 0x36, 0x30, 0x4F, 0xE9,
6418
6419 0x39, 0xE5, 0x2C, 0x9F,
6420 0x37, 0x38, 0x4F, 0xE9,
6421
6422 0x2A, 0x43, 0x53, 0xBF,
6423 0x1A, 0x43, 0x63, 0xBF,
6424
6425 0x30, 0x50, 0x2E, 0x9F,
6426 0x35, 0x31, 0x4F, 0xE9,
6427
6428 0x38, 0x21, 0x2C, 0x9F,
6429 0x39, 0x39, 0x4F, 0xE9,
6430
6431 0x0A, 0x48, 0x58, 0xBF,
6432 0x02, 0x48, 0x68, 0xBF,
6433
6434 0x31, 0x53, 0x2F, 0x9F,
6435 0x80, 0x31, 0x57, 0xE9,
6436
6437 0x39, 0xE5, 0x2C, 0x9F,
6438 0x81, 0x39, 0x57, 0xE9,
6439
6440 0x2A, 0x49, 0x59, 0xBF,
6441 0x1A, 0x49, 0x69, 0xBF,
6442
6443 0x30, 0x50, 0x2E, 0x9F,
6444 0x82, 0x30, 0x57, 0xE9,
6445
6446 0x38, 0x21, 0x2C, 0x9F,
6447 0x83, 0x38, 0x57, 0xE9,
6448
6449 0x31, 0x53, 0x2F, 0x9F,
6450 0x84, 0x31, 0x5E, 0xE9,
6451
6452 0x39, 0xE5, 0x2C, 0x9F,
6453 0x85, 0x39, 0x5E, 0xE9,
6454
6455 0x86, 0x76, 0x57, 0xE9,
6456 0x8A, 0x36, 0x20, 0xE9,
6457
6458 0x87, 0x77, 0x57, 0xE9,
6459 0x8B, 0x3E, 0xBF, 0xEA,
6460
6461 0x80, 0x30, 0x57, 0xE9,
6462 0x81, 0x38, 0x57, 0xE9,
6463
6464 0x82, 0x31, 0x57, 0xE9,
6465 0x86, 0x78, 0x57, 0xE9,
6466
6467 0x83, 0x39, 0x57, 0xE9,
6468 0x87, 0x79, 0x57, 0xE9,
6469
6470 0x30, 0x1F, 0x5F, 0xE9,
6471 0x8A, 0x34, 0x20, 0xE9,
6472
6473 0x8B, 0x3C, 0x20, 0xE9,
6474 0x37, 0x50, 0x60, 0xBD,
6475
6476 0x57, 0x0D, 0x20, 0xE9,
6477 0x35, 0x51, 0x61, 0xBD,
6478
6479 0x2B, 0x50, 0x20, 0xE9,
6480 0x1D, 0x37, 0xE1, 0xEA,
6481
6482 0x1E, 0x35, 0xE1, 0xEA,
6483 0x00, 0xE0,
6484 0x0E, 0x77,
6485
6486 0x24, 0x51, 0x20, 0xE9,
6487 0x9A, 0xFF, 0x20, 0xEA,
6488
6489 0x16, 0x0E, 0x20, 0xE9,
6490 0x57, 0x2E, 0xBF, 0xEA,
6491
6492 0x0B, 0x46, 0xA0, 0xE8,
6493 0x1B, 0x56, 0xA0, 0xE8,
6494
6495 0x2B, 0x66, 0xA0, 0xE8,
6496 0x0C, 0x47, 0xA0, 0xE8,
6497
6498 0x1C, 0x57, 0xA0, 0xE8,
6499 0x2C, 0x67, 0xA0, 0xE8,
6500
6501 0x0B, 0x00,
6502 0x1B, 0x00,
6503 0x2B, 0x00,
6504 0x00, 0xE0,
6505
6506 0x0C, 0x00,
6507 0x1C, 0x00,
6508 0x2C, 0x00,
6509 0x00, 0xE0,
6510
6511 0x0B, 0x65,
6512 0x1B, 0x65,
6513 0x2B, 0x65,
6514 0x00, 0xE0,
6515
6516 0x0C, 0x65,
6517 0x1C, 0x65,
6518 0x2C, 0x65,
6519 0x00, 0xE0,
6520
6521 0x0B, 0x1B, 0x60, 0xEC,
6522 0x36, 0xD7, 0x36, 0xAD,
6523
6524 0x2B, 0x80, 0x60, 0xEC,
6525 0x0C, 0x1C, 0x60, 0xEC,
6526
6527 0x3E, 0xD7, 0x3E, 0xAD,
6528 0x2C, 0x80, 0x60, 0xEC,
6529
6530 0x0B, 0x2B, 0xDE, 0xE8,
6531 0x1B, 0x80, 0xDE, 0xE8,
6532
6533 0x36, 0x80, 0x36, 0xBD,
6534 0x3E, 0x80, 0x3E, 0xBD,
6535
6536 0x33, 0xD7, 0x0B, 0xBD,
6537 0x3B, 0xD7, 0x1B, 0xBD,
6538
6539 0x46, 0x80, 0x46, 0xCF,
6540 0x57, 0x80, 0x57, 0xCF,
6541
6542 0x66, 0x33, 0x66, 0xCF,
6543 0x47, 0x3B, 0x47, 0xCF,
6544
6545 0x56, 0x33, 0x56, 0xCF,
6546 0x67, 0x3B, 0x67, 0xCF,
6547
6548 0x0B, 0x48, 0xA0, 0xE8,
6549 0x1B, 0x58, 0xA0, 0xE8,
6550
6551 0x2B, 0x68, 0xA0, 0xE8,
6552 0x0C, 0x49, 0xA0, 0xE8,
6553
6554 0x1C, 0x59, 0xA0, 0xE8,
6555 0x2C, 0x69, 0xA0, 0xE8,
6556
6557 0x0B, 0x00,
6558 0x1B, 0x00,
6559 0x2B, 0x00,
6560 0x00, 0xE0,
6561
6562 0x0C, 0x00,
6563 0x1C, 0x00,
6564 0x2C, 0x00,
6565 0x00, 0xE0,
6566
6567 0x0B, 0x65,
6568 0x1B, 0x65,
6569 0x2B, 0x65,
6570 0x00, 0xE0,
6571
6572 0x0C, 0x65,
6573 0x1C, 0x65,
6574 0x2C, 0x65,
6575 0x00, 0xE0,
6576
6577 0x0B, 0x1B, 0x60, 0xEC,
6578 0x34, 0xD7, 0x34, 0xAD,
6579
6580 0x2B, 0x80, 0x60, 0xEC,
6581 0x0C, 0x1C, 0x60, 0xEC,
6582
6583 0x3C, 0xD7, 0x3C, 0xAD,
6584 0x2C, 0x80, 0x60, 0xEC,
6585
6586 0x0B, 0x2B, 0xDE, 0xE8,
6587 0x1B, 0x80, 0xDE, 0xE8,
6588
6589 0x34, 0x80, 0x34, 0xBD,
6590 0x3C, 0x80, 0x3C, 0xBD,
6591
6592 0x33, 0xD7, 0x0B, 0xBD,
6593 0x3B, 0xD7, 0x1B, 0xBD,
6594
6595 0x48, 0x80, 0x48, 0xCF,
6596 0x59, 0x80, 0x59, 0xCF,
6597
6598 0x68, 0x33, 0x68, 0xCF,
6599 0x49, 0x3B, 0x49, 0xCF,
6600
6601 0xBB, 0xFF, 0x20, 0xEA,
6602 0x00, 0x80, 0x00, 0xE8,
6603
6604 0x58, 0x33, 0x58, 0xCF,
6605 0x69, 0x3B, 0x69, 0xCF,
6606
6607 0x78, 0xFF, 0x20, 0xEA,
6608 0x57, 0xC0, 0xBF, 0xEA,
6609
6610 0x00, 0x80, 0xA0, 0xE9,
6611 0x00, 0x00, 0xD8, 0xEC,
6612
6613};
6614
6615static unsigned char warp_g400_t2gzs[] = {
6616
6617 0x00, 0x8A, 0x98, 0xE9,
6618 0x00, 0x80, 0x00, 0xE8,
6619
6620 0x00, 0x80, 0xA0, 0xE9,
6621 0x00, 0x00, 0xD8, 0xEC,
6622
6623 0xFF, 0x80, 0xC0, 0xE9,
6624 0x00, 0x80, 0x00, 0xE8,
6625
6626 0x0A, 0x40, 0x50, 0xBF,
6627 0x2A, 0x40, 0x60, 0xBF,
6628
6629 0x32, 0x41, 0x51, 0xBF,
6630 0x3A, 0x41, 0x61, 0xBF,
6631
6632 0xC3, 0x6B,
6633 0xD3, 0x6B,
6634 0x00, 0x8A, 0x98, 0xE9,
6635
6636 0x73, 0x7B, 0xC8, 0xEC,
6637 0x96, 0xE2,
6638 0x41, 0x04,
6639
6640 0x7B, 0x43, 0xA0, 0xE8,
6641 0x73, 0x53, 0xA0, 0xE8,
6642
6643 0xAD, 0xEE, 0x23, 0x9F,
6644 0x00, 0xE0,
6645 0x51, 0x04,
6646
6647 0x90, 0xE2,
6648 0x61, 0x04,
6649 0x31, 0x46, 0xB1, 0xE8,
6650
6651 0x51, 0x41, 0xE0, 0xEC,
6652 0x39, 0x67, 0xB1, 0xE8,
6653
6654 0x00, 0x04,
6655 0x46, 0xE2,
6656 0x73, 0x63, 0xA0, 0xE8,
6657
6658 0x61, 0x41, 0xE0, 0xEC,
6659 0x31, 0x00,
6660 0x39, 0x00,
6661
6662 0x85, 0x80, 0x15, 0xEA,
6663 0x10, 0x04,
6664 0x20, 0x04,
6665
6666 0x61, 0x51, 0xE0, 0xEC,
6667 0x2F, 0x41, 0x60, 0xEA,
6668
6669 0x31, 0x20,
6670 0x39, 0x20,
6671 0x1F, 0x42, 0xA0, 0xE8,
6672
6673 0x2A, 0x42, 0x52, 0xBF,
6674 0x0F, 0x52, 0xA0, 0xE8,
6675
6676 0x1A, 0x42, 0x62, 0xBF,
6677 0x1E, 0x51, 0x60, 0xEA,
6678
6679 0x73, 0x7B, 0xC8, 0xEC,
6680 0x0E, 0x61, 0x60, 0xEA,
6681
6682 0x32, 0x40, 0x50, 0xBD,
6683 0x22, 0x40, 0x60, 0xBD,
6684
6685 0x12, 0x41, 0x51, 0xBD,
6686 0x3A, 0x41, 0x61, 0xBD,
6687
6688 0xBF, 0x2F, 0x0E, 0xBD,
6689 0x97, 0xE2,
6690 0x7B, 0x72,
6691
6692 0x32, 0x20,
6693 0x22, 0x20,
6694 0x12, 0x20,
6695 0x3A, 0x20,
6696
6697 0x35, 0x48, 0xB1, 0xE8,
6698 0x3D, 0x59, 0xB1, 0xE8,
6699
6700 0x46, 0x31, 0x46, 0xBF,
6701 0x56, 0x31, 0x56, 0xBF,
6702
6703 0xB3, 0xE2, 0x2D, 0x9F,
6704 0x00, 0x80, 0x00, 0xE8,
6705
6706 0x66, 0x31, 0x66, 0xBF,
6707 0x47, 0x39, 0x47, 0xBF,
6708
6709 0x57, 0x39, 0x57, 0xBF,
6710 0x67, 0x39, 0x67, 0xBF,
6711
6712 0x76, 0x80, 0x07, 0xEA,
6713 0x24, 0x41, 0x20, 0xE9,
6714
6715 0x35, 0x00,
6716 0x3D, 0x00,
6717 0x00, 0xE0,
6718 0x2D, 0x73,
6719
6720 0x33, 0x72,
6721 0x0C, 0xE3,
6722 0x8D, 0x2F, 0x1E, 0xBD,
6723
6724 0x43, 0x75, 0xF8, 0xEC,
6725 0x35, 0x20,
6726 0x3D, 0x20,
6727
6728 0x43, 0x43, 0x2D, 0xDF,
6729 0x53, 0x53, 0x2D, 0xDF,
6730
6731 0xAE, 0x1E, 0x0E, 0xBD,
6732 0x58, 0xE3,
6733 0x33, 0x66,
6734
6735 0x48, 0x35, 0x48, 0xBF,
6736 0x58, 0x35, 0x58, 0xBF,
6737
6738 0x68, 0x35, 0x68, 0xBF,
6739 0x49, 0x3D, 0x49, 0xBF,
6740
6741 0x59, 0x3D, 0x59, 0xBF,
6742 0x69, 0x3D, 0x69, 0xBF,
6743
6744 0x63, 0x63, 0x2D, 0xDF,
6745 0x4D, 0x7D, 0xF8, 0xEC,
6746
6747 0x59, 0xE3,
6748 0x00, 0xE0,
6749 0xB8, 0x38, 0x33, 0xBF,
6750
6751 0x2D, 0x73,
6752 0x30, 0x76,
6753 0x18, 0x3A, 0x41, 0xE9,
6754
6755 0x3F, 0x53, 0xA0, 0xE8,
6756 0x05, 0x80, 0x3D, 0xEA,
6757
6758 0x37, 0x43, 0xA0, 0xE8,
6759 0x3D, 0x63, 0xA0, 0xE8,
6760
6761 0x50, 0x70, 0xF8, 0xEC,
6762 0x2B, 0x50, 0x3C, 0xE9,
6763
6764 0x1F, 0x0F, 0xBC, 0xE8,
6765 0x00, 0x80, 0x00, 0xE8,
6766
6767 0x59, 0x78, 0xF8, 0xEC,
6768 0x00, 0x80, 0x00, 0xE8,
6769
6770 0x15, 0xC0, 0x20, 0xE9,
6771 0x15, 0xC0, 0x20, 0xE9,
6772
6773 0x15, 0xC0, 0x20, 0xE9,
6774 0x15, 0xC0, 0x20, 0xE9,
6775
6776 0x1E, 0x12, 0x41, 0xE9,
6777 0x1A, 0x22, 0x41, 0xE9,
6778
6779 0x46, 0x37, 0x46, 0xDF,
6780 0x56, 0x3F, 0x56, 0xDF,
6781
6782 0x2B, 0x40, 0x3D, 0xE9,
6783 0x66, 0x3D, 0x66, 0xDF,
6784
6785 0x1D, 0x32, 0x41, 0xE9,
6786 0x67, 0x3D, 0x67, 0xDF,
6787
6788 0x47, 0x37, 0x47, 0xDF,
6789 0x57, 0x3F, 0x57, 0xDF,
6790
6791 0x2A, 0x40, 0x20, 0xE9,
6792 0x59, 0x3F, 0x59, 0xDF,
6793
6794 0x16, 0x30, 0x20, 0xE9,
6795 0x69, 0x3D, 0x69, 0xDF,
6796
6797 0x48, 0x37, 0x48, 0xDF,
6798 0x58, 0x3F, 0x58, 0xDF,
6799
6800 0x68, 0x3D, 0x68, 0xDF,
6801 0x49, 0x37, 0x49, 0xDF,
6802
6803 0x32, 0x32, 0x2D, 0xDF,
6804 0x22, 0x22, 0x2D, 0xDF,
6805
6806 0x12, 0x12, 0x2D, 0xDF,
6807 0x3A, 0x3A, 0x2D, 0xDF,
6808
6809 0x0F, 0xCF, 0x74, 0xC2,
6810 0x37, 0xCF, 0x74, 0xC4,
6811
6812 0x0A, 0x44, 0x54, 0xB0,
6813 0x02, 0x44, 0x64, 0xB0,
6814
6815 0x3D, 0xCF, 0x74, 0xC0,
6816 0x34, 0x37, 0x20, 0xE9,
6817
6818 0x31, 0x53, 0x2F, 0x9F,
6819 0x38, 0x0F, 0x20, 0xE9,
6820
6821 0x39, 0xE5, 0x2C, 0x9F,
6822 0x3C, 0x3D, 0x20, 0xE9,
6823
6824 0x2A, 0x44, 0x54, 0xB2,
6825 0x1A, 0x44, 0x64, 0xB2,
6826
6827 0x31, 0x80, 0x3A, 0xEA,
6828 0x0A, 0x20,
6829 0x02, 0x20,
6830
6831 0x0F, 0xCF, 0x75, 0xC0,
6832 0x2A, 0x20,
6833 0x1A, 0x20,
6834
6835 0x30, 0x50, 0x2E, 0x9F,
6836 0x32, 0x31, 0x5F, 0xE9,
6837
6838 0x38, 0x21, 0x2C, 0x9F,
6839 0x33, 0x39, 0x5F, 0xE9,
6840
6841 0x3D, 0xCF, 0x75, 0xC2,
6842 0x37, 0xCF, 0x75, 0xC4,
6843
6844 0x31, 0x53, 0x2F, 0x9F,
6845 0xA6, 0x0F, 0x20, 0xE9,
6846
6847 0x39, 0xE5, 0x2C, 0x9F,
6848 0xA3, 0x3D, 0x20, 0xE9,
6849
6850 0x2A, 0x44, 0x54, 0xB4,
6851 0x1A, 0x44, 0x64, 0xB4,
6852
6853 0x0A, 0x45, 0x55, 0xB0,
6854 0x02, 0x45, 0x65, 0xB0,
6855
6856 0x88, 0x73, 0x5E, 0xE9,
6857 0x2A, 0x20,
6858 0x1A, 0x20,
6859
6860 0xA0, 0x37, 0x20, 0xE9,
6861 0x0A, 0x20,
6862 0x02, 0x20,
6863
6864 0x31, 0x53, 0x2F, 0x9F,
6865 0x3E, 0x30, 0x4F, 0xE9,
6866
6867 0x39, 0xE5, 0x2C, 0x9F,
6868 0x3F, 0x38, 0x4F, 0xE9,
6869
6870 0x30, 0x50, 0x2E, 0x9F,
6871 0x3A, 0x31, 0x4F, 0xE9,
6872
6873 0x2A, 0x45, 0x55, 0xB2,
6874 0x1A, 0x45, 0x65, 0xB2,
6875
6876 0x0A, 0x45, 0x55, 0xB4,
6877 0x02, 0x45, 0x65, 0xB4,
6878
6879 0x38, 0x21, 0x2C, 0x9F,
6880 0x3B, 0x39, 0x4F, 0xE9,
6881
6882 0x2A, 0x20,
6883 0x1A, 0x20,
6884 0x0A, 0x20,
6885 0x02, 0x20,
6886
6887 0x2A, 0x46, 0x56, 0xBF,
6888 0x1A, 0x46, 0x66, 0xBF,
6889
6890 0x31, 0x53, 0x2F, 0x9F,
6891 0x36, 0x31, 0x4F, 0xE9,
6892
6893 0x39, 0xE5, 0x2C, 0x9F,
6894 0x37, 0x39, 0x4F, 0xE9,
6895
6896 0x30, 0x50, 0x2E, 0x9F,
6897 0xA7, 0x30, 0x4F, 0xE9,
6898
6899 0x38, 0x21, 0x2C, 0x9F,
6900 0xA8, 0x38, 0x4F, 0xE9,
6901
6902 0x0A, 0x47, 0x57, 0xBF,
6903 0x02, 0x47, 0x67, 0xBF,
6904
6905 0x31, 0x53, 0x2F, 0x9F,
6906 0xA4, 0x31, 0x4F, 0xE9,
6907
6908 0x39, 0xE5, 0x2C, 0x9F,
6909 0xA5, 0x39, 0x4F, 0xE9,
6910
6911 0x2A, 0x43, 0x53, 0xBF,
6912 0x1A, 0x43, 0x63, 0xBF,
6913
6914 0x30, 0x50, 0x2E, 0x9F,
6915 0xA1, 0x30, 0x4F, 0xE9,
6916
6917 0x38, 0x21, 0x2C, 0x9F,
6918 0xA2, 0x38, 0x4F, 0xE9,
6919
6920 0x0A, 0x48, 0x58, 0xBF,
6921 0x02, 0x48, 0x68, 0xBF,
6922
6923 0x31, 0x53, 0x2F, 0x9F,
6924 0x80, 0x31, 0x57, 0xE9,
6925
6926 0x39, 0xE5, 0x2C, 0x9F,
6927 0x81, 0x39, 0x57, 0xE9,
6928
6929 0x2A, 0x49, 0x59, 0xBF,
6930 0x1A, 0x49, 0x69, 0xBF,
6931
6932 0x30, 0x50, 0x2E, 0x9F,
6933 0x82, 0x30, 0x57, 0xE9,
6934
6935 0x38, 0x21, 0x2C, 0x9F,
6936 0x83, 0x38, 0x57, 0xE9,
6937
6938 0x31, 0x53, 0x2F, 0x9F,
6939 0x84, 0x31, 0x5E, 0xE9,
6940
6941 0x39, 0xE5, 0x2C, 0x9F,
6942 0x85, 0x39, 0x5E, 0xE9,
6943
6944 0x86, 0x76, 0x57, 0xE9,
6945 0x8A, 0x36, 0x20, 0xE9,
6946
6947 0x87, 0x77, 0x57, 0xE9,
6948 0x8B, 0x3E, 0xBF, 0xEA,
6949
6950 0x80, 0x30, 0x57, 0xE9,
6951 0x81, 0x38, 0x57, 0xE9,
6952
6953 0x82, 0x31, 0x57, 0xE9,
6954 0x86, 0x78, 0x57, 0xE9,
6955
6956 0x83, 0x39, 0x57, 0xE9,
6957 0x87, 0x79, 0x57, 0xE9,
6958
6959 0x30, 0x1F, 0x5F, 0xE9,
6960 0x8A, 0x34, 0x20, 0xE9,
6961
6962 0x8B, 0x3C, 0x20, 0xE9,
6963 0x37, 0x50, 0x60, 0xBD,
6964
6965 0x57, 0x0D, 0x20, 0xE9,
6966 0x35, 0x51, 0x61, 0xBD,
6967
6968 0x2B, 0x50, 0x20, 0xE9,
6969 0x1D, 0x37, 0xE1, 0xEA,
6970
6971 0x1E, 0x35, 0xE1, 0xEA,
6972 0x00, 0xE0,
6973 0x0E, 0x77,
6974
6975 0x24, 0x51, 0x20, 0xE9,
6976 0x92, 0xFF, 0x20, 0xEA,
6977
6978 0x16, 0x0E, 0x20, 0xE9,
6979 0x57, 0x2E, 0xBF, 0xEA,
6980
6981 0x0B, 0x46, 0xA0, 0xE8,
6982 0x1B, 0x56, 0xA0, 0xE8,
6983
6984 0x2B, 0x66, 0xA0, 0xE8,
6985 0x0C, 0x47, 0xA0, 0xE8,
6986
6987 0x1C, 0x57, 0xA0, 0xE8,
6988 0x2C, 0x67, 0xA0, 0xE8,
6989
6990 0x0B, 0x00,
6991 0x1B, 0x00,
6992 0x2B, 0x00,
6993 0x00, 0xE0,
6994
6995 0x0C, 0x00,
6996 0x1C, 0x00,
6997 0x2C, 0x00,
6998 0x00, 0xE0,
6999
7000 0x0B, 0x65,
7001 0x1B, 0x65,
7002 0x2B, 0x65,
7003 0x00, 0xE0,
7004
7005 0x0C, 0x65,
7006 0x1C, 0x65,
7007 0x2C, 0x65,
7008 0x00, 0xE0,
7009
7010 0x0B, 0x1B, 0x60, 0xEC,
7011 0x36, 0xD7, 0x36, 0xAD,
7012
7013 0x2B, 0x80, 0x60, 0xEC,
7014 0x0C, 0x1C, 0x60, 0xEC,
7015
7016 0x3E, 0xD7, 0x3E, 0xAD,
7017 0x2C, 0x80, 0x60, 0xEC,
7018
7019 0x0B, 0x2B, 0xDE, 0xE8,
7020 0x1B, 0x80, 0xDE, 0xE8,
7021
7022 0x36, 0x80, 0x36, 0xBD,
7023 0x3E, 0x80, 0x3E, 0xBD,
7024
7025 0x33, 0xD7, 0x0B, 0xBD,
7026 0x3B, 0xD7, 0x1B, 0xBD,
7027
7028 0x46, 0x80, 0x46, 0xCF,
7029 0x57, 0x80, 0x57, 0xCF,
7030
7031 0x66, 0x33, 0x66, 0xCF,
7032 0x47, 0x3B, 0x47, 0xCF,
7033
7034 0x56, 0x33, 0x56, 0xCF,
7035 0x67, 0x3B, 0x67, 0xCF,
7036
7037 0x0B, 0x48, 0xA0, 0xE8,
7038 0x1B, 0x58, 0xA0, 0xE8,
7039
7040 0x2B, 0x68, 0xA0, 0xE8,
7041 0x0C, 0x49, 0xA0, 0xE8,
7042
7043 0x1C, 0x59, 0xA0, 0xE8,
7044 0x2C, 0x69, 0xA0, 0xE8,
7045
7046 0x0B, 0x00,
7047 0x1B, 0x00,
7048 0x2B, 0x00,
7049 0x00, 0xE0,
7050
7051 0x0C, 0x00,
7052 0x1C, 0x00,
7053 0x2C, 0x00,
7054 0x00, 0xE0,
7055
7056 0x0B, 0x65,
7057 0x1B, 0x65,
7058 0x2B, 0x65,
7059 0x00, 0xE0,
7060
7061 0x0C, 0x65,
7062 0x1C, 0x65,
7063 0x2C, 0x65,
7064 0x00, 0xE0,
7065
7066 0x0B, 0x1B, 0x60, 0xEC,
7067 0x34, 0xD7, 0x34, 0xAD,
7068
7069 0x2B, 0x80, 0x60, 0xEC,
7070 0x0C, 0x1C, 0x60, 0xEC,
7071
7072 0x3C, 0xD7, 0x3C, 0xAD,
7073 0x2C, 0x80, 0x60, 0xEC,
7074
7075 0x0B, 0x2B, 0xDE, 0xE8,
7076 0x1B, 0x80, 0xDE, 0xE8,
7077
7078 0x34, 0x80, 0x34, 0xBD,
7079 0x3C, 0x80, 0x3C, 0xBD,
7080
7081 0x33, 0xD7, 0x0B, 0xBD,
7082 0x3B, 0xD7, 0x1B, 0xBD,
7083
7084 0x48, 0x80, 0x48, 0xCF,
7085 0x59, 0x80, 0x59, 0xCF,
7086
7087 0x68, 0x33, 0x68, 0xCF,
7088 0x49, 0x3B, 0x49, 0xCF,
7089
7090 0xB2, 0xFF, 0x20, 0xEA,
7091 0x00, 0x80, 0x00, 0xE8,
7092
7093 0x58, 0x33, 0x58, 0xCF,
7094 0x69, 0x3B, 0x69, 0xCF,
7095
7096 0x70, 0xFF, 0x20, 0xEA,
7097 0x57, 0xC0, 0xBF, 0xEA,
7098
7099 0x00, 0x80, 0xA0, 0xE9,
7100 0x00, 0x00, 0xD8, 0xEC,
7101
7102};
7103
7104static unsigned char warp_g400_t2gzsa[] = {
7105
7106 0x00, 0x8A, 0x98, 0xE9,
7107 0x00, 0x80, 0x00, 0xE8,
7108
7109 0x00, 0x80, 0xA0, 0xE9,
7110 0x00, 0x00, 0xD8, 0xEC,
7111
7112 0xFF, 0x80, 0xC0, 0xE9,
7113 0x00, 0x80, 0x00, 0xE8,
7114
7115 0x0A, 0x40, 0x50, 0xBF,
7116 0x2A, 0x40, 0x60, 0xBF,
7117
7118 0x32, 0x41, 0x51, 0xBF,
7119 0x3A, 0x41, 0x61, 0xBF,
7120
7121 0xC3, 0x6B,
7122 0xD3, 0x6B,
7123 0x00, 0x8A, 0x98, 0xE9,
7124
7125 0x73, 0x7B, 0xC8, 0xEC,
7126 0x96, 0xE2,
7127 0x41, 0x04,
7128
7129 0x7B, 0x43, 0xA0, 0xE8,
7130 0x73, 0x53, 0xA0, 0xE8,
7131
7132 0xAD, 0xEE, 0x23, 0x9F,
7133 0x00, 0xE0,
7134 0x51, 0x04,
7135
7136 0x90, 0xE2,
7137 0x61, 0x04,
7138 0x31, 0x46, 0xB1, 0xE8,
7139
7140 0x51, 0x41, 0xE0, 0xEC,
7141 0x39, 0x67, 0xB1, 0xE8,
7142
7143 0x00, 0x04,
7144 0x46, 0xE2,
7145 0x73, 0x63, 0xA0, 0xE8,
7146
7147 0x61, 0x41, 0xE0, 0xEC,
7148 0x31, 0x00,
7149 0x39, 0x00,
7150
7151 0x8A, 0x80, 0x15, 0xEA,
7152 0x10, 0x04,
7153 0x20, 0x04,
7154
7155 0x61, 0x51, 0xE0, 0xEC,
7156 0x2F, 0x41, 0x60, 0xEA,
7157
7158 0x31, 0x20,
7159 0x39, 0x20,
7160 0x1F, 0x42, 0xA0, 0xE8,
7161
7162 0x2A, 0x42, 0x52, 0xBF,
7163 0x0F, 0x52, 0xA0, 0xE8,
7164
7165 0x1A, 0x42, 0x62, 0xBF,
7166 0x1E, 0x51, 0x60, 0xEA,
7167
7168 0x73, 0x7B, 0xC8, 0xEC,
7169 0x0E, 0x61, 0x60, 0xEA,
7170
7171 0x32, 0x40, 0x50, 0xBD,
7172 0x22, 0x40, 0x60, 0xBD,
7173
7174 0x12, 0x41, 0x51, 0xBD,
7175 0x3A, 0x41, 0x61, 0xBD,
7176
7177 0xBF, 0x2F, 0x0E, 0xBD,
7178 0x97, 0xE2,
7179 0x7B, 0x72,
7180
7181 0x32, 0x20,
7182 0x22, 0x20,
7183 0x12, 0x20,
7184 0x3A, 0x20,
7185
7186 0x35, 0x48, 0xB1, 0xE8,
7187 0x3D, 0x59, 0xB1, 0xE8,
7188
7189 0x46, 0x31, 0x46, 0xBF,
7190 0x56, 0x31, 0x56, 0xBF,
7191
7192 0xB3, 0xE2, 0x2D, 0x9F,
7193 0x00, 0x80, 0x00, 0xE8,
7194
7195 0x66, 0x31, 0x66, 0xBF,
7196 0x47, 0x39, 0x47, 0xBF,
7197
7198 0x57, 0x39, 0x57, 0xBF,
7199 0x67, 0x39, 0x67, 0xBF,
7200
7201 0x7B, 0x80, 0x07, 0xEA,
7202 0x24, 0x41, 0x20, 0xE9,
7203
7204 0x35, 0x00,
7205 0x3D, 0x00,
7206 0x00, 0xE0,
7207 0x2D, 0x73,
7208
7209 0x33, 0x72,
7210 0x0C, 0xE3,
7211 0x8D, 0x2F, 0x1E, 0xBD,
7212
7213 0x43, 0x75, 0xF8, 0xEC,
7214 0x35, 0x20,
7215 0x3D, 0x20,
7216
7217 0x43, 0x43, 0x2D, 0xDF,
7218 0x53, 0x53, 0x2D, 0xDF,
7219
7220 0xAE, 0x1E, 0x0E, 0xBD,
7221 0x58, 0xE3,
7222 0x33, 0x66,
7223
7224 0x48, 0x35, 0x48, 0xBF,
7225 0x58, 0x35, 0x58, 0xBF,
7226
7227 0x68, 0x35, 0x68, 0xBF,
7228 0x49, 0x3D, 0x49, 0xBF,
7229
7230 0x59, 0x3D, 0x59, 0xBF,
7231 0x69, 0x3D, 0x69, 0xBF,
7232
7233 0x63, 0x63, 0x2D, 0xDF,
7234 0x4D, 0x7D, 0xF8, 0xEC,
7235
7236 0x59, 0xE3,
7237 0x00, 0xE0,
7238 0xB8, 0x38, 0x33, 0xBF,
7239
7240 0x2D, 0x73,
7241 0x30, 0x76,
7242 0x18, 0x3A, 0x41, 0xE9,
7243
7244 0x3F, 0x53, 0xA0, 0xE8,
7245 0x05, 0x80, 0x3D, 0xEA,
7246
7247 0x37, 0x43, 0xA0, 0xE8,
7248 0x3D, 0x63, 0xA0, 0xE8,
7249
7250 0x50, 0x70, 0xF8, 0xEC,
7251 0x2B, 0x50, 0x3C, 0xE9,
7252
7253 0x1F, 0x0F, 0xBC, 0xE8,
7254 0x00, 0x80, 0x00, 0xE8,
7255
7256 0x59, 0x78, 0xF8, 0xEC,
7257 0x00, 0x80, 0x00, 0xE8,
7258
7259 0x15, 0xC0, 0x20, 0xE9,
7260 0x15, 0xC0, 0x20, 0xE9,
7261
7262 0x15, 0xC0, 0x20, 0xE9,
7263 0x15, 0xC0, 0x20, 0xE9,
7264
7265 0x1E, 0x12, 0x41, 0xE9,
7266 0x1A, 0x22, 0x41, 0xE9,
7267
7268 0x46, 0x37, 0x46, 0xDF,
7269 0x56, 0x3F, 0x56, 0xDF,
7270
7271 0x2B, 0x40, 0x3D, 0xE9,
7272 0x66, 0x3D, 0x66, 0xDF,
7273
7274 0x1D, 0x32, 0x41, 0xE9,
7275 0x67, 0x3D, 0x67, 0xDF,
7276
7277 0x47, 0x37, 0x47, 0xDF,
7278 0x57, 0x3F, 0x57, 0xDF,
7279
7280 0x2A, 0x40, 0x20, 0xE9,
7281 0x59, 0x3F, 0x59, 0xDF,
7282
7283 0x16, 0x30, 0x20, 0xE9,
7284 0x69, 0x3D, 0x69, 0xDF,
7285
7286 0x48, 0x37, 0x48, 0xDF,
7287 0x58, 0x3F, 0x58, 0xDF,
7288
7289 0x68, 0x3D, 0x68, 0xDF,
7290 0x49, 0x37, 0x49, 0xDF,
7291
7292 0x32, 0x32, 0x2D, 0xDF,
7293 0x22, 0x22, 0x2D, 0xDF,
7294
7295 0x12, 0x12, 0x2D, 0xDF,
7296 0x3A, 0x3A, 0x2D, 0xDF,
7297
7298 0x0F, 0xCF, 0x74, 0xC2,
7299 0x37, 0xCF, 0x74, 0xC4,
7300
7301 0x0A, 0x44, 0x54, 0xB0,
7302 0x02, 0x44, 0x64, 0xB0,
7303
7304 0x3D, 0xCF, 0x74, 0xC0,
7305 0x34, 0x37, 0x20, 0xE9,
7306
7307 0x31, 0x53, 0x2F, 0x9F,
7308 0x38, 0x0F, 0x20, 0xE9,
7309
7310 0x39, 0xE5, 0x2C, 0x9F,
7311 0x3C, 0x3D, 0x20, 0xE9,
7312
7313 0x2A, 0x44, 0x54, 0xB2,
7314 0x1A, 0x44, 0x64, 0xB2,
7315
7316 0x36, 0x80, 0x3A, 0xEA,
7317 0x0A, 0x20,
7318 0x02, 0x20,
7319
7320 0x0F, 0xCF, 0x75, 0xC0,
7321 0x2A, 0x20,
7322 0x1A, 0x20,
7323
7324 0x30, 0x50, 0x2E, 0x9F,
7325 0x32, 0x31, 0x5F, 0xE9,
7326
7327 0x38, 0x21, 0x2C, 0x9F,
7328 0x33, 0x39, 0x5F, 0xE9,
7329
7330 0x3D, 0xCF, 0x75, 0xC2,
7331 0x37, 0xCF, 0x75, 0xC4,
7332
7333 0x31, 0x53, 0x2F, 0x9F,
7334 0xA6, 0x0F, 0x20, 0xE9,
7335
7336 0x39, 0xE5, 0x2C, 0x9F,
7337 0xA3, 0x3D, 0x20, 0xE9,
7338
7339 0x2A, 0x44, 0x54, 0xB4,
7340 0x1A, 0x44, 0x64, 0xB4,
7341
7342 0x0A, 0x45, 0x55, 0xB0,
7343 0x02, 0x45, 0x65, 0xB0,
7344
7345 0x88, 0x73, 0x5E, 0xE9,
7346 0x2A, 0x20,
7347 0x1A, 0x20,
7348
7349 0xA0, 0x37, 0x20, 0xE9,
7350 0x0A, 0x20,
7351 0x02, 0x20,
7352
7353 0x31, 0x53, 0x2F, 0x9F,
7354 0x3E, 0x30, 0x4F, 0xE9,
7355
7356 0x39, 0xE5, 0x2C, 0x9F,
7357 0x3F, 0x38, 0x4F, 0xE9,
7358
7359 0x30, 0x50, 0x2E, 0x9F,
7360 0x3A, 0x31, 0x4F, 0xE9,
7361
7362 0x38, 0x21, 0x2C, 0x9F,
7363 0x3B, 0x39, 0x4F, 0xE9,
7364
7365 0x2A, 0x45, 0x55, 0xB2,
7366 0x1A, 0x45, 0x65, 0xB2,
7367
7368 0x0A, 0x45, 0x55, 0xB4,
7369 0x02, 0x45, 0x65, 0xB4,
7370
7371 0x0F, 0xCF, 0x74, 0xC6,
7372 0x2A, 0x20,
7373 0x1A, 0x20,
7374
7375 0xA7, 0x30, 0x4F, 0xE9,
7376 0x0A, 0x20,
7377 0x02, 0x20,
7378
7379 0x31, 0x53, 0x2F, 0x9F,
7380 0x9C, 0x0F, 0x20, 0xE9,
7381
7382 0x39, 0xE5, 0x2C, 0x9F,
7383 0xA8, 0x38, 0x4F, 0xE9,
7384
7385 0x2A, 0x44, 0x54, 0xB6,
7386 0x1A, 0x44, 0x64, 0xB6,
7387
7388 0x30, 0x50, 0x2E, 0x9F,
7389 0x36, 0x31, 0x4F, 0xE9,
7390
7391 0x38, 0x21, 0x2C, 0x9F,
7392 0x37, 0x39, 0x4F, 0xE9,
7393
7394 0x00, 0x80, 0x00, 0xE8,
7395 0x2A, 0x20,
7396 0x1A, 0x20,
7397
7398 0x2A, 0x46, 0x56, 0xBF,
7399 0x1A, 0x46, 0x66, 0xBF,
7400
7401 0x31, 0x53, 0x2F, 0x9F,
7402 0xA4, 0x31, 0x4F, 0xE9,
7403
7404 0x39, 0xE5, 0x2C, 0x9F,
7405 0xA5, 0x39, 0x4F, 0xE9,
7406
7407 0x0A, 0x47, 0x57, 0xBF,
7408 0x02, 0x47, 0x67, 0xBF,
7409
7410 0x31, 0x53, 0x2F, 0x9F,
7411 0xA1, 0x30, 0x4F, 0xE9,
7412
7413 0x39, 0xE5, 0x2C, 0x9F,
7414 0xA2, 0x38, 0x4F, 0xE9,
7415
7416 0x2A, 0x43, 0x53, 0xBF,
7417 0x1A, 0x43, 0x63, 0xBF,
7418
7419 0x30, 0x50, 0x2E, 0x9F,
7420 0x9D, 0x31, 0x4F, 0xE9,
7421
7422 0x38, 0x21, 0x2C, 0x9F,
7423 0x9E, 0x39, 0x4F, 0xE9,
7424
7425 0x0A, 0x48, 0x58, 0xBF,
7426 0x02, 0x48, 0x68, 0xBF,
7427
7428 0x31, 0x53, 0x2F, 0x9F,
7429 0x80, 0x31, 0x57, 0xE9,
7430
7431 0x39, 0xE5, 0x2C, 0x9F,
7432 0x81, 0x39, 0x57, 0xE9,
7433
7434 0x2A, 0x49, 0x59, 0xBF,
7435 0x1A, 0x49, 0x69, 0xBF,
7436
7437 0x30, 0x50, 0x2E, 0x9F,
7438 0x82, 0x30, 0x57, 0xE9,
7439
7440 0x38, 0x21, 0x2C, 0x9F,
7441 0x83, 0x38, 0x57, 0xE9,
7442
7443 0x31, 0x53, 0x2F, 0x9F,
7444 0x84, 0x31, 0x5E, 0xE9,
7445
7446 0x39, 0xE5, 0x2C, 0x9F,
7447 0x85, 0x39, 0x5E, 0xE9,
7448
7449 0x86, 0x76, 0x57, 0xE9,
7450 0x8A, 0x36, 0x20, 0xE9,
7451
7452 0x87, 0x77, 0x57, 0xE9,
7453 0x8B, 0x3E, 0xBF, 0xEA,
7454
7455 0x80, 0x30, 0x57, 0xE9,
7456 0x81, 0x38, 0x57, 0xE9,
7457
7458 0x82, 0x31, 0x57, 0xE9,
7459 0x86, 0x78, 0x57, 0xE9,
7460
7461 0x83, 0x39, 0x57, 0xE9,
7462 0x87, 0x79, 0x57, 0xE9,
7463
7464 0x30, 0x1F, 0x5F, 0xE9,
7465 0x8A, 0x34, 0x20, 0xE9,
7466
7467 0x8B, 0x3C, 0x20, 0xE9,
7468 0x37, 0x50, 0x60, 0xBD,
7469
7470 0x57, 0x0D, 0x20, 0xE9,
7471 0x35, 0x51, 0x61, 0xBD,
7472
7473 0x2B, 0x50, 0x20, 0xE9,
7474 0x1D, 0x37, 0xE1, 0xEA,
7475
7476 0x1E, 0x35, 0xE1, 0xEA,
7477 0x00, 0xE0,
7478 0x0E, 0x77,
7479
7480 0x24, 0x51, 0x20, 0xE9,
7481 0x8D, 0xFF, 0x20, 0xEA,
7482
7483 0x16, 0x0E, 0x20, 0xE9,
7484 0x57, 0x2E, 0xBF, 0xEA,
7485
7486 0x0B, 0x46, 0xA0, 0xE8,
7487 0x1B, 0x56, 0xA0, 0xE8,
7488
7489 0x2B, 0x66, 0xA0, 0xE8,
7490 0x0C, 0x47, 0xA0, 0xE8,
7491
7492 0x1C, 0x57, 0xA0, 0xE8,
7493 0x2C, 0x67, 0xA0, 0xE8,
7494
7495 0x0B, 0x00,
7496 0x1B, 0x00,
7497 0x2B, 0x00,
7498 0x00, 0xE0,
7499
7500 0x0C, 0x00,
7501 0x1C, 0x00,
7502 0x2C, 0x00,
7503 0x00, 0xE0,
7504
7505 0x0B, 0x65,
7506 0x1B, 0x65,
7507 0x2B, 0x65,
7508 0x00, 0xE0,
7509
7510 0x0C, 0x65,
7511 0x1C, 0x65,
7512 0x2C, 0x65,
7513 0x00, 0xE0,
7514
7515 0x0B, 0x1B, 0x60, 0xEC,
7516 0x36, 0xD7, 0x36, 0xAD,
7517
7518 0x2B, 0x80, 0x60, 0xEC,
7519 0x0C, 0x1C, 0x60, 0xEC,
7520
7521 0x3E, 0xD7, 0x3E, 0xAD,
7522 0x2C, 0x80, 0x60, 0xEC,
7523
7524 0x0B, 0x2B, 0xDE, 0xE8,
7525 0x1B, 0x80, 0xDE, 0xE8,
7526
7527 0x36, 0x80, 0x36, 0xBD,
7528 0x3E, 0x80, 0x3E, 0xBD,
7529
7530 0x33, 0xD7, 0x0B, 0xBD,
7531 0x3B, 0xD7, 0x1B, 0xBD,
7532
7533 0x46, 0x80, 0x46, 0xCF,
7534 0x57, 0x80, 0x57, 0xCF,
7535
7536 0x66, 0x33, 0x66, 0xCF,
7537 0x47, 0x3B, 0x47, 0xCF,
7538
7539 0x56, 0x33, 0x56, 0xCF,
7540 0x67, 0x3B, 0x67, 0xCF,
7541
7542 0x0B, 0x48, 0xA0, 0xE8,
7543 0x1B, 0x58, 0xA0, 0xE8,
7544
7545 0x2B, 0x68, 0xA0, 0xE8,
7546 0x0C, 0x49, 0xA0, 0xE8,
7547
7548 0x1C, 0x59, 0xA0, 0xE8,
7549 0x2C, 0x69, 0xA0, 0xE8,
7550
7551 0x0B, 0x00,
7552 0x1B, 0x00,
7553 0x2B, 0x00,
7554 0x00, 0xE0,
7555
7556 0x0C, 0x00,
7557 0x1C, 0x00,
7558 0x2C, 0x00,
7559 0x00, 0xE0,
7560
7561 0x0B, 0x65,
7562 0x1B, 0x65,
7563 0x2B, 0x65,
7564 0x00, 0xE0,
7565
7566 0x0C, 0x65,
7567 0x1C, 0x65,
7568 0x2C, 0x65,
7569 0x00, 0xE0,
7570
7571 0x0B, 0x1B, 0x60, 0xEC,
7572 0x34, 0xD7, 0x34, 0xAD,
7573
7574 0x2B, 0x80, 0x60, 0xEC,
7575 0x0C, 0x1C, 0x60, 0xEC,
7576
7577 0x3C, 0xD7, 0x3C, 0xAD,
7578 0x2C, 0x80, 0x60, 0xEC,
7579
7580 0x0B, 0x2B, 0xDE, 0xE8,
7581 0x1B, 0x80, 0xDE, 0xE8,
7582
7583 0x34, 0x80, 0x34, 0xBD,
7584 0x3C, 0x80, 0x3C, 0xBD,
7585
7586 0x33, 0xD7, 0x0B, 0xBD,
7587 0x3B, 0xD7, 0x1B, 0xBD,
7588
7589 0x48, 0x80, 0x48, 0xCF,
7590 0x59, 0x80, 0x59, 0xCF,
7591
7592 0x68, 0x33, 0x68, 0xCF,
7593 0x49, 0x3B, 0x49, 0xCF,
7594
7595 0xAD, 0xFF, 0x20, 0xEA,
7596 0x00, 0x80, 0x00, 0xE8,
7597
7598 0x58, 0x33, 0x58, 0xCF,
7599 0x69, 0x3B, 0x69, 0xCF,
7600
7601 0x6B, 0xFF, 0x20, 0xEA,
7602 0x57, 0xC0, 0xBF, 0xEA,
7603
7604 0x00, 0x80, 0xA0, 0xE9,
7605 0x00, 0x00, 0xD8, 0xEC,
7606
7607};
7608
7609static unsigned char warp_g400_t2gzsaf[] = {
7610
7611 0x00, 0x8A, 0x98, 0xE9,
7612 0x00, 0x80, 0x00, 0xE8,
7613
7614 0x00, 0x80, 0xA0, 0xE9,
7615 0x00, 0x00, 0xD8, 0xEC,
7616
7617 0xFF, 0x80, 0xC0, 0xE9,
7618 0x00, 0x80, 0x00, 0xE8,
7619
7620 0x0A, 0x40, 0x50, 0xBF,
7621 0x2A, 0x40, 0x60, 0xBF,
7622
7623 0x32, 0x41, 0x51, 0xBF,
7624 0x3A, 0x41, 0x61, 0xBF,
7625
7626 0xC3, 0x6B,
7627 0xD3, 0x6B,
7628 0x00, 0x8A, 0x98, 0xE9,
7629
7630 0x73, 0x7B, 0xC8, 0xEC,
7631 0x96, 0xE2,
7632 0x41, 0x04,
7633
7634 0x7B, 0x43, 0xA0, 0xE8,
7635 0x73, 0x53, 0xA0, 0xE8,
7636
7637 0xAD, 0xEE, 0x23, 0x9F,
7638 0x00, 0xE0,
7639 0x51, 0x04,
7640
7641 0x90, 0xE2,
7642 0x61, 0x04,
7643 0x31, 0x46, 0xB1, 0xE8,
7644
7645 0x51, 0x41, 0xE0, 0xEC,
7646 0x39, 0x67, 0xB1, 0xE8,
7647
7648 0x00, 0x04,
7649 0x46, 0xE2,
7650 0x73, 0x63, 0xA0, 0xE8,
7651
7652 0x61, 0x41, 0xE0, 0xEC,
7653 0x31, 0x00,
7654 0x39, 0x00,
7655
7656 0x8E, 0x80, 0x15, 0xEA,
7657 0x10, 0x04,
7658 0x20, 0x04,
7659
7660 0x61, 0x51, 0xE0, 0xEC,
7661 0x2F, 0x41, 0x60, 0xEA,
7662
7663 0x31, 0x20,
7664 0x39, 0x20,
7665 0x1F, 0x42, 0xA0, 0xE8,
7666
7667 0x2A, 0x42, 0x52, 0xBF,
7668 0x0F, 0x52, 0xA0, 0xE8,
7669
7670 0x1A, 0x42, 0x62, 0xBF,
7671 0x1E, 0x51, 0x60, 0xEA,
7672
7673 0x73, 0x7B, 0xC8, 0xEC,
7674 0x0E, 0x61, 0x60, 0xEA,
7675
7676 0x32, 0x40, 0x50, 0xBD,
7677 0x22, 0x40, 0x60, 0xBD,
7678
7679 0x12, 0x41, 0x51, 0xBD,
7680 0x3A, 0x41, 0x61, 0xBD,
7681
7682 0xBF, 0x2F, 0x0E, 0xBD,
7683 0x97, 0xE2,
7684 0x7B, 0x72,
7685
7686 0x32, 0x20,
7687 0x22, 0x20,
7688 0x12, 0x20,
7689 0x3A, 0x20,
7690
7691 0x35, 0x48, 0xB1, 0xE8,
7692 0x3D, 0x59, 0xB1, 0xE8,
7693
7694 0x46, 0x31, 0x46, 0xBF,
7695 0x56, 0x31, 0x56, 0xBF,
7696
7697 0xB3, 0xE2, 0x2D, 0x9F,
7698 0x00, 0x80, 0x00, 0xE8,
7699
7700 0x66, 0x31, 0x66, 0xBF,
7701 0x47, 0x39, 0x47, 0xBF,
7702
7703 0x57, 0x39, 0x57, 0xBF,
7704 0x67, 0x39, 0x67, 0xBF,
7705
7706 0x7F, 0x80, 0x07, 0xEA,
7707 0x24, 0x41, 0x20, 0xE9,
7708
7709 0x35, 0x00,
7710 0x3D, 0x00,
7711 0x00, 0xE0,
7712 0x2D, 0x73,
7713
7714 0x33, 0x72,
7715 0x0C, 0xE3,
7716 0x8D, 0x2F, 0x1E, 0xBD,
7717
7718 0x43, 0x75, 0xF8, 0xEC,
7719 0x35, 0x20,
7720 0x3D, 0x20,
7721
7722 0x43, 0x43, 0x2D, 0xDF,
7723 0x53, 0x53, 0x2D, 0xDF,
7724
7725 0xAE, 0x1E, 0x0E, 0xBD,
7726 0x58, 0xE3,
7727 0x33, 0x66,
7728
7729 0x48, 0x35, 0x48, 0xBF,
7730 0x58, 0x35, 0x58, 0xBF,
7731
7732 0x68, 0x35, 0x68, 0xBF,
7733 0x49, 0x3D, 0x49, 0xBF,
7734
7735 0x59, 0x3D, 0x59, 0xBF,
7736 0x69, 0x3D, 0x69, 0xBF,
7737
7738 0x63, 0x63, 0x2D, 0xDF,
7739 0x4D, 0x7D, 0xF8, 0xEC,
7740
7741 0x59, 0xE3,
7742 0x00, 0xE0,
7743 0xB8, 0x38, 0x33, 0xBF,
7744
7745 0x2D, 0x73,
7746 0x30, 0x76,
7747 0x18, 0x3A, 0x41, 0xE9,
7748
7749 0x3F, 0x53, 0xA0, 0xE8,
7750 0x05, 0x80, 0x3D, 0xEA,
7751
7752 0x37, 0x43, 0xA0, 0xE8,
7753 0x3D, 0x63, 0xA0, 0xE8,
7754
7755 0x50, 0x70, 0xF8, 0xEC,
7756 0x2B, 0x50, 0x3C, 0xE9,
7757
7758 0x1F, 0x0F, 0xBC, 0xE8,
7759 0x00, 0x80, 0x00, 0xE8,
7760
7761 0x59, 0x78, 0xF8, 0xEC,
7762 0x00, 0x80, 0x00, 0xE8,
7763
7764 0x15, 0xC0, 0x20, 0xE9,
7765 0x15, 0xC0, 0x20, 0xE9,
7766
7767 0x15, 0xC0, 0x20, 0xE9,
7768 0x15, 0xC0, 0x20, 0xE9,
7769
7770 0x1E, 0x12, 0x41, 0xE9,
7771 0x1A, 0x22, 0x41, 0xE9,
7772
7773 0x46, 0x37, 0x46, 0xDF,
7774 0x56, 0x3F, 0x56, 0xDF,
7775
7776 0x2B, 0x40, 0x3D, 0xE9,
7777 0x66, 0x3D, 0x66, 0xDF,
7778
7779 0x1D, 0x32, 0x41, 0xE9,
7780 0x67, 0x3D, 0x67, 0xDF,
7781
7782 0x47, 0x37, 0x47, 0xDF,
7783 0x57, 0x3F, 0x57, 0xDF,
7784
7785 0x2A, 0x40, 0x20, 0xE9,
7786 0x59, 0x3F, 0x59, 0xDF,
7787
7788 0x16, 0x30, 0x20, 0xE9,
7789 0x69, 0x3D, 0x69, 0xDF,
7790
7791 0x48, 0x37, 0x48, 0xDF,
7792 0x58, 0x3F, 0x58, 0xDF,
7793
7794 0x68, 0x3D, 0x68, 0xDF,
7795 0x49, 0x37, 0x49, 0xDF,
7796
7797 0x32, 0x32, 0x2D, 0xDF,
7798 0x22, 0x22, 0x2D, 0xDF,
7799
7800 0x12, 0x12, 0x2D, 0xDF,
7801 0x3A, 0x3A, 0x2D, 0xDF,
7802
7803 0x0F, 0xCF, 0x74, 0xC2,
7804 0x37, 0xCF, 0x74, 0xC4,
7805
7806 0x0A, 0x44, 0x54, 0xB0,
7807 0x02, 0x44, 0x64, 0xB0,
7808
7809 0x3D, 0xCF, 0x74, 0xC0,
7810 0x34, 0x37, 0x20, 0xE9,
7811
7812 0x31, 0x53, 0x2F, 0x9F,
7813 0x38, 0x0F, 0x20, 0xE9,
7814
7815 0x39, 0xE5, 0x2C, 0x9F,
7816 0x3C, 0x3D, 0x20, 0xE9,
7817
7818 0x2A, 0x44, 0x54, 0xB2,
7819 0x1A, 0x44, 0x64, 0xB2,
7820
7821 0x3A, 0x80, 0x3A, 0xEA,
7822 0x0A, 0x20,
7823 0x02, 0x20,
7824
7825 0x0F, 0xCF, 0x75, 0xC0,
7826 0x2A, 0x20,
7827 0x1A, 0x20,
7828
7829 0x30, 0x50, 0x2E, 0x9F,
7830 0x32, 0x31, 0x5F, 0xE9,
7831
7832 0x38, 0x21, 0x2C, 0x9F,
7833 0x33, 0x39, 0x5F, 0xE9,
7834
7835 0x3D, 0xCF, 0x75, 0xC2,
7836 0x37, 0xCF, 0x75, 0xC4,
7837
7838 0x31, 0x53, 0x2F, 0x9F,
7839 0xA6, 0x0F, 0x20, 0xE9,
7840
7841 0x39, 0xE5, 0x2C, 0x9F,
7842 0xA3, 0x3D, 0x20, 0xE9,
7843
7844 0x2A, 0x44, 0x54, 0xB4,
7845 0x1A, 0x44, 0x64, 0xB4,
7846
7847 0x0A, 0x45, 0x55, 0xB0,
7848 0x02, 0x45, 0x65, 0xB0,
7849
7850 0x88, 0x73, 0x5E, 0xE9,
7851 0x2A, 0x20,
7852 0x1A, 0x20,
7853
7854 0xA0, 0x37, 0x20, 0xE9,
7855 0x0A, 0x20,
7856 0x02, 0x20,
7857
7858 0x31, 0x53, 0x2F, 0x9F,
7859 0x3E, 0x30, 0x4F, 0xE9,
7860
7861 0x39, 0xE5, 0x2C, 0x9F,
7862 0x3F, 0x38, 0x4F, 0xE9,
7863
7864 0x30, 0x50, 0x2E, 0x9F,
7865 0x3A, 0x31, 0x4F, 0xE9,
7866
7867 0x38, 0x21, 0x2C, 0x9F,
7868 0x3B, 0x39, 0x4F, 0xE9,
7869
7870 0x2A, 0x45, 0x55, 0xB2,
7871 0x1A, 0x45, 0x65, 0xB2,
7872
7873 0x0A, 0x45, 0x55, 0xB4,
7874 0x02, 0x45, 0x65, 0xB4,
7875
7876 0x0F, 0xCF, 0x74, 0xC6,
7877 0x2A, 0x20,
7878 0x1A, 0x20,
7879
7880 0xA7, 0x30, 0x4F, 0xE9,
7881 0x0A, 0x20,
7882 0x02, 0x20,
7883
7884 0x31, 0x53, 0x2F, 0x9F,
7885 0x9C, 0x0F, 0x20, 0xE9,
7886
7887 0x39, 0xE5, 0x2C, 0x9F,
7888 0xA8, 0x38, 0x4F, 0xE9,
7889
7890 0x2A, 0x44, 0x54, 0xB6,
7891 0x1A, 0x44, 0x64, 0xB6,
7892
7893 0x30, 0x50, 0x2E, 0x9F,
7894 0x36, 0x31, 0x4F, 0xE9,
7895
7896 0x38, 0x21, 0x2C, 0x9F,
7897 0x37, 0x39, 0x4F, 0xE9,
7898
7899 0x0A, 0x45, 0x55, 0xB6,
7900 0x02, 0x45, 0x65, 0xB6,
7901
7902 0x3D, 0xCF, 0x75, 0xC6,
7903 0x2A, 0x20,
7904 0x1A, 0x20,
7905
7906 0x2A, 0x46, 0x56, 0xBF,
7907 0x1A, 0x46, 0x66, 0xBF,
7908
7909 0x31, 0x53, 0x2F, 0x9F,
7910 0xA4, 0x31, 0x4F, 0xE9,
7911
7912 0x39, 0xE5, 0x2C, 0x9F,
7913 0xA5, 0x39, 0x4F, 0xE9,
7914
7915 0x31, 0x3D, 0x20, 0xE9,
7916 0x0A, 0x20,
7917 0x02, 0x20,
7918
7919 0x0A, 0x47, 0x57, 0xBF,
7920 0x02, 0x47, 0x67, 0xBF,
7921
7922 0x30, 0x50, 0x2E, 0x9F,
7923 0xA1, 0x30, 0x4F, 0xE9,
7924
7925 0x38, 0x21, 0x2C, 0x9F,
7926 0xA2, 0x38, 0x4F, 0xE9,
7927
7928 0x31, 0x53, 0x2F, 0x9F,
7929 0x9D, 0x31, 0x4F, 0xE9,
7930
7931 0x39, 0xE5, 0x2C, 0x9F,
7932 0x9E, 0x39, 0x4F, 0xE9,
7933
7934 0x2A, 0x43, 0x53, 0xBF,
7935 0x1A, 0x43, 0x63, 0xBF,
7936
7937 0x30, 0x50, 0x2E, 0x9F,
7938 0x35, 0x30, 0x4F, 0xE9,
7939
7940 0x38, 0x21, 0x2C, 0x9F,
7941 0x39, 0x38, 0x4F, 0xE9,
7942
7943 0x0A, 0x48, 0x58, 0xBF,
7944 0x02, 0x48, 0x68, 0xBF,
7945
7946 0x31, 0x53, 0x2F, 0x9F,
7947 0x80, 0x31, 0x57, 0xE9,
7948
7949 0x39, 0xE5, 0x2C, 0x9F,
7950 0x81, 0x39, 0x57, 0xE9,
7951
7952 0x2A, 0x49, 0x59, 0xBF,
7953 0x1A, 0x49, 0x69, 0xBF,
7954
7955 0x30, 0x50, 0x2E, 0x9F,
7956 0x82, 0x30, 0x57, 0xE9,
7957
7958 0x38, 0x21, 0x2C, 0x9F,
7959 0x83, 0x38, 0x57, 0xE9,
7960
7961 0x31, 0x53, 0x2F, 0x9F,
7962 0x84, 0x31, 0x5E, 0xE9,
7963
7964 0x39, 0xE5, 0x2C, 0x9F,
7965 0x85, 0x39, 0x5E, 0xE9,
7966
7967 0x86, 0x76, 0x57, 0xE9,
7968 0x8A, 0x36, 0x20, 0xE9,
7969
7970 0x87, 0x77, 0x57, 0xE9,
7971 0x8B, 0x3E, 0xBF, 0xEA,
7972
7973 0x80, 0x30, 0x57, 0xE9,
7974 0x81, 0x38, 0x57, 0xE9,
7975
7976 0x82, 0x31, 0x57, 0xE9,
7977 0x86, 0x78, 0x57, 0xE9,
7978
7979 0x83, 0x39, 0x57, 0xE9,
7980 0x87, 0x79, 0x57, 0xE9,
7981
7982 0x30, 0x1F, 0x5F, 0xE9,
7983 0x8A, 0x34, 0x20, 0xE9,
7984
7985 0x8B, 0x3C, 0x20, 0xE9,
7986 0x37, 0x50, 0x60, 0xBD,
7987
7988 0x57, 0x0D, 0x20, 0xE9,
7989 0x35, 0x51, 0x61, 0xBD,
7990
7991 0x2B, 0x50, 0x20, 0xE9,
7992 0x1D, 0x37, 0xE1, 0xEA,
7993
7994 0x1E, 0x35, 0xE1, 0xEA,
7995 0x00, 0xE0,
7996 0x0E, 0x77,
7997
7998 0x24, 0x51, 0x20, 0xE9,
7999 0x89, 0xFF, 0x20, 0xEA,
8000
8001 0x16, 0x0E, 0x20, 0xE9,
8002 0x57, 0x2E, 0xBF, 0xEA,
8003
8004 0x0B, 0x46, 0xA0, 0xE8,
8005 0x1B, 0x56, 0xA0, 0xE8,
8006
8007 0x2B, 0x66, 0xA0, 0xE8,
8008 0x0C, 0x47, 0xA0, 0xE8,
8009
8010 0x1C, 0x57, 0xA0, 0xE8,
8011 0x2C, 0x67, 0xA0, 0xE8,
8012
8013 0x0B, 0x00,
8014 0x1B, 0x00,
8015 0x2B, 0x00,
8016 0x00, 0xE0,
8017
8018 0x0C, 0x00,
8019 0x1C, 0x00,
8020 0x2C, 0x00,
8021 0x00, 0xE0,
8022
8023 0x0B, 0x65,
8024 0x1B, 0x65,
8025 0x2B, 0x65,
8026 0x00, 0xE0,
8027
8028 0x0C, 0x65,
8029 0x1C, 0x65,
8030 0x2C, 0x65,
8031 0x00, 0xE0,
8032
8033 0x0B, 0x1B, 0x60, 0xEC,
8034 0x36, 0xD7, 0x36, 0xAD,
8035
8036 0x2B, 0x80, 0x60, 0xEC,
8037 0x0C, 0x1C, 0x60, 0xEC,
8038
8039 0x3E, 0xD7, 0x3E, 0xAD,
8040 0x2C, 0x80, 0x60, 0xEC,
8041
8042 0x0B, 0x2B, 0xDE, 0xE8,
8043 0x1B, 0x80, 0xDE, 0xE8,
8044
8045 0x36, 0x80, 0x36, 0xBD,
8046 0x3E, 0x80, 0x3E, 0xBD,
8047
8048 0x33, 0xD7, 0x0B, 0xBD,
8049 0x3B, 0xD7, 0x1B, 0xBD,
8050
8051 0x46, 0x80, 0x46, 0xCF,
8052 0x57, 0x80, 0x57, 0xCF,
8053
8054 0x66, 0x33, 0x66, 0xCF,
8055 0x47, 0x3B, 0x47, 0xCF,
8056
8057 0x56, 0x33, 0x56, 0xCF,
8058 0x67, 0x3B, 0x67, 0xCF,
8059
8060 0x0B, 0x48, 0xA0, 0xE8,
8061 0x1B, 0x58, 0xA0, 0xE8,
8062
8063 0x2B, 0x68, 0xA0, 0xE8,
8064 0x0C, 0x49, 0xA0, 0xE8,
8065
8066 0x1C, 0x59, 0xA0, 0xE8,
8067 0x2C, 0x69, 0xA0, 0xE8,
8068
8069 0x0B, 0x00,
8070 0x1B, 0x00,
8071 0x2B, 0x00,
8072 0x00, 0xE0,
8073
8074 0x0C, 0x00,
8075 0x1C, 0x00,
8076 0x2C, 0x00,
8077 0x00, 0xE0,
8078
8079 0x0B, 0x65,
8080 0x1B, 0x65,
8081 0x2B, 0x65,
8082 0x00, 0xE0,
8083
8084 0x0C, 0x65,
8085 0x1C, 0x65,
8086 0x2C, 0x65,
8087 0x00, 0xE0,
8088
8089 0x0B, 0x1B, 0x60, 0xEC,
8090 0x34, 0xD7, 0x34, 0xAD,
8091
8092 0x2B, 0x80, 0x60, 0xEC,
8093 0x0C, 0x1C, 0x60, 0xEC,
8094
8095 0x3C, 0xD7, 0x3C, 0xAD,
8096 0x2C, 0x80, 0x60, 0xEC,
8097
8098 0x0B, 0x2B, 0xDE, 0xE8,
8099 0x1B, 0x80, 0xDE, 0xE8,
8100
8101 0x34, 0x80, 0x34, 0xBD,
8102 0x3C, 0x80, 0x3C, 0xBD,
8103
8104 0x33, 0xD7, 0x0B, 0xBD,
8105 0x3B, 0xD7, 0x1B, 0xBD,
8106
8107 0x48, 0x80, 0x48, 0xCF,
8108 0x59, 0x80, 0x59, 0xCF,
8109
8110 0x68, 0x33, 0x68, 0xCF,
8111 0x49, 0x3B, 0x49, 0xCF,
8112
8113 0xA9, 0xFF, 0x20, 0xEA,
8114 0x00, 0x80, 0x00, 0xE8,
8115
8116 0x58, 0x33, 0x58, 0xCF,
8117 0x69, 0x3B, 0x69, 0xCF,
8118
8119 0x67, 0xFF, 0x20, 0xEA,
8120 0x57, 0xC0, 0xBF, 0xEA,
8121
8122 0x00, 0x80, 0xA0, 0xE9,
8123 0x00, 0x00, 0xD8, 0xEC,
8124
8125};
8126
8127static unsigned char warp_g400_t2gzsf[] = {
8128
8129 0x00, 0x8A, 0x98, 0xE9,
8130 0x00, 0x80, 0x00, 0xE8,
8131
8132 0x00, 0x80, 0xA0, 0xE9,
8133 0x00, 0x00, 0xD8, 0xEC,
8134
8135 0xFF, 0x80, 0xC0, 0xE9,
8136 0x00, 0x80, 0x00, 0xE8,
8137
8138 0x0A, 0x40, 0x50, 0xBF,
8139 0x2A, 0x40, 0x60, 0xBF,
8140
8141 0x32, 0x41, 0x51, 0xBF,
8142 0x3A, 0x41, 0x61, 0xBF,
8143
8144 0xC3, 0x6B,
8145 0xD3, 0x6B,
8146 0x00, 0x8A, 0x98, 0xE9,
8147
8148 0x73, 0x7B, 0xC8, 0xEC,
8149 0x96, 0xE2,
8150 0x41, 0x04,
8151
8152 0x7B, 0x43, 0xA0, 0xE8,
8153 0x73, 0x53, 0xA0, 0xE8,
8154
8155 0xAD, 0xEE, 0x23, 0x9F,
8156 0x00, 0xE0,
8157 0x51, 0x04,
8158
8159 0x90, 0xE2,
8160 0x61, 0x04,
8161 0x31, 0x46, 0xB1, 0xE8,
8162
8163 0x51, 0x41, 0xE0, 0xEC,
8164 0x39, 0x67, 0xB1, 0xE8,
8165
8166 0x00, 0x04,
8167 0x46, 0xE2,
8168 0x73, 0x63, 0xA0, 0xE8,
8169
8170 0x61, 0x41, 0xE0, 0xEC,
8171 0x31, 0x00,
8172 0x39, 0x00,
8173
8174 0x8A, 0x80, 0x15, 0xEA,
8175 0x10, 0x04,
8176 0x20, 0x04,
8177
8178 0x61, 0x51, 0xE0, 0xEC,
8179 0x2F, 0x41, 0x60, 0xEA,
8180
8181 0x31, 0x20,
8182 0x39, 0x20,
8183 0x1F, 0x42, 0xA0, 0xE8,
8184
8185 0x2A, 0x42, 0x52, 0xBF,
8186 0x0F, 0x52, 0xA0, 0xE8,
8187
8188 0x1A, 0x42, 0x62, 0xBF,
8189 0x1E, 0x51, 0x60, 0xEA,
8190
8191 0x73, 0x7B, 0xC8, 0xEC,
8192 0x0E, 0x61, 0x60, 0xEA,
8193
8194 0x32, 0x40, 0x50, 0xBD,
8195 0x22, 0x40, 0x60, 0xBD,
8196
8197 0x12, 0x41, 0x51, 0xBD,
8198 0x3A, 0x41, 0x61, 0xBD,
8199
8200 0xBF, 0x2F, 0x0E, 0xBD,
8201 0x97, 0xE2,
8202 0x7B, 0x72,
8203
8204 0x32, 0x20,
8205 0x22, 0x20,
8206 0x12, 0x20,
8207 0x3A, 0x20,
8208
8209 0x35, 0x48, 0xB1, 0xE8,
8210 0x3D, 0x59, 0xB1, 0xE8,
8211
8212 0x46, 0x31, 0x46, 0xBF,
8213 0x56, 0x31, 0x56, 0xBF,
8214
8215 0xB3, 0xE2, 0x2D, 0x9F,
8216 0x00, 0x80, 0x00, 0xE8,
8217
8218 0x66, 0x31, 0x66, 0xBF,
8219 0x47, 0x39, 0x47, 0xBF,
8220
8221 0x57, 0x39, 0x57, 0xBF,
8222 0x67, 0x39, 0x67, 0xBF,
8223
8224 0x7B, 0x80, 0x07, 0xEA,
8225 0x24, 0x41, 0x20, 0xE9,
8226
8227 0x35, 0x00,
8228 0x3D, 0x00,
8229 0x00, 0xE0,
8230 0x2D, 0x73,
8231
8232 0x33, 0x72,
8233 0x0C, 0xE3,
8234 0x8D, 0x2F, 0x1E, 0xBD,
8235
8236 0x43, 0x75, 0xF8, 0xEC,
8237 0x35, 0x20,
8238 0x3D, 0x20,
8239
8240 0x43, 0x43, 0x2D, 0xDF,
8241 0x53, 0x53, 0x2D, 0xDF,
8242
8243 0xAE, 0x1E, 0x0E, 0xBD,
8244 0x58, 0xE3,
8245 0x33, 0x66,
8246
8247 0x48, 0x35, 0x48, 0xBF,
8248 0x58, 0x35, 0x58, 0xBF,
8249
8250 0x68, 0x35, 0x68, 0xBF,
8251 0x49, 0x3D, 0x49, 0xBF,
8252
8253 0x59, 0x3D, 0x59, 0xBF,
8254 0x69, 0x3D, 0x69, 0xBF,
8255
8256 0x63, 0x63, 0x2D, 0xDF,
8257 0x4D, 0x7D, 0xF8, 0xEC,
8258
8259 0x59, 0xE3,
8260 0x00, 0xE0,
8261 0xB8, 0x38, 0x33, 0xBF,
8262
8263 0x2D, 0x73,
8264 0x30, 0x76,
8265 0x18, 0x3A, 0x41, 0xE9,
8266
8267 0x3F, 0x53, 0xA0, 0xE8,
8268 0x05, 0x80, 0x3D, 0xEA,
8269
8270 0x37, 0x43, 0xA0, 0xE8,
8271 0x3D, 0x63, 0xA0, 0xE8,
8272
8273 0x50, 0x70, 0xF8, 0xEC,
8274 0x2B, 0x50, 0x3C, 0xE9,
8275
8276 0x1F, 0x0F, 0xBC, 0xE8,
8277 0x00, 0x80, 0x00, 0xE8,
8278
8279 0x59, 0x78, 0xF8, 0xEC,
8280 0x00, 0x80, 0x00, 0xE8,
8281
8282 0x15, 0xC0, 0x20, 0xE9,
8283 0x15, 0xC0, 0x20, 0xE9,
8284
8285 0x15, 0xC0, 0x20, 0xE9,
8286 0x15, 0xC0, 0x20, 0xE9,
8287
8288 0x1E, 0x12, 0x41, 0xE9,
8289 0x1A, 0x22, 0x41, 0xE9,
8290
8291 0x46, 0x37, 0x46, 0xDF,
8292 0x56, 0x3F, 0x56, 0xDF,
8293
8294 0x2B, 0x40, 0x3D, 0xE9,
8295 0x66, 0x3D, 0x66, 0xDF,
8296
8297 0x1D, 0x32, 0x41, 0xE9,
8298 0x67, 0x3D, 0x67, 0xDF,
8299
8300 0x47, 0x37, 0x47, 0xDF,
8301 0x57, 0x3F, 0x57, 0xDF,
8302
8303 0x2A, 0x40, 0x20, 0xE9,
8304 0x59, 0x3F, 0x59, 0xDF,
8305
8306 0x16, 0x30, 0x20, 0xE9,
8307 0x69, 0x3D, 0x69, 0xDF,
8308
8309 0x48, 0x37, 0x48, 0xDF,
8310 0x58, 0x3F, 0x58, 0xDF,
8311
8312 0x68, 0x3D, 0x68, 0xDF,
8313 0x49, 0x37, 0x49, 0xDF,
8314
8315 0x32, 0x32, 0x2D, 0xDF,
8316 0x22, 0x22, 0x2D, 0xDF,
8317
8318 0x12, 0x12, 0x2D, 0xDF,
8319 0x3A, 0x3A, 0x2D, 0xDF,
8320
8321 0x0F, 0xCF, 0x74, 0xC2,
8322 0x37, 0xCF, 0x74, 0xC4,
8323
8324 0x0A, 0x44, 0x54, 0xB0,
8325 0x02, 0x44, 0x64, 0xB0,
8326
8327 0x3D, 0xCF, 0x74, 0xC0,
8328 0x34, 0x37, 0x20, 0xE9,
8329
8330 0x31, 0x53, 0x2F, 0x9F,
8331 0x38, 0x0F, 0x20, 0xE9,
8332
8333 0x39, 0xE5, 0x2C, 0x9F,
8334 0x3C, 0x3D, 0x20, 0xE9,
8335
8336 0x2A, 0x44, 0x54, 0xB2,
8337 0x1A, 0x44, 0x64, 0xB2,
8338
8339 0x36, 0x80, 0x3A, 0xEA,
8340 0x0A, 0x20,
8341 0x02, 0x20,
8342
8343 0x0F, 0xCF, 0x75, 0xC0,
8344 0x2A, 0x20,
8345 0x1A, 0x20,
8346
8347 0x30, 0x50, 0x2E, 0x9F,
8348 0x32, 0x31, 0x5F, 0xE9,
8349
8350 0x38, 0x21, 0x2C, 0x9F,
8351 0x33, 0x39, 0x5F, 0xE9,
8352
8353 0x3D, 0xCF, 0x75, 0xC2,
8354 0x37, 0xCF, 0x75, 0xC4,
8355
8356 0x31, 0x53, 0x2F, 0x9F,
8357 0xA6, 0x0F, 0x20, 0xE9,
8358
8359 0x39, 0xE5, 0x2C, 0x9F,
8360 0xA3, 0x3D, 0x20, 0xE9,
8361
8362 0x2A, 0x44, 0x54, 0xB4,
8363 0x1A, 0x44, 0x64, 0xB4,
8364
8365 0x0A, 0x45, 0x55, 0xB0,
8366 0x02, 0x45, 0x65, 0xB0,
8367
8368 0x88, 0x73, 0x5E, 0xE9,
8369 0x2A, 0x20,
8370 0x1A, 0x20,
8371
8372 0xA0, 0x37, 0x20, 0xE9,
8373 0x0A, 0x20,
8374 0x02, 0x20,
8375
8376 0x31, 0x53, 0x2F, 0x9F,
8377 0x3E, 0x30, 0x4F, 0xE9,
8378
8379 0x39, 0xE5, 0x2C, 0x9F,
8380 0x3F, 0x38, 0x4F, 0xE9,
8381
8382 0x30, 0x50, 0x2E, 0x9F,
8383 0x3A, 0x31, 0x4F, 0xE9,
8384
8385 0x38, 0x21, 0x2C, 0x9F,
8386 0x3B, 0x39, 0x4F, 0xE9,
8387
8388 0x2A, 0x45, 0x55, 0xB2,
8389 0x1A, 0x45, 0x65, 0xB2,
8390
8391 0x0A, 0x45, 0x55, 0xB4,
8392 0x02, 0x45, 0x65, 0xB4,
8393
8394 0x0F, 0xCF, 0x75, 0xC6,
8395 0x2A, 0x20,
8396 0x1A, 0x20,
8397
8398 0xA7, 0x30, 0x4F, 0xE9,
8399 0x0A, 0x20,
8400 0x02, 0x20,
8401
8402 0x31, 0x53, 0x2F, 0x9F,
8403 0x31, 0x0F, 0x20, 0xE9,
8404
8405 0x39, 0xE5, 0x2C, 0x9F,
8406 0xA8, 0x38, 0x4F, 0xE9,
8407
8408 0x2A, 0x45, 0x55, 0xB6,
8409 0x1A, 0x45, 0x65, 0xB6,
8410
8411 0x30, 0x50, 0x2E, 0x9F,
8412 0x36, 0x31, 0x4F, 0xE9,
8413
8414 0x38, 0x21, 0x2C, 0x9F,
8415 0x37, 0x39, 0x4F, 0xE9,
8416
8417 0x00, 0x80, 0x00, 0xE8,
8418 0x2A, 0x20,
8419 0x1A, 0x20,
8420
8421 0x2A, 0x46, 0x56, 0xBF,
8422 0x1A, 0x46, 0x66, 0xBF,
8423
8424 0x31, 0x53, 0x2F, 0x9F,
8425 0xA4, 0x31, 0x4F, 0xE9,
8426
8427 0x39, 0xE5, 0x2C, 0x9F,
8428 0xA5, 0x39, 0x4F, 0xE9,
8429
8430 0x0A, 0x47, 0x57, 0xBF,
8431 0x02, 0x47, 0x67, 0xBF,
8432
8433 0x31, 0x53, 0x2F, 0x9F,
8434 0xA1, 0x30, 0x4F, 0xE9,
8435
8436 0x39, 0xE5, 0x2C, 0x9F,
8437 0xA2, 0x38, 0x4F, 0xE9,
8438
8439 0x2A, 0x43, 0x53, 0xBF,
8440 0x1A, 0x43, 0x63, 0xBF,
8441
8442 0x30, 0x50, 0x2E, 0x9F,
8443 0x35, 0x31, 0x4F, 0xE9,
8444
8445 0x38, 0x21, 0x2C, 0x9F,
8446 0x39, 0x39, 0x4F, 0xE9,
8447
8448 0x0A, 0x48, 0x58, 0xBF,
8449 0x02, 0x48, 0x68, 0xBF,
8450
8451 0x31, 0x53, 0x2F, 0x9F,
8452 0x80, 0x31, 0x57, 0xE9,
8453
8454 0x39, 0xE5, 0x2C, 0x9F,
8455 0x81, 0x39, 0x57, 0xE9,
8456
8457 0x2A, 0x49, 0x59, 0xBF,
8458 0x1A, 0x49, 0x69, 0xBF,
8459
8460 0x30, 0x50, 0x2E, 0x9F,
8461 0x82, 0x30, 0x57, 0xE9,
8462
8463 0x38, 0x21, 0x2C, 0x9F,
8464 0x83, 0x38, 0x57, 0xE9,
8465
8466 0x31, 0x53, 0x2F, 0x9F,
8467 0x84, 0x31, 0x5E, 0xE9,
8468
8469 0x39, 0xE5, 0x2C, 0x9F,
8470 0x85, 0x39, 0x5E, 0xE9,
8471
8472 0x86, 0x76, 0x57, 0xE9,
8473 0x8A, 0x36, 0x20, 0xE9,
8474
8475 0x87, 0x77, 0x57, 0xE9,
8476 0x8B, 0x3E, 0xBF, 0xEA,
8477
8478 0x80, 0x30, 0x57, 0xE9,
8479 0x81, 0x38, 0x57, 0xE9,
8480
8481 0x82, 0x31, 0x57, 0xE9,
8482 0x86, 0x78, 0x57, 0xE9,
8483
8484 0x83, 0x39, 0x57, 0xE9,
8485 0x87, 0x79, 0x57, 0xE9,
8486
8487 0x30, 0x1F, 0x5F, 0xE9,
8488 0x8A, 0x34, 0x20, 0xE9,
8489
8490 0x8B, 0x3C, 0x20, 0xE9,
8491 0x37, 0x50, 0x60, 0xBD,
8492
8493 0x57, 0x0D, 0x20, 0xE9,
8494 0x35, 0x51, 0x61, 0xBD,
8495
8496 0x2B, 0x50, 0x20, 0xE9,
8497 0x1D, 0x37, 0xE1, 0xEA,
8498
8499 0x1E, 0x35, 0xE1, 0xEA,
8500 0x00, 0xE0,
8501 0x0E, 0x77,
8502
8503 0x24, 0x51, 0x20, 0xE9,
8504 0x8D, 0xFF, 0x20, 0xEA,
8505
8506 0x16, 0x0E, 0x20, 0xE9,
8507 0x57, 0x2E, 0xBF, 0xEA,
8508
8509 0x0B, 0x46, 0xA0, 0xE8,
8510 0x1B, 0x56, 0xA0, 0xE8,
8511
8512 0x2B, 0x66, 0xA0, 0xE8,
8513 0x0C, 0x47, 0xA0, 0xE8,
8514
8515 0x1C, 0x57, 0xA0, 0xE8,
8516 0x2C, 0x67, 0xA0, 0xE8,
8517
8518 0x0B, 0x00,
8519 0x1B, 0x00,
8520 0x2B, 0x00,
8521 0x00, 0xE0,
8522
8523 0x0C, 0x00,
8524 0x1C, 0x00,
8525 0x2C, 0x00,
8526 0x00, 0xE0,
8527
8528 0x0B, 0x65,
8529 0x1B, 0x65,
8530 0x2B, 0x65,
8531 0x00, 0xE0,
8532
8533 0x0C, 0x65,
8534 0x1C, 0x65,
8535 0x2C, 0x65,
8536 0x00, 0xE0,
8537
8538 0x0B, 0x1B, 0x60, 0xEC,
8539 0x36, 0xD7, 0x36, 0xAD,
8540
8541 0x2B, 0x80, 0x60, 0xEC,
8542 0x0C, 0x1C, 0x60, 0xEC,
8543
8544 0x3E, 0xD7, 0x3E, 0xAD,
8545 0x2C, 0x80, 0x60, 0xEC,
8546
8547 0x0B, 0x2B, 0xDE, 0xE8,
8548 0x1B, 0x80, 0xDE, 0xE8,
8549
8550 0x36, 0x80, 0x36, 0xBD,
8551 0x3E, 0x80, 0x3E, 0xBD,
8552
8553 0x33, 0xD7, 0x0B, 0xBD,
8554 0x3B, 0xD7, 0x1B, 0xBD,
8555
8556 0x46, 0x80, 0x46, 0xCF,
8557 0x57, 0x80, 0x57, 0xCF,
8558
8559 0x66, 0x33, 0x66, 0xCF,
8560 0x47, 0x3B, 0x47, 0xCF,
8561
8562 0x56, 0x33, 0x56, 0xCF,
8563 0x67, 0x3B, 0x67, 0xCF,
8564
8565 0x0B, 0x48, 0xA0, 0xE8,
8566 0x1B, 0x58, 0xA0, 0xE8,
8567
8568 0x2B, 0x68, 0xA0, 0xE8,
8569 0x0C, 0x49, 0xA0, 0xE8,
8570
8571 0x1C, 0x59, 0xA0, 0xE8,
8572 0x2C, 0x69, 0xA0, 0xE8,
8573
8574 0x0B, 0x00,
8575 0x1B, 0x00,
8576 0x2B, 0x00,
8577 0x00, 0xE0,
8578
8579 0x0C, 0x00,
8580 0x1C, 0x00,
8581 0x2C, 0x00,
8582 0x00, 0xE0,
8583
8584 0x0B, 0x65,
8585 0x1B, 0x65,
8586 0x2B, 0x65,
8587 0x00, 0xE0,
8588
8589 0x0C, 0x65,
8590 0x1C, 0x65,
8591 0x2C, 0x65,
8592 0x00, 0xE0,
8593
8594 0x0B, 0x1B, 0x60, 0xEC,
8595 0x34, 0xD7, 0x34, 0xAD,
8596
8597 0x2B, 0x80, 0x60, 0xEC,
8598 0x0C, 0x1C, 0x60, 0xEC,
8599
8600 0x3C, 0xD7, 0x3C, 0xAD,
8601 0x2C, 0x80, 0x60, 0xEC,
8602
8603 0x0B, 0x2B, 0xDE, 0xE8,
8604 0x1B, 0x80, 0xDE, 0xE8,
8605
8606 0x34, 0x80, 0x34, 0xBD,
8607 0x3C, 0x80, 0x3C, 0xBD,
8608
8609 0x33, 0xD7, 0x0B, 0xBD,
8610 0x3B, 0xD7, 0x1B, 0xBD,
8611
8612 0x48, 0x80, 0x48, 0xCF,
8613 0x59, 0x80, 0x59, 0xCF,
8614
8615 0x68, 0x33, 0x68, 0xCF,
8616 0x49, 0x3B, 0x49, 0xCF,
8617
8618 0xAD, 0xFF, 0x20, 0xEA,
8619 0x00, 0x80, 0x00, 0xE8,
8620
8621 0x58, 0x33, 0x58, 0xCF,
8622 0x69, 0x3B, 0x69, 0xCF,
8623
8624 0x6B, 0xFF, 0x20, 0xEA,
8625 0x57, 0xC0, 0xBF, 0xEA,
8626
8627 0x00, 0x80, 0xA0, 0xE9,
8628 0x00, 0x00, 0xD8, 0xEC,
8629
8630};
8631
8632static unsigned char warp_g400_tgz[] = {
8633
8634 0x00, 0x88, 0x98, 0xE9,
8635 0x00, 0x80, 0x00, 0xE8,
8636
8637 0x00, 0x80, 0xA0, 0xE9,
8638 0x00, 0x00, 0xD8, 0xEC,
8639
8640 0xFF, 0x80, 0xC0, 0xE9,
8641 0x00, 0x80, 0x00, 0xE8,
8642
8643 0x22, 0x40, 0x48, 0xBF,
8644 0x2A, 0x40, 0x50, 0xBF,
8645
8646 0x32, 0x41, 0x49, 0xBF,
8647 0x3A, 0x41, 0x51, 0xBF,
8648
8649 0xC3, 0x6B,
8650 0xCB, 0x6B,
8651 0x00, 0x88, 0x98, 0xE9,
8652
8653 0x73, 0x7B, 0xC8, 0xEC,
8654 0x96, 0xE2,
8655 0x41, 0x04,
8656
8657 0x7B, 0x43, 0xA0, 0xE8,
8658 0x73, 0x4B, 0xA0, 0xE8,
8659
8660 0xAD, 0xEE, 0x29, 0x9F,
8661 0x00, 0xE0,
8662 0x49, 0x04,
8663
8664 0x90, 0xE2,
8665 0x51, 0x04,
8666 0x31, 0x46, 0xB1, 0xE8,
8667
8668 0x49, 0x41, 0xC0, 0xEC,
8669 0x39, 0x57, 0xB1, 0xE8,
8670
8671 0x00, 0x04,
8672 0x46, 0xE2,
8673 0x73, 0x53, 0xA0, 0xE8,
8674
8675 0x51, 0x41, 0xC0, 0xEC,
8676 0x31, 0x00,
8677 0x39, 0x00,
8678
8679 0x58, 0x80, 0x15, 0xEA,
8680 0x08, 0x04,
8681 0x10, 0x04,
8682
8683 0x51, 0x49, 0xC0, 0xEC,
8684 0x2F, 0x41, 0x60, 0xEA,
8685
8686 0x31, 0x20,
8687 0x39, 0x20,
8688 0x1F, 0x42, 0xA0, 0xE8,
8689
8690 0x2A, 0x42, 0x4A, 0xBF,
8691 0x27, 0x4A, 0xA0, 0xE8,
8692
8693 0x1A, 0x42, 0x52, 0xBF,
8694 0x1E, 0x49, 0x60, 0xEA,
8695
8696 0x73, 0x7B, 0xC8, 0xEC,
8697 0x26, 0x51, 0x60, 0xEA,
8698
8699 0x32, 0x40, 0x48, 0xBD,
8700 0x22, 0x40, 0x50, 0xBD,
8701
8702 0x12, 0x41, 0x49, 0xBD,
8703 0x3A, 0x41, 0x51, 0xBD,
8704
8705 0xBF, 0x2F, 0x26, 0xBD,
8706 0x00, 0xE0,
8707 0x7B, 0x72,
8708
8709 0x32, 0x20,
8710 0x22, 0x20,
8711 0x12, 0x20,
8712 0x3A, 0x20,
8713
8714 0x46, 0x31, 0x46, 0xBF,
8715 0x4E, 0x31, 0x4E, 0xBF,
8716
8717 0xB3, 0xE2, 0x2D, 0x9F,
8718 0x00, 0x80, 0x00, 0xE8,
8719
8720 0x56, 0x31, 0x56, 0xBF,
8721 0x47, 0x39, 0x47, 0xBF,
8722
8723 0x4F, 0x39, 0x4F, 0xBF,
8724 0x57, 0x39, 0x57, 0xBF,
8725
8726 0x4A, 0x80, 0x07, 0xEA,
8727 0x24, 0x41, 0x20, 0xE9,
8728
8729 0x42, 0x73, 0xF8, 0xEC,
8730 0x00, 0xE0,
8731 0x2D, 0x73,
8732
8733 0x33, 0x72,
8734 0x0C, 0xE3,
8735 0xA5, 0x2F, 0x1E, 0xBD,
8736
8737 0x43, 0x43, 0x2D, 0xDF,
8738 0x4B, 0x4B, 0x2D, 0xDF,
8739
8740 0xAE, 0x1E, 0x26, 0xBD,
8741 0x58, 0xE3,
8742 0x33, 0x66,
8743
8744 0x53, 0x53, 0x2D, 0xDF,
8745 0x00, 0x80, 0x00, 0xE8,
8746
8747 0xB8, 0x38, 0x33, 0xBF,
8748 0x00, 0xE0,
8749 0x59, 0xE3,
8750
8751 0x1E, 0x12, 0x41, 0xE9,
8752 0x1A, 0x22, 0x41, 0xE9,
8753
8754 0x2B, 0x40, 0x3D, 0xE9,
8755 0x3F, 0x4B, 0xA0, 0xE8,
8756
8757 0x2D, 0x73,
8758 0x30, 0x76,
8759 0x05, 0x80, 0x3D, 0xEA,
8760
8761 0x37, 0x43, 0xA0, 0xE8,
8762 0x3D, 0x53, 0xA0, 0xE8,
8763
8764 0x48, 0x70, 0xF8, 0xEC,
8765 0x2B, 0x48, 0x3C, 0xE9,
8766
8767 0x1F, 0x27, 0xBC, 0xE8,
8768 0x00, 0x80, 0x00, 0xE8,
8769
8770 0x00, 0x80, 0x00, 0xE8,
8771 0x00, 0x80, 0x00, 0xE8,
8772
8773 0x15, 0xC0, 0x20, 0xE9,
8774 0x15, 0xC0, 0x20, 0xE9,
8775
8776 0x15, 0xC0, 0x20, 0xE9,
8777 0x15, 0xC0, 0x20, 0xE9,
8778
8779 0x18, 0x3A, 0x41, 0xE9,
8780 0x1D, 0x32, 0x41, 0xE9,
8781
8782 0x2A, 0x40, 0x20, 0xE9,
8783 0x56, 0x3D, 0x56, 0xDF,
8784
8785 0x46, 0x37, 0x46, 0xDF,
8786 0x4E, 0x3F, 0x4E, 0xDF,
8787
8788 0x16, 0x30, 0x20, 0xE9,
8789 0x4F, 0x3F, 0x4F, 0xDF,
8790
8791 0x32, 0x32, 0x2D, 0xDF,
8792 0x22, 0x22, 0x2D, 0xDF,
8793
8794 0x12, 0x12, 0x2D, 0xDF,
8795 0x3A, 0x3A, 0x2D, 0xDF,
8796
8797 0x47, 0x37, 0x47, 0xDF,
8798 0x57, 0x3D, 0x57, 0xDF,
8799
8800 0x3D, 0xCF, 0x74, 0xC0,
8801 0x37, 0xCF, 0x74, 0xC4,
8802
8803 0x31, 0x53, 0x2F, 0x9F,
8804 0x34, 0x80, 0x20, 0xE9,
8805
8806 0x39, 0xE5, 0x2C, 0x9F,
8807 0x3C, 0x3D, 0x20, 0xE9,
8808
8809 0x0A, 0x44, 0x4C, 0xB0,
8810 0x02, 0x44, 0x54, 0xB0,
8811
8812 0x2A, 0x44, 0x4C, 0xB2,
8813 0x1A, 0x44, 0x54, 0xB2,
8814
8815 0x1D, 0x80, 0x3A, 0xEA,
8816 0x0A, 0x20,
8817 0x02, 0x20,
8818
8819 0x3D, 0xCF, 0x74, 0xC2,
8820 0x2A, 0x20,
8821 0x1A, 0x20,
8822
8823 0x30, 0x50, 0x2E, 0x9F,
8824 0x32, 0x31, 0x5F, 0xE9,
8825
8826 0x38, 0x21, 0x2C, 0x9F,
8827 0x33, 0x39, 0x5F, 0xE9,
8828
8829 0x31, 0x53, 0x2F, 0x9F,
8830 0x00, 0x80, 0x00, 0xE8,
8831
8832 0x2A, 0x44, 0x4C, 0xB4,
8833 0x1A, 0x44, 0x54, 0xB4,
8834
8835 0x39, 0xE5, 0x2C, 0x9F,
8836 0x38, 0x3D, 0x20, 0xE9,
8837
8838 0x88, 0x73, 0x5E, 0xE9,
8839 0x2A, 0x20,
8840 0x1A, 0x20,
8841
8842 0x2A, 0x46, 0x4E, 0xBF,
8843 0x1A, 0x46, 0x56, 0xBF,
8844
8845 0x31, 0x53, 0x2F, 0x9F,
8846 0x3E, 0x30, 0x4F, 0xE9,
8847
8848 0x39, 0xE5, 0x2C, 0x9F,
8849 0x3F, 0x38, 0x4F, 0xE9,
8850
8851 0x0A, 0x47, 0x4F, 0xBF,
8852 0x02, 0x47, 0x57, 0xBF,
8853
8854 0x31, 0x53, 0x2F, 0x9F,
8855 0x3A, 0x31, 0x4F, 0xE9,
8856
8857 0x39, 0xE5, 0x2C, 0x9F,
8858 0x3B, 0x39, 0x4F, 0xE9,
8859
8860 0x2A, 0x43, 0x4B, 0xBF,
8861 0x1A, 0x43, 0x53, 0xBF,
8862
8863 0x30, 0x50, 0x2E, 0x9F,
8864 0x36, 0x31, 0x4F, 0xE9,
8865
8866 0x38, 0x21, 0x2C, 0x9F,
8867 0x37, 0x39, 0x4F, 0xE9,
8868
8869 0x31, 0x53, 0x2F, 0x9F,
8870 0x80, 0x31, 0x57, 0xE9,
8871
8872 0x39, 0xE5, 0x2C, 0x9F,
8873 0x81, 0x39, 0x57, 0xE9,
8874
8875 0x37, 0x48, 0x50, 0xBD,
8876 0x8A, 0x36, 0x20, 0xE9,
8877
8878 0x86, 0x76, 0x57, 0xE9,
8879 0x8B, 0x3E, 0x20, 0xE9,
8880
8881 0x82, 0x30, 0x57, 0xE9,
8882 0x87, 0x77, 0x57, 0xE9,
8883
8884 0x83, 0x38, 0x57, 0xE9,
8885 0x35, 0x49, 0x51, 0xBD,
8886
8887 0x84, 0x31, 0x5E, 0xE9,
8888 0x30, 0x1F, 0x5F, 0xE9,
8889
8890 0x85, 0x39, 0x5E, 0xE9,
8891 0x57, 0x25, 0x20, 0xE9,
8892
8893 0x2B, 0x48, 0x20, 0xE9,
8894 0x1D, 0x37, 0xE1, 0xEA,
8895
8896 0x1E, 0x35, 0xE1, 0xEA,
8897 0x00, 0xE0,
8898 0x26, 0x77,
8899
8900 0x24, 0x49, 0x20, 0xE9,
8901 0xAF, 0xFF, 0x20, 0xEA,
8902
8903 0x16, 0x26, 0x20, 0xE9,
8904 0x57, 0x2E, 0xBF, 0xEA,
8905
8906 0x1C, 0x46, 0xA0, 0xE8,
8907 0x23, 0x4E, 0xA0, 0xE8,
8908
8909 0x2B, 0x56, 0xA0, 0xE8,
8910 0x1D, 0x47, 0xA0, 0xE8,
8911
8912 0x24, 0x4F, 0xA0, 0xE8,
8913 0x2C, 0x57, 0xA0, 0xE8,
8914
8915 0x1C, 0x00,
8916 0x23, 0x00,
8917 0x2B, 0x00,
8918 0x00, 0xE0,
8919
8920 0x1D, 0x00,
8921 0x24, 0x00,
8922 0x2C, 0x00,
8923 0x00, 0xE0,
8924
8925 0x1C, 0x65,
8926 0x23, 0x65,
8927 0x2B, 0x65,
8928 0x00, 0xE0,
8929
8930 0x1D, 0x65,
8931 0x24, 0x65,
8932 0x2C, 0x65,
8933 0x00, 0xE0,
8934
8935 0x1C, 0x23, 0x60, 0xEC,
8936 0x36, 0xD7, 0x36, 0xAD,
8937
8938 0x2B, 0x80, 0x60, 0xEC,
8939 0x1D, 0x24, 0x60, 0xEC,
8940
8941 0x3E, 0xD7, 0x3E, 0xAD,
8942 0x2C, 0x80, 0x60, 0xEC,
8943
8944 0x1C, 0x2B, 0xDE, 0xE8,
8945 0x23, 0x80, 0xDE, 0xE8,
8946
8947 0x36, 0x80, 0x36, 0xBD,
8948 0x3E, 0x80, 0x3E, 0xBD,
8949
8950 0x33, 0xD7, 0x1C, 0xBD,
8951 0x3B, 0xD7, 0x23, 0xBD,
8952
8953 0x46, 0x80, 0x46, 0xCF,
8954 0x4F, 0x80, 0x4F, 0xCF,
8955
8956 0x56, 0x33, 0x56, 0xCF,
8957 0x47, 0x3B, 0x47, 0xCF,
8958
8959 0xD6, 0xFF, 0x20, 0xEA,
8960 0x00, 0x80, 0x00, 0xE8,
8961
8962 0x4E, 0x33, 0x4E, 0xCF,
8963 0x57, 0x3B, 0x57, 0xCF,
8964
8965 0x9D, 0xFF, 0x20, 0xEA,
8966 0x57, 0xC0, 0xBF, 0xEA,
8967
8968 0x00, 0x80, 0xA0, 0xE9,
8969 0x00, 0x00, 0xD8, 0xEC,
8970
8971};
8972
8973static unsigned char warp_g400_tgza[] = {
8974
8975 0x00, 0x88, 0x98, 0xE9,
8976 0x00, 0x80, 0x00, 0xE8,
8977
8978 0x00, 0x80, 0xA0, 0xE9,
8979 0x00, 0x00, 0xD8, 0xEC,
8980
8981 0xFF, 0x80, 0xC0, 0xE9,
8982 0x00, 0x80, 0x00, 0xE8,
8983
8984 0x22, 0x40, 0x48, 0xBF,
8985 0x2A, 0x40, 0x50, 0xBF,
8986
8987 0x32, 0x41, 0x49, 0xBF,
8988 0x3A, 0x41, 0x51, 0xBF,
8989
8990 0xC3, 0x6B,
8991 0xCB, 0x6B,
8992 0x00, 0x88, 0x98, 0xE9,
8993
8994 0x73, 0x7B, 0xC8, 0xEC,
8995 0x96, 0xE2,
8996 0x41, 0x04,
8997
8998 0x7B, 0x43, 0xA0, 0xE8,
8999 0x73, 0x4B, 0xA0, 0xE8,
9000
9001 0xAD, 0xEE, 0x29, 0x9F,
9002 0x00, 0xE0,
9003 0x49, 0x04,
9004
9005 0x90, 0xE2,
9006 0x51, 0x04,
9007 0x31, 0x46, 0xB1, 0xE8,
9008
9009 0x49, 0x41, 0xC0, 0xEC,
9010 0x39, 0x57, 0xB1, 0xE8,
9011
9012 0x00, 0x04,
9013 0x46, 0xE2,
9014 0x73, 0x53, 0xA0, 0xE8,
9015
9016 0x51, 0x41, 0xC0, 0xEC,
9017 0x31, 0x00,
9018 0x39, 0x00,
9019
9020 0x5C, 0x80, 0x15, 0xEA,
9021 0x08, 0x04,
9022 0x10, 0x04,
9023
9024 0x51, 0x49, 0xC0, 0xEC,
9025 0x2F, 0x41, 0x60, 0xEA,
9026
9027 0x31, 0x20,
9028 0x39, 0x20,
9029 0x1F, 0x42, 0xA0, 0xE8,
9030
9031 0x2A, 0x42, 0x4A, 0xBF,
9032 0x27, 0x4A, 0xA0, 0xE8,
9033
9034 0x1A, 0x42, 0x52, 0xBF,
9035 0x1E, 0x49, 0x60, 0xEA,
9036
9037 0x73, 0x7B, 0xC8, 0xEC,
9038 0x26, 0x51, 0x60, 0xEA,
9039
9040 0x32, 0x40, 0x48, 0xBD,
9041 0x22, 0x40, 0x50, 0xBD,
9042
9043 0x12, 0x41, 0x49, 0xBD,
9044 0x3A, 0x41, 0x51, 0xBD,
9045
9046 0xBF, 0x2F, 0x26, 0xBD,
9047 0x00, 0xE0,
9048 0x7B, 0x72,
9049
9050 0x32, 0x20,
9051 0x22, 0x20,
9052 0x12, 0x20,
9053 0x3A, 0x20,
9054
9055 0x46, 0x31, 0x46, 0xBF,
9056 0x4E, 0x31, 0x4E, 0xBF,
9057
9058 0xB3, 0xE2, 0x2D, 0x9F,
9059 0x00, 0x80, 0x00, 0xE8,
9060
9061 0x56, 0x31, 0x56, 0xBF,
9062 0x47, 0x39, 0x47, 0xBF,
9063
9064 0x4F, 0x39, 0x4F, 0xBF,
9065 0x57, 0x39, 0x57, 0xBF,
9066
9067 0x4E, 0x80, 0x07, 0xEA,
9068 0x24, 0x41, 0x20, 0xE9,
9069
9070 0x42, 0x73, 0xF8, 0xEC,
9071 0x00, 0xE0,
9072 0x2D, 0x73,
9073
9074 0x33, 0x72,
9075 0x0C, 0xE3,
9076 0xA5, 0x2F, 0x1E, 0xBD,
9077
9078 0x43, 0x43, 0x2D, 0xDF,
9079 0x4B, 0x4B, 0x2D, 0xDF,
9080
9081 0xAE, 0x1E, 0x26, 0xBD,
9082 0x58, 0xE3,
9083 0x33, 0x66,
9084
9085 0x53, 0x53, 0x2D, 0xDF,
9086 0x00, 0x80, 0x00, 0xE8,
9087
9088 0xB8, 0x38, 0x33, 0xBF,
9089 0x00, 0xE0,
9090 0x59, 0xE3,
9091
9092 0x1E, 0x12, 0x41, 0xE9,
9093 0x1A, 0x22, 0x41, 0xE9,
9094
9095 0x2B, 0x40, 0x3D, 0xE9,
9096 0x3F, 0x4B, 0xA0, 0xE8,
9097
9098 0x2D, 0x73,
9099 0x30, 0x76,
9100 0x05, 0x80, 0x3D, 0xEA,
9101
9102 0x37, 0x43, 0xA0, 0xE8,
9103 0x3D, 0x53, 0xA0, 0xE8,
9104
9105 0x48, 0x70, 0xF8, 0xEC,
9106 0x2B, 0x48, 0x3C, 0xE9,
9107
9108 0x1F, 0x27, 0xBC, 0xE8,
9109 0x00, 0x80, 0x00, 0xE8,
9110
9111 0x00, 0x80, 0x00, 0xE8,
9112 0x00, 0x80, 0x00, 0xE8,
9113
9114 0x15, 0xC0, 0x20, 0xE9,
9115 0x15, 0xC0, 0x20, 0xE9,
9116
9117 0x15, 0xC0, 0x20, 0xE9,
9118 0x15, 0xC0, 0x20, 0xE9,
9119
9120 0x18, 0x3A, 0x41, 0xE9,
9121 0x1D, 0x32, 0x41, 0xE9,
9122
9123 0x2A, 0x40, 0x20, 0xE9,
9124 0x56, 0x3D, 0x56, 0xDF,
9125
9126 0x46, 0x37, 0x46, 0xDF,
9127 0x4E, 0x3F, 0x4E, 0xDF,
9128
9129 0x16, 0x30, 0x20, 0xE9,
9130 0x4F, 0x3F, 0x4F, 0xDF,
9131
9132 0x32, 0x32, 0x2D, 0xDF,
9133 0x22, 0x22, 0x2D, 0xDF,
9134
9135 0x12, 0x12, 0x2D, 0xDF,
9136 0x3A, 0x3A, 0x2D, 0xDF,
9137
9138 0x47, 0x37, 0x47, 0xDF,
9139 0x57, 0x3D, 0x57, 0xDF,
9140
9141 0x3D, 0xCF, 0x74, 0xC0,
9142 0x37, 0xCF, 0x74, 0xC4,
9143
9144 0x31, 0x53, 0x2F, 0x9F,
9145 0x34, 0x80, 0x20, 0xE9,
9146
9147 0x39, 0xE5, 0x2C, 0x9F,
9148 0x3C, 0x3D, 0x20, 0xE9,
9149
9150 0x27, 0xCF, 0x74, 0xC6,
9151 0x3D, 0xCF, 0x74, 0xC2,
9152
9153 0x0A, 0x44, 0x4C, 0xB0,
9154 0x02, 0x44, 0x54, 0xB0,
9155
9156 0x2A, 0x44, 0x4C, 0xB2,
9157 0x1A, 0x44, 0x54, 0xB2,
9158
9159 0x20, 0x80, 0x3A, 0xEA,
9160 0x0A, 0x20,
9161 0x02, 0x20,
9162
9163 0x88, 0x73, 0x5E, 0xE9,
9164 0x2A, 0x20,
9165 0x1A, 0x20,
9166
9167 0x30, 0x50, 0x2E, 0x9F,
9168 0x32, 0x31, 0x5F, 0xE9,
9169
9170 0x38, 0x21, 0x2C, 0x9F,
9171 0x33, 0x39, 0x5F, 0xE9,
9172
9173 0x31, 0x53, 0x2F, 0x9F,
9174 0x9C, 0x27, 0x20, 0xE9,
9175
9176 0x0A, 0x44, 0x4C, 0xB4,
9177 0x02, 0x44, 0x54, 0xB4,
9178
9179 0x2A, 0x44, 0x4C, 0xB6,
9180 0x1A, 0x44, 0x54, 0xB6,
9181
9182 0x39, 0xE5, 0x2C, 0x9F,
9183 0x38, 0x3D, 0x20, 0xE9,
9184
9185 0x0A, 0x20,
9186 0x02, 0x20,
9187 0x2A, 0x20,
9188 0x1A, 0x20,
9189
9190 0x0A, 0x47, 0x4F, 0xBF,
9191 0x02, 0x47, 0x57, 0xBF,
9192
9193 0x30, 0x50, 0x2E, 0x9F,
9194 0x3E, 0x30, 0x4F, 0xE9,
9195
9196 0x38, 0x21, 0x2C, 0x9F,
9197 0x3F, 0x38, 0x4F, 0xE9,
9198
9199 0x2A, 0x46, 0x4E, 0xBF,
9200 0x1A, 0x46, 0x56, 0xBF,
9201
9202 0x31, 0x53, 0x2F, 0x9F,
9203 0x3A, 0x31, 0x4F, 0xE9,
9204
9205 0x39, 0xE5, 0x2C, 0x9F,
9206 0x3B, 0x39, 0x4F, 0xE9,
9207
9208 0x31, 0x53, 0x2F, 0x9F,
9209 0x36, 0x30, 0x4F, 0xE9,
9210
9211 0x39, 0xE5, 0x2C, 0x9F,
9212 0x37, 0x38, 0x4F, 0xE9,
9213
9214 0x2A, 0x43, 0x4B, 0xBF,
9215 0x1A, 0x43, 0x53, 0xBF,
9216
9217 0x30, 0x50, 0x2E, 0x9F,
9218 0x9D, 0x31, 0x4F, 0xE9,
9219
9220 0x38, 0x21, 0x2C, 0x9F,
9221 0x9E, 0x39, 0x4F, 0xE9,
9222
9223 0x31, 0x53, 0x2F, 0x9F,
9224 0x80, 0x31, 0x57, 0xE9,
9225
9226 0x39, 0xE5, 0x2C, 0x9F,
9227 0x81, 0x39, 0x57, 0xE9,
9228
9229 0x37, 0x48, 0x50, 0xBD,
9230 0x8A, 0x36, 0x20, 0xE9,
9231
9232 0x86, 0x76, 0x57, 0xE9,
9233 0x8B, 0x3E, 0x20, 0xE9,
9234
9235 0x82, 0x30, 0x57, 0xE9,
9236 0x87, 0x77, 0x57, 0xE9,
9237
9238 0x83, 0x38, 0x57, 0xE9,
9239 0x35, 0x49, 0x51, 0xBD,
9240
9241 0x84, 0x31, 0x5E, 0xE9,
9242 0x30, 0x1F, 0x5F, 0xE9,
9243
9244 0x85, 0x39, 0x5E, 0xE9,
9245 0x57, 0x25, 0x20, 0xE9,
9246
9247 0x2B, 0x48, 0x20, 0xE9,
9248 0x1D, 0x37, 0xE1, 0xEA,
9249
9250 0x1E, 0x35, 0xE1, 0xEA,
9251 0x00, 0xE0,
9252 0x26, 0x77,
9253
9254 0x24, 0x49, 0x20, 0xE9,
9255 0xAB, 0xFF, 0x20, 0xEA,
9256
9257 0x16, 0x26, 0x20, 0xE9,
9258 0x57, 0x2E, 0xBF, 0xEA,
9259
9260 0x1C, 0x46, 0xA0, 0xE8,
9261 0x23, 0x4E, 0xA0, 0xE8,
9262
9263 0x2B, 0x56, 0xA0, 0xE8,
9264 0x1D, 0x47, 0xA0, 0xE8,
9265
9266 0x24, 0x4F, 0xA0, 0xE8,
9267 0x2C, 0x57, 0xA0, 0xE8,
9268
9269 0x1C, 0x00,
9270 0x23, 0x00,
9271 0x2B, 0x00,
9272 0x00, 0xE0,
9273
9274 0x1D, 0x00,
9275 0x24, 0x00,
9276 0x2C, 0x00,
9277 0x00, 0xE0,
9278
9279 0x1C, 0x65,
9280 0x23, 0x65,
9281 0x2B, 0x65,
9282 0x00, 0xE0,
9283
9284 0x1D, 0x65,
9285 0x24, 0x65,
9286 0x2C, 0x65,
9287 0x00, 0xE0,
9288
9289 0x1C, 0x23, 0x60, 0xEC,
9290 0x36, 0xD7, 0x36, 0xAD,
9291
9292 0x2B, 0x80, 0x60, 0xEC,
9293 0x1D, 0x24, 0x60, 0xEC,
9294
9295 0x3E, 0xD7, 0x3E, 0xAD,
9296 0x2C, 0x80, 0x60, 0xEC,
9297
9298 0x1C, 0x2B, 0xDE, 0xE8,
9299 0x23, 0x80, 0xDE, 0xE8,
9300
9301 0x36, 0x80, 0x36, 0xBD,
9302 0x3E, 0x80, 0x3E, 0xBD,
9303
9304 0x33, 0xD7, 0x1C, 0xBD,
9305 0x3B, 0xD7, 0x23, 0xBD,
9306
9307 0x46, 0x80, 0x46, 0xCF,
9308 0x4F, 0x80, 0x4F, 0xCF,
9309
9310 0x56, 0x33, 0x56, 0xCF,
9311 0x47, 0x3B, 0x47, 0xCF,
9312
9313 0xD3, 0xFF, 0x20, 0xEA,
9314 0x00, 0x80, 0x00, 0xE8,
9315
9316 0x4E, 0x33, 0x4E, 0xCF,
9317 0x57, 0x3B, 0x57, 0xCF,
9318
9319 0x99, 0xFF, 0x20, 0xEA,
9320 0x57, 0xC0, 0xBF, 0xEA,
9321
9322 0x00, 0x80, 0xA0, 0xE9,
9323 0x00, 0x00, 0xD8, 0xEC,
9324
9325};
9326
9327static unsigned char warp_g400_tgzaf[] = {
9328
9329 0x00, 0x88, 0x98, 0xE9,
9330 0x00, 0x80, 0x00, 0xE8,
9331
9332 0x00, 0x80, 0xA0, 0xE9,
9333 0x00, 0x00, 0xD8, 0xEC,
9334
9335 0xFF, 0x80, 0xC0, 0xE9,
9336 0x00, 0x80, 0x00, 0xE8,
9337
9338 0x22, 0x40, 0x48, 0xBF,
9339 0x2A, 0x40, 0x50, 0xBF,
9340
9341 0x32, 0x41, 0x49, 0xBF,
9342 0x3A, 0x41, 0x51, 0xBF,
9343
9344 0xC3, 0x6B,
9345 0xCB, 0x6B,
9346 0x00, 0x88, 0x98, 0xE9,
9347
9348 0x73, 0x7B, 0xC8, 0xEC,
9349 0x96, 0xE2,
9350 0x41, 0x04,
9351
9352 0x7B, 0x43, 0xA0, 0xE8,
9353 0x73, 0x4B, 0xA0, 0xE8,
9354
9355 0xAD, 0xEE, 0x29, 0x9F,
9356 0x00, 0xE0,
9357 0x49, 0x04,
9358
9359 0x90, 0xE2,
9360 0x51, 0x04,
9361 0x31, 0x46, 0xB1, 0xE8,
9362
9363 0x49, 0x41, 0xC0, 0xEC,
9364 0x39, 0x57, 0xB1, 0xE8,
9365
9366 0x00, 0x04,
9367 0x46, 0xE2,
9368 0x73, 0x53, 0xA0, 0xE8,
9369
9370 0x51, 0x41, 0xC0, 0xEC,
9371 0x31, 0x00,
9372 0x39, 0x00,
9373
9374 0x61, 0x80, 0x15, 0xEA,
9375 0x08, 0x04,
9376 0x10, 0x04,
9377
9378 0x51, 0x49, 0xC0, 0xEC,
9379 0x2F, 0x41, 0x60, 0xEA,
9380
9381 0x31, 0x20,
9382 0x39, 0x20,
9383 0x1F, 0x42, 0xA0, 0xE8,
9384
9385 0x2A, 0x42, 0x4A, 0xBF,
9386 0x27, 0x4A, 0xA0, 0xE8,
9387
9388 0x1A, 0x42, 0x52, 0xBF,
9389 0x1E, 0x49, 0x60, 0xEA,
9390
9391 0x73, 0x7B, 0xC8, 0xEC,
9392 0x26, 0x51, 0x60, 0xEA,
9393
9394 0x32, 0x40, 0x48, 0xBD,
9395 0x22, 0x40, 0x50, 0xBD,
9396
9397 0x12, 0x41, 0x49, 0xBD,
9398 0x3A, 0x41, 0x51, 0xBD,
9399
9400 0xBF, 0x2F, 0x26, 0xBD,
9401 0x00, 0xE0,
9402 0x7B, 0x72,
9403
9404 0x32, 0x20,
9405 0x22, 0x20,
9406 0x12, 0x20,
9407 0x3A, 0x20,
9408
9409 0x46, 0x31, 0x46, 0xBF,
9410 0x4E, 0x31, 0x4E, 0xBF,
9411
9412 0xB3, 0xE2, 0x2D, 0x9F,
9413 0x00, 0x80, 0x00, 0xE8,
9414
9415 0x56, 0x31, 0x56, 0xBF,
9416 0x47, 0x39, 0x47, 0xBF,
9417
9418 0x4F, 0x39, 0x4F, 0xBF,
9419 0x57, 0x39, 0x57, 0xBF,
9420
9421 0x53, 0x80, 0x07, 0xEA,
9422 0x24, 0x41, 0x20, 0xE9,
9423
9424 0x42, 0x73, 0xF8, 0xEC,
9425 0x00, 0xE0,
9426 0x2D, 0x73,
9427
9428 0x33, 0x72,
9429 0x0C, 0xE3,
9430 0xA5, 0x2F, 0x1E, 0xBD,
9431
9432 0x43, 0x43, 0x2D, 0xDF,
9433 0x4B, 0x4B, 0x2D, 0xDF,
9434
9435 0xAE, 0x1E, 0x26, 0xBD,
9436 0x58, 0xE3,
9437 0x33, 0x66,
9438
9439 0x53, 0x53, 0x2D, 0xDF,
9440 0x00, 0x80, 0x00, 0xE8,
9441
9442 0xB8, 0x38, 0x33, 0xBF,
9443 0x00, 0xE0,
9444 0x59, 0xE3,
9445
9446 0x1E, 0x12, 0x41, 0xE9,
9447 0x1A, 0x22, 0x41, 0xE9,
9448
9449 0x2B, 0x40, 0x3D, 0xE9,
9450 0x3F, 0x4B, 0xA0, 0xE8,
9451
9452 0x2D, 0x73,
9453 0x30, 0x76,
9454 0x05, 0x80, 0x3D, 0xEA,
9455
9456 0x37, 0x43, 0xA0, 0xE8,
9457 0x3D, 0x53, 0xA0, 0xE8,
9458
9459 0x48, 0x70, 0xF8, 0xEC,
9460 0x2B, 0x48, 0x3C, 0xE9,
9461
9462 0x1F, 0x27, 0xBC, 0xE8,
9463 0x00, 0x80, 0x00, 0xE8,
9464
9465 0x00, 0x80, 0x00, 0xE8,
9466 0x00, 0x80, 0x00, 0xE8,
9467
9468 0x15, 0xC0, 0x20, 0xE9,
9469 0x15, 0xC0, 0x20, 0xE9,
9470
9471 0x15, 0xC0, 0x20, 0xE9,
9472 0x15, 0xC0, 0x20, 0xE9,
9473
9474 0x18, 0x3A, 0x41, 0xE9,
9475 0x1D, 0x32, 0x41, 0xE9,
9476
9477 0x2A, 0x40, 0x20, 0xE9,
9478 0x56, 0x3D, 0x56, 0xDF,
9479
9480 0x46, 0x37, 0x46, 0xDF,
9481 0x4E, 0x3F, 0x4E, 0xDF,
9482
9483 0x16, 0x30, 0x20, 0xE9,
9484 0x4F, 0x3F, 0x4F, 0xDF,
9485
9486 0x32, 0x32, 0x2D, 0xDF,
9487 0x22, 0x22, 0x2D, 0xDF,
9488
9489 0x12, 0x12, 0x2D, 0xDF,
9490 0x3A, 0x3A, 0x2D, 0xDF,
9491
9492 0x47, 0x37, 0x47, 0xDF,
9493 0x57, 0x3D, 0x57, 0xDF,
9494
9495 0x3D, 0xCF, 0x74, 0xC0,
9496 0x37, 0xCF, 0x74, 0xC4,
9497
9498 0x0A, 0x44, 0x4C, 0xB0,
9499 0x02, 0x44, 0x54, 0xB0,
9500
9501 0x31, 0x53, 0x2F, 0x9F,
9502 0x34, 0x37, 0x20, 0xE9,
9503
9504 0x39, 0xE5, 0x2C, 0x9F,
9505 0x3C, 0x3D, 0x20, 0xE9,
9506
9507 0x2A, 0x44, 0x4C, 0xB2,
9508 0x1A, 0x44, 0x54, 0xB2,
9509
9510 0x26, 0x80, 0x3A, 0xEA,
9511 0x0A, 0x20,
9512 0x02, 0x20,
9513
9514 0x88, 0x73, 0x5E, 0xE9,
9515 0x2A, 0x20,
9516 0x1A, 0x20,
9517
9518 0x3D, 0xCF, 0x74, 0xC2,
9519 0x27, 0xCF, 0x74, 0xC6,
9520
9521 0x30, 0x50, 0x2E, 0x9F,
9522 0x32, 0x31, 0x5F, 0xE9,
9523
9524 0x38, 0x21, 0x2C, 0x9F,
9525 0x33, 0x39, 0x5F, 0xE9,
9526
9527 0x31, 0x53, 0x2F, 0x9F,
9528 0x9C, 0x27, 0x20, 0xE9,
9529
9530 0x0A, 0x44, 0x4C, 0xB4,
9531 0x02, 0x44, 0x54, 0xB4,
9532
9533 0x2A, 0x44, 0x4C, 0xB6,
9534 0x1A, 0x44, 0x54, 0xB6,
9535
9536 0x39, 0xE5, 0x2C, 0x9F,
9537 0x38, 0x3D, 0x20, 0xE9,
9538
9539 0x0A, 0x20,
9540 0x02, 0x20,
9541 0x2A, 0x20,
9542 0x1A, 0x20,
9543
9544 0x3D, 0xCF, 0x75, 0xC6,
9545 0x00, 0x80, 0x00, 0xE8,
9546
9547 0x30, 0x50, 0x2E, 0x9F,
9548 0x3E, 0x30, 0x4F, 0xE9,
9549
9550 0x38, 0x21, 0x2C, 0x9F,
9551 0x3F, 0x38, 0x4F, 0xE9,
9552
9553 0x0A, 0x45, 0x4D, 0xB6,
9554 0x02, 0x45, 0x55, 0xB6,
9555
9556 0x31, 0x53, 0x2F, 0x9F,
9557 0x3A, 0x31, 0x4F, 0xE9,
9558
9559 0x39, 0xE5, 0x2C, 0x9F,
9560 0x3B, 0x39, 0x4F, 0xE9,
9561
9562 0x31, 0x3D, 0x20, 0xE9,
9563 0x0A, 0x20,
9564 0x02, 0x20,
9565
9566 0x2A, 0x46, 0x4E, 0xBF,
9567 0x1A, 0x46, 0x56, 0xBF,
9568
9569 0x0A, 0x47, 0x4F, 0xBF,
9570 0x02, 0x47, 0x57, 0xBF,
9571
9572 0x30, 0x50, 0x2E, 0x9F,
9573 0x36, 0x30, 0x4F, 0xE9,
9574
9575 0x38, 0x21, 0x2C, 0x9F,
9576 0x37, 0x38, 0x4F, 0xE9,
9577
9578 0x31, 0x53, 0x2F, 0x9F,
9579 0x9D, 0x31, 0x4F, 0xE9,
9580
9581 0x39, 0xE5, 0x2C, 0x9F,
9582 0x9E, 0x39, 0x4F, 0xE9,
9583
9584 0x2A, 0x43, 0x4B, 0xBF,
9585 0x1A, 0x43, 0x53, 0xBF,
9586
9587 0x30, 0x50, 0x2E, 0x9F,
9588 0x35, 0x30, 0x4F, 0xE9,
9589
9590 0x38, 0x21, 0x2C, 0x9F,
9591 0x39, 0x38, 0x4F, 0xE9,
9592
9593 0x31, 0x53, 0x2F, 0x9F,
9594 0x80, 0x31, 0x57, 0xE9,
9595
9596 0x39, 0xE5, 0x2C, 0x9F,
9597 0x81, 0x39, 0x57, 0xE9,
9598
9599 0x37, 0x48, 0x50, 0xBD,
9600 0x8A, 0x36, 0x20, 0xE9,
9601
9602 0x86, 0x76, 0x57, 0xE9,
9603 0x8B, 0x3E, 0x20, 0xE9,
9604
9605 0x82, 0x30, 0x57, 0xE9,
9606 0x87, 0x77, 0x57, 0xE9,
9607
9608 0x83, 0x38, 0x57, 0xE9,
9609 0x35, 0x49, 0x51, 0xBD,
9610
9611 0x84, 0x31, 0x5E, 0xE9,
9612 0x30, 0x1F, 0x5F, 0xE9,
9613
9614 0x85, 0x39, 0x5E, 0xE9,
9615 0x57, 0x25, 0x20, 0xE9,
9616
9617 0x2B, 0x48, 0x20, 0xE9,
9618 0x1D, 0x37, 0xE1, 0xEA,
9619
9620 0x1E, 0x35, 0xE1, 0xEA,
9621 0x00, 0xE0,
9622 0x26, 0x77,
9623
9624 0x24, 0x49, 0x20, 0xE9,
9625 0xA6, 0xFF, 0x20, 0xEA,
9626
9627 0x16, 0x26, 0x20, 0xE9,
9628 0x57, 0x2E, 0xBF, 0xEA,
9629
9630 0x1C, 0x46, 0xA0, 0xE8,
9631 0x23, 0x4E, 0xA0, 0xE8,
9632
9633 0x2B, 0x56, 0xA0, 0xE8,
9634 0x1D, 0x47, 0xA0, 0xE8,
9635
9636 0x24, 0x4F, 0xA0, 0xE8,
9637 0x2C, 0x57, 0xA0, 0xE8,
9638
9639 0x1C, 0x00,
9640 0x23, 0x00,
9641 0x2B, 0x00,
9642 0x00, 0xE0,
9643
9644 0x1D, 0x00,
9645 0x24, 0x00,
9646 0x2C, 0x00,
9647 0x00, 0xE0,
9648
9649 0x1C, 0x65,
9650 0x23, 0x65,
9651 0x2B, 0x65,
9652 0x00, 0xE0,
9653
9654 0x1D, 0x65,
9655 0x24, 0x65,
9656 0x2C, 0x65,
9657 0x00, 0xE0,
9658
9659 0x1C, 0x23, 0x60, 0xEC,
9660 0x36, 0xD7, 0x36, 0xAD,
9661
9662 0x2B, 0x80, 0x60, 0xEC,
9663 0x1D, 0x24, 0x60, 0xEC,
9664
9665 0x3E, 0xD7, 0x3E, 0xAD,
9666 0x2C, 0x80, 0x60, 0xEC,
9667
9668 0x1C, 0x2B, 0xDE, 0xE8,
9669 0x23, 0x80, 0xDE, 0xE8,
9670
9671 0x36, 0x80, 0x36, 0xBD,
9672 0x3E, 0x80, 0x3E, 0xBD,
9673
9674 0x33, 0xD7, 0x1C, 0xBD,
9675 0x3B, 0xD7, 0x23, 0xBD,
9676
9677 0x46, 0x80, 0x46, 0xCF,
9678 0x4F, 0x80, 0x4F, 0xCF,
9679
9680 0x56, 0x33, 0x56, 0xCF,
9681 0x47, 0x3B, 0x47, 0xCF,
9682
9683 0xCD, 0xFF, 0x20, 0xEA,
9684 0x00, 0x80, 0x00, 0xE8,
9685
9686 0x4E, 0x33, 0x4E, 0xCF,
9687 0x57, 0x3B, 0x57, 0xCF,
9688
9689 0x94, 0xFF, 0x20, 0xEA,
9690 0x57, 0xC0, 0xBF, 0xEA,
9691
9692 0x00, 0x80, 0xA0, 0xE9,
9693 0x00, 0x00, 0xD8, 0xEC,
9694
9695};
9696
9697static unsigned char warp_g400_tgzf[] = {
9698
9699 0x00, 0x88, 0x98, 0xE9,
9700 0x00, 0x80, 0x00, 0xE8,
9701
9702 0x00, 0x80, 0xA0, 0xE9,
9703 0x00, 0x00, 0xD8, 0xEC,
9704
9705 0xFF, 0x80, 0xC0, 0xE9,
9706 0x00, 0x80, 0x00, 0xE8,
9707
9708 0x22, 0x40, 0x48, 0xBF,
9709 0x2A, 0x40, 0x50, 0xBF,
9710
9711 0x32, 0x41, 0x49, 0xBF,
9712 0x3A, 0x41, 0x51, 0xBF,
9713
9714 0xC3, 0x6B,
9715 0xCB, 0x6B,
9716 0x00, 0x88, 0x98, 0xE9,
9717
9718 0x73, 0x7B, 0xC8, 0xEC,
9719 0x96, 0xE2,
9720 0x41, 0x04,
9721
9722 0x7B, 0x43, 0xA0, 0xE8,
9723 0x73, 0x4B, 0xA0, 0xE8,
9724
9725 0xAD, 0xEE, 0x29, 0x9F,
9726 0x00, 0xE0,
9727 0x49, 0x04,
9728
9729 0x90, 0xE2,
9730 0x51, 0x04,
9731 0x31, 0x46, 0xB1, 0xE8,
9732
9733 0x49, 0x41, 0xC0, 0xEC,
9734 0x39, 0x57, 0xB1, 0xE8,
9735
9736 0x00, 0x04,
9737 0x46, 0xE2,
9738 0x73, 0x53, 0xA0, 0xE8,
9739
9740 0x51, 0x41, 0xC0, 0xEC,
9741 0x31, 0x00,
9742 0x39, 0x00,
9743
9744 0x5D, 0x80, 0x15, 0xEA,
9745 0x08, 0x04,
9746 0x10, 0x04,
9747
9748 0x51, 0x49, 0xC0, 0xEC,
9749 0x2F, 0x41, 0x60, 0xEA,
9750
9751 0x31, 0x20,
9752 0x39, 0x20,
9753 0x1F, 0x42, 0xA0, 0xE8,
9754
9755 0x2A, 0x42, 0x4A, 0xBF,
9756 0x27, 0x4A, 0xA0, 0xE8,
9757
9758 0x1A, 0x42, 0x52, 0xBF,
9759 0x1E, 0x49, 0x60, 0xEA,
9760
9761 0x73, 0x7B, 0xC8, 0xEC,
9762 0x26, 0x51, 0x60, 0xEA,
9763
9764 0x32, 0x40, 0x48, 0xBD,
9765 0x22, 0x40, 0x50, 0xBD,
9766
9767 0x12, 0x41, 0x49, 0xBD,
9768 0x3A, 0x41, 0x51, 0xBD,
9769
9770 0xBF, 0x2F, 0x26, 0xBD,
9771 0x00, 0xE0,
9772 0x7B, 0x72,
9773
9774 0x32, 0x20,
9775 0x22, 0x20,
9776 0x12, 0x20,
9777 0x3A, 0x20,
9778
9779 0x46, 0x31, 0x46, 0xBF,
9780 0x4E, 0x31, 0x4E, 0xBF,
9781
9782 0xB3, 0xE2, 0x2D, 0x9F,
9783 0x00, 0x80, 0x00, 0xE8,
9784
9785 0x56, 0x31, 0x56, 0xBF,
9786 0x47, 0x39, 0x47, 0xBF,
9787
9788 0x4F, 0x39, 0x4F, 0xBF,
9789 0x57, 0x39, 0x57, 0xBF,
9790
9791 0x4F, 0x80, 0x07, 0xEA,
9792 0x24, 0x41, 0x20, 0xE9,
9793
9794 0x42, 0x73, 0xF8, 0xEC,
9795 0x00, 0xE0,
9796 0x2D, 0x73,
9797
9798 0x33, 0x72,
9799 0x0C, 0xE3,
9800 0xA5, 0x2F, 0x1E, 0xBD,
9801
9802 0x43, 0x43, 0x2D, 0xDF,
9803 0x4B, 0x4B, 0x2D, 0xDF,
9804
9805 0xAE, 0x1E, 0x26, 0xBD,
9806 0x58, 0xE3,
9807 0x33, 0x66,
9808
9809 0x53, 0x53, 0x2D, 0xDF,
9810 0x00, 0x80, 0x00, 0xE8,
9811
9812 0xB8, 0x38, 0x33, 0xBF,
9813 0x00, 0xE0,
9814 0x59, 0xE3,
9815
9816 0x1E, 0x12, 0x41, 0xE9,
9817 0x1A, 0x22, 0x41, 0xE9,
9818
9819 0x2B, 0x40, 0x3D, 0xE9,
9820 0x3F, 0x4B, 0xA0, 0xE8,
9821
9822 0x2D, 0x73,
9823 0x30, 0x76,
9824 0x05, 0x80, 0x3D, 0xEA,
9825
9826 0x37, 0x43, 0xA0, 0xE8,
9827 0x3D, 0x53, 0xA0, 0xE8,
9828
9829 0x48, 0x70, 0xF8, 0xEC,
9830 0x2B, 0x48, 0x3C, 0xE9,
9831
9832 0x1F, 0x27, 0xBC, 0xE8,
9833 0x00, 0x80, 0x00, 0xE8,
9834
9835 0x00, 0x80, 0x00, 0xE8,
9836 0x00, 0x80, 0x00, 0xE8,
9837
9838 0x15, 0xC0, 0x20, 0xE9,
9839 0x15, 0xC0, 0x20, 0xE9,
9840
9841 0x15, 0xC0, 0x20, 0xE9,
9842 0x15, 0xC0, 0x20, 0xE9,
9843
9844 0x18, 0x3A, 0x41, 0xE9,
9845 0x1D, 0x32, 0x41, 0xE9,
9846
9847 0x2A, 0x40, 0x20, 0xE9,
9848 0x56, 0x3D, 0x56, 0xDF,
9849
9850 0x46, 0x37, 0x46, 0xDF,
9851 0x4E, 0x3F, 0x4E, 0xDF,
9852
9853 0x16, 0x30, 0x20, 0xE9,
9854 0x4F, 0x3F, 0x4F, 0xDF,
9855
9856 0x32, 0x32, 0x2D, 0xDF,
9857 0x22, 0x22, 0x2D, 0xDF,
9858
9859 0x12, 0x12, 0x2D, 0xDF,
9860 0x3A, 0x3A, 0x2D, 0xDF,
9861
9862 0x47, 0x37, 0x47, 0xDF,
9863 0x57, 0x3D, 0x57, 0xDF,
9864
9865 0x3D, 0xCF, 0x74, 0xC0,
9866 0x37, 0xCF, 0x74, 0xC4,
9867
9868 0x39, 0xE5, 0x2C, 0x9F,
9869 0x34, 0x80, 0x20, 0xE9,
9870
9871 0x31, 0x53, 0x2F, 0x9F,
9872 0x00, 0x80, 0x00, 0xE8,
9873
9874 0x88, 0x73, 0x5E, 0xE9,
9875 0x00, 0x80, 0x00, 0xE8,
9876
9877 0x27, 0xCF, 0x75, 0xC6,
9878 0x3C, 0x3D, 0x20, 0xE9,
9879
9880 0x0A, 0x44, 0x4C, 0xB0,
9881 0x02, 0x44, 0x54, 0xB0,
9882
9883 0x2A, 0x44, 0x4C, 0xB2,
9884 0x1A, 0x44, 0x54, 0xB2,
9885
9886 0x20, 0x80, 0x3A, 0xEA,
9887 0x0A, 0x20,
9888 0x02, 0x20,
9889
9890 0x3D, 0xCF, 0x74, 0xC2,
9891 0x2A, 0x20,
9892 0x1A, 0x20,
9893
9894 0x30, 0x50, 0x2E, 0x9F,
9895 0x32, 0x31, 0x5F, 0xE9,
9896
9897 0x38, 0x21, 0x2C, 0x9F,
9898 0x33, 0x39, 0x5F, 0xE9,
9899
9900 0x31, 0x53, 0x2F, 0x9F,
9901 0x31, 0x27, 0x20, 0xE9,
9902
9903 0x0A, 0x44, 0x4C, 0xB4,
9904 0x02, 0x44, 0x54, 0xB4,
9905
9906 0x2A, 0x45, 0x4D, 0xB6,
9907 0x1A, 0x45, 0x55, 0xB6,
9908
9909 0x39, 0xE5, 0x2C, 0x9F,
9910 0x38, 0x3D, 0x20, 0xE9,
9911
9912 0x0A, 0x20,
9913 0x02, 0x20,
9914 0x2A, 0x20,
9915 0x1A, 0x20,
9916
9917 0x0A, 0x47, 0x4F, 0xBF,
9918 0x02, 0x47, 0x57, 0xBF,
9919
9920 0x30, 0x50, 0x2E, 0x9F,
9921 0x3E, 0x30, 0x4F, 0xE9,
9922
9923 0x38, 0x21, 0x2C, 0x9F,
9924 0x3F, 0x38, 0x4F, 0xE9,
9925
9926 0x2A, 0x46, 0x4E, 0xBF,
9927 0x1A, 0x46, 0x56, 0xBF,
9928
9929 0x31, 0x53, 0x2F, 0x9F,
9930 0x3A, 0x31, 0x4F, 0xE9,
9931
9932 0x39, 0xE5, 0x2C, 0x9F,
9933 0x3B, 0x39, 0x4F, 0xE9,
9934
9935 0x31, 0x53, 0x2F, 0x9F,
9936 0x36, 0x30, 0x4F, 0xE9,
9937
9938 0x39, 0xE5, 0x2C, 0x9F,
9939 0x37, 0x38, 0x4F, 0xE9,
9940
9941 0x2A, 0x43, 0x4B, 0xBF,
9942 0x1A, 0x43, 0x53, 0xBF,
9943
9944 0x30, 0x50, 0x2E, 0x9F,
9945 0x35, 0x31, 0x4F, 0xE9,
9946
9947 0x38, 0x21, 0x2C, 0x9F,
9948 0x39, 0x39, 0x4F, 0xE9,
9949
9950 0x31, 0x53, 0x2F, 0x9F,
9951 0x80, 0x31, 0x57, 0xE9,
9952
9953 0x39, 0xE5, 0x2C, 0x9F,
9954 0x81, 0x39, 0x57, 0xE9,
9955
9956 0x37, 0x48, 0x50, 0xBD,
9957 0x8A, 0x36, 0x20, 0xE9,
9958
9959 0x86, 0x76, 0x57, 0xE9,
9960 0x8B, 0x3E, 0x20, 0xE9,
9961
9962 0x82, 0x30, 0x57, 0xE9,
9963 0x87, 0x77, 0x57, 0xE9,
9964
9965 0x83, 0x38, 0x57, 0xE9,
9966 0x35, 0x49, 0x51, 0xBD,
9967
9968 0x84, 0x31, 0x5E, 0xE9,
9969 0x30, 0x1F, 0x5F, 0xE9,
9970
9971 0x85, 0x39, 0x5E, 0xE9,
9972 0x57, 0x25, 0x20, 0xE9,
9973
9974 0x2B, 0x48, 0x20, 0xE9,
9975 0x1D, 0x37, 0xE1, 0xEA,
9976
9977 0x1E, 0x35, 0xE1, 0xEA,
9978 0x00, 0xE0,
9979 0x26, 0x77,
9980
9981 0x24, 0x49, 0x20, 0xE9,
9982 0xAA, 0xFF, 0x20, 0xEA,
9983
9984 0x16, 0x26, 0x20, 0xE9,
9985 0x57, 0x2E, 0xBF, 0xEA,
9986
9987 0x1C, 0x46, 0xA0, 0xE8,
9988 0x23, 0x4E, 0xA0, 0xE8,
9989
9990 0x2B, 0x56, 0xA0, 0xE8,
9991 0x1D, 0x47, 0xA0, 0xE8,
9992
9993 0x24, 0x4F, 0xA0, 0xE8,
9994 0x2C, 0x57, 0xA0, 0xE8,
9995
9996 0x1C, 0x00,
9997 0x23, 0x00,
9998 0x2B, 0x00,
9999 0x00, 0xE0,
10000
10001 0x1D, 0x00,
10002 0x24, 0x00,
10003 0x2C, 0x00,
10004 0x00, 0xE0,
10005
10006 0x1C, 0x65,
10007 0x23, 0x65,
10008 0x2B, 0x65,
10009 0x00, 0xE0,
10010
10011 0x1D, 0x65,
10012 0x24, 0x65,
10013 0x2C, 0x65,
10014 0x00, 0xE0,
10015
10016 0x1C, 0x23, 0x60, 0xEC,
10017 0x36, 0xD7, 0x36, 0xAD,
10018
10019 0x2B, 0x80, 0x60, 0xEC,
10020 0x1D, 0x24, 0x60, 0xEC,
10021
10022 0x3E, 0xD7, 0x3E, 0xAD,
10023 0x2C, 0x80, 0x60, 0xEC,
10024
10025 0x1C, 0x2B, 0xDE, 0xE8,
10026 0x23, 0x80, 0xDE, 0xE8,
10027
10028 0x36, 0x80, 0x36, 0xBD,
10029 0x3E, 0x80, 0x3E, 0xBD,
10030
10031 0x33, 0xD7, 0x1C, 0xBD,
10032 0x3B, 0xD7, 0x23, 0xBD,
10033
10034 0x46, 0x80, 0x46, 0xCF,
10035 0x4F, 0x80, 0x4F, 0xCF,
10036
10037 0x56, 0x33, 0x56, 0xCF,
10038 0x47, 0x3B, 0x47, 0xCF,
10039
10040 0xD3, 0xFF, 0x20, 0xEA,
10041 0x00, 0x80, 0x00, 0xE8,
10042
10043 0x4E, 0x33, 0x4E, 0xCF,
10044 0x57, 0x3B, 0x57, 0xCF,
10045
10046 0x98, 0xFF, 0x20, 0xEA,
10047 0x57, 0xC0, 0xBF, 0xEA,
10048
10049 0x00, 0x80, 0xA0, 0xE9,
10050 0x00, 0x00, 0xD8, 0xEC,
10051
10052};
10053
10054static unsigned char warp_g400_tgzs[] = {
10055
10056 0x00, 0x88, 0x98, 0xE9,
10057 0x00, 0x80, 0x00, 0xE8,
10058
10059 0x00, 0x80, 0xA0, 0xE9,
10060 0x00, 0x00, 0xD8, 0xEC,
10061
10062 0xFF, 0x80, 0xC0, 0xE9,
10063 0x00, 0x80, 0x00, 0xE8,
10064
10065 0x22, 0x40, 0x48, 0xBF,
10066 0x2A, 0x40, 0x50, 0xBF,
10067
10068 0x32, 0x41, 0x49, 0xBF,
10069 0x3A, 0x41, 0x51, 0xBF,
10070
10071 0xC3, 0x6B,
10072 0xCB, 0x6B,
10073 0x00, 0x88, 0x98, 0xE9,
10074
10075 0x73, 0x7B, 0xC8, 0xEC,
10076 0x96, 0xE2,
10077 0x41, 0x04,
10078
10079 0x7B, 0x43, 0xA0, 0xE8,
10080 0x73, 0x4B, 0xA0, 0xE8,
10081
10082 0xAD, 0xEE, 0x29, 0x9F,
10083 0x00, 0xE0,
10084 0x49, 0x04,
10085
10086 0x90, 0xE2,
10087 0x51, 0x04,
10088 0x31, 0x46, 0xB1, 0xE8,
10089
10090 0x49, 0x41, 0xC0, 0xEC,
10091 0x39, 0x57, 0xB1, 0xE8,
10092
10093 0x00, 0x04,
10094 0x46, 0xE2,
10095 0x73, 0x53, 0xA0, 0xE8,
10096
10097 0x51, 0x41, 0xC0, 0xEC,
10098 0x31, 0x00,
10099 0x39, 0x00,
10100
10101 0x65, 0x80, 0x15, 0xEA,
10102 0x08, 0x04,
10103 0x10, 0x04,
10104
10105 0x51, 0x49, 0xC0, 0xEC,
10106 0x2F, 0x41, 0x60, 0xEA,
10107
10108 0x31, 0x20,
10109 0x39, 0x20,
10110 0x1F, 0x42, 0xA0, 0xE8,
10111
10112 0x2A, 0x42, 0x4A, 0xBF,
10113 0x27, 0x4A, 0xA0, 0xE8,
10114
10115 0x1A, 0x42, 0x52, 0xBF,
10116 0x1E, 0x49, 0x60, 0xEA,
10117
10118 0x73, 0x7B, 0xC8, 0xEC,
10119 0x26, 0x51, 0x60, 0xEA,
10120
10121 0x32, 0x40, 0x48, 0xBD,
10122 0x22, 0x40, 0x50, 0xBD,
10123
10124 0x12, 0x41, 0x49, 0xBD,
10125 0x3A, 0x41, 0x51, 0xBD,
10126
10127 0xBF, 0x2F, 0x26, 0xBD,
10128 0x00, 0xE0,
10129 0x7B, 0x72,
10130
10131 0x32, 0x20,
10132 0x22, 0x20,
10133 0x12, 0x20,
10134 0x3A, 0x20,
10135
10136 0x46, 0x31, 0x46, 0xBF,
10137 0x4E, 0x31, 0x4E, 0xBF,
10138
10139 0xB3, 0xE2, 0x2D, 0x9F,
10140 0x00, 0x80, 0x00, 0xE8,
10141
10142 0x56, 0x31, 0x56, 0xBF,
10143 0x47, 0x39, 0x47, 0xBF,
10144
10145 0x4F, 0x39, 0x4F, 0xBF,
10146 0x57, 0x39, 0x57, 0xBF,
10147
10148 0x57, 0x80, 0x07, 0xEA,
10149 0x24, 0x41, 0x20, 0xE9,
10150
10151 0x42, 0x73, 0xF8, 0xEC,
10152 0x00, 0xE0,
10153 0x2D, 0x73,
10154
10155 0x33, 0x72,
10156 0x0C, 0xE3,
10157 0xA5, 0x2F, 0x1E, 0xBD,
10158
10159 0x43, 0x43, 0x2D, 0xDF,
10160 0x4B, 0x4B, 0x2D, 0xDF,
10161
10162 0xAE, 0x1E, 0x26, 0xBD,
10163 0x58, 0xE3,
10164 0x33, 0x66,
10165
10166 0x53, 0x53, 0x2D, 0xDF,
10167 0x00, 0x80, 0x00, 0xE8,
10168
10169 0xB8, 0x38, 0x33, 0xBF,
10170 0x00, 0xE0,
10171 0x59, 0xE3,
10172
10173 0x1E, 0x12, 0x41, 0xE9,
10174 0x1A, 0x22, 0x41, 0xE9,
10175
10176 0x2B, 0x40, 0x3D, 0xE9,
10177 0x3F, 0x4B, 0xA0, 0xE8,
10178
10179 0x2D, 0x73,
10180 0x30, 0x76,
10181 0x05, 0x80, 0x3D, 0xEA,
10182
10183 0x37, 0x43, 0xA0, 0xE8,
10184 0x3D, 0x53, 0xA0, 0xE8,
10185
10186 0x48, 0x70, 0xF8, 0xEC,
10187 0x2B, 0x48, 0x3C, 0xE9,
10188
10189 0x1F, 0x27, 0xBC, 0xE8,
10190 0x00, 0x80, 0x00, 0xE8,
10191
10192 0x00, 0x80, 0x00, 0xE8,
10193 0x00, 0x80, 0x00, 0xE8,
10194
10195 0x15, 0xC0, 0x20, 0xE9,
10196 0x15, 0xC0, 0x20, 0xE9,
10197
10198 0x15, 0xC0, 0x20, 0xE9,
10199 0x15, 0xC0, 0x20, 0xE9,
10200
10201 0x18, 0x3A, 0x41, 0xE9,
10202 0x1D, 0x32, 0x41, 0xE9,
10203
10204 0x2A, 0x40, 0x20, 0xE9,
10205 0x56, 0x3D, 0x56, 0xDF,
10206
10207 0x46, 0x37, 0x46, 0xDF,
10208 0x4E, 0x3F, 0x4E, 0xDF,
10209
10210 0x16, 0x30, 0x20, 0xE9,
10211 0x4F, 0x3F, 0x4F, 0xDF,
10212
10213 0x47, 0x37, 0x47, 0xDF,
10214 0x57, 0x3D, 0x57, 0xDF,
10215
10216 0x32, 0x32, 0x2D, 0xDF,
10217 0x22, 0x22, 0x2D, 0xDF,
10218
10219 0x12, 0x12, 0x2D, 0xDF,
10220 0x3A, 0x3A, 0x2D, 0xDF,
10221
10222 0x27, 0xCF, 0x74, 0xC2,
10223 0x37, 0xCF, 0x74, 0xC4,
10224
10225 0x0A, 0x44, 0x4C, 0xB0,
10226 0x02, 0x44, 0x54, 0xB0,
10227
10228 0x3D, 0xCF, 0x74, 0xC0,
10229 0x34, 0x37, 0x20, 0xE9,
10230
10231 0x31, 0x53, 0x2F, 0x9F,
10232 0x38, 0x27, 0x20, 0xE9,
10233
10234 0x39, 0xE5, 0x2C, 0x9F,
10235 0x3C, 0x3D, 0x20, 0xE9,
10236
10237 0x2A, 0x44, 0x4C, 0xB2,
10238 0x1A, 0x44, 0x54, 0xB2,
10239
10240 0x29, 0x80, 0x3A, 0xEA,
10241 0x0A, 0x20,
10242 0x02, 0x20,
10243
10244 0x27, 0xCF, 0x75, 0xC0,
10245 0x2A, 0x20,
10246 0x1A, 0x20,
10247
10248 0x30, 0x50, 0x2E, 0x9F,
10249 0x32, 0x31, 0x5F, 0xE9,
10250
10251 0x38, 0x21, 0x2C, 0x9F,
10252 0x33, 0x39, 0x5F, 0xE9,
10253
10254 0x3D, 0xCF, 0x75, 0xC2,
10255 0x37, 0xCF, 0x75, 0xC4,
10256
10257 0x31, 0x53, 0x2F, 0x9F,
10258 0xA6, 0x27, 0x20, 0xE9,
10259
10260 0x39, 0xE5, 0x2C, 0x9F,
10261 0xA3, 0x3D, 0x20, 0xE9,
10262
10263 0x2A, 0x44, 0x4C, 0xB4,
10264 0x1A, 0x44, 0x54, 0xB4,
10265
10266 0x0A, 0x45, 0x4D, 0xB0,
10267 0x02, 0x45, 0x55, 0xB0,
10268
10269 0x88, 0x73, 0x5E, 0xE9,
10270 0x2A, 0x20,
10271 0x1A, 0x20,
10272
10273 0xA0, 0x37, 0x20, 0xE9,
10274 0x0A, 0x20,
10275 0x02, 0x20,
10276
10277 0x31, 0x53, 0x2F, 0x9F,
10278 0x3E, 0x30, 0x4F, 0xE9,
10279
10280 0x39, 0xE5, 0x2C, 0x9F,
10281 0x3F, 0x38, 0x4F, 0xE9,
10282
10283 0x30, 0x50, 0x2E, 0x9F,
10284 0x3A, 0x31, 0x4F, 0xE9,
10285
10286 0x2A, 0x45, 0x4D, 0xB2,
10287 0x1A, 0x45, 0x55, 0xB2,
10288
10289 0x0A, 0x45, 0x4D, 0xB4,
10290 0x02, 0x45, 0x55, 0xB4,
10291
10292 0x38, 0x21, 0x2C, 0x9F,
10293 0x3B, 0x39, 0x4F, 0xE9,
10294
10295 0x0A, 0x20,
10296 0x02, 0x20,
10297 0x2A, 0x20,
10298 0x1A, 0x20,
10299
10300 0x2A, 0x46, 0x4E, 0xBF,
10301 0x1A, 0x46, 0x56, 0xBF,
10302
10303 0x31, 0x53, 0x2F, 0x9F,
10304 0x36, 0x31, 0x4F, 0xE9,
10305
10306 0x39, 0xE5, 0x2C, 0x9F,
10307 0x37, 0x39, 0x4F, 0xE9,
10308
10309 0x30, 0x50, 0x2E, 0x9F,
10310 0xA7, 0x30, 0x4F, 0xE9,
10311
10312 0x38, 0x21, 0x2C, 0x9F,
10313 0xA8, 0x38, 0x4F, 0xE9,
10314
10315 0x0A, 0x47, 0x4F, 0xBF,
10316 0x02, 0x47, 0x57, 0xBF,
10317
10318 0x31, 0x53, 0x2F, 0x9F,
10319 0xA4, 0x31, 0x4F, 0xE9,
10320
10321 0x39, 0xE5, 0x2C, 0x9F,
10322 0xA5, 0x39, 0x4F, 0xE9,
10323
10324 0x2A, 0x43, 0x4B, 0xBF,
10325 0x1A, 0x43, 0x53, 0xBF,
10326
10327 0x30, 0x50, 0x2E, 0x9F,
10328 0xA1, 0x30, 0x4F, 0xE9,
10329
10330 0x38, 0x21, 0x2C, 0x9F,
10331 0xA2, 0x38, 0x4F, 0xE9,
10332
10333 0x31, 0x53, 0x2F, 0x9F,
10334 0x80, 0x31, 0x57, 0xE9,
10335
10336 0x39, 0xE5, 0x2C, 0x9F,
10337 0x81, 0x39, 0x57, 0xE9,
10338
10339 0x37, 0x48, 0x50, 0xBD,
10340 0x8A, 0x36, 0x20, 0xE9,
10341
10342 0x86, 0x76, 0x57, 0xE9,
10343 0x8B, 0x3E, 0x20, 0xE9,
10344
10345 0x82, 0x30, 0x57, 0xE9,
10346 0x87, 0x77, 0x57, 0xE9,
10347
10348 0x83, 0x38, 0x57, 0xE9,
10349 0x35, 0x49, 0x51, 0xBD,
10350
10351 0x84, 0x31, 0x5E, 0xE9,
10352 0x30, 0x1F, 0x5F, 0xE9,
10353
10354 0x85, 0x39, 0x5E, 0xE9,
10355 0x57, 0x25, 0x20, 0xE9,
10356
10357 0x2B, 0x48, 0x20, 0xE9,
10358 0x1D, 0x37, 0xE1, 0xEA,
10359
10360 0x1E, 0x35, 0xE1, 0xEA,
10361 0x00, 0xE0,
10362 0x26, 0x77,
10363
10364 0x24, 0x49, 0x20, 0xE9,
10365 0xA2, 0xFF, 0x20, 0xEA,
10366
10367 0x16, 0x26, 0x20, 0xE9,
10368 0x57, 0x2E, 0xBF, 0xEA,
10369
10370 0x1C, 0x46, 0xA0, 0xE8,
10371 0x23, 0x4E, 0xA0, 0xE8,
10372
10373 0x2B, 0x56, 0xA0, 0xE8,
10374 0x1D, 0x47, 0xA0, 0xE8,
10375
10376 0x24, 0x4F, 0xA0, 0xE8,
10377 0x2C, 0x57, 0xA0, 0xE8,
10378
10379 0x1C, 0x00,
10380 0x23, 0x00,
10381 0x2B, 0x00,
10382 0x00, 0xE0,
10383
10384 0x1D, 0x00,
10385 0x24, 0x00,
10386 0x2C, 0x00,
10387 0x00, 0xE0,
10388
10389 0x1C, 0x65,
10390 0x23, 0x65,
10391 0x2B, 0x65,
10392 0x00, 0xE0,
10393
10394 0x1D, 0x65,
10395 0x24, 0x65,
10396 0x2C, 0x65,
10397 0x00, 0xE0,
10398
10399 0x1C, 0x23, 0x60, 0xEC,
10400 0x36, 0xD7, 0x36, 0xAD,
10401
10402 0x2B, 0x80, 0x60, 0xEC,
10403 0x1D, 0x24, 0x60, 0xEC,
10404
10405 0x3E, 0xD7, 0x3E, 0xAD,
10406 0x2C, 0x80, 0x60, 0xEC,
10407
10408 0x1C, 0x2B, 0xDE, 0xE8,
10409 0x23, 0x80, 0xDE, 0xE8,
10410
10411 0x36, 0x80, 0x36, 0xBD,
10412 0x3E, 0x80, 0x3E, 0xBD,
10413
10414 0x33, 0xD7, 0x1C, 0xBD,
10415 0x3B, 0xD7, 0x23, 0xBD,
10416
10417 0x46, 0x80, 0x46, 0xCF,
10418 0x4F, 0x80, 0x4F, 0xCF,
10419
10420 0x56, 0x33, 0x56, 0xCF,
10421 0x47, 0x3B, 0x47, 0xCF,
10422
10423 0xCA, 0xFF, 0x20, 0xEA,
10424 0x00, 0x80, 0x00, 0xE8,
10425
10426 0x4E, 0x33, 0x4E, 0xCF,
10427 0x57, 0x3B, 0x57, 0xCF,
10428
10429 0x90, 0xFF, 0x20, 0xEA,
10430 0x57, 0xC0, 0xBF, 0xEA,
10431
10432 0x00, 0x80, 0xA0, 0xE9,
10433 0x00, 0x00, 0xD8, 0xEC,
10434
10435};
10436
10437static unsigned char warp_g400_tgzsa[] = {
10438
10439 0x00, 0x88, 0x98, 0xE9,
10440 0x00, 0x80, 0x00, 0xE8,
10441
10442 0x00, 0x80, 0xA0, 0xE9,
10443 0x00, 0x00, 0xD8, 0xEC,
10444
10445 0xFF, 0x80, 0xC0, 0xE9,
10446 0x00, 0x80, 0x00, 0xE8,
10447
10448 0x22, 0x40, 0x48, 0xBF,
10449 0x2A, 0x40, 0x50, 0xBF,
10450
10451 0x32, 0x41, 0x49, 0xBF,
10452 0x3A, 0x41, 0x51, 0xBF,
10453
10454 0xC3, 0x6B,
10455 0xCB, 0x6B,
10456 0x00, 0x88, 0x98, 0xE9,
10457
10458 0x73, 0x7B, 0xC8, 0xEC,
10459 0x96, 0xE2,
10460 0x41, 0x04,
10461
10462 0x7B, 0x43, 0xA0, 0xE8,
10463 0x73, 0x4B, 0xA0, 0xE8,
10464
10465 0xAD, 0xEE, 0x29, 0x9F,
10466 0x00, 0xE0,
10467 0x49, 0x04,
10468
10469 0x90, 0xE2,
10470 0x51, 0x04,
10471 0x31, 0x46, 0xB1, 0xE8,
10472
10473 0x49, 0x41, 0xC0, 0xEC,
10474 0x39, 0x57, 0xB1, 0xE8,
10475
10476 0x00, 0x04,
10477 0x46, 0xE2,
10478 0x73, 0x53, 0xA0, 0xE8,
10479
10480 0x51, 0x41, 0xC0, 0xEC,
10481 0x31, 0x00,
10482 0x39, 0x00,
10483
10484 0x6A, 0x80, 0x15, 0xEA,
10485 0x08, 0x04,
10486 0x10, 0x04,
10487
10488 0x51, 0x49, 0xC0, 0xEC,
10489 0x2F, 0x41, 0x60, 0xEA,
10490
10491 0x31, 0x20,
10492 0x39, 0x20,
10493 0x1F, 0x42, 0xA0, 0xE8,
10494
10495 0x2A, 0x42, 0x4A, 0xBF,
10496 0x27, 0x4A, 0xA0, 0xE8,
10497
10498 0x1A, 0x42, 0x52, 0xBF,
10499 0x1E, 0x49, 0x60, 0xEA,
10500
10501 0x73, 0x7B, 0xC8, 0xEC,
10502 0x26, 0x51, 0x60, 0xEA,
10503
10504 0x32, 0x40, 0x48, 0xBD,
10505 0x22, 0x40, 0x50, 0xBD,
10506
10507 0x12, 0x41, 0x49, 0xBD,
10508 0x3A, 0x41, 0x51, 0xBD,
10509
10510 0xBF, 0x2F, 0x26, 0xBD,
10511 0x00, 0xE0,
10512 0x7B, 0x72,
10513
10514 0x32, 0x20,
10515 0x22, 0x20,
10516 0x12, 0x20,
10517 0x3A, 0x20,
10518
10519 0x46, 0x31, 0x46, 0xBF,
10520 0x4E, 0x31, 0x4E, 0xBF,
10521
10522 0xB3, 0xE2, 0x2D, 0x9F,
10523 0x00, 0x80, 0x00, 0xE8,
10524
10525 0x56, 0x31, 0x56, 0xBF,
10526 0x47, 0x39, 0x47, 0xBF,
10527
10528 0x4F, 0x39, 0x4F, 0xBF,
10529 0x57, 0x39, 0x57, 0xBF,
10530
10531 0x5C, 0x80, 0x07, 0xEA,
10532 0x24, 0x41, 0x20, 0xE9,
10533
10534 0x42, 0x73, 0xF8, 0xEC,
10535 0x00, 0xE0,
10536 0x2D, 0x73,
10537
10538 0x33, 0x72,
10539 0x0C, 0xE3,
10540 0xA5, 0x2F, 0x1E, 0xBD,
10541
10542 0x43, 0x43, 0x2D, 0xDF,
10543 0x4B, 0x4B, 0x2D, 0xDF,
10544
10545 0xAE, 0x1E, 0x26, 0xBD,
10546 0x58, 0xE3,
10547 0x33, 0x66,
10548
10549 0x53, 0x53, 0x2D, 0xDF,
10550 0x00, 0x80, 0x00, 0xE8,
10551
10552 0xB8, 0x38, 0x33, 0xBF,
10553 0x00, 0xE0,
10554 0x59, 0xE3,
10555
10556 0x1E, 0x12, 0x41, 0xE9,
10557 0x1A, 0x22, 0x41, 0xE9,
10558
10559 0x2B, 0x40, 0x3D, 0xE9,
10560 0x3F, 0x4B, 0xA0, 0xE8,
10561
10562 0x2D, 0x73,
10563 0x30, 0x76,
10564 0x05, 0x80, 0x3D, 0xEA,
10565
10566 0x37, 0x43, 0xA0, 0xE8,
10567 0x3D, 0x53, 0xA0, 0xE8,
10568
10569 0x48, 0x70, 0xF8, 0xEC,
10570 0x2B, 0x48, 0x3C, 0xE9,
10571
10572 0x1F, 0x27, 0xBC, 0xE8,
10573 0x00, 0x80, 0x00, 0xE8,
10574
10575 0x00, 0x80, 0x00, 0xE8,
10576 0x00, 0x80, 0x00, 0xE8,
10577
10578 0x15, 0xC0, 0x20, 0xE9,
10579 0x15, 0xC0, 0x20, 0xE9,
10580
10581 0x15, 0xC0, 0x20, 0xE9,
10582 0x15, 0xC0, 0x20, 0xE9,
10583
10584 0x18, 0x3A, 0x41, 0xE9,
10585 0x1D, 0x32, 0x41, 0xE9,
10586
10587 0x2A, 0x40, 0x20, 0xE9,
10588 0x56, 0x3D, 0x56, 0xDF,
10589
10590 0x46, 0x37, 0x46, 0xDF,
10591 0x4E, 0x3F, 0x4E, 0xDF,
10592
10593 0x16, 0x30, 0x20, 0xE9,
10594 0x4F, 0x3F, 0x4F, 0xDF,
10595
10596 0x47, 0x37, 0x47, 0xDF,
10597 0x57, 0x3D, 0x57, 0xDF,
10598
10599 0x32, 0x32, 0x2D, 0xDF,
10600 0x22, 0x22, 0x2D, 0xDF,
10601
10602 0x12, 0x12, 0x2D, 0xDF,
10603 0x3A, 0x3A, 0x2D, 0xDF,
10604
10605 0x27, 0xCF, 0x74, 0xC2,
10606 0x37, 0xCF, 0x74, 0xC4,
10607
10608 0x0A, 0x44, 0x4C, 0xB0,
10609 0x02, 0x44, 0x54, 0xB0,
10610
10611 0x3D, 0xCF, 0x74, 0xC0,
10612 0x34, 0x37, 0x20, 0xE9,
10613
10614 0x31, 0x53, 0x2F, 0x9F,
10615 0x38, 0x27, 0x20, 0xE9,
10616
10617 0x39, 0xE5, 0x2C, 0x9F,
10618 0x3C, 0x3D, 0x20, 0xE9,
10619
10620 0x2A, 0x44, 0x4C, 0xB2,
10621 0x1A, 0x44, 0x54, 0xB2,
10622
10623 0x2E, 0x80, 0x3A, 0xEA,
10624 0x0A, 0x20,
10625 0x02, 0x20,
10626
10627 0x27, 0xCF, 0x75, 0xC0,
10628 0x2A, 0x20,
10629 0x1A, 0x20,
10630
10631 0x30, 0x50, 0x2E, 0x9F,
10632 0x32, 0x31, 0x5F, 0xE9,
10633
10634 0x38, 0x21, 0x2C, 0x9F,
10635 0x33, 0x39, 0x5F, 0xE9,
10636
10637 0x3D, 0xCF, 0x75, 0xC2,
10638 0x37, 0xCF, 0x75, 0xC4,
10639
10640 0x31, 0x53, 0x2F, 0x9F,
10641 0xA6, 0x27, 0x20, 0xE9,
10642
10643 0x39, 0xE5, 0x2C, 0x9F,
10644 0xA3, 0x3D, 0x20, 0xE9,
10645
10646 0x2A, 0x44, 0x4C, 0xB4,
10647 0x1A, 0x44, 0x54, 0xB4,
10648
10649 0x0A, 0x45, 0x4D, 0xB0,
10650 0x02, 0x45, 0x55, 0xB0,
10651
10652 0x88, 0x73, 0x5E, 0xE9,
10653 0x2A, 0x20,
10654 0x1A, 0x20,
10655
10656 0xA0, 0x37, 0x20, 0xE9,
10657 0x0A, 0x20,
10658 0x02, 0x20,
10659
10660 0x31, 0x53, 0x2F, 0x9F,
10661 0x3E, 0x30, 0x4F, 0xE9,
10662
10663 0x39, 0xE5, 0x2C, 0x9F,
10664 0x3F, 0x38, 0x4F, 0xE9,
10665
10666 0x30, 0x50, 0x2E, 0x9F,
10667 0x3A, 0x31, 0x4F, 0xE9,
10668
10669 0x38, 0x21, 0x2C, 0x9F,
10670 0x3B, 0x39, 0x4F, 0xE9,
10671
10672 0x2A, 0x45, 0x4D, 0xB2,
10673 0x1A, 0x45, 0x55, 0xB2,
10674
10675 0x0A, 0x45, 0x4D, 0xB4,
10676 0x02, 0x45, 0x55, 0xB4,
10677
10678 0x27, 0xCF, 0x74, 0xC6,
10679 0x2A, 0x20,
10680 0x1A, 0x20,
10681
10682 0xA7, 0x30, 0x4F, 0xE9,
10683 0x0A, 0x20,
10684 0x02, 0x20,
10685
10686 0x31, 0x53, 0x2F, 0x9F,
10687 0x9C, 0x27, 0x20, 0xE9,
10688
10689 0x39, 0xE5, 0x2C, 0x9F,
10690 0xA8, 0x38, 0x4F, 0xE9,
10691
10692 0x2A, 0x44, 0x4C, 0xB6,
10693 0x1A, 0x44, 0x54, 0xB6,
10694
10695 0x30, 0x50, 0x2E, 0x9F,
10696 0x36, 0x31, 0x4F, 0xE9,
10697
10698 0x38, 0x21, 0x2C, 0x9F,
10699 0x37, 0x39, 0x4F, 0xE9,
10700
10701 0x00, 0x80, 0x00, 0xE8,
10702 0x2A, 0x20,
10703 0x1A, 0x20,
10704
10705 0x2A, 0x46, 0x4E, 0xBF,
10706 0x1A, 0x46, 0x56, 0xBF,
10707
10708 0x31, 0x53, 0x2F, 0x9F,
10709 0xA4, 0x31, 0x4F, 0xE9,
10710
10711 0x39, 0xE5, 0x2C, 0x9F,
10712 0xA5, 0x39, 0x4F, 0xE9,
10713
10714 0x0A, 0x47, 0x4F, 0xBF,
10715 0x02, 0x47, 0x57, 0xBF,
10716
10717 0x31, 0x53, 0x2F, 0x9F,
10718 0xA1, 0x30, 0x4F, 0xE9,
10719
10720 0x39, 0xE5, 0x2C, 0x9F,
10721 0xA2, 0x38, 0x4F, 0xE9,
10722
10723 0x2A, 0x43, 0x4B, 0xBF,
10724 0x1A, 0x43, 0x53, 0xBF,
10725
10726 0x30, 0x50, 0x2E, 0x9F,
10727 0x9D, 0x31, 0x4F, 0xE9,
10728
10729 0x38, 0x21, 0x2C, 0x9F,
10730 0x9E, 0x39, 0x4F, 0xE9,
10731
10732 0x31, 0x53, 0x2F, 0x9F,
10733 0x80, 0x31, 0x57, 0xE9,
10734
10735 0x39, 0xE5, 0x2C, 0x9F,
10736 0x81, 0x39, 0x57, 0xE9,
10737
10738 0x37, 0x48, 0x50, 0xBD,
10739 0x8A, 0x36, 0x20, 0xE9,
10740
10741 0x86, 0x76, 0x57, 0xE9,
10742 0x8B, 0x3E, 0x20, 0xE9,
10743
10744 0x82, 0x30, 0x57, 0xE9,
10745 0x87, 0x77, 0x57, 0xE9,
10746
10747 0x83, 0x38, 0x57, 0xE9,
10748 0x35, 0x49, 0x51, 0xBD,
10749
10750 0x84, 0x31, 0x5E, 0xE9,
10751 0x30, 0x1F, 0x5F, 0xE9,
10752
10753 0x85, 0x39, 0x5E, 0xE9,
10754 0x57, 0x25, 0x20, 0xE9,
10755
10756 0x2B, 0x48, 0x20, 0xE9,
10757 0x1D, 0x37, 0xE1, 0xEA,
10758
10759 0x1E, 0x35, 0xE1, 0xEA,
10760 0x00, 0xE0,
10761 0x26, 0x77,
10762
10763 0x24, 0x49, 0x20, 0xE9,
10764 0x9D, 0xFF, 0x20, 0xEA,
10765
10766 0x16, 0x26, 0x20, 0xE9,
10767 0x57, 0x2E, 0xBF, 0xEA,
10768
10769 0x1C, 0x46, 0xA0, 0xE8,
10770 0x23, 0x4E, 0xA0, 0xE8,
10771
10772 0x2B, 0x56, 0xA0, 0xE8,
10773 0x1D, 0x47, 0xA0, 0xE8,
10774
10775 0x24, 0x4F, 0xA0, 0xE8,
10776 0x2C, 0x57, 0xA0, 0xE8,
10777
10778 0x1C, 0x00,
10779 0x23, 0x00,
10780 0x2B, 0x00,
10781 0x00, 0xE0,
10782
10783 0x1D, 0x00,
10784 0x24, 0x00,
10785 0x2C, 0x00,
10786 0x00, 0xE0,
10787
10788 0x1C, 0x65,
10789 0x23, 0x65,
10790 0x2B, 0x65,
10791 0x00, 0xE0,
10792
10793 0x1D, 0x65,
10794 0x24, 0x65,
10795 0x2C, 0x65,
10796 0x00, 0xE0,
10797
10798 0x1C, 0x23, 0x60, 0xEC,
10799 0x36, 0xD7, 0x36, 0xAD,
10800
10801 0x2B, 0x80, 0x60, 0xEC,
10802 0x1D, 0x24, 0x60, 0xEC,
10803
10804 0x3E, 0xD7, 0x3E, 0xAD,
10805 0x2C, 0x80, 0x60, 0xEC,
10806
10807 0x1C, 0x2B, 0xDE, 0xE8,
10808 0x23, 0x80, 0xDE, 0xE8,
10809
10810 0x36, 0x80, 0x36, 0xBD,
10811 0x3E, 0x80, 0x3E, 0xBD,
10812
10813 0x33, 0xD7, 0x1C, 0xBD,
10814 0x3B, 0xD7, 0x23, 0xBD,
10815
10816 0x46, 0x80, 0x46, 0xCF,
10817 0x4F, 0x80, 0x4F, 0xCF,
10818
10819 0x56, 0x33, 0x56, 0xCF,
10820 0x47, 0x3B, 0x47, 0xCF,
10821
10822 0xC5, 0xFF, 0x20, 0xEA,
10823 0x00, 0x80, 0x00, 0xE8,
10824
10825 0x4E, 0x33, 0x4E, 0xCF,
10826 0x57, 0x3B, 0x57, 0xCF,
10827
10828 0x8B, 0xFF, 0x20, 0xEA,
10829 0x57, 0xC0, 0xBF, 0xEA,
10830
10831 0x00, 0x80, 0xA0, 0xE9,
10832 0x00, 0x00, 0xD8, 0xEC,
10833
10834};
10835
10836static unsigned char warp_g400_tgzsaf[] = {
10837
10838 0x00, 0x88, 0x98, 0xE9,
10839 0x00, 0x80, 0x00, 0xE8,
10840
10841 0x00, 0x80, 0xA0, 0xE9,
10842 0x00, 0x00, 0xD8, 0xEC,
10843
10844 0xFF, 0x80, 0xC0, 0xE9,
10845 0x00, 0x80, 0x00, 0xE8,
10846
10847 0x22, 0x40, 0x48, 0xBF,
10848 0x2A, 0x40, 0x50, 0xBF,
10849
10850 0x32, 0x41, 0x49, 0xBF,
10851 0x3A, 0x41, 0x51, 0xBF,
10852
10853 0xC3, 0x6B,
10854 0xCB, 0x6B,
10855 0x00, 0x88, 0x98, 0xE9,
10856
10857 0x73, 0x7B, 0xC8, 0xEC,
10858 0x96, 0xE2,
10859 0x41, 0x04,
10860
10861 0x7B, 0x43, 0xA0, 0xE8,
10862 0x73, 0x4B, 0xA0, 0xE8,
10863
10864 0xAD, 0xEE, 0x29, 0x9F,
10865 0x00, 0xE0,
10866 0x49, 0x04,
10867
10868 0x90, 0xE2,
10869 0x51, 0x04,
10870 0x31, 0x46, 0xB1, 0xE8,
10871
10872 0x49, 0x41, 0xC0, 0xEC,
10873 0x39, 0x57, 0xB1, 0xE8,
10874
10875 0x00, 0x04,
10876 0x46, 0xE2,
10877 0x73, 0x53, 0xA0, 0xE8,
10878
10879 0x51, 0x41, 0xC0, 0xEC,
10880 0x31, 0x00,
10881 0x39, 0x00,
10882
10883 0x6E, 0x80, 0x15, 0xEA,
10884 0x08, 0x04,
10885 0x10, 0x04,
10886
10887 0x51, 0x49, 0xC0, 0xEC,
10888 0x2F, 0x41, 0x60, 0xEA,
10889
10890 0x31, 0x20,
10891 0x39, 0x20,
10892 0x1F, 0x42, 0xA0, 0xE8,
10893
10894 0x2A, 0x42, 0x4A, 0xBF,
10895 0x27, 0x4A, 0xA0, 0xE8,
10896
10897 0x1A, 0x42, 0x52, 0xBF,
10898 0x1E, 0x49, 0x60, 0xEA,
10899
10900 0x73, 0x7B, 0xC8, 0xEC,
10901 0x26, 0x51, 0x60, 0xEA,
10902
10903 0x32, 0x40, 0x48, 0xBD,
10904 0x22, 0x40, 0x50, 0xBD,
10905
10906 0x12, 0x41, 0x49, 0xBD,
10907 0x3A, 0x41, 0x51, 0xBD,
10908
10909 0xBF, 0x2F, 0x26, 0xBD,
10910 0x00, 0xE0,
10911 0x7B, 0x72,
10912
10913 0x32, 0x20,
10914 0x22, 0x20,
10915 0x12, 0x20,
10916 0x3A, 0x20,
10917
10918 0x46, 0x31, 0x46, 0xBF,
10919 0x4E, 0x31, 0x4E, 0xBF,
10920
10921 0xB3, 0xE2, 0x2D, 0x9F,
10922 0x00, 0x80, 0x00, 0xE8,
10923
10924 0x56, 0x31, 0x56, 0xBF,
10925 0x47, 0x39, 0x47, 0xBF,
10926
10927 0x4F, 0x39, 0x4F, 0xBF,
10928 0x57, 0x39, 0x57, 0xBF,
10929
10930 0x60, 0x80, 0x07, 0xEA,
10931 0x24, 0x41, 0x20, 0xE9,
10932
10933 0x42, 0x73, 0xF8, 0xEC,
10934 0x00, 0xE0,
10935 0x2D, 0x73,
10936
10937 0x33, 0x72,
10938 0x0C, 0xE3,
10939 0xA5, 0x2F, 0x1E, 0xBD,
10940
10941 0x43, 0x43, 0x2D, 0xDF,
10942 0x4B, 0x4B, 0x2D, 0xDF,
10943
10944 0xAE, 0x1E, 0x26, 0xBD,
10945 0x58, 0xE3,
10946 0x33, 0x66,
10947
10948 0x53, 0x53, 0x2D, 0xDF,
10949 0x00, 0x80, 0x00, 0xE8,
10950
10951 0xB8, 0x38, 0x33, 0xBF,
10952 0x00, 0xE0,
10953 0x59, 0xE3,
10954
10955 0x1E, 0x12, 0x41, 0xE9,
10956 0x1A, 0x22, 0x41, 0xE9,
10957
10958 0x2B, 0x40, 0x3D, 0xE9,
10959 0x3F, 0x4B, 0xA0, 0xE8,
10960
10961 0x2D, 0x73,
10962 0x30, 0x76,
10963 0x05, 0x80, 0x3D, 0xEA,
10964
10965 0x37, 0x43, 0xA0, 0xE8,
10966 0x3D, 0x53, 0xA0, 0xE8,
10967
10968 0x48, 0x70, 0xF8, 0xEC,
10969 0x2B, 0x48, 0x3C, 0xE9,
10970
10971 0x1F, 0x27, 0xBC, 0xE8,
10972 0x00, 0x80, 0x00, 0xE8,
10973
10974 0x00, 0x80, 0x00, 0xE8,
10975 0x00, 0x80, 0x00, 0xE8,
10976
10977 0x15, 0xC0, 0x20, 0xE9,
10978 0x15, 0xC0, 0x20, 0xE9,
10979
10980 0x15, 0xC0, 0x20, 0xE9,
10981 0x15, 0xC0, 0x20, 0xE9,
10982
10983 0x18, 0x3A, 0x41, 0xE9,
10984 0x1D, 0x32, 0x41, 0xE9,
10985
10986 0x2A, 0x40, 0x20, 0xE9,
10987 0x56, 0x3D, 0x56, 0xDF,
10988
10989 0x46, 0x37, 0x46, 0xDF,
10990 0x4E, 0x3F, 0x4E, 0xDF,
10991
10992 0x16, 0x30, 0x20, 0xE9,
10993 0x4F, 0x3F, 0x4F, 0xDF,
10994
10995 0x47, 0x37, 0x47, 0xDF,
10996 0x57, 0x3D, 0x57, 0xDF,
10997
10998 0x32, 0x32, 0x2D, 0xDF,
10999 0x22, 0x22, 0x2D, 0xDF,
11000
11001 0x12, 0x12, 0x2D, 0xDF,
11002 0x3A, 0x3A, 0x2D, 0xDF,
11003
11004 0x27, 0xCF, 0x74, 0xC2,
11005 0x37, 0xCF, 0x74, 0xC4,
11006
11007 0x0A, 0x44, 0x4C, 0xB0,
11008 0x02, 0x44, 0x54, 0xB0,
11009
11010 0x3D, 0xCF, 0x74, 0xC0,
11011 0x34, 0x37, 0x20, 0xE9,
11012
11013 0x31, 0x53, 0x2F, 0x9F,
11014 0x38, 0x27, 0x20, 0xE9,
11015
11016 0x39, 0xE5, 0x2C, 0x9F,
11017 0x3C, 0x3D, 0x20, 0xE9,
11018
11019 0x2A, 0x44, 0x4C, 0xB2,
11020 0x1A, 0x44, 0x54, 0xB2,
11021
11022 0x32, 0x80, 0x3A, 0xEA,
11023 0x0A, 0x20,
11024 0x02, 0x20,
11025
11026 0x27, 0xCF, 0x75, 0xC0,
11027 0x2A, 0x20,
11028 0x1A, 0x20,
11029
11030 0x30, 0x50, 0x2E, 0x9F,
11031 0x32, 0x31, 0x5F, 0xE9,
11032
11033 0x38, 0x21, 0x2C, 0x9F,
11034 0x33, 0x39, 0x5F, 0xE9,
11035
11036 0x3D, 0xCF, 0x75, 0xC2,
11037 0x37, 0xCF, 0x75, 0xC4,
11038
11039 0x31, 0x53, 0x2F, 0x9F,
11040 0xA6, 0x27, 0x20, 0xE9,
11041
11042 0x39, 0xE5, 0x2C, 0x9F,
11043 0xA3, 0x3D, 0x20, 0xE9,
11044
11045 0x2A, 0x44, 0x4C, 0xB4,
11046 0x1A, 0x44, 0x54, 0xB4,
11047
11048 0x0A, 0x45, 0x4D, 0xB0,
11049 0x02, 0x45, 0x55, 0xB0,
11050
11051 0x88, 0x73, 0x5E, 0xE9,
11052 0x2A, 0x20,
11053 0x1A, 0x20,
11054
11055 0xA0, 0x37, 0x20, 0xE9,
11056 0x0A, 0x20,
11057 0x02, 0x20,
11058
11059 0x31, 0x53, 0x2F, 0x9F,
11060 0x3E, 0x30, 0x4F, 0xE9,
11061
11062 0x39, 0xE5, 0x2C, 0x9F,
11063 0x3F, 0x38, 0x4F, 0xE9,
11064
11065 0x30, 0x50, 0x2E, 0x9F,
11066 0x3A, 0x31, 0x4F, 0xE9,
11067
11068 0x38, 0x21, 0x2C, 0x9F,
11069 0x3B, 0x39, 0x4F, 0xE9,
11070
11071 0x2A, 0x45, 0x4D, 0xB2,
11072 0x1A, 0x45, 0x55, 0xB2,
11073
11074 0x0A, 0x45, 0x4D, 0xB4,
11075 0x02, 0x45, 0x55, 0xB4,
11076
11077 0x27, 0xCF, 0x74, 0xC6,
11078 0x2A, 0x20,
11079 0x1A, 0x20,
11080
11081 0xA7, 0x30, 0x4F, 0xE9,
11082 0x0A, 0x20,
11083 0x02, 0x20,
11084
11085 0x31, 0x53, 0x2F, 0x9F,
11086 0x9C, 0x27, 0x20, 0xE9,
11087
11088 0x39, 0xE5, 0x2C, 0x9F,
11089 0xA8, 0x38, 0x4F, 0xE9,
11090
11091 0x2A, 0x44, 0x4C, 0xB6,
11092 0x1A, 0x44, 0x54, 0xB6,
11093
11094 0x30, 0x50, 0x2E, 0x9F,
11095 0x36, 0x31, 0x4F, 0xE9,
11096
11097 0x38, 0x21, 0x2C, 0x9F,
11098 0x37, 0x39, 0x4F, 0xE9,
11099
11100 0x0A, 0x45, 0x4D, 0xB6,
11101 0x02, 0x45, 0x55, 0xB6,
11102
11103 0x3D, 0xCF, 0x75, 0xC6,
11104 0x2A, 0x20,
11105 0x1A, 0x20,
11106
11107 0x2A, 0x46, 0x4E, 0xBF,
11108 0x1A, 0x46, 0x56, 0xBF,
11109
11110 0x31, 0x53, 0x2F, 0x9F,
11111 0xA4, 0x31, 0x4F, 0xE9,
11112
11113 0x39, 0xE5, 0x2C, 0x9F,
11114 0xA5, 0x39, 0x4F, 0xE9,
11115
11116 0x31, 0x3D, 0x20, 0xE9,
11117 0x0A, 0x20,
11118 0x02, 0x20,
11119
11120 0x0A, 0x47, 0x4F, 0xBF,
11121 0x02, 0x47, 0x57, 0xBF,
11122
11123 0x30, 0x50, 0x2E, 0x9F,
11124 0xA1, 0x30, 0x4F, 0xE9,
11125
11126 0x38, 0x21, 0x2C, 0x9F,
11127 0xA2, 0x38, 0x4F, 0xE9,
11128
11129 0x31, 0x53, 0x2F, 0x9F,
11130 0x9D, 0x31, 0x4F, 0xE9,
11131
11132 0x39, 0xE5, 0x2C, 0x9F,
11133 0x9E, 0x39, 0x4F, 0xE9,
11134
11135 0x2A, 0x43, 0x4B, 0xBF,
11136 0x1A, 0x43, 0x53, 0xBF,
11137
11138 0x30, 0x50, 0x2E, 0x9F,
11139 0x35, 0x30, 0x4F, 0xE9,
11140
11141 0x38, 0x21, 0x2C, 0x9F,
11142 0x39, 0x38, 0x4F, 0xE9,
11143
11144 0x31, 0x53, 0x2F, 0x9F,
11145 0x80, 0x31, 0x57, 0xE9,
11146
11147 0x39, 0xE5, 0x2C, 0x9F,
11148 0x81, 0x39, 0x57, 0xE9,
11149
11150 0x37, 0x48, 0x50, 0xBD,
11151 0x8A, 0x36, 0x20, 0xE9,
11152
11153 0x86, 0x76, 0x57, 0xE9,
11154 0x8B, 0x3E, 0x20, 0xE9,
11155
11156 0x82, 0x30, 0x57, 0xE9,
11157 0x87, 0x77, 0x57, 0xE9,
11158
11159 0x83, 0x38, 0x57, 0xE9,
11160 0x35, 0x49, 0x51, 0xBD,
11161
11162 0x84, 0x31, 0x5E, 0xE9,
11163 0x30, 0x1F, 0x5F, 0xE9,
11164
11165 0x85, 0x39, 0x5E, 0xE9,
11166 0x57, 0x25, 0x20, 0xE9,
11167
11168 0x2B, 0x48, 0x20, 0xE9,
11169 0x1D, 0x37, 0xE1, 0xEA,
11170
11171 0x1E, 0x35, 0xE1, 0xEA,
11172 0x00, 0xE0,
11173 0x26, 0x77,
11174
11175 0x24, 0x49, 0x20, 0xE9,
11176 0x99, 0xFF, 0x20, 0xEA,
11177
11178 0x16, 0x26, 0x20, 0xE9,
11179 0x57, 0x2E, 0xBF, 0xEA,
11180
11181 0x1C, 0x46, 0xA0, 0xE8,
11182 0x23, 0x4E, 0xA0, 0xE8,
11183
11184 0x2B, 0x56, 0xA0, 0xE8,
11185 0x1D, 0x47, 0xA0, 0xE8,
11186
11187 0x24, 0x4F, 0xA0, 0xE8,
11188 0x2C, 0x57, 0xA0, 0xE8,
11189
11190 0x1C, 0x00,
11191 0x23, 0x00,
11192 0x2B, 0x00,
11193 0x00, 0xE0,
11194
11195 0x1D, 0x00,
11196 0x24, 0x00,
11197 0x2C, 0x00,
11198 0x00, 0xE0,
11199
11200 0x1C, 0x65,
11201 0x23, 0x65,
11202 0x2B, 0x65,
11203 0x00, 0xE0,
11204
11205 0x1D, 0x65,
11206 0x24, 0x65,
11207 0x2C, 0x65,
11208 0x00, 0xE0,
11209
11210 0x1C, 0x23, 0x60, 0xEC,
11211 0x36, 0xD7, 0x36, 0xAD,
11212
11213 0x2B, 0x80, 0x60, 0xEC,
11214 0x1D, 0x24, 0x60, 0xEC,
11215
11216 0x3E, 0xD7, 0x3E, 0xAD,
11217 0x2C, 0x80, 0x60, 0xEC,
11218
11219 0x1C, 0x2B, 0xDE, 0xE8,
11220 0x23, 0x80, 0xDE, 0xE8,
11221
11222 0x36, 0x80, 0x36, 0xBD,
11223 0x3E, 0x80, 0x3E, 0xBD,
11224
11225 0x33, 0xD7, 0x1C, 0xBD,
11226 0x3B, 0xD7, 0x23, 0xBD,
11227
11228 0x46, 0x80, 0x46, 0xCF,
11229 0x4F, 0x80, 0x4F, 0xCF,
11230
11231 0x56, 0x33, 0x56, 0xCF,
11232 0x47, 0x3B, 0x47, 0xCF,
11233
11234 0xC1, 0xFF, 0x20, 0xEA,
11235 0x00, 0x80, 0x00, 0xE8,
11236
11237 0x4E, 0x33, 0x4E, 0xCF,
11238 0x57, 0x3B, 0x57, 0xCF,
11239
11240 0x87, 0xFF, 0x20, 0xEA,
11241 0x57, 0xC0, 0xBF, 0xEA,
11242
11243 0x00, 0x80, 0xA0, 0xE9,
11244 0x00, 0x00, 0xD8, 0xEC,
11245
11246};
11247
11248static unsigned char warp_g400_tgzsf[] = {
11249
11250 0x00, 0x88, 0x98, 0xE9,
11251 0x00, 0x80, 0x00, 0xE8,
11252
11253 0x00, 0x80, 0xA0, 0xE9,
11254 0x00, 0x00, 0xD8, 0xEC,
11255
11256 0xFF, 0x80, 0xC0, 0xE9,
11257 0x00, 0x80, 0x00, 0xE8,
11258
11259 0x22, 0x40, 0x48, 0xBF,
11260 0x2A, 0x40, 0x50, 0xBF,
11261
11262 0x32, 0x41, 0x49, 0xBF,
11263 0x3A, 0x41, 0x51, 0xBF,
11264
11265 0xC3, 0x6B,
11266 0xCB, 0x6B,
11267 0x00, 0x88, 0x98, 0xE9,
11268
11269 0x73, 0x7B, 0xC8, 0xEC,
11270 0x96, 0xE2,
11271 0x41, 0x04,
11272
11273 0x7B, 0x43, 0xA0, 0xE8,
11274 0x73, 0x4B, 0xA0, 0xE8,
11275
11276 0xAD, 0xEE, 0x29, 0x9F,
11277 0x00, 0xE0,
11278 0x49, 0x04,
11279
11280 0x90, 0xE2,
11281 0x51, 0x04,
11282 0x31, 0x46, 0xB1, 0xE8,
11283
11284 0x49, 0x41, 0xC0, 0xEC,
11285 0x39, 0x57, 0xB1, 0xE8,
11286
11287 0x00, 0x04,
11288 0x46, 0xE2,
11289 0x73, 0x53, 0xA0, 0xE8,
11290
11291 0x51, 0x41, 0xC0, 0xEC,
11292 0x31, 0x00,
11293 0x39, 0x00,
11294
11295 0x6A, 0x80, 0x15, 0xEA,
11296 0x08, 0x04,
11297 0x10, 0x04,
11298
11299 0x51, 0x49, 0xC0, 0xEC,
11300 0x2F, 0x41, 0x60, 0xEA,
11301
11302 0x31, 0x20,
11303 0x39, 0x20,
11304 0x1F, 0x42, 0xA0, 0xE8,
11305
11306 0x2A, 0x42, 0x4A, 0xBF,
11307 0x27, 0x4A, 0xA0, 0xE8,
11308
11309 0x1A, 0x42, 0x52, 0xBF,
11310 0x1E, 0x49, 0x60, 0xEA,
11311
11312 0x73, 0x7B, 0xC8, 0xEC,
11313 0x26, 0x51, 0x60, 0xEA,
11314
11315 0x32, 0x40, 0x48, 0xBD,
11316 0x22, 0x40, 0x50, 0xBD,
11317
11318 0x12, 0x41, 0x49, 0xBD,
11319 0x3A, 0x41, 0x51, 0xBD,
11320
11321 0xBF, 0x2F, 0x26, 0xBD,
11322 0x00, 0xE0,
11323 0x7B, 0x72,
11324
11325 0x32, 0x20,
11326 0x22, 0x20,
11327 0x12, 0x20,
11328 0x3A, 0x20,
11329
11330 0x46, 0x31, 0x46, 0xBF,
11331 0x4E, 0x31, 0x4E, 0xBF,
11332
11333 0xB3, 0xE2, 0x2D, 0x9F,
11334 0x00, 0x80, 0x00, 0xE8,
11335
11336 0x56, 0x31, 0x56, 0xBF,
11337 0x47, 0x39, 0x47, 0xBF,
11338
11339 0x4F, 0x39, 0x4F, 0xBF,
11340 0x57, 0x39, 0x57, 0xBF,
11341
11342 0x5C, 0x80, 0x07, 0xEA,
11343 0x24, 0x41, 0x20, 0xE9,
11344
11345 0x42, 0x73, 0xF8, 0xEC,
11346 0x00, 0xE0,
11347 0x2D, 0x73,
11348
11349 0x33, 0x72,
11350 0x0C, 0xE3,
11351 0xA5, 0x2F, 0x1E, 0xBD,
11352
11353 0x43, 0x43, 0x2D, 0xDF,
11354 0x4B, 0x4B, 0x2D, 0xDF,
11355
11356 0xAE, 0x1E, 0x26, 0xBD,
11357 0x58, 0xE3,
11358 0x33, 0x66,
11359
11360 0x53, 0x53, 0x2D, 0xDF,
11361 0x00, 0x80, 0x00, 0xE8,
11362
11363 0xB8, 0x38, 0x33, 0xBF,
11364 0x00, 0xE0,
11365 0x59, 0xE3,
11366
11367 0x1E, 0x12, 0x41, 0xE9,
11368 0x1A, 0x22, 0x41, 0xE9,
11369
11370 0x2B, 0x40, 0x3D, 0xE9,
11371 0x3F, 0x4B, 0xA0, 0xE8,
11372
11373 0x2D, 0x73,
11374 0x30, 0x76,
11375 0x05, 0x80, 0x3D, 0xEA,
11376
11377 0x37, 0x43, 0xA0, 0xE8,
11378 0x3D, 0x53, 0xA0, 0xE8,
11379
11380 0x48, 0x70, 0xF8, 0xEC,
11381 0x2B, 0x48, 0x3C, 0xE9,
11382
11383 0x1F, 0x27, 0xBC, 0xE8,
11384 0x00, 0x80, 0x00, 0xE8,
11385
11386 0x00, 0x80, 0x00, 0xE8,
11387 0x00, 0x80, 0x00, 0xE8,
11388
11389 0x15, 0xC0, 0x20, 0xE9,
11390 0x15, 0xC0, 0x20, 0xE9,
11391
11392 0x15, 0xC0, 0x20, 0xE9,
11393 0x15, 0xC0, 0x20, 0xE9,
11394
11395 0x18, 0x3A, 0x41, 0xE9,
11396 0x1D, 0x32, 0x41, 0xE9,
11397
11398 0x2A, 0x40, 0x20, 0xE9,
11399 0x56, 0x3D, 0x56, 0xDF,
11400
11401 0x46, 0x37, 0x46, 0xDF,
11402 0x4E, 0x3F, 0x4E, 0xDF,
11403
11404 0x16, 0x30, 0x20, 0xE9,
11405 0x4F, 0x3F, 0x4F, 0xDF,
11406
11407 0x47, 0x37, 0x47, 0xDF,
11408 0x57, 0x3D, 0x57, 0xDF,
11409
11410 0x32, 0x32, 0x2D, 0xDF,
11411 0x22, 0x22, 0x2D, 0xDF,
11412
11413 0x12, 0x12, 0x2D, 0xDF,
11414 0x3A, 0x3A, 0x2D, 0xDF,
11415
11416 0x27, 0xCF, 0x74, 0xC2,
11417 0x37, 0xCF, 0x74, 0xC4,
11418
11419 0x0A, 0x44, 0x4C, 0xB0,
11420 0x02, 0x44, 0x54, 0xB0,
11421
11422 0x3D, 0xCF, 0x74, 0xC0,
11423 0x34, 0x37, 0x20, 0xE9,
11424
11425 0x31, 0x53, 0x2F, 0x9F,
11426 0x38, 0x27, 0x20, 0xE9,
11427
11428 0x39, 0xE5, 0x2C, 0x9F,
11429 0x3C, 0x3D, 0x20, 0xE9,
11430
11431 0x2A, 0x44, 0x4C, 0xB2,
11432 0x1A, 0x44, 0x54, 0xB2,
11433
11434 0x2E, 0x80, 0x3A, 0xEA,
11435 0x0A, 0x20,
11436 0x02, 0x20,
11437
11438 0x27, 0xCF, 0x75, 0xC0,
11439 0x2A, 0x20,
11440 0x1A, 0x20,
11441
11442 0x30, 0x50, 0x2E, 0x9F,
11443 0x32, 0x31, 0x5F, 0xE9,
11444
11445 0x38, 0x21, 0x2C, 0x9F,
11446 0x33, 0x39, 0x5F, 0xE9,
11447
11448 0x3D, 0xCF, 0x75, 0xC2,
11449 0x37, 0xCF, 0x75, 0xC4,
11450
11451 0x31, 0x53, 0x2F, 0x9F,
11452 0xA6, 0x27, 0x20, 0xE9,
11453
11454 0x39, 0xE5, 0x2C, 0x9F,
11455 0xA3, 0x3D, 0x20, 0xE9,
11456
11457 0x2A, 0x44, 0x4C, 0xB4,
11458 0x1A, 0x44, 0x54, 0xB4,
11459
11460 0x0A, 0x45, 0x4D, 0xB0,
11461 0x02, 0x45, 0x55, 0xB0,
11462
11463 0x88, 0x73, 0x5E, 0xE9,
11464 0x2A, 0x20,
11465 0x1A, 0x20,
11466
11467 0xA0, 0x37, 0x20, 0xE9,
11468 0x0A, 0x20,
11469 0x02, 0x20,
11470
11471 0x31, 0x53, 0x2F, 0x9F,
11472 0x3E, 0x30, 0x4F, 0xE9,
11473
11474 0x39, 0xE5, 0x2C, 0x9F,
11475 0x3F, 0x38, 0x4F, 0xE9,
11476
11477 0x30, 0x50, 0x2E, 0x9F,
11478 0x3A, 0x31, 0x4F, 0xE9,
11479
11480 0x38, 0x21, 0x2C, 0x9F,
11481 0x3B, 0x39, 0x4F, 0xE9,
11482
11483 0x2A, 0x45, 0x4D, 0xB2,
11484 0x1A, 0x45, 0x55, 0xB2,
11485
11486 0x0A, 0x45, 0x4D, 0xB4,
11487 0x02, 0x45, 0x55, 0xB4,
11488
11489 0x27, 0xCF, 0x75, 0xC6,
11490 0x2A, 0x20,
11491 0x1A, 0x20,
11492
11493 0xA7, 0x30, 0x4F, 0xE9,
11494 0x0A, 0x20,
11495 0x02, 0x20,
11496
11497 0x31, 0x53, 0x2F, 0x9F,
11498 0x31, 0x27, 0x20, 0xE9,
11499
11500 0x39, 0xE5, 0x2C, 0x9F,
11501 0xA8, 0x38, 0x4F, 0xE9,
11502
11503 0x2A, 0x45, 0x4D, 0xB6,
11504 0x1A, 0x45, 0x55, 0xB6,
11505
11506 0x30, 0x50, 0x2E, 0x9F,
11507 0x36, 0x31, 0x4F, 0xE9,
11508
11509 0x38, 0x21, 0x2C, 0x9F,
11510 0x37, 0x39, 0x4F, 0xE9,
11511
11512 0x00, 0x80, 0x00, 0xE8,
11513 0x2A, 0x20,
11514 0x1A, 0x20,
11515
11516 0x2A, 0x46, 0x4E, 0xBF,
11517 0x1A, 0x46, 0x56, 0xBF,
11518
11519 0x31, 0x53, 0x2F, 0x9F,
11520 0xA4, 0x31, 0x4F, 0xE9,
11521
11522 0x39, 0xE5, 0x2C, 0x9F,
11523 0xA5, 0x39, 0x4F, 0xE9,
11524
11525 0x0A, 0x47, 0x4F, 0xBF,
11526 0x02, 0x47, 0x57, 0xBF,
11527
11528 0x31, 0x53, 0x2F, 0x9F,
11529 0xA1, 0x30, 0x4F, 0xE9,
11530
11531 0x39, 0xE5, 0x2C, 0x9F,
11532 0xA2, 0x38, 0x4F, 0xE9,
11533
11534 0x2A, 0x43, 0x4B, 0xBF,
11535 0x1A, 0x43, 0x53, 0xBF,
11536
11537 0x30, 0x50, 0x2E, 0x9F,
11538 0x35, 0x31, 0x4F, 0xE9,
11539
11540 0x38, 0x21, 0x2C, 0x9F,
11541 0x39, 0x39, 0x4F, 0xE9,
11542
11543 0x31, 0x53, 0x2F, 0x9F,
11544 0x80, 0x31, 0x57, 0xE9,
11545
11546 0x39, 0xE5, 0x2C, 0x9F,
11547 0x81, 0x39, 0x57, 0xE9,
11548
11549 0x37, 0x48, 0x50, 0xBD,
11550 0x8A, 0x36, 0x20, 0xE9,
11551
11552 0x86, 0x76, 0x57, 0xE9,
11553 0x8B, 0x3E, 0x20, 0xE9,
11554
11555 0x82, 0x30, 0x57, 0xE9,
11556 0x87, 0x77, 0x57, 0xE9,
11557
11558 0x83, 0x38, 0x57, 0xE9,
11559 0x35, 0x49, 0x51, 0xBD,
11560
11561 0x84, 0x31, 0x5E, 0xE9,
11562 0x30, 0x1F, 0x5F, 0xE9,
11563
11564 0x85, 0x39, 0x5E, 0xE9,
11565 0x57, 0x25, 0x20, 0xE9,
11566
11567 0x2B, 0x48, 0x20, 0xE9,
11568 0x1D, 0x37, 0xE1, 0xEA,
11569
11570 0x1E, 0x35, 0xE1, 0xEA,
11571 0x00, 0xE0,
11572 0x26, 0x77,
11573
11574 0x24, 0x49, 0x20, 0xE9,
11575 0x9D, 0xFF, 0x20, 0xEA,
11576
11577 0x16, 0x26, 0x20, 0xE9,
11578 0x57, 0x2E, 0xBF, 0xEA,
11579
11580 0x1C, 0x46, 0xA0, 0xE8,
11581 0x23, 0x4E, 0xA0, 0xE8,
11582
11583 0x2B, 0x56, 0xA0, 0xE8,
11584 0x1D, 0x47, 0xA0, 0xE8,
11585
11586 0x24, 0x4F, 0xA0, 0xE8,
11587 0x2C, 0x57, 0xA0, 0xE8,
11588
11589 0x1C, 0x00,
11590 0x23, 0x00,
11591 0x2B, 0x00,
11592 0x00, 0xE0,
11593
11594 0x1D, 0x00,
11595 0x24, 0x00,
11596 0x2C, 0x00,
11597 0x00, 0xE0,
11598
11599 0x1C, 0x65,
11600 0x23, 0x65,
11601 0x2B, 0x65,
11602 0x00, 0xE0,
11603
11604 0x1D, 0x65,
11605 0x24, 0x65,
11606 0x2C, 0x65,
11607 0x00, 0xE0,
11608
11609 0x1C, 0x23, 0x60, 0xEC,
11610 0x36, 0xD7, 0x36, 0xAD,
11611
11612 0x2B, 0x80, 0x60, 0xEC,
11613 0x1D, 0x24, 0x60, 0xEC,
11614
11615 0x3E, 0xD7, 0x3E, 0xAD,
11616 0x2C, 0x80, 0x60, 0xEC,
11617
11618 0x1C, 0x2B, 0xDE, 0xE8,
11619 0x23, 0x80, 0xDE, 0xE8,
11620
11621 0x36, 0x80, 0x36, 0xBD,
11622 0x3E, 0x80, 0x3E, 0xBD,
11623
11624 0x33, 0xD7, 0x1C, 0xBD,
11625 0x3B, 0xD7, 0x23, 0xBD,
11626
11627 0x46, 0x80, 0x46, 0xCF,
11628 0x4F, 0x80, 0x4F, 0xCF,
11629
11630 0x56, 0x33, 0x56, 0xCF,
11631 0x47, 0x3B, 0x47, 0xCF,
11632
11633 0xC5, 0xFF, 0x20, 0xEA,
11634 0x00, 0x80, 0x00, 0xE8,
11635
11636 0x4E, 0x33, 0x4E, 0xCF,
11637 0x57, 0x3B, 0x57, 0xCF,
11638
11639 0x8B, 0xFF, 0x20, 0xEA,
11640 0x57, 0xC0, 0xBF, 0xEA,
11641
11642 0x00, 0x80, 0xA0, 0xE9,
11643 0x00, 0x00, 0xD8, 0xEC,
11644
11645};
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
new file mode 100644
index 000000000000..651b93c8ab5d
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -0,0 +1,193 @@
1/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
2 * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
3 *
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
24 * OTHER DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Gareth Hughes <gareth@valinux.com>
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "mga_drm.h"
33#include "mga_drv.h"
34#include "mga_ucode.h"
35
#define MGA_WARP_CODE_ALIGN 256 /* in bytes */

/* Bytes reserved for one WARP microcode image: sizeof(which) rounded up
 * to the next MGA_WARP_CODE_ALIGN boundary.  Note the "+ 1" always adds
 * at least one full alignment unit, even when the image is already
 * aligned, so each slot is slightly over-reserved.
 */
#define WARP_UCODE_SIZE( which ) \
	((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN)

/* Copy one microcode image into the WARP region, record its bus address
 * for the given pipe in warp_pipe_phys[], and advance both cursors to
 * the next aligned slot.  Relies on `pcbase` (bus address) and `vcbase`
 * (kernel virtual address) locals in the calling function.
 */
#define WARP_UCODE_INSTALL( which, where ) \
do { \
	DRM_DEBUG( " pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase );\
	dev_priv->warp_pipe_phys[where] = pcbase; \
	memcpy( vcbase, which, sizeof(which) ); \
	pcbase += WARP_UCODE_SIZE( which ); \
	vcbase += WARP_UCODE_SIZE( which ); \
} while (0)
49
/* Total bytes needed for all 16 G400/G550 WARP pipe images (8 plain +
 * 8 two-texture variants), each padded per WARP_UCODE_SIZE().
 */
static const unsigned int mga_warp_g400_microcode_size =
    (WARP_UCODE_SIZE(warp_g400_tgz) +
     WARP_UCODE_SIZE(warp_g400_tgza) +
     WARP_UCODE_SIZE(warp_g400_tgzaf) +
     WARP_UCODE_SIZE(warp_g400_tgzf) +
     WARP_UCODE_SIZE(warp_g400_tgzs) +
     WARP_UCODE_SIZE(warp_g400_tgzsa) +
     WARP_UCODE_SIZE(warp_g400_tgzsaf) +
     WARP_UCODE_SIZE(warp_g400_tgzsf) +
     WARP_UCODE_SIZE(warp_g400_t2gz) +
     WARP_UCODE_SIZE(warp_g400_t2gza) +
     WARP_UCODE_SIZE(warp_g400_t2gzaf) +
     WARP_UCODE_SIZE(warp_g400_t2gzf) +
     WARP_UCODE_SIZE(warp_g400_t2gzs) +
     WARP_UCODE_SIZE(warp_g400_t2gzsa) +
     WARP_UCODE_SIZE(warp_g400_t2gzsaf) + WARP_UCODE_SIZE(warp_g400_t2gzsf));

/* Total bytes needed for the 8 G200 WARP pipe images, padded the same
 * way as above.
 */
static const unsigned int mga_warp_g200_microcode_size =
    (WARP_UCODE_SIZE(warp_g200_tgz) +
     WARP_UCODE_SIZE(warp_g200_tgza) +
     WARP_UCODE_SIZE(warp_g200_tgzaf) +
     WARP_UCODE_SIZE(warp_g200_tgzf) +
     WARP_UCODE_SIZE(warp_g200_tgzs) +
     WARP_UCODE_SIZE(warp_g200_tgzsa) +
     WARP_UCODE_SIZE(warp_g200_tgzsaf) + WARP_UCODE_SIZE(warp_g200_tgzsf));
75
76unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
77{
78 switch (dev_priv->chipset) {
79 case MGA_CARD_TYPE_G400:
80 case MGA_CARD_TYPE_G550:
81 return PAGE_ALIGN(mga_warp_g400_microcode_size);
82 case MGA_CARD_TYPE_G200:
83 return PAGE_ALIGN(mga_warp_g200_microcode_size);
84 default:
85 return 0;
86 }
87}
88
/* Copy all 16 G400/G550 WARP pipe images into the mapped WARP region.
 * WARP_UCODE_INSTALL records each pipe's bus address in warp_pipe_phys[]
 * and advances pcbase/vcbase past each padded image.  The caller
 * (mga_warp_install_microcode) has already verified the region is large
 * enough.  Always returns 0.
 */
static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
{
	unsigned char *vcbase = dev_priv->warp->handle;
	unsigned long pcbase = dev_priv->warp->offset;

	/* Clear all pipe addresses first so unused slots stay 0 */
	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));

	WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
	WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
	WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA);
	WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF);
	WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS);
	WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF);
	WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA);
	WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF);

	WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ);
	WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF);
	WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA);
	WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF);
	WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS);
	WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF);
	WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA);
	WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF);

	return 0;
}
116
/* Copy the 8 G200 WARP pipe images into the mapped WARP region; G200
 * has no second texture unit, so there are no T2 variants.  See the
 * G400 counterpart above for the layout convention.  Always returns 0.
 */
static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
{
	unsigned char *vcbase = dev_priv->warp->handle;
	unsigned long pcbase = dev_priv->warp->offset;

	/* Clear all pipe addresses first so unused slots stay 0 */
	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));

	WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
	WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
	WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
	WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
	WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
	WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
	WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
	WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);

	return 0;
}
135
136int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
137{
138 const unsigned int size = mga_warp_microcode_size(dev_priv);
139
140 DRM_DEBUG("MGA ucode size = %d bytes\n", size);
141 if (size > dev_priv->warp->size) {
142 DRM_ERROR("microcode too large! (%u > %lu)\n",
143 size, dev_priv->warp->size);
144 return -ENOMEM;
145 }
146
147 switch (dev_priv->chipset) {
148 case MGA_CARD_TYPE_G400:
149 case MGA_CARD_TYPE_G550:
150 return mga_warp_install_g400_microcode(dev_priv);
151 case MGA_CARD_TYPE_G200:
152 return mga_warp_install_g200_microcode(dev_priv);
153 default:
154 return -EINVAL;
155 }
156}
157
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)

/* Configure and start the WARP engine: program the chip-specific setup
 * registers (magic values -- see FIXME), enable the microcode cache and
 * WARP bus mastering, and read WMISC back to verify the configuration
 * stuck.
 *
 * Returns 0 on success, -EINVAL for an unknown chipset or if the WMISC
 * readback does not match WMISC_EXPECTED.
 */
int mga_warp_init(drm_mga_private_t * dev_priv)
{
	u32 wmisc;

	/* FIXME: Get rid of these damned magic numbers...
	 */
	switch (dev_priv->chipset) {
	case MGA_CARD_TYPE_G400:
	case MGA_CARD_TYPE_G550:
		MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
		MGA_WRITE(MGA_WGETMSB, 0x00000E00);
		MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
		MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
		break;
	case MGA_CARD_TYPE_G200:
		MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
		MGA_WRITE(MGA_WGETMSB, 0x1606);
		MGA_WRITE(MGA_WVRTXSZ, 7);
		break;
	default:
		return -EINVAL;
	}

	MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
			      MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
	/* NOTE(review): WMISC_EXPECTED omits MGA_WCACHEFLUSH_ENABLE, which
	 * suggests the flush bit is self-clearing -- confirm against the
	 * G400 register documentation.
	 */
	wmisc = MGA_READ(MGA_WMISC);
	if (wmisc != WMISC_EXPECTED) {
		DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
			  wmisc, WMISC_EXPECTED);
		return -EINVAL;
	}

	return 0;
}
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
new file mode 100644
index 000000000000..1cc72ae3a880
--- /dev/null
+++ b/drivers/gpu/drm/r128/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o
7
8r128-$(CONFIG_COMPAT) += r128_ioc32.o
9
10obj-$(CONFIG_DRM_R128) += r128.o
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
new file mode 100644
index 000000000000..c31afbde62e7
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -0,0 +1,935 @@
1/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
2 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
3 */
4/*
5 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
6 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "r128_drm.h"
35#include "r128_drv.h"
36
37#define R128_FIFO_DEBUG 0
38
/* CCE microcode (from ATI).  Opaque data -- do not modify.
 * r128_cce_load_microcode() consumes the first 512 entries as 256
 * (high dword, low dword) pairs written into the PM4 microcode RAM.
 */
static u32 r128_cce_microcode[] = {
	0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
	1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
	599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
	11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
	262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
	1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
	30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
	1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
	15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
	12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
	46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
	459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
	18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
	15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
	268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
	15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
	1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
	3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
	1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
	15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
	180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
	114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
	33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
	1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
	14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
	1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
	198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
	114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
	1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
	1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
	16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
	174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
	33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
	33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
	409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
83
/* Read an indirect PLL register: select it through CLOCK_CNTL_INDEX
 * (low 5 bits of addr), then read the value via CLOCK_CNTL_DATA.
 */
static int R128_READ_PLL(struct drm_device * dev, int addr)
{
	drm_r128_private_t *dev_priv = dev->dev_private;

	R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
	return R128_READ(R128_CLOCK_CNTL_DATA);
}
91
#if R128_FIFO_DEBUG
/* Debug-only helper (compiled in when R128_FIFO_DEBUG is set): dump the
 * engine and PM4 status registers to the kernel log.
 */
static void r128_status(drm_r128_private_t * dev_priv)
{
	printk("GUI_STAT = 0x%08x\n",
	       (unsigned int)R128_READ(R128_GUI_STAT));
	printk("PM4_STAT = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_STAT));
	printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
	printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
	printk("PM4_MICRO_CNTL = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
	printk("PM4_BUFFER_CNTL = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
}
#endif
109
110/* ================================================================
111 * Engine, FIFO control
112 */
113
114static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
115{
116 u32 tmp;
117 int i;
118
119 tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
120 R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
121
122 for (i = 0; i < dev_priv->usec_timeout; i++) {
123 if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
124 return 0;
125 }
126 DRM_UDELAY(1);
127 }
128
129#if R128_FIFO_DEBUG
130 DRM_ERROR("failed!\n");
131#endif
132 return -EBUSY;
133}
134
135static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
136{
137 int i;
138
139 for (i = 0; i < dev_priv->usec_timeout; i++) {
140 int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
141 if (slots >= entries)
142 return 0;
143 DRM_UDELAY(1);
144 }
145
146#if R128_FIFO_DEBUG
147 DRM_ERROR("failed!\n");
148#endif
149 return -EBUSY;
150}
151
/* Wait for the engine to go fully idle: first drain the FIFO to 64
 * free slots, then poll GUI_STAT until the GUI engine is inactive.
 * On success the pixel cache is flushed and 0 is returned; otherwise
 * -EBUSY (timeout) or the FIFO wait's error is returned.
 */
static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
{
	int i, ret;

	ret = r128_do_wait_for_fifo(dev_priv, 64);
	if (ret)
		return ret;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
			/* NOTE(review): the flush result is discarded here,
			 * unlike r128_do_cce_idle() which propagates it --
			 * presumably best-effort; confirm intent.
			 */
			r128_do_pixcache_flush(dev_priv);
			return 0;
		}
		DRM_UDELAY(1);
	}

#if R128_FIFO_DEBUG
	DRM_ERROR("failed!\n");
#endif
	return -EBUSY;
}
173
174/* ================================================================
175 * CCE control, initialization
176 */
177
/* Load the microcode for the CCE: wait for engine idle, then stream
 * 256 (high, low) dword pairs from r128_cce_microcode[] through the
 * MICROCODE_DATAH/DATAL ports starting at microcode address 0.
 * NOTE(review): the loop never rewrites MICROCODE_ADDR, so the port is
 * assumed to auto-increment -- confirm against the Rage 128 register
 * documentation.
 */
static void r128_cce_load_microcode(drm_r128_private_t * dev_priv)
{
	int i;

	DRM_DEBUG("\n");

	r128_do_wait_for_idle(dev_priv);

	R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
	for (i = 0; i < 256; i++) {
		R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
		R128_WRITE(R128_PM4_MICROCODE_DATAL,
			   r128_cce_microcode[i * 2 + 1]);
	}
}
194
195/* Flush any pending commands to the CCE. This should only be used just
196 * prior to a wait for idle, as it informs the engine that the command
197 * stream is ending.
198 */
199static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
200{
201 u32 tmp;
202
203 tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
204 R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
205}
206
/* Wait for the CCE to go idle.
 *
 * Idle requires all three conditions: the hardware ring head has
 * caught up with our software tail, the PM4 FIFO has drained back to
 * its configured capacity, and neither the PM4 engine nor the GUI is
 * busy.  On success, flushes the pixel cache and returns that flush's
 * status; returns -EBUSY on timeout.
 */
int r128_do_cce_idle(drm_r128_private_t * dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
			int pm4stat = R128_READ(R128_PM4_STAT);
			if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
			     dev_priv->cce_fifo_size) &&
			    !(pm4stat & (R128_PM4_BUSY |
					 R128_PM4_GUI_ACTIVE))) {
				return r128_do_pixcache_flush(dev_priv);
			}
		}
		DRM_UDELAY(1);
	}

#if R128_FIFO_DEBUG
	DRM_ERROR("failed!\n");
	r128_status(dev_priv);
#endif
	return -EBUSY;
}
232
/* Start the Concurrent Command Engine.
 *
 * Waits for engine idle, programs the selected CCE mode and ring size
 * into PM4_BUFFER_CNTL, and kicks the microengine into free-running
 * mode.  NOTE(review): the NOUPDATE flag presumably suppresses the
 * chip's automatic read-pointer writeback -- confirm against the Rage
 * 128 documentation.
 */
static void r128_do_cce_start(drm_r128_private_t * dev_priv)
{
	r128_do_wait_for_idle(dev_priv);

	R128_WRITE(R128_PM4_BUFFER_CNTL,
		   dev_priv->cce_mode | dev_priv->ring.size_l2qw
		   | R128_PM4_BUFFER_CNTL_NOUPDATE);
	R128_READ(R128_PM4_BUFFER_ADDR);	/* as per the sample code */
	R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);

	dev_priv->cce_running = 1;
}
247
/* Reset the Concurrent Command Engine.  This will not flush any pending
 * commands, so you must wait for the CCE command stream to complete
 * before calling this routine.
 */
static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
{
	/* Rewind both hardware ring pointers and the software tail */
	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
	dev_priv->ring.tail = 0;
}
258
/* Stop the Concurrent Command Engine.  This will not flush any pending
 * commands, so you must flush the command stream and wait for the CCE
 * to go idle before calling this routine.
 *
 * Halts the microengine and switches the buffer controller to non-PM4
 * mode, then clears the software running flag.
 */
static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
{
	R128_WRITE(R128_PM4_MICRO_CNTL, 0);
	R128_WRITE(R128_PM4_BUFFER_CNTL,
		   R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);

	dev_priv->cce_running = 0;
}
271
/* Reset the engine.  This will stop the CCE if it is running.
 *
 * Sequence: flush the pixel cache, force the GUI/3D pipe clocks on via
 * the PLL MCLK_CNTL register, pulse SOFT_RESET_GUI, then restore the
 * saved clock-index and reset registers.  Finishes by resetting the
 * software ring state and the vertex/indirect freelists.
 * Always returns 0.
 */
static int r128_do_engine_reset(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;

	r128_do_pixcache_flush(dev_priv);

	/* Save state that the indirect PLL access and reset will clobber */
	clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
	mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);

	R128_WRITE_PLL(R128_MCLK_CNTL,
		       mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);

	gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);

	/* Taken from the sample code - do not change */
	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
	R128_READ(R128_GEN_RESET_CNTL);	/* read-back between pulse edges */
	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
	R128_READ(R128_GEN_RESET_CNTL);

	R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
	R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);

	/* Reset the CCE ring */
	r128_do_cce_reset(dev_priv);

	/* The CCE is no longer running after an engine reset */
	dev_priv->cce_running = 0;

	/* Reset any pending vertex, indirect buffers */
	r128_freelist_reset(dev);

	return 0;
}
310
/* Program the chip's view of the CCE ring buffer: base offset (AGP or
 * PCI GART relative), read/write pointers, FIFO watermarks, and bus
 * mastering.
 */
static void r128_cce_init_ring_buffer(struct drm_device * dev,
				      drm_r128_private_t * dev_priv)
{
	u32 ring_start;
	u32 tmp;

	DRM_DEBUG("\n");

	/* The manual (p. 2) says this address is in "VM space".  This
	 * means it's an offset from the start of AGP space.
	 */
#if __OS_HAS_AGP
	if (!dev_priv->is_pci)
		ring_start = dev_priv->cce_ring->offset - dev->agp->base;
	else
#endif
		/* PCI path: offset relative to the scatter/gather area */
		ring_start = dev_priv->cce_ring->offset -
		    (unsigned long)dev->sg->virtual;

	R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);

	/* Both ring pointers start at the beginning of the ring */
	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);

	/* Set watermark control */
	R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
		   ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
		   | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
		   | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
		   | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));

	/* Force read.  Why?  Because it's in the examples... */
	R128_READ(R128_PM4_BUFFER_ADDR);

	/* Turn on bus mastering */
	tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
	R128_WRITE(R128_BUS_CNTL, tmp);
}
349
350static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
351{
352 drm_r128_private_t *dev_priv;
353
354 DRM_DEBUG("\n");
355
356 dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
357 if (dev_priv == NULL)
358 return -ENOMEM;
359
360 memset(dev_priv, 0, sizeof(drm_r128_private_t));
361
362 dev_priv->is_pci = init->is_pci;
363
364 if (dev_priv->is_pci && !dev->sg) {
365 DRM_ERROR("PCI GART memory not allocated!\n");
366 dev->dev_private = (void *)dev_priv;
367 r128_do_cleanup_cce(dev);
368 return -EINVAL;
369 }
370
371 dev_priv->usec_timeout = init->usec_timeout;
372 if (dev_priv->usec_timeout < 1 ||
373 dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
374 DRM_DEBUG("TIMEOUT problem!\n");
375 dev->dev_private = (void *)dev_priv;
376 r128_do_cleanup_cce(dev);
377 return -EINVAL;
378 }
379
380 dev_priv->cce_mode = init->cce_mode;
381
382 /* GH: Simple idle check.
383 */
384 atomic_set(&dev_priv->idle_count, 0);
385
386 /* We don't support anything other than bus-mastering ring mode,
387 * but the ring can be in either AGP or PCI space for the ring
388 * read pointer.
389 */
390 if ((init->cce_mode != R128_PM4_192BM) &&
391 (init->cce_mode != R128_PM4_128BM_64INDBM) &&
392 (init->cce_mode != R128_PM4_64BM_128INDBM) &&
393 (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
394 DRM_DEBUG("Bad cce_mode!\n");
395 dev->dev_private = (void *)dev_priv;
396 r128_do_cleanup_cce(dev);
397 return -EINVAL;
398 }
399
400 switch (init->cce_mode) {
401 case R128_PM4_NONPM4:
402 dev_priv->cce_fifo_size = 0;
403 break;
404 case R128_PM4_192PIO:
405 case R128_PM4_192BM:
406 dev_priv->cce_fifo_size = 192;
407 break;
408 case R128_PM4_128PIO_64INDBM:
409 case R128_PM4_128BM_64INDBM:
410 dev_priv->cce_fifo_size = 128;
411 break;
412 case R128_PM4_64PIO_128INDBM:
413 case R128_PM4_64BM_128INDBM:
414 case R128_PM4_64PIO_64VCBM_64INDBM:
415 case R128_PM4_64BM_64VCBM_64INDBM:
416 case R128_PM4_64PIO_64VCPIO_64INDPIO:
417 dev_priv->cce_fifo_size = 64;
418 break;
419 }
420
421 switch (init->fb_bpp) {
422 case 16:
423 dev_priv->color_fmt = R128_DATATYPE_RGB565;
424 break;
425 case 32:
426 default:
427 dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
428 break;
429 }
430 dev_priv->front_offset = init->front_offset;
431 dev_priv->front_pitch = init->front_pitch;
432 dev_priv->back_offset = init->back_offset;
433 dev_priv->back_pitch = init->back_pitch;
434
435 switch (init->depth_bpp) {
436 case 16:
437 dev_priv->depth_fmt = R128_DATATYPE_RGB565;
438 break;
439 case 24:
440 case 32:
441 default:
442 dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
443 break;
444 }
445 dev_priv->depth_offset = init->depth_offset;
446 dev_priv->depth_pitch = init->depth_pitch;
447 dev_priv->span_offset = init->span_offset;
448
449 dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
450 (dev_priv->front_offset >> 5));
451 dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
452 (dev_priv->back_offset >> 5));
453 dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
454 (dev_priv->depth_offset >> 5) |
455 R128_DST_TILE);
456 dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
457 (dev_priv->span_offset >> 5));
458
459 dev_priv->sarea = drm_getsarea(dev);
460 if (!dev_priv->sarea) {
461 DRM_ERROR("could not find sarea!\n");
462 dev->dev_private = (void *)dev_priv;
463 r128_do_cleanup_cce(dev);
464 return -EINVAL;
465 }
466
467 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
468 if (!dev_priv->mmio) {
469 DRM_ERROR("could not find mmio region!\n");
470 dev->dev_private = (void *)dev_priv;
471 r128_do_cleanup_cce(dev);
472 return -EINVAL;
473 }
474 dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
475 if (!dev_priv->cce_ring) {
476 DRM_ERROR("could not find cce ring region!\n");
477 dev->dev_private = (void *)dev_priv;
478 r128_do_cleanup_cce(dev);
479 return -EINVAL;
480 }
481 dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
482 if (!dev_priv->ring_rptr) {
483 DRM_ERROR("could not find ring read pointer!\n");
484 dev->dev_private = (void *)dev_priv;
485 r128_do_cleanup_cce(dev);
486 return -EINVAL;
487 }
488 dev->agp_buffer_token = init->buffers_offset;
489 dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
490 if (!dev->agp_buffer_map) {
491 DRM_ERROR("could not find dma buffer region!\n");
492 dev->dev_private = (void *)dev_priv;
493 r128_do_cleanup_cce(dev);
494 return -EINVAL;
495 }
496
497 if (!dev_priv->is_pci) {
498 dev_priv->agp_textures =
499 drm_core_findmap(dev, init->agp_textures_offset);
500 if (!dev_priv->agp_textures) {
501 DRM_ERROR("could not find agp texture region!\n");
502 dev->dev_private = (void *)dev_priv;
503 r128_do_cleanup_cce(dev);
504 return -EINVAL;
505 }
506 }
507
508 dev_priv->sarea_priv =
509 (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
510 init->sarea_priv_offset);
511
512#if __OS_HAS_AGP
513 if (!dev_priv->is_pci) {
514 drm_core_ioremap(dev_priv->cce_ring, dev);
515 drm_core_ioremap(dev_priv->ring_rptr, dev);
516 drm_core_ioremap(dev->agp_buffer_map, dev);
517 if (!dev_priv->cce_ring->handle ||
518 !dev_priv->ring_rptr->handle ||
519 !dev->agp_buffer_map->handle) {
520 DRM_ERROR("Could not ioremap agp regions!\n");
521 dev->dev_private = (void *)dev_priv;
522 r128_do_cleanup_cce(dev);
523 return -ENOMEM;
524 }
525 } else
526#endif
527 {
528 dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
529 dev_priv->ring_rptr->handle =
530 (void *)dev_priv->ring_rptr->offset;
531 dev->agp_buffer_map->handle =
532 (void *)dev->agp_buffer_map->offset;
533 }
534
535#if __OS_HAS_AGP
536 if (!dev_priv->is_pci)
537 dev_priv->cce_buffers_offset = dev->agp->base;
538 else
539#endif
540 dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
541
542 dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
543 dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
544 + init->ring_size / sizeof(u32));
545 dev_priv->ring.size = init->ring_size;
546 dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
547
548 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
549
550 dev_priv->ring.high_mark = 128;
551
552 dev_priv->sarea_priv->last_frame = 0;
553 R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
554
555 dev_priv->sarea_priv->last_dispatch = 0;
556 R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
557
558#if __OS_HAS_AGP
559 if (dev_priv->is_pci) {
560#endif
561 dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
562 dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
563 dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
564 dev_priv->gart_info.addr = NULL;
565 dev_priv->gart_info.bus_addr = 0;
566 dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
567 if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
568 DRM_ERROR("failed to init PCI GART!\n");
569 dev->dev_private = (void *)dev_priv;
570 r128_do_cleanup_cce(dev);
571 return -ENOMEM;
572 }
573 R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
574#if __OS_HAS_AGP
575 }
576#endif
577
578 r128_cce_init_ring_buffer(dev, dev_priv);
579 r128_cce_load_microcode(dev_priv);
580
581 dev->dev_private = (void *)dev_priv;
582
583 r128_do_engine_reset(dev);
584
585 return 0;
586}
587
588int r128_do_cleanup_cce(struct drm_device * dev)
589{
590
591 /* Make sure interrupts are disabled here because the uninstall ioctl
592 * may not have been called from userspace and after dev_private
593 * is freed, it's too late.
594 */
595 if (dev->irq_enabled)
596 drm_irq_uninstall(dev);
597
598 if (dev->dev_private) {
599 drm_r128_private_t *dev_priv = dev->dev_private;
600
601#if __OS_HAS_AGP
602 if (!dev_priv->is_pci) {
603 if (dev_priv->cce_ring != NULL)
604 drm_core_ioremapfree(dev_priv->cce_ring, dev);
605 if (dev_priv->ring_rptr != NULL)
606 drm_core_ioremapfree(dev_priv->ring_rptr, dev);
607 if (dev->agp_buffer_map != NULL) {
608 drm_core_ioremapfree(dev->agp_buffer_map, dev);
609 dev->agp_buffer_map = NULL;
610 }
611 } else
612#endif
613 {
614 if (dev_priv->gart_info.bus_addr)
615 if (!drm_ati_pcigart_cleanup(dev,
616 &dev_priv->gart_info))
617 DRM_ERROR
618 ("failed to cleanup PCI GART!\n");
619 }
620
621 drm_free(dev->dev_private, sizeof(drm_r128_private_t),
622 DRM_MEM_DRIVER);
623 dev->dev_private = NULL;
624 }
625
626 return 0;
627}
628
629int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
630{
631 drm_r128_init_t *init = data;
632
633 DRM_DEBUG("\n");
634
635 LOCK_TEST_WITH_RETURN(dev, file_priv);
636
637 switch (init->func) {
638 case R128_INIT_CCE:
639 return r128_do_init_cce(dev, init);
640 case R128_CLEANUP_CCE:
641 return r128_do_cleanup_cce(dev);
642 }
643
644 return -EINVAL;
645}
646
647int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
648{
649 drm_r128_private_t *dev_priv = dev->dev_private;
650 DRM_DEBUG("\n");
651
652 LOCK_TEST_WITH_RETURN(dev, file_priv);
653
654 if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
655 DRM_DEBUG("while CCE running\n");
656 return 0;
657 }
658
659 r128_do_cce_start(dev_priv);
660
661 return 0;
662}
663
664/* Stop the CCE. The engine must have been idled before calling this
665 * routine.
666 */
667int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
668{
669 drm_r128_private_t *dev_priv = dev->dev_private;
670 drm_r128_cce_stop_t *stop = data;
671 int ret;
672 DRM_DEBUG("\n");
673
674 LOCK_TEST_WITH_RETURN(dev, file_priv);
675
676 /* Flush any pending CCE commands. This ensures any outstanding
677 * commands are exectuted by the engine before we turn it off.
678 */
679 if (stop->flush) {
680 r128_do_cce_flush(dev_priv);
681 }
682
683 /* If we fail to make the engine go idle, we return an error
684 * code so that the DRM ioctl wrapper can try again.
685 */
686 if (stop->idle) {
687 ret = r128_do_cce_idle(dev_priv);
688 if (ret)
689 return ret;
690 }
691
692 /* Finally, we can turn off the CCE. If the engine isn't idle,
693 * we will get some dropped triangles as they won't be fully
694 * rendered before the CCE is shut down.
695 */
696 r128_do_cce_stop(dev_priv);
697
698 /* Reset the engine */
699 r128_do_engine_reset(dev);
700
701 return 0;
702}
703
704/* Just reset the CCE ring. Called as part of an X Server engine reset.
705 */
706int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
707{
708 drm_r128_private_t *dev_priv = dev->dev_private;
709 DRM_DEBUG("\n");
710
711 LOCK_TEST_WITH_RETURN(dev, file_priv);
712
713 if (!dev_priv) {
714 DRM_DEBUG("called before init done\n");
715 return -EINVAL;
716 }
717
718 r128_do_cce_reset(dev_priv);
719
720 /* The CCE is no longer running after an engine reset */
721 dev_priv->cce_running = 0;
722
723 return 0;
724}
725
726int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
727{
728 drm_r128_private_t *dev_priv = dev->dev_private;
729 DRM_DEBUG("\n");
730
731 LOCK_TEST_WITH_RETURN(dev, file_priv);
732
733 if (dev_priv->cce_running) {
734 r128_do_cce_flush(dev_priv);
735 }
736
737 return r128_do_cce_idle(dev_priv);
738}
739
740int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
741{
742 DRM_DEBUG("\n");
743
744 LOCK_TEST_WITH_RETURN(dev, file_priv);
745
746 return r128_do_engine_reset(dev);
747}
748
749int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
750{
	/* Fullscreen mode was disabled in driver interface 2.5 (see the
	 * interface history in r128_drv.h); always reject so old
	 * userspace fails cleanly. */
751	return -EINVAL;
752}
753
754/* ================================================================
755 * Freelist management
756 */
757#define R128_BUFFER_USED 0xffffffff
758#define R128_BUFFER_FREE 0
759
/* NOTE(review): dead code -- compiled out with #if 0.  Kept as a sketch
 * of a linked-list freelist to replace the linear scans in
 * r128_freelist_get().  If ever revived: on the mid-loop -ENOMEM path
 * the entries already linked appear to be leaked -- confirm and add
 * cleanup before enabling. */
760#if 0
761static int r128_freelist_init(struct drm_device * dev)
762{
763	struct drm_device_dma *dma = dev->dma;
764	drm_r128_private_t *dev_priv = dev->dev_private;
765	struct drm_buf *buf;
766	drm_r128_buf_priv_t *buf_priv;
767	drm_r128_freelist_t *entry;
768	int i;
769
	/* Sentinel head node, marked permanently in-use. */
770	dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
771	if (dev_priv->head == NULL)
772		return -ENOMEM;
773
774	memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
775	dev_priv->head->age = R128_BUFFER_USED;
776
	/* Insert one freelist entry per DMA buffer, just after the head. */
777	for (i = 0; i < dma->buf_count; i++) {
778		buf = dma->buflist[i];
779		buf_priv = buf->dev_private;
780
781		entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
782		if (!entry)
783			return -ENOMEM;
784
785		entry->age = R128_BUFFER_FREE;
786		entry->buf = buf;
787		entry->prev = dev_priv->head;
788		entry->next = dev_priv->head->next;
789		if (!entry->next)
790			dev_priv->tail = entry;
791
792		buf_priv->discard = 0;
793		buf_priv->dispatched = 0;
794		buf_priv->list_entry = entry;
795
796		dev_priv->head->next = entry;
797
798		if (dev_priv->head->next)
799			dev_priv->head->next->prev = entry;
800	}
801
802	return 0;
803
804}
805#endif
806
807static struct drm_buf *r128_freelist_get(struct drm_device * dev)
808{
809 struct drm_device_dma *dma = dev->dma;
810 drm_r128_private_t *dev_priv = dev->dev_private;
811 drm_r128_buf_priv_t *buf_priv;
812 struct drm_buf *buf;
813 int i, t;
814
815 /* FIXME: Optimize -- use freelist code */
816
817 for (i = 0; i < dma->buf_count; i++) {
818 buf = dma->buflist[i];
819 buf_priv = buf->dev_private;
820 if (!buf->file_priv)
821 return buf;
822 }
823
824 for (t = 0; t < dev_priv->usec_timeout; t++) {
825 u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
826
827 for (i = 0; i < dma->buf_count; i++) {
828 buf = dma->buflist[i];
829 buf_priv = buf->dev_private;
830 if (buf->pending && buf_priv->age <= done_age) {
831 /* The buffer has been processed, so it
832 * can now be used.
833 */
834 buf->pending = 0;
835 return buf;
836 }
837 }
838 DRM_UDELAY(1);
839 }
840
841 DRM_DEBUG("returning NULL!\n");
842 return NULL;
843}
844
845void r128_freelist_reset(struct drm_device * dev)
846{
847 struct drm_device_dma *dma = dev->dma;
848 int i;
849
850 for (i = 0; i < dma->buf_count; i++) {
851 struct drm_buf *buf = dma->buflist[i];
852 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
853 buf_priv->age = 0;
854 }
855}
856
857/* ================================================================
858 * CCE command submission
859 */
860
861int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
862{
863 drm_r128_ring_buffer_t *ring = &dev_priv->ring;
864 int i;
865
866 for (i = 0; i < dev_priv->usec_timeout; i++) {
867 r128_update_ring_snapshot(dev_priv);
868 if (ring->space >= n)
869 return 0;
870 DRM_UDELAY(1);
871 }
872
873 /* FIXME: This is being ignored... */
874 DRM_ERROR("failed!\n");
875 return -EBUSY;
876}
877
878static int r128_cce_get_buffers(struct drm_device * dev,
879 struct drm_file *file_priv,
880 struct drm_dma * d)
881{
882 int i;
883 struct drm_buf *buf;
884
885 for (i = d->granted_count; i < d->request_count; i++) {
886 buf = r128_freelist_get(dev);
887 if (!buf)
888 return -EAGAIN;
889
890 buf->file_priv = file_priv;
891
892 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
893 sizeof(buf->idx)))
894 return -EFAULT;
895 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
896 sizeof(buf->total)))
897 return -EFAULT;
898
899 d->granted_count++;
900 }
901 return 0;
902}
903
904int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
905{
906 struct drm_device_dma *dma = dev->dma;
907 int ret = 0;
908 struct drm_dma *d = data;
909
910 LOCK_TEST_WITH_RETURN(dev, file_priv);
911
912 /* Please don't send us buffers.
913 */
914 if (d->send_count != 0) {
915 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
916 DRM_CURRENTPID, d->send_count);
917 return -EINVAL;
918 }
919
920 /* We'll send you buffers.
921 */
922 if (d->request_count < 0 || d->request_count > dma->buf_count) {
923 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
924 DRM_CURRENTPID, d->request_count, dma->buf_count);
925 return -EINVAL;
926 }
927
928 d->granted_count = 0;
929
930 if (d->request_count) {
931 ret = r128_cce_get_buffers(dev, file_priv, d);
932 }
933
934 return ret;
935}
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
new file mode 100644
index 000000000000..6108e7587e12
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -0,0 +1,103 @@
1/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
2 * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "r128_drm.h"
35#include "r128_drv.h"
36
37#include "drm_pciids.h"
38
/* PCI device IDs this driver binds to; r128_PCI_IDS is expanded from
 * drm_pciids.h. */
39static struct pci_device_id pciidlist[] = {
40	r128_PCI_IDS
41};
42
/* DRM driver descriptor: feature flags, per-buffer private size, the
 * hooks implemented elsewhere in this driver, and the file/PCI glue. */
43static struct drm_driver driver = {
44	.driver_features =
45	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
46	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
47	    DRIVER_IRQ_VBL,
48	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
49	.preclose = r128_driver_preclose,
50	.lastclose = r128_driver_lastclose,
51	.vblank_wait = r128_driver_vblank_wait,
52	.irq_preinstall = r128_driver_irq_preinstall,
53	.irq_postinstall = r128_driver_irq_postinstall,
54	.irq_uninstall = r128_driver_irq_uninstall,
55	.irq_handler = r128_driver_irq_handler,
56	.reclaim_buffers = drm_core_reclaim_buffers,
57	.get_map_ofs = drm_core_get_map_ofs,
58	.get_reg_ofs = drm_core_get_reg_ofs,
	/* ioctl table and dma entry point live in r128_state.c / r128_cce.c */
59	.ioctls = r128_ioctls,
60	.dma_ioctl = r128_cce_buffers,
61	.fops = {
62		 .owner = THIS_MODULE,
63		 .open = drm_open,
64		 .release = drm_release,
65		 .ioctl = drm_ioctl,
66		 .mmap = drm_mmap,
67		 .poll = drm_poll,
68		 .fasync = drm_fasync,
69#ifdef CONFIG_COMPAT
70		 .compat_ioctl = r128_compat_ioctl,
71#endif
72	},
73
74	.pci_driver = {
75		 .name = DRIVER_NAME,
76		 .id_table = pciidlist,
77	},
78
79	.name = DRIVER_NAME,
80	.desc = DRIVER_DESC,
81	.date = DRIVER_DATE,
82	.major = DRIVER_MAJOR,
83	.minor = DRIVER_MINOR,
84	.patchlevel = DRIVER_PATCHLEVEL,
85};
86
/* Module entry: fill in the ioctl count (only known at link time) and
 * register the driver with the DRM core. */
87static int __init r128_init(void)
88{
89	driver.num_ioctls = r128_max_ioctl;
90	return drm_init(&driver);
91}
92
/* Module exit: unregister from the DRM core. */
93static void __exit r128_exit(void)
94{
95	drm_exit(&driver);
96}
97
98module_init(r128_init);
99module_exit(r128_exit);
100
101MODULE_AUTHOR(DRIVER_AUTHOR);
102MODULE_DESCRIPTION(DRIVER_DESC);
103MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
new file mode 100644
index 000000000000..011105e51ac6
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -0,0 +1,522 @@
1/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
2 * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
3 */
4/*
5 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
6 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
7 * All rights reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Rickard E. (Rik) Faith <faith@valinux.com>
30 * Kevin E. Martin <martin@valinux.com>
31 * Gareth Hughes <gareth@valinux.com>
32 * Michel Dänzer <daenzerm@student.ethz.ch>
33 */
34
35#ifndef __R128_DRV_H__
36#define __R128_DRV_H__
37
38/* General customization:
39 */
40#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
41
42#define DRIVER_NAME "r128"
43#define DRIVER_DESC "ATI Rage 128"
44#define DRIVER_DATE "20030725"
45
46/* Interface history:
47 *
48 * ?? - ??
49 * 2.4 - Add support for ycbcr textures (no new ioctls)
50 * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
51 */
52#define DRIVER_MAJOR 2
53#define DRIVER_MINOR 5
54#define DRIVER_PATCHLEVEL 0
55
56#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
57
/* Doubly-linked freelist node tracking one DMA buffer and the dispatch
 * age at which it becomes reusable (see r128_freelist_* in r128_cce.c). */
58typedef struct drm_r128_freelist {
59	unsigned int age;
60	struct drm_buf *buf;
61	struct drm_r128_freelist *next;
62	struct drm_r128_freelist *prev;
63} drm_r128_freelist_t;
64
/* Software view of the CCE ring: mapped start/end, byte size, its
 * log2-quadword size, the write pointer (tail) with wrap mask, the
 * cached free space, and the refill threshold (high_mark). */
65typedef struct drm_r128_ring_buffer {
66	u32 *start;
67	u32 *end;
68	int size;
69	int size_l2qw;
70
71	u32 tail;
72	u32 tail_mask;
73	int space;
74
75	int high_mark;
76} drm_r128_ring_buffer_t;
77
/* Per-device private state, allocated at CCE init and hung off
 * dev->dev_private; NULL until DRM_IOCTL_R128_INIT has run. */
78typedef struct drm_r128_private {
79	drm_r128_ring_buffer_t ring;
80	drm_r128_sarea_t *sarea_priv;
81
82	int cce_mode;
83	int cce_fifo_size;
84	int cce_running;
85
	/* freelist bookkeeping (head is a sentinel; see r128_cce.c) */
86	drm_r128_freelist_t *head;
87	drm_r128_freelist_t *tail;
88
89	int usec_timeout;
90	int is_pci;
91	unsigned long cce_buffers_offset;
92
93	atomic_t idle_count;
94
95	int page_flipping;
96	int current_page;
97	u32 crtc_offset;
98	u32 crtc_offset_cntl;
99
	/* framebuffer layout captured from the init ioctl */
100	u32 color_fmt;
101	unsigned int front_offset;
102	unsigned int front_pitch;
103	unsigned int back_offset;
104	unsigned int back_pitch;
105
106	u32 depth_fmt;
107	unsigned int depth_offset;
108	unsigned int depth_pitch;
109	unsigned int span_offset;
110
	/* precomputed pitch/offset register values */
111	u32 front_pitch_offset_c;
112	u32 back_pitch_offset_c;
113	u32 depth_pitch_offset_c;
114	u32 span_pitch_offset_c;
115
	/* maps resolved at init time via drm_core_findmap() */
116	drm_local_map_t *sarea;
117	drm_local_map_t *mmio;
118	drm_local_map_t *cce_ring;
119	drm_local_map_t *ring_rptr;
120	drm_local_map_t *agp_textures;
121	struct drm_ati_pcigart_info gart_info;
122} drm_r128_private_t;
123
/* Per-DMA-buffer private state (dev_priv_size in the driver struct). */
124typedef struct drm_r128_buf_priv {
125	u32 age;
126	int prim;
127	int discard;
128	int dispatched;
129	drm_r128_freelist_t *list_entry;
130} drm_r128_buf_priv_t;
131
132extern struct drm_ioctl_desc r128_ioctls[];
133extern int r128_max_ioctl;
134
135 /* r128_cce.c */
136extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
137extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
138extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
139extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
140extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
141extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
142extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
143extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
144
145extern void r128_freelist_reset(struct drm_device * dev);
146
147extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
148
149extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
150extern int r128_do_cleanup_cce(struct drm_device * dev);
151
152extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
153
154extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
155extern void r128_driver_irq_preinstall(struct drm_device * dev);
156extern void r128_driver_irq_postinstall(struct drm_device * dev);
157extern void r128_driver_irq_uninstall(struct drm_device * dev);
158extern void r128_driver_lastclose(struct drm_device * dev);
159extern void r128_driver_preclose(struct drm_device * dev,
160 struct drm_file *file_priv);
161
162extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
163 unsigned long arg);
164
165/* Register definitions, register access macros and drmAddMap constants
166 * for Rage 128 kernel driver.
167 */
168
169#define R128_AUX_SC_CNTL 0x1660
170# define R128_AUX1_SC_EN (1 << 0)
171# define R128_AUX1_SC_MODE_OR (0 << 1)
172# define R128_AUX1_SC_MODE_NAND (1 << 1)
173# define R128_AUX2_SC_EN (1 << 2)
174# define R128_AUX2_SC_MODE_OR (0 << 3)
175# define R128_AUX2_SC_MODE_NAND (1 << 3)
176# define R128_AUX3_SC_EN (1 << 4)
177# define R128_AUX3_SC_MODE_OR (0 << 5)
178# define R128_AUX3_SC_MODE_NAND (1 << 5)
179#define R128_AUX1_SC_LEFT 0x1664
180#define R128_AUX1_SC_RIGHT 0x1668
181#define R128_AUX1_SC_TOP 0x166c
182#define R128_AUX1_SC_BOTTOM 0x1670
183#define R128_AUX2_SC_LEFT 0x1674
184#define R128_AUX2_SC_RIGHT 0x1678
185#define R128_AUX2_SC_TOP 0x167c
186#define R128_AUX2_SC_BOTTOM 0x1680
187#define R128_AUX3_SC_LEFT 0x1684
188#define R128_AUX3_SC_RIGHT 0x1688
189#define R128_AUX3_SC_TOP 0x168c
190#define R128_AUX3_SC_BOTTOM 0x1690
191
192#define R128_BRUSH_DATA0 0x1480
193#define R128_BUS_CNTL 0x0030
194# define R128_BUS_MASTER_DIS (1 << 6)
195
196#define R128_CLOCK_CNTL_INDEX 0x0008
197#define R128_CLOCK_CNTL_DATA 0x000c
198# define R128_PLL_WR_EN (1 << 7)
199#define R128_CONSTANT_COLOR_C 0x1d34
200#define R128_CRTC_OFFSET 0x0224
201#define R128_CRTC_OFFSET_CNTL 0x0228
202# define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16)
203
204#define R128_DP_GUI_MASTER_CNTL 0x146c
205# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
206# define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
207# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4)
208# define R128_GMC_BRUSH_NONE (15 << 4)
209# define R128_GMC_DST_16BPP (4 << 8)
210# define R128_GMC_DST_24BPP (5 << 8)
211# define R128_GMC_DST_32BPP (6 << 8)
212# define R128_GMC_DST_DATATYPE_SHIFT 8
213# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12)
214# define R128_DP_SRC_SOURCE_MEMORY (2 << 24)
215# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24)
216# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28)
217# define R128_GMC_AUX_CLIP_DIS (1 << 29)
218# define R128_GMC_WR_MSK_DIS (1 << 30)
219# define R128_ROP3_S 0x00cc0000
220# define R128_ROP3_P 0x00f00000
221#define R128_DP_WRITE_MASK 0x16cc
222#define R128_DST_PITCH_OFFSET_C 0x1c80
223# define R128_DST_TILE (1 << 31)
224
225#define R128_GEN_INT_CNTL 0x0040
226# define R128_CRTC_VBLANK_INT_EN (1 << 0)
227#define R128_GEN_INT_STATUS 0x0044
228# define R128_CRTC_VBLANK_INT (1 << 0)
229# define R128_CRTC_VBLANK_INT_AK (1 << 0)
230#define R128_GEN_RESET_CNTL 0x00f0
231# define R128_SOFT_RESET_GUI (1 << 0)
232
233#define R128_GUI_SCRATCH_REG0 0x15e0
234#define R128_GUI_SCRATCH_REG1 0x15e4
235#define R128_GUI_SCRATCH_REG2 0x15e8
236#define R128_GUI_SCRATCH_REG3 0x15ec
237#define R128_GUI_SCRATCH_REG4 0x15f0
238#define R128_GUI_SCRATCH_REG5 0x15f4
239
240#define R128_GUI_STAT 0x1740
241# define R128_GUI_FIFOCNT_MASK 0x0fff
242# define R128_GUI_ACTIVE (1 << 31)
243
244#define R128_MCLK_CNTL 0x000f
245# define R128_FORCE_GCP (1 << 16)
246# define R128_FORCE_PIPE3D_CP (1 << 17)
247# define R128_FORCE_RCP (1 << 18)
248
249#define R128_PC_GUI_CTLSTAT 0x1748
250#define R128_PC_NGUI_CTLSTAT 0x0184
251# define R128_PC_FLUSH_GUI (3 << 0)
252# define R128_PC_RI_GUI (1 << 2)
253# define R128_PC_FLUSH_ALL 0x00ff
254# define R128_PC_BUSY (1 << 31)
255
256#define R128_PCI_GART_PAGE 0x017c
257#define R128_PRIM_TEX_CNTL_C 0x1cb0
258
259#define R128_SCALE_3D_CNTL 0x1a00
260#define R128_SEC_TEX_CNTL_C 0x1d00
261#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c
262#define R128_SETUP_CNTL 0x1bc4
263#define R128_STEN_REF_MASK_C 0x1d40
264
265#define R128_TEX_CNTL_C 0x1c9c
266# define R128_TEX_CACHE_FLUSH (1 << 23)
267
268#define R128_WAIT_UNTIL 0x1720
269# define R128_EVENT_CRTC_OFFSET (1 << 0)
270#define R128_WINDOW_XY_OFFSET 0x1bcc
271
272/* CCE registers
273 */
274#define R128_PM4_BUFFER_OFFSET 0x0700
275#define R128_PM4_BUFFER_CNTL 0x0704
276# define R128_PM4_MASK (15 << 28)
277# define R128_PM4_NONPM4 (0 << 28)
278# define R128_PM4_192PIO (1 << 28)
279# define R128_PM4_192BM (2 << 28)
280# define R128_PM4_128PIO_64INDBM (3 << 28)
281# define R128_PM4_128BM_64INDBM (4 << 28)
282# define R128_PM4_64PIO_128INDBM (5 << 28)
283# define R128_PM4_64BM_128INDBM (6 << 28)
284# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
285# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
286# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
287# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27)
288
289#define R128_PM4_BUFFER_WM_CNTL 0x0708
290# define R128_WMA_SHIFT 0
291# define R128_WMB_SHIFT 8
292# define R128_WMC_SHIFT 16
293# define R128_WB_WM_SHIFT 24
294
295#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c
296#define R128_PM4_BUFFER_DL_RPTR 0x0710
297#define R128_PM4_BUFFER_DL_WPTR 0x0714
298# define R128_PM4_BUFFER_DL_DONE (1 << 31)
299
300#define R128_PM4_VC_FPU_SETUP 0x071c
301
302#define R128_PM4_IW_INDOFF 0x0738
303#define R128_PM4_IW_INDSIZE 0x073c
304
305#define R128_PM4_STAT 0x07b8
306# define R128_PM4_FIFOCNT_MASK 0x0fff
307# define R128_PM4_BUSY (1 << 16)
308# define R128_PM4_GUI_ACTIVE (1 << 31)
309
310#define R128_PM4_MICROCODE_ADDR 0x07d4
311#define R128_PM4_MICROCODE_RADDR 0x07d8
312#define R128_PM4_MICROCODE_DATAH 0x07dc
313#define R128_PM4_MICROCODE_DATAL 0x07e0
314
315#define R128_PM4_BUFFER_ADDR 0x07f0
316#define R128_PM4_MICRO_CNTL 0x07fc
317# define R128_PM4_MICRO_FREERUN (1 << 30)
318
319#define R128_PM4_FIFO_DATA_EVEN 0x1000
320#define R128_PM4_FIFO_DATA_ODD 0x1004
321
322/* CCE command packets
323 */
324#define R128_CCE_PACKET0 0x00000000
325#define R128_CCE_PACKET1 0x40000000
326#define R128_CCE_PACKET2 0x80000000
327#define R128_CCE_PACKET3 0xC0000000
328# define R128_CNTL_HOSTDATA_BLT 0x00009400
329# define R128_CNTL_PAINT_MULTI 0x00009A00
330# define R128_CNTL_BITBLT_MULTI 0x00009B00
331# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300
332
333#define R128_CCE_PACKET_MASK 0xC0000000
334#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
335#define R128_CCE_PACKET0_REG_MASK 0x000007ff
336#define R128_CCE_PACKET1_REG0_MASK 0x000007ff
337#define R128_CCE_PACKET1_REG1_MASK 0x003ff800
338
339#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000
340#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001
341#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002
342#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003
343#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
344#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
345#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
346#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007
347#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010
348#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020
349#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030
350#define R128_CCE_VC_CNTL_NUM_SHIFT 16
351
352#define R128_DATATYPE_VQ 0
353#define R128_DATATYPE_CI4 1
354#define R128_DATATYPE_CI8 2
355#define R128_DATATYPE_ARGB1555 3
356#define R128_DATATYPE_RGB565 4
357#define R128_DATATYPE_RGB888 5
358#define R128_DATATYPE_ARGB8888 6
359#define R128_DATATYPE_RGB332 7
360#define R128_DATATYPE_Y8 8
361#define R128_DATATYPE_RGB8 9
362#define R128_DATATYPE_CI16 10
363#define R128_DATATYPE_YVYU422 11
364#define R128_DATATYPE_VYUY422 12
365#define R128_DATATYPE_AYUV444 14
366#define R128_DATATYPE_ARGB4444 15
367
368/* Constants */
369#define R128_AGP_OFFSET 0x02000000
370
371#define R128_WATERMARK_L 16
372#define R128_WATERMARK_M 8
373#define R128_WATERMARK_N 8
374#define R128_WATERMARK_K 128
375
376#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */
377
378#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0
379#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1
380#define R128_MAX_VB_AGE 0x7fffffff
381#define R128_MAX_VB_VERTS (0xffff)
382
383#define R128_RING_HIGH_MARK 128
384
385#define R128_PERFORMANCE_BOXES 0
386
387#define R128_PCIGART_TABLE_SIZE 32768
388
389#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
390#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
391#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
392#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
393
394#define R128_WRITE_PLL(addr,val) \
395do { \
396 R128_WRITE8(R128_CLOCK_CNTL_INDEX, \
397 ((addr) & 0x1f) | R128_PLL_WR_EN); \
398 R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
399} while (0)
400
401#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
402 ((n) << 16) | ((reg) >> 2))
403#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \
404 (((reg1) >> 2) << 11) | ((reg0) >> 2))
405#define CCE_PACKET2() (R128_CCE_PACKET2)
406#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
407 (pkt) | ((n) << 16))
408
409static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
410{
411 drm_r128_ring_buffer_t *ring = &dev_priv->ring;
412 ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
413 if (ring->space <= 0)
414 ring->space += ring->size;
415}
416
417/* ================================================================
418 * Misc helper macros
419 */
420
421#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
422do { \
423 drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
424 if ( ring->space < ring->high_mark ) { \
425 for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \
426 r128_update_ring_snapshot( dev_priv ); \
427 if ( ring->space >= ring->high_mark ) \
428 goto __ring_space_done; \
429 DRM_UDELAY(1); \
430 } \
431 DRM_ERROR( "ring space check failed!\n" ); \
432 return -EBUSY; \
433 } \
434 __ring_space_done: \
435 ; \
436} while (0)
437
438#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
439do { \
440 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \
441 if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \
442 int __ret = r128_do_cce_idle( dev_priv ); \
443 if ( __ret ) return __ret; \
444 sarea_priv->last_dispatch = 0; \
445 r128_freelist_reset( dev ); \
446 } \
447} while (0)
448
449#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \
450 OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \
451 OUT_RING( R128_EVENT_CRTC_OFFSET ); \
452} while (0)
453
454/* ================================================================
455 * Ring control
456 */
457
/* Set to 1 for DRM_INFO tracing of every ring macro invocation. */
458#define R128_VERBOSE 0
459
/* Locals required in any function that uses BEGIN_RING/OUT_RING. */
460#define RING_LOCALS \
461	int write, _nr; unsigned int tail_mask; volatile u32 *ring;
462
/* Reserve n dwords of ring space, waiting (after committing any
 * pending writes) if the cached free space is insufficient. */
463#define BEGIN_RING( n ) do { \
464	if ( R128_VERBOSE ) { \
465		DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
466	} \
467	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
468		COMMIT_RING(); \
469		r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \
470	} \
471	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
472	ring = dev_priv->ring.start; \
473	write = dev_priv->ring.tail; \
474	tail_mask = dev_priv->ring.tail_mask; \
475} while (0)
476
477/* You can set this to zero if you want.  If the card locks up, you'll
478 * need to keep this set.  It works around a bug in early revs of the
479 * Rage 128 chipset, where the CCE would read 32 dwords past the end of
480 * the ring buffer before wrapping around.
481 */
482#define R128_BROKEN_CCE 1
483
/* Finish a BEGIN_RING block: mirror any wrapped dwords past the ring
 * end (chipset workaround above), sanity-check that exactly _nr dwords
 * were emitted, then publish the new tail. */
484#define ADVANCE_RING() do { \
485	if ( R128_VERBOSE ) { \
486		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
487			  write, dev_priv->ring.tail ); \
488	} \
489	if ( R128_BROKEN_CCE && write < 32 ) { \
490		memcpy( dev_priv->ring.end, \
491			dev_priv->ring.start, \
492			write * sizeof(u32) ); \
493	} \
494	if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
495		DRM_ERROR( \
496			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
497			((dev_priv->ring.tail + _nr) & tail_mask), \
498			write, __LINE__); \
499	} else \
500		dev_priv->ring.tail = write; \
501} while (0)
502
/* Make the emitted commands visible to the hardware: barrier, write the
 * new tail to the ring write-pointer register, then read it back to
 * flush the write. */
503#define COMMIT_RING() do { \
504	if ( R128_VERBOSE ) { \
505		DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \
506			  dev_priv->ring.tail ); \
507	} \
508	DRM_MEMORYBARRIER(); \
509	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \
510	R128_READ( R128_PM4_BUFFER_DL_WPTR ); \
511} while (0)
512
/* Emit one little-endian dword at the current write index, wrapping
 * via tail_mask. */
513#define OUT_RING( x ) do { \
514	if ( R128_VERBOSE ) { \
515		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n", \
516			  (unsigned int)(x), write ); \
517	} \
518	ring[write++] = cpu_to_le32( x ); \
519	write &= tail_mask; \
520} while (0)
521
522#endif /* __R128_DRV_H__ */
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
new file mode 100644
index 000000000000..d3cb676eee84
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -0,0 +1,221 @@
1/**
2 * \file r128_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the R128 DRM.
5 *
6 * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
7 *
8 * Copyright (C) Paul Mackerras 2005
9 * Copyright (C) Egbert Eich 2003,2004
10 * Copyright (C) Dave Airlie 2005
11 * All Rights Reserved.
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a
14 * copy of this software and associated documentation files (the "Software"),
15 * to deal in the Software without restriction, including without limitation
16 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 * and/or sell copies of the Software, and to permit persons to whom the
18 * Software is furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice (including the next
21 * paragraph) shall be included in all copies or substantial portions of the
22 * Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
28 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32#include <linux/compat.h>
33
34#include "drmP.h"
35#include "drm.h"
36#include "r128_drm.h"
37
/* 32-bit user-space layout of drm_r128_init_t.  Member types and order
 * mirror the native structure so the compat handler can copy field by
 * field into a native struct for the regular ioctl path.
 */
typedef struct drm_r128_init32 {
	int func;
	unsigned int sarea_priv_offset;
	int is_pci;
	int cce_mode;
	int cce_secure;
	int ring_size;
	int usec_timeout;

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;
	unsigned int span_offset;

	unsigned int fb_offset;
	unsigned int mmio_offset;
	unsigned int ring_offset;
	unsigned int ring_rptr_offset;
	unsigned int buffers_offset;
	unsigned int agp_textures_offset;
} drm_r128_init32_t;

/* 32-bit R128_INIT ioctl shim: copy the 32-bit argument in, rebuild a
 * native drm_r128_init_t in the compat user-space scratch area, and
 * forward it to the regular ioctl handler.
 *
 * Returns 0 on success or -EFAULT if any user-space access fails.
 * __put_user() is safe here without further checks because access_ok()
 * has already validated the whole destination range.
 */
static int compat_r128_init(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	drm_r128_init32_t init32;
	drm_r128_init_t __user *init;

	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
		return -EFAULT;

	init = compat_alloc_user_space(sizeof(*init));
	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
	    || __put_user(init32.func, &init->func)
	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
	    || __put_user(init32.is_pci, &init->is_pci)
	    || __put_user(init32.cce_mode, &init->cce_mode)
	    || __put_user(init32.cce_secure, &init->cce_secure)
	    || __put_user(init32.ring_size, &init->ring_size)
	    || __put_user(init32.usec_timeout, &init->usec_timeout)
	    || __put_user(init32.fb_bpp, &init->fb_bpp)
	    || __put_user(init32.front_offset, &init->front_offset)
	    || __put_user(init32.front_pitch, &init->front_pitch)
	    || __put_user(init32.back_offset, &init->back_offset)
	    || __put_user(init32.back_pitch, &init->back_pitch)
	    || __put_user(init32.depth_bpp, &init->depth_bpp)
	    || __put_user(init32.depth_offset, &init->depth_offset)
	    || __put_user(init32.depth_pitch, &init->depth_pitch)
	    || __put_user(init32.span_offset, &init->span_offset)
	    || __put_user(init32.fb_offset, &init->fb_offset)
	    || __put_user(init32.mmio_offset, &init->mmio_offset)
	    || __put_user(init32.ring_offset, &init->ring_offset)
	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
	    || __put_user(init32.buffers_offset, &init->buffers_offset)
	    || __put_user(init32.agp_textures_offset,
			  &init->agp_textures_offset))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_R128_INIT, (unsigned long)init);
}
101
/* 32-bit user-space layout of drm_r128_depth_t.  x, y, buffer and mask
 * are 32-bit user pointers in the native struct; here they are plain u32
 * values that must be widened before being stored.
 */
typedef struct drm_r128_depth32 {
	int func;
	int n;
	u32 x;
	u32 y;
	u32 buffer;
	u32 mask;
} drm_r128_depth32_t;

/* 32-bit R128_DEPTH ioctl shim: widen the four 32-bit user pointers
 * (via unsigned long, to avoid sign-extension of the 32-bit value) into
 * native pointers in a compat scratch struct, then forward to the
 * regular handler.  Returns 0 or -EFAULT.
 */
static int compat_r128_depth(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_r128_depth32_t depth32;
	drm_r128_depth_t __user *depth;

	if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
		return -EFAULT;

	depth = compat_alloc_user_space(sizeof(*depth));
	if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
	    || __put_user(depth32.func, &depth->func)
	    || __put_user(depth32.n, &depth->n)
	    || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
	    || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
	    || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
			  &depth->buffer)
	    || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
			  &depth->mask))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_R128_DEPTH, (unsigned long)depth);

}
136
137typedef struct drm_r128_stipple32 {
138 u32 mask;
139} drm_r128_stipple32_t;
140
141static int compat_r128_stipple(struct file *file, unsigned int cmd,
142 unsigned long arg)
143{
144 drm_r128_stipple32_t stipple32;
145 drm_r128_stipple_t __user *stipple;
146
147 if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
148 return -EFAULT;
149
150 stipple = compat_alloc_user_space(sizeof(*stipple));
151 if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
152 || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
153 &stipple->mask))
154 return -EFAULT;
155
156 return drm_ioctl(file->f_path.dentry->d_inode, file,
157 DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
158}
159
/* 32-bit user-space layout of drm_r128_getparam_t: `value` is a 32-bit
 * user pointer (void __user * in the native struct).
 */
typedef struct drm_r128_getparam32 {
	int param;
	u32 value;
} drm_r128_getparam32_t;

/* 32-bit R128_GETPARAM ioctl shim: widen the 32-bit `value` result
 * pointer into a native drm_r128_getparam_t in the compat scratch area
 * and forward to the regular handler.  Returns 0 or -EFAULT.
 */
static int compat_r128_getparam(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_r128_getparam32_t getparam32;
	drm_r128_getparam_t __user *getparam;

	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
		return -EFAULT;

	getparam = compat_alloc_user_space(sizeof(*getparam));
	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
	    || __put_user(getparam32.param, &getparam->param)
	    || __put_user((void __user *)(unsigned long)getparam32.value,
			  &getparam->value))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
}
184
/* Driver-private ioctls that need 32-bit translation, indexed by the
 * driver ioctl number (offset from DRM_COMMAND_BASE).  Unlisted entries
 * are NULL and fall through to the native handler in r128_compat_ioctl().
 */
drm_ioctl_compat_t *r128_compat_ioctls[] = {
	[DRM_R128_INIT] = compat_r128_init,
	[DRM_R128_DEPTH] = compat_r128_depth,
	[DRM_R128_STIPPLE] = compat_r128_stipple,
	[DRM_R128_GETPARAM] = compat_r128_getparam,
};
191
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * Core DRM ioctls (below DRM_COMMAND_BASE) are handed to the generic
 * drm_compat_ioctl().  Driver ioctls with a registered translation in
 * r128_compat_ioctls[] go through that handler; all other driver ioctls
 * are passed to the native handler unchanged, since their argument
 * layout is identical on 32- and 64-bit.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;
	int ret;

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];

	/* XXX for now: serialize via the BKL until audited for finer locking */
	lock_kernel();
	if (fn != NULL)
		ret = (*fn) (filp, cmd, arg);
	else
		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
	unlock_kernel();

	return ret;
}
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
new file mode 100644
index 000000000000..c76fdca7662d
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -0,0 +1,101 @@
1/* r128_irq.c -- IRQ handling for Rage 128 -*- linux-c -*- */
2/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Eric Anholt <anholt@FreeBSD.org>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "r128_drm.h"
36#include "r128_drv.h"
37
/* Interrupt handler for the R128.  Only the vertical-blank interrupt is
 * used: acknowledge it in GEN_INT_STATUS, bump the vblank counter and
 * wake waiters (r128_driver_vblank_wait).  Returns IRQ_HANDLED when a
 * vblank was serviced, IRQ_NONE otherwise so a shared line keeps working.
 */
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
	int status;

	status = R128_READ(R128_GEN_INT_STATUS);

	/* VBLANK interrupt */
	if (status & R128_CRTC_VBLANK_INT) {
		/* Ack first so a vblank occurring right after isn't lost */
		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
		atomic_inc(&dev->vbl_received);
		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
56
/* Sleep until the vblank counter reaches *sequence, with a 3-second
 * timeout.  The unsigned difference test treats anything within 2^23
 * vblanks as "already passed", so a stale sequence number from user
 * space returns immediately instead of waiting out a counter wrap.
 * On return *sequence holds the current counter; returns 0 or the
 * error from DRM_WAIT_ON (e.g. -EBUSY/-EINTR).
 */
int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
	unsigned int cur_vblank;
	int ret = 0;

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than she wants to wait for years
	 * using vertical blanks...
	 */
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(&dev->vbl_received))
		      - *sequence) <= (1 << 23)));

	*sequence = cur_vblank;

	return ret;
}
74
/* Called before the IRQ line is requested: mask every interrupt source
 * and acknowledge any vblank already pending so the freshly installed
 * handler does not fire on stale state.
 */
void r128_driver_irq_preinstall(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;

	/* Disable *all* interrupts */
	R128_WRITE(R128_GEN_INT_CNTL, 0);
	/* Clear vblank bit if it's already high */
	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
84
/* Called after the IRQ line is hooked up: enable only the vertical-blank
 * interrupt source.
 */
void r128_driver_irq_postinstall(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;

	/* Turn on VBL interrupt */
	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
}
92
/* Called when the IRQ line is released: mask all interrupt sources.
 * dev_priv may already be gone if the CCE was never initialized, hence
 * the NULL check before touching registers.
 */
void r128_driver_irq_uninstall(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
	if (!dev_priv)
		return;

	/* Disable *all* interrupts */
	R128_WRITE(R128_GEN_INT_CNTL, 0);
}
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
new file mode 100644
index 000000000000..51a9afce7b9b
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -0,0 +1,1681 @@
1/* r128_state.c -- State support for r128 -*- linux-c -*-
2 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
3 */
4/*
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Gareth Hughes <gareth@valinux.com>
29 */
30
31#include "drmP.h"
32#include "drm.h"
33#include "r128_drm.h"
34#include "r128_drv.h"
35
36/* ================================================================
37 * CCE hardware state programming functions
38 */
39
/* Program up to three auxiliary hardware scissor rectangles from the
 * client cliprect list; `count` beyond 3 is ignored by the emitted
 * packets (callers loop in groups of three).  Enabled scissors are
 * OR-combined so rendering is clipped to the union of the boxes.
 * The hardware takes inclusive right/bottom edges, hence the "- 1".
 */
static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
				 struct drm_clip_rect * boxes, int count)
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* 5 dwords per box (packet header + 4 edges) + 2 for AUX_SC_CNTL */
	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

	if (count >= 1) {
		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
		OUT_RING(boxes[0].x1);
		OUT_RING(boxes[0].x2 - 1);
		OUT_RING(boxes[0].y1);
		OUT_RING(boxes[0].y2 - 1);

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if (count >= 2) {
		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
		OUT_RING(boxes[1].x1);
		OUT_RING(boxes[1].x2 - 1);
		OUT_RING(boxes[1].y1);
		OUT_RING(boxes[1].y2 - 1);

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if (count >= 3) {
		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
		OUT_RING(boxes[2].x1);
		OUT_RING(boxes[2].x2 - 1);
		OUT_RING(boxes[2].y1);
		OUT_RING(boxes[2].y2 - 1);

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
	OUT_RING(aux_sc_cntl);

	ADVANCE_RING();
}
82
/* Upload the "core" state block from the SAREA: just SCALE_3D_CNTL. */
static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
	OUT_RING(ctx->scale_3d_cntl);

	ADVANCE_RING();
}
97
/* Upload the context state block: a single 12-register packet starting
 * at DST_PITCH_OFFSET_C (the registers are consecutive, matching the
 * field order in drm_r128_context_regs_t).
 */
static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(13);

	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
	OUT_RING(ctx->dst_pitch_offset_c);
	OUT_RING(ctx->dp_gui_master_cntl_c);
	OUT_RING(ctx->sc_top_left_c);
	OUT_RING(ctx->sc_bottom_right_c);
	OUT_RING(ctx->z_offset_c);
	OUT_RING(ctx->z_pitch_c);
	OUT_RING(ctx->z_sten_cntl_c);
	OUT_RING(ctx->tex_cntl_c);
	OUT_RING(ctx->misc_3d_state_cntl_reg);
	OUT_RING(ctx->texture_clr_cmp_clr_c);
	OUT_RING(ctx->texture_clr_cmp_msk_c);
	OUT_RING(ctx->fog_color_c);

	ADVANCE_RING();
}
123
/* Upload the triangle-setup state block: SETUP_CNTL and the vertex
 * format/FPU setup register, written via a Type-1 packet pair.
 */
static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(3);

	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
	OUT_RING(ctx->setup_cntl);
	OUT_RING(ctx->pm4_vc_fpu_setup);

	ADVANCE_RING();
}
139
/* Upload the write-mask state block: the color write mask plus the
 * stencil reference/mask and 3D plane mask pair.
 */
static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5);

	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
	OUT_RING(ctx->dp_write_mask);

	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
	OUT_RING(ctx->sten_ref_mask_c);
	OUT_RING(ctx->plane_3d_mask_c);

	ADVANCE_RING();
}
158
/* Upload the window state block: just the window X/Y offset register. */
static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
	OUT_RING(ctx->window_xy_offset);

	ADVANCE_RING();
}
173
/* Upload texture unit 0 state: primary texture control/combine registers,
 * the shared size/pitch register, all mipmap level offsets, then the
 * constant color and unit-0 border color.
 */
static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
			     2 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	OUT_RING(ctx->tex_size_pitch_c);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
		OUT_RING(tex->tex_offset[i]);
	}

	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
	OUT_RING(ctx->constant_color_c);
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}
200
/* Upload texture unit 1 state: secondary texture control/combine
 * registers, all mipmap level offsets, and the unit-1 border color.
 */
static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
		OUT_RING(tex->tex_offset[i]);
	}

	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}
223
224static void r128_emit_state(drm_r128_private_t * dev_priv)
225{
226 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
227 unsigned int dirty = sarea_priv->dirty;
228
229 DRM_DEBUG("dirty=0x%08x\n", dirty);
230
231 if (dirty & R128_UPLOAD_CORE) {
232 r128_emit_core(dev_priv);
233 sarea_priv->dirty &= ~R128_UPLOAD_CORE;
234 }
235
236 if (dirty & R128_UPLOAD_CONTEXT) {
237 r128_emit_context(dev_priv);
238 sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
239 }
240
241 if (dirty & R128_UPLOAD_SETUP) {
242 r128_emit_setup(dev_priv);
243 sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
244 }
245
246 if (dirty & R128_UPLOAD_MASKS) {
247 r128_emit_masks(dev_priv);
248 sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
249 }
250
251 if (dirty & R128_UPLOAD_WINDOW) {
252 r128_emit_window(dev_priv);
253 sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
254 }
255
256 if (dirty & R128_UPLOAD_TEX0) {
257 r128_emit_tex0(dev_priv);
258 sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
259 }
260
261 if (dirty & R128_UPLOAD_TEX1) {
262 r128_emit_tex1(dev_priv);
263 sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
264 }
265
266 /* Turn off the texture cache flushing */
267 sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
268
269 sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
270}
271
272#if R128_PERFORMANCE_BOXES
273/* ================================================================
274 * Performance monitoring functions
275 */
276
/* Paint a solid w x h rectangle at (x, y) in the BACK buffer with the
 * given 8-bit-per-channel RGB color, converting it to the framebuffer's
 * pixel format first.  Used only for the performance-monitoring boxes.
 * Silently does nothing for unsupported framebuffer depths.
 */
static void r128_clear_box(drm_r128_private_t * dev_priv,
			   int x, int y, int w, int h, int r, int g, int b)
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch (dev_priv->fb_bpp) {
	case 16:
		/* RGB565: pack 5-6-5 from the high bits of each channel */
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		/* ARGB8888 with full alpha */
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING(6);

	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_SOLID_COLOR |
		 fb_bpp |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_P |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

	OUT_RING((pitch << 21) | (offset >> 5));
	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}
323
324static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
325{
326 if (atomic_read(&dev_priv->idle_count) == 0) {
327 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
328 } else {
329 atomic_set(&dev_priv->idle_count, 0);
330 }
331}
332
333#endif
334
335/* ================================================================
336 * CCE command dispatch functions
337 */
338
339static void r128_print_dirty(const char *msg, unsigned int flags)
340{
341 DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
342 msg,
343 flags,
344 (flags & R128_UPLOAD_CORE) ? "core, " : "",
345 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
346 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
347 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
348 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
349 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
350 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
351 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
352 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
353}
354
/* Clear the requested buffers (front/back color and/or depth) within
 * every cliprect in the SAREA.  Each buffer clear is a solid-fill
 * PAINT_MULTI blit; the color write mask is programmed per box before
 * the color clears.  When page flipping is active and the back buffer
 * is currently displayed, front/back are swapped so "front" always
 * means the visible surface.
 */
static void r128_cce_dispatch_clear(struct drm_device * dev,
				    drm_r128_clear_t * clear)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		/* Page 1 is displayed: swap the FRONT/BACK request bits so
		 * we clear the surfaces the client actually means.
		 */
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if (tmp & R128_FRONT)
			flags |= R128_BACK;
		if (tmp & R128_BACK)
			flags |= R128_FRONT;
	}

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
			  pbox[i].y2, flags);

		if (flags & (R128_FRONT | R128_BACK)) {
			/* Apply the client's color write mask first */
			BEGIN_RING(2);

			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
			OUT_RING(clear->color_mask);

			ADVANCE_RING();
		}

		if (flags & R128_FRONT) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_BACK) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_DEPTH) {
			/* Depth clear also disables the write mask so the
			 * whole Z value is written.
			 */
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(clear->clear_depth);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}
	}
}
460
/* Copy the back buffer to the front buffer (a blit "swap") for every
 * cliprect in the SAREA, then bump the LAST_FRAME register so clients
 * can throttle on frame completion.
 */
static void r128_cce_dispatch_swap(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("\n");

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS |
			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 * source is whichever surface is currently hidden.
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(dev_priv->front_pitch_offset_c);
		} else {
			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(dev_priv->back_pitch_offset_c);
		}

		/* Source and destination coordinates are identical */
		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}
526
/* Perform a page flip: point the CRTC at the other color buffer (after
 * waiting for any flip in progress), toggle the current page bookkeeping
 * shared with user space, and bump the LAST_FRAME throttle register.
 */
static void r128_cce_dispatch_flip(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	BEGIN_RING(4);

	/* Stall until the previous flip has landed before reprogramming
	 * the CRTC base address.
	 */
	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
	} else {
		OUT_RING(dev_priv->front_offset);
	}

	ADVANCE_RING();

	/* Increment the frame counter. The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}
568
/* Render a client vertex buffer.  Any dirty state (except cliprects) is
 * flushed first, then the same vertex-buffer rendering packet is
 * re-issued once per group of up to three cliprects (the hardware only
 * has three auxiliary scissors).  If the buffer is marked for discard,
 * its age is stamped via LAST_DISPATCH so it can be reclaimed once the
 * hardware has passed it.
 */
static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

	if (0)
		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

	if (buf->used) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
			r128_emit_state(dev_priv);
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING(5);

			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
			OUT_RING(offset);
			OUT_RING(size);
			OUT_RING(format);
			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

			ADVANCE_RING();

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}
638
/* Fire the [start, end) byte range of a client buffer as an indirect
 * CCE command buffer.  The dword count is rounded up, and an odd count
 * is padded with a Type-2 no-op packet because the CCE requires an even
 * number of dwords.  A discarded buffer gets its age stamped via
 * LAST_DISPATCH so it can be reclaimed later.
 */
static void r128_cce_dispatch_indirect(struct drm_device * dev,
				       struct drm_buf * buf, int start, int end)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = buf->bus_address + start;
		/* Round the byte range up to whole dwords */
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}
693
/* Render indexed primitives.  The rendering packet header is written
 * directly into the client buffer in front of the index data (the
 * [start, end) range), and the whole thing is then fired as an indirect
 * buffer — once per group of up to three cliprects.  An odd index count
 * leaves the unused half of the final dword masked to zero.
 */
static void r128_cce_dispatch_indices(struct drm_device * dev,
				      struct drm_buf * buf,
				      int start, int end, int count)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	/* Vertex data is addressed relative to the CCE buffer aperture */
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

	if (0)
		r128_print_dirty("dispatch_indices", sarea_priv->dirty);

	if (start != end) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
			r128_emit_state(dev_priv);
		}

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *) ((char *)dev->agp_buffer_map->handle
				+ buf->offset + start);

		/* Overwrite the first five dwords of the buffer with the
		 * rendering packet header; the indices follow.
		 */
		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
						  dwords - 2));

		data[1] = cpu_to_le32(offset);
		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
		data[3] = cpu_to_le32(format);
		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
				       (count << 16)));

		if (count & 0x1) {
			/* Zero the unused 16-bit half of the last dword;
			 * which half depends on byte order.
			 */
#ifdef __LITTLE_ENDIAN
			data[dwords - 1] &= 0x0000ffff;
#else
			data[dwords - 1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Re-fire the same indirect buffer for this group */
			r128_cce_dispatch_indirect(dev, buf, start, end);

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}
777
/* Dispatch a host-data blit: the client's DMA buffer holds raw pixel data
 * which is framed in place with a CNTL_HOSTDATA_BLT packet and sent through
 * the indirect-buffer path.  Pixel-cache flushes bracket the blit so the
 * texture data cannot mix with in-flight rendering.
 *
 * Returns 0 on success, -EINVAL for a bad format, foreign buffer, or
 * pending buffer.
 */
static int r128_cce_dispatch_blit(struct drm_device * dev,
				  struct drm_file *file_priv,
				  drm_r128_blit_t * blit)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two. Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;	/* 32bpp: 1 pixel per dword */
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;	/* 16bpp: 2 pixels per dword */
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;	/* 8bpp: 4 pixels per dword */
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return -EINVAL;
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	/* The buffer must belong to this client and must be idle. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", blit->idx);
		return -EINVAL;
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	/* Build the 8-dword packet header directly in front of the pixel
	 * data already placed in the buffer by the client.
	 */
	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
			       R128_GMC_BRUSH_NONE |
			       (blit->format << 8) |
			       R128_GMC_SRC_DATATYPE_COLOR |
			       R128_ROP3_S |
			       R128_DP_SRC_SOURCE_HOST_DATA |
			       R128_GMC_CLR_CMP_CNTL_DIS |
			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
	data[3] = cpu_to_le32(0xffffffff);	/* clear-compare mask */
	data[4] = cpu_to_le32(0xffffffff);	/* write mask */
	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
	data[7] = cpu_to_le32(dwords);

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

	/* Flush the pixel cache after the blit completes. This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	return 0;
}
882
883/* ================================================================
884 * Tiled depth buffer management
885 *
886 * FIXME: These should all set the destination write mask for when we
887 * have hardware stencil support.
888 */
889
/* Write a horizontal span of depth values starting at (x, y), optionally
 * masked, one PAINT_MULTI packet per pixel.  The span origin and data are
 * copied from user space before any ring traffic is generated.
 *
 * Returns 0 on success, -EMSGSIZE for a bad count, -EFAULT/-ENOMEM on
 * copy/allocation failure.
 */
static int r128_cce_dispatch_write_span(struct drm_device * dev,
					drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	/* Fetch the span origin from user space. */
	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
		return -EFAULT;
	}
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
		return -EFAULT;
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
	if (buffer == NULL)
		return -ENOMEM;
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
		return -EFAULT;
	}

	mask_size = depth->n * sizeof(u8);
	if (depth->mask) {
		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
		if (mask == NULL) {
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			return -ENOMEM;
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			drm_free(mask, mask_size, DRM_MEM_BUFS);
			return -EFAULT;
		}

		/* Masked path: paint only the pixels whose mask byte is
		 * set; x advances along the span each iteration.
		 */
		for (i = 0; i < count; i++, x++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);	/* depth value as solid color */

				OUT_RING((x << 16) | y);
				OUT_RING((1 << 16) | 1);	/* 1x1 rectangle */

				ADVANCE_RING();
			}
		}

		drm_free(mask, mask_size, DRM_MEM_BUFS);
	} else {
		/* Unmasked path: same packet per pixel, no mask test. */
		for (i = 0; i < count; i++, x++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x << 16) | y);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	drm_free(buffer, buffer_size, DRM_MEM_BUFS);

	return 0;
}
985
986static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
987 drm_r128_depth_t * depth)
988{
989 drm_r128_private_t *dev_priv = dev->dev_private;
990 int count, *x, *y;
991 u32 *buffer;
992 u8 *mask;
993 int i, xbuf_size, ybuf_size, buffer_size, mask_size;
994 RING_LOCALS;
995 DRM_DEBUG("\n");
996
997 count = depth->n;
998 if (count > 4096 || count <= 0)
999 return -EMSGSIZE;
1000
1001 xbuf_size = count * sizeof(*x);
1002 ybuf_size = count * sizeof(*y);
1003 x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
1004 if (x == NULL) {
1005 return -ENOMEM;
1006 }
1007 y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
1008 if (y == NULL) {
1009 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1010 return -ENOMEM;
1011 }
1012 if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
1013 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1014 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1015 return -EFAULT;
1016 }
1017 if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
1018 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1019 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1020 return -EFAULT;
1021 }
1022
1023 buffer_size = depth->n * sizeof(u32);
1024 buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
1025 if (buffer == NULL) {
1026 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1027 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1028 return -ENOMEM;
1029 }
1030 if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
1031 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1032 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1033 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1034 return -EFAULT;
1035 }
1036
1037 if (depth->mask) {
1038 mask_size = depth->n * sizeof(u8);
1039 mask = drm_alloc(mask_size, DRM_MEM_BUFS);
1040 if (mask == NULL) {
1041 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1042 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1043 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1044 return -ENOMEM;
1045 }
1046 if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
1047 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1048 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1049 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1050 drm_free(mask, mask_size, DRM_MEM_BUFS);
1051 return -EFAULT;
1052 }
1053
1054 for (i = 0; i < count; i++) {
1055 if (mask[i]) {
1056 BEGIN_RING(6);
1057
1058 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1059 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1060 R128_GMC_BRUSH_SOLID_COLOR |
1061 (dev_priv->depth_fmt << 8) |
1062 R128_GMC_SRC_DATATYPE_COLOR |
1063 R128_ROP3_P |
1064 R128_GMC_CLR_CMP_CNTL_DIS |
1065 R128_GMC_WR_MSK_DIS);
1066
1067 OUT_RING(dev_priv->depth_pitch_offset_c);
1068 OUT_RING(buffer[i]);
1069
1070 OUT_RING((x[i] << 16) | y[i]);
1071 OUT_RING((1 << 16) | 1);
1072
1073 ADVANCE_RING();
1074 }
1075 }
1076
1077 drm_free(mask, mask_size, DRM_MEM_BUFS);
1078 } else {
1079 for (i = 0; i < count; i++) {
1080 BEGIN_RING(6);
1081
1082 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1083 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1084 R128_GMC_BRUSH_SOLID_COLOR |
1085 (dev_priv->depth_fmt << 8) |
1086 R128_GMC_SRC_DATATYPE_COLOR |
1087 R128_ROP3_P |
1088 R128_GMC_CLR_CMP_CNTL_DIS |
1089 R128_GMC_WR_MSK_DIS);
1090
1091 OUT_RING(dev_priv->depth_pitch_offset_c);
1092 OUT_RING(buffer[i]);
1093
1094 OUT_RING((x[i] << 16) | y[i]);
1095 OUT_RING((1 << 16) | 1);
1096
1097 ADVANCE_RING();
1098 }
1099 }
1100
1101 drm_free(x, xbuf_size, DRM_MEM_BUFS);
1102 drm_free(y, ybuf_size, DRM_MEM_BUFS);
1103 drm_free(buffer, buffer_size, DRM_MEM_BUFS);
1104
1105 return 0;
1106}
1107
1108static int r128_cce_dispatch_read_span(struct drm_device * dev,
1109 drm_r128_depth_t * depth)
1110{
1111 drm_r128_private_t *dev_priv = dev->dev_private;
1112 int count, x, y;
1113 RING_LOCALS;
1114 DRM_DEBUG("\n");
1115
1116 count = depth->n;
1117 if (count > 4096 || count <= 0)
1118 return -EMSGSIZE;
1119
1120 if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
1121 return -EFAULT;
1122 }
1123 if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
1124 return -EFAULT;
1125 }
1126
1127 BEGIN_RING(7);
1128
1129 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1130 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1131 R128_GMC_DST_PITCH_OFFSET_CNTL |
1132 R128_GMC_BRUSH_NONE |
1133 (dev_priv->depth_fmt << 8) |
1134 R128_GMC_SRC_DATATYPE_COLOR |
1135 R128_ROP3_S |
1136 R128_DP_SRC_SOURCE_MEMORY |
1137 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1138
1139 OUT_RING(dev_priv->depth_pitch_offset_c);
1140 OUT_RING(dev_priv->span_pitch_offset_c);
1141
1142 OUT_RING((x << 16) | y);
1143 OUT_RING((0 << 16) | 0);
1144 OUT_RING((count << 16) | 1);
1145
1146 ADVANCE_RING();
1147
1148 return 0;
1149}
1150
/* Read individual depth values at (x[i], y[i]) into the readback span
 * buffer, one BITBLT_MULTI packet per pixel; pixel i lands at column i
 * of row 0, which is why count is clamped to the span buffer width
 * (depth_pitch).
 *
 * Returns 0 on success, -EMSGSIZE for a bad count, -ENOMEM/-EFAULT on
 * allocation/copy failure.
 */
static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
					 drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	count = depth->n;
	if (count > 4096 || count <= 0)
		return -EMSGSIZE;

	/* Clamp so every pixel fits in one row of the span buffer. */
	if (count > dev_priv->depth_pitch) {
		count = dev_priv->depth_pitch;
	}

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
	if (x == NULL) {
		return -ENOMEM;
	}
	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
	if (y == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		return -ENOMEM;
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return -EFAULT;
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return -EFAULT;
	}

	/* 1x1 blit per pixel: source (x[i], y[i]), destination (i, 0). */
	for (i = 0; i < count; i++) {
		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->depth_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

		OUT_RING(dev_priv->depth_pitch_offset_c);
		OUT_RING(dev_priv->span_pitch_offset_c);

		OUT_RING((x[i] << 16) | y[i]);
		OUT_RING((i << 16) | 0);
		OUT_RING((1 << 16) | 1);

		ADVANCE_RING();
	}

	drm_free(x, xbuf_size, DRM_MEM_BUFS);
	drm_free(y, ybuf_size, DRM_MEM_BUFS);

	return 0;
}
1218
1219/* ================================================================
1220 * Polygon stipple
1221 */
1222
1223static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
1224{
1225 drm_r128_private_t *dev_priv = dev->dev_private;
1226 int i;
1227 RING_LOCALS;
1228 DRM_DEBUG("\n");
1229
1230 BEGIN_RING(33);
1231
1232 OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
1233 for (i = 0; i < 32; i++) {
1234 OUT_RING(stipple[i]);
1235 }
1236
1237 ADVANCE_RING();
1238}
1239
1240/* ================================================================
1241 * IOCTL functions
1242 */
1243
1244static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
1245{
1246 drm_r128_private_t *dev_priv = dev->dev_private;
1247 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1248 drm_r128_clear_t *clear = data;
1249 DRM_DEBUG("\n");
1250
1251 LOCK_TEST_WITH_RETURN(dev, file_priv);
1252
1253 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1254
1255 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1256 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1257
1258 r128_cce_dispatch_clear(dev, clear);
1259 COMMIT_RING();
1260
1261 /* Make sure we restore the 3D state next time.
1262 */
1263 dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
1264
1265 return 0;
1266}
1267
/* Enable page flipping: save the CRTC offset registers as currently
 * programmed (by the X server), point the CRTC at the front buffer, and
 * record the flip state in dev_priv and the shared sarea.
 * Always returns 0.
 */
static int r128_do_init_pageflip(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Saved so r128_do_cleanup_pageflip() can restore them. */
	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL,
		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	/* Publish the displayed page to user space via the sarea. */
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}
1286
/* Disable page flipping: restore the CRTC offset registers saved by
 * r128_do_init_pageflip(), and if the back page is currently displayed,
 * flip once more so the front buffer is visible again.
 * Always returns 0.
 */
static int r128_do_cleanup_pageflip(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip(dev);
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}
1303
1304/* Swapping and flipping are different operations, need different ioctls.
1305 * They can & should be intermixed to support multiple 3d windows.
1306 */
1307
1308static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
1309{
1310 drm_r128_private_t *dev_priv = dev->dev_private;
1311 DRM_DEBUG("\n");
1312
1313 LOCK_TEST_WITH_RETURN(dev, file_priv);
1314
1315 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1316
1317 if (!dev_priv->page_flipping)
1318 r128_do_init_pageflip(dev);
1319
1320 r128_cce_dispatch_flip(dev);
1321
1322 COMMIT_RING();
1323 return 0;
1324}
1325
1326static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1327{
1328 drm_r128_private_t *dev_priv = dev->dev_private;
1329 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1330 DRM_DEBUG("\n");
1331
1332 LOCK_TEST_WITH_RETURN(dev, file_priv);
1333
1334 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1335
1336 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1337 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1338
1339 r128_cce_dispatch_swap(dev);
1340 dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
1341 R128_UPLOAD_MASKS);
1342
1343 COMMIT_RING();
1344 return 0;
1345}
1346
1347static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
1348{
1349 drm_r128_private_t *dev_priv = dev->dev_private;
1350 struct drm_device_dma *dma = dev->dma;
1351 struct drm_buf *buf;
1352 drm_r128_buf_priv_t *buf_priv;
1353 drm_r128_vertex_t *vertex = data;
1354
1355 LOCK_TEST_WITH_RETURN(dev, file_priv);
1356
1357 if (!dev_priv) {
1358 DRM_ERROR("called with no initialization\n");
1359 return -EINVAL;
1360 }
1361
1362 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
1363 DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
1364
1365 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
1366 DRM_ERROR("buffer index %d (of %d max)\n",
1367 vertex->idx, dma->buf_count - 1);
1368 return -EINVAL;
1369 }
1370 if (vertex->prim < 0 ||
1371 vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1372 DRM_ERROR("buffer prim %d\n", vertex->prim);
1373 return -EINVAL;
1374 }
1375
1376 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1377 VB_AGE_TEST_WITH_RETURN(dev_priv);
1378
1379 buf = dma->buflist[vertex->idx];
1380 buf_priv = buf->dev_private;
1381
1382 if (buf->file_priv != file_priv) {
1383 DRM_ERROR("process %d using buffer owned by %p\n",
1384 DRM_CURRENTPID, buf->file_priv);
1385 return -EINVAL;
1386 }
1387 if (buf->pending) {
1388 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
1389 return -EINVAL;
1390 }
1391
1392 buf->used = vertex->count;
1393 buf_priv->prim = vertex->prim;
1394 buf_priv->discard = vertex->discard;
1395
1396 r128_cce_dispatch_vertex(dev, buf);
1397
1398 COMMIT_RING();
1399 return 0;
1400}
1401
/* DRM_R128_INDICES ioctl: validate the request and dispatch the client's
 * indexed-primitive (element) buffer.
 */
static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t *elts = data;
	int count;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		  elts->idx, elts->start, elts->end, elts->discard);

	/* Reject out-of-range buffer indices and primitive types. */
	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (elts->prim < 0 ||
	    elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", elts->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts->idx];
	buf_priv = buf->dev_private;

	/* The buffer must belong to this client and must be idle. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts->idx);
		return -EINVAL;
	}

	/* NOTE: count is computed from the raw start BEFORE start is
	 * rebased past the packet-header area below — the order of these
	 * two statements matters.
	 */
	count = (elts->end - elts->start) / sizeof(u16);
	elts->start -= R128_INDEX_PRIM_OFFSET;

	if (elts->start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
		return -EINVAL;
	}
	if (elts->start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
		return -EINVAL;
	}

	buf->used = elts->end;
	buf_priv->prim = elts->prim;
	buf_priv->discard = elts->discard;

	r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);

	COMMIT_RING();
	return 0;
}
1469
1470static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1471{
1472 struct drm_device_dma *dma = dev->dma;
1473 drm_r128_private_t *dev_priv = dev->dev_private;
1474 drm_r128_blit_t *blit = data;
1475 int ret;
1476
1477 LOCK_TEST_WITH_RETURN(dev, file_priv);
1478
1479 DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
1480
1481 if (blit->idx < 0 || blit->idx >= dma->buf_count) {
1482 DRM_ERROR("buffer index %d (of %d max)\n",
1483 blit->idx, dma->buf_count - 1);
1484 return -EINVAL;
1485 }
1486
1487 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1488 VB_AGE_TEST_WITH_RETURN(dev_priv);
1489
1490 ret = r128_cce_dispatch_blit(dev, file_priv, blit);
1491
1492 COMMIT_RING();
1493 return ret;
1494}
1495
1496static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
1497{
1498 drm_r128_private_t *dev_priv = dev->dev_private;
1499 drm_r128_depth_t *depth = data;
1500 int ret;
1501
1502 LOCK_TEST_WITH_RETURN(dev, file_priv);
1503
1504 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1505
1506 ret = -EINVAL;
1507 switch (depth->func) {
1508 case R128_WRITE_SPAN:
1509 ret = r128_cce_dispatch_write_span(dev, depth);
1510 break;
1511 case R128_WRITE_PIXELS:
1512 ret = r128_cce_dispatch_write_pixels(dev, depth);
1513 break;
1514 case R128_READ_SPAN:
1515 ret = r128_cce_dispatch_read_span(dev, depth);
1516 break;
1517 case R128_READ_PIXELS:
1518 ret = r128_cce_dispatch_read_pixels(dev, depth);
1519 break;
1520 }
1521
1522 COMMIT_RING();
1523 return ret;
1524}
1525
1526static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
1527{
1528 drm_r128_private_t *dev_priv = dev->dev_private;
1529 drm_r128_stipple_t *stipple = data;
1530 u32 mask[32];
1531
1532 LOCK_TEST_WITH_RETURN(dev, file_priv);
1533
1534 if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
1535 return -EFAULT;
1536
1537 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1538
1539 r128_cce_dispatch_stipple(dev, mask);
1540
1541 COMMIT_RING();
1542 return 0;
1543}
1544
/* DRM_R128_INDIRECT ioctl: dispatch a raw command buffer supplied by the
 * X server.  The contents are not validated, hence the DRM_MASTER |
 * DRM_ROOT_ONLY restriction in the ioctl table.
 */
static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t *indirect = data;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];
	buf_priv = buf->dev_private;

	/* The buffer must belong to this client and must be idle. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	/* A new submission may only append past what was already used. */
	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;
	buf_priv->discard = indirect->discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server. This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);

	COMMIT_RING();
	return 0;
}
1616
1617static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1618{
1619 drm_r128_private_t *dev_priv = dev->dev_private;
1620 drm_r128_getparam_t *param = data;
1621 int value;
1622
1623 if (!dev_priv) {
1624 DRM_ERROR("called with no initialization\n");
1625 return -EINVAL;
1626 }
1627
1628 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
1629
1630 switch (param->param) {
1631 case R128_PARAM_IRQ_NR:
1632 value = dev->irq;
1633 break;
1634 default:
1635 return -EINVAL;
1636 }
1637
1638 if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
1639 DRM_ERROR("copy_to_user\n");
1640 return -EFAULT;
1641 }
1642
1643 return 0;
1644}
1645
1646void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1647{
1648 if (dev->dev_private) {
1649 drm_r128_private_t *dev_priv = dev->dev_private;
1650 if (dev_priv->page_flipping) {
1651 r128_do_cleanup_pageflip(dev);
1652 }
1653 }
1654}
1655
/* Device last-close hook: tear down all CCE engine state. */
void r128_driver_lastclose(struct drm_device * dev)
{
	r128_do_cleanup_cce(dev);
}
1660
/* Ioctl dispatch table for the r128 driver.  Engine setup and raw
 * indirect-buffer submission are restricted to the root master client;
 * everything else only requires an authenticated client (DRM_AUTH).
 */
struct drm_ioctl_desc r128_ioctls[] = {
	DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
};

/* Number of entries in r128_ioctls, exported to the drm core. */
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 000000000000..feb521ebc393
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
7
8radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
9
10obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
new file mode 100644
index 000000000000..702df45320f7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -0,0 +1,1071 @@
1/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002.
4 * Copyright (C) 2004 Nicolai Haehnle.
5 * All Rights Reserved.
6 *
7 * The Weather Channel (TM) funded Tungsten Graphics to develop the
8 * initial release of the Radeon 8500 driver under the XFree86 license.
9 * This notice must be preserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
28 * DEALINGS IN THE SOFTWARE.
29 *
30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net>
32 */
33
34#include "drmP.h"
35#include "drm.h"
36#include "radeon_drm.h"
37#include "radeon_drv.h"
38#include "r300_reg.h"
39
/* Maximum number of cliprects the hardware can apply per emission. */
#define R300_SIMULTANEOUS_CLIPRECTS		4

/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects.
 * Entry [nr - 1] selects the rectangle-combine mode when nr cliprects
 * (1..4) have been programmed; see r300_emit_cliprects().
 */
static const int r300_cliprect_cntl[4] = {
	0xAAAA,
	0xEEEE,
	0xFEFE,
	0xFFFE
};
50
/**
 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
 * buffer, starting with index n.
 *
 * Copies cliprects from the user-supplied cmdbuf->boxes array, biases/masks
 * them for the hardware, and programs R300_RE_CLIPRECT_TL/BR plus the
 * matching combine mode.  With zero cliprects, rendering is disabled
 * instead (see the comment in the else branch).
 *
 * Returns 0 on success, -EFAULT if copying a cliprect from userspace faults.
 */
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
	struct drm_clip_rect box;
	int nr;
	int i;
	RING_LOCALS;

	/* Clamp to the per-emission hardware limit; callers loop for more. */
	nr = cmdbuf->nbox - n;
	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
		nr = R300_SIMULTANEOUS_CLIPRECTS;

	DRM_DEBUG("%i cliprects\n", nr);

	if (nr) {
		BEGIN_RING(6 + nr * 2);
		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));

		for (i = 0; i < nr; ++i) {
			/* _UNCHECKED: access_ok was done by the ioctl layer;
			 * a fault here still fails cleanly with -EFAULT. */
			if (DRM_COPY_FROM_USER_UNCHECKED
			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
				DRM_ERROR("copy cliprect faulted\n");
				return -EFAULT;
			}

			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
				/* RV515 and newer take unbiased coordinates. */
				box.x1 = (box.x1) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2) &
					R300_CLIPRECT_MASK;
			} else {
				/* Older chips need the cliprect offset bias
				 * applied before masking. */
				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;

			}
			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
				 (box.y2 << R300_CLIPRECT_Y_SHIFT));

		}

		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);

		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
		 * client might be able to trample over memory.
		 * The impact should be very limited, but I'd rather be safe than
		 * sorry.
		 */
		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
		OUT_RING(0);
		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
		ADVANCE_RING();
	} else {
		/* Why we allow zero cliprect rendering:
		 * There are some commands in a command buffer that must be submitted
		 * even when there are no cliprects, e.g. DMA buffer discard
		 * or state setting (though state setting could be avoided by
		 * simulating a loss of context).
		 *
		 * Now since the cmdbuf interface is so chaotic right now (and is
		 * bound to remain that way for a bit until things settle down),
		 * it is basically impossible to filter out the commands that are
		 * necessary and those that aren't.
		 *
		 * So I choose the safe way and don't do any filtering at all;
		 * instead, I simply set up the engine so that all rendering
		 * can't produce any fragments.
		 */
		BEGIN_RING(2);
		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
		ADVANCE_RING();
	}

	return 0;
}
141
/* One flag byte per 32-bit register in the 64K register space; written by
 * r300_init_reg_flags(), consulted when validating user packet0 writes. */
static u8 r300_reg_flags[0x10000 >> 2];

/*
 * Build the per-register permission table for user command buffers.
 * Registers marked MARK_SAFE may be written freely; MARK_CHECK_OFFSET
 * registers carry GPU addresses and each value must pass
 * radeon_check_offset().  Everything left at 0 is rejected outright.
 */
void r300_init_reg_flags(struct drm_device *dev)
{
	int i;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	memset(r300_reg_flags, 0, 0x10000 >> 2);
/* NOTE(review): this macro expands to a bare for-loop capturing the local
 * 'i'; it is only safe because every use below is a simple statement. */
#define ADD_RANGE_MARK(reg, count,mark) \
		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
			r300_reg_flags[i]|=(mark);

#define MARK_SAFE		1
#define MARK_CHECK_OFFSET	2

#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)

	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
	ADD_RANGE(R300_VAP_CNTL, 1);
	ADD_RANGE(R300_SE_VTE_CNTL, 2);
	ADD_RANGE(0x2134, 2);
	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
	ADD_RANGE(0x21DC, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
	ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
	ADD_RANGE(R300_GB_ENABLE, 1);
	ADD_RANGE(R300_GB_MSPOS0, 5);
	ADD_RANGE(R300_TX_CNTL, 1);
	ADD_RANGE(R300_TX_ENABLE, 1);
	ADD_RANGE(0x4200, 4);
	ADD_RANGE(0x4214, 1);
	ADD_RANGE(R300_RE_POINTSIZE, 1);
	ADD_RANGE(0x4230, 3);
	ADD_RANGE(R300_RE_LINE_CNT, 1);
	ADD_RANGE(R300_RE_UNK4238, 1);
	ADD_RANGE(0x4260, 3);
	ADD_RANGE(R300_RE_SHADE, 4);
	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
	ADD_RANGE(R300_RE_CULL_CNTL, 1);
	ADD_RANGE(0x42C0, 2);
	ADD_RANGE(R300_RS_CNTL_0, 2);

	ADD_RANGE(R300_SC_HYPERZ, 2);
	ADD_RANGE(0x43E8, 1);

	ADD_RANGE(0x46A4, 5);

	ADD_RANGE(R300_RE_FOG_STATE, 1);
	ADD_RANGE(R300_FOG_COLOR_R, 3);
	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
	ADD_RANGE(0x4BD8, 1);
	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
	ADD_RANGE(0x4E00, 1);
	ADD_RANGE(R300_RB3D_CBLEND, 2);
	ADD_RANGE(R300_RB3D_COLORMASK, 1);
	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
	ADD_RANGE(0x4E50, 9);
	ADD_RANGE(0x4E88, 1);
	ADD_RANGE(0x4EA0, 2);
	ADD_RANGE(R300_ZB_CNTL, 3);
	ADD_RANGE(R300_ZB_FORMAT, 4);
	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);

	ADD_RANGE(R300_TX_FILTER_0, 16);
	ADD_RANGE(R300_TX_FILTER1_0, 16);
	ADD_RANGE(R300_TX_SIZE_0, 16);
	ADD_RANGE(R300_TX_FORMAT_0, 16);
	ADD_RANGE(R300_TX_PITCH_0, 16);
	/* Texture offset is dangerous and needs more checking */
	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);

	/* Sporadic registers used as primitives are emitted */
	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);

	/* R500-family chips expose the unified shader (US) interface instead
	 * of the R300 pixel-shader (PFS) register file. */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
		ADD_RANGE(R500_US_CONFIG, 2);
		ADD_RANGE(R500_US_CODE_ADDR, 3);
		ADD_RANGE(R500_US_FC_CTRL, 1);
		ADD_RANGE(R500_RS_IP_0, 16);
		ADD_RANGE(R500_RS_INST_0, 16);
		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
	} else {
		ADD_RANGE(R300_PFS_CNTL_0, 3);
		ADD_RANGE(R300_PFS_NODE_0, 4);
		ADD_RANGE(R300_PFS_TEXI_0, 64);
		ADD_RANGE(R300_PFS_INSTR0_0, 64);
		ADD_RANGE(R300_PFS_INSTR1_0, 64);
		ADD_RANGE(R300_PFS_INSTR2_0, 64);
		ADD_RANGE(R300_PFS_INSTR3_0, 64);
		ADD_RANGE(R300_RS_INTERP_0, 8);
		ADD_RANGE(R300_RS_ROUTE_0, 8);

	}
}
258
259static __inline__ int r300_check_range(unsigned reg, int count)
260{
261 int i;
262 if (reg & ~0xffff)
263 return -1;
264 for (i = (reg >> 2); i < (reg >> 2) + count; i++)
265 if (r300_reg_flags[i] != MARK_SAFE)
266 return 1;
267 return 0;
268}
269
/*
 * Slow path for user packet0 emission: copy up to 64 values out of the
 * command buffer and vet each one against r300_reg_flags before putting
 * the packet on the ring.  MARK_CHECK_OFFSET values must be valid GPU
 * offsets; anything else flagged is rejected with -EINVAL.
 * On success, consumes sz*4 bytes from cmdbuf.
 */
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
							  dev_priv,
							  drm_radeon_kcmd_buffer_t
							  * cmdbuf,
							  drm_r300_cmd_header_t
							  header)
{
	int reg;
	int sz;
	int i;
	int values[64];
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	/* values[] is a fixed 64-entry staging buffer, so bound sz first. */
	if ((sz > 64) || (sz < 0)) {
		DRM_ERROR
		    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
		     reg, sz);
		return -EINVAL;
	}
	for (i = 0; i < sz; i++) {
		values[i] = ((int *)cmdbuf->buf)[i];
		switch (r300_reg_flags[(reg >> 2) + i]) {
		case MARK_SAFE:
			break;
		case MARK_CHECK_OFFSET:
			/* Register holds a GPU address; verify it lies in
			 * memory the client is allowed to reach. */
			if (!radeon_check_offset(dev_priv, (u32) values[i])) {
				DRM_ERROR
				    ("Offset failed range check (reg=%04x sz=%d)\n",
				     reg, sz);
				return -EINVAL;
			}
			break;
		default:
			/* Unlisted register: never writable from userspace. */
			DRM_ERROR("Register %04x failed check as flag=%02x\n",
				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
			return -EINVAL;
		}
	}

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE(values, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	return 0;
}
322
/**
 * Emits a packet0 setting arbitrary registers.
 * Called by r300_do_cp_cmdbuf.
 *
 * Note that checks are performed on contents and addresses of the registers.
 * Fast path: if every register in the range is MARK_SAFE the data is
 * streamed straight from the command buffer; otherwise the per-value
 * checked path (r300_emit_carefully_checked_packet0) takes over.
 *
 * Returns 0 on success (consuming sz*4 bytes of cmdbuf) or -EINVAL on
 * undersized buffers, out-of-range registers, or failed value checks.
 */
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int reg;
	int sz;
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if (!sz)
		return 0;

	/* The header promises sz dwords of payload; make sure they exist. */
	if (sz * 4 > cmdbuf->bufsz)
		return -EINVAL;

	if (reg + sz * 4 >= 0x10000) {
		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
			  sz);
		return -EINVAL;
	}

	if (r300_check_range(reg, sz)) {
		/* go and check everything */
		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
							   header);
	}
	/* the rest of the data is safe to emit, whatever the values the user passed */

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	return 0;
}
369
/**
 * Uploads user-supplied vertex program instructions or parameters onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 *
 * Each of the sz units is one 4-dword PVS vector (16 bytes), written via
 * R300_VAP_PVS_UPLOAD_ADDRESS/DATA after waiting for the VAP to idle.
 * Returns 0 on success (consuming sz*16 bytes of cmdbuf) or -EINVAL if
 * the buffer is too small.
 */
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
				    drm_radeon_kcmd_buffer_t *cmdbuf,
				    drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	RING_LOCALS;

	sz = header.vpu.count;
	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;

	if (!sz)
		return 0;
	if (sz * 16 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(5 + sz * 4);
	/* Wait for VAP to come to senses.. */
	/* There is no need to emit it multiple times (only once before the
	 * VAP is programmed), but that optimization is left for later. */
	OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);

	ADVANCE_RING();

	cmdbuf->buf += sz * 16;
	cmdbuf->bufsz -= sz * 16;

	return 0;
}
407
/**
 * Emit a clear packet from userspace.
 * Called by r300_emit_packet3.
 *
 * Wraps the 8 user-supplied dwords (one point-primitive vertex) in a
 * 3D_DRAW_IMMD_2 packet; the values themselves need no validation since
 * they are plain vertex data.  Consumes 32 bytes of cmdbuf on success.
 */
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
				      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	RING_LOCALS;

	if (8 * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
	ADVANCE_RING();

	cmdbuf->buf += 8 * 4;
	cmdbuf->bufsz -= 8 * 4;

	return 0;
}
432
/*
 * Validate and emit a 3D_LOAD_VBPNTR packet (vertex array pointers).
 * The payload layout is: [0] = number of arrays, then per pair of arrays
 * one attribute dword followed by one or two GPU offsets.  Every offset
 * is range-checked before the packet reaches the ring.
 * Caller (r300_emit_raw_packet3) has already verified cmdbuf->bufsz
 * covers (count + 2) * 4 bytes.
 */
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
					       drm_radeon_kcmd_buffer_t *cmdbuf,
					       u32 header)
{
	int count, i, k;
#define MAX_ARRAY_PACKET  64
	u32 payload[MAX_ARRAY_PACKET];
	u32 narrays;
	RING_LOCALS;

	count = (header >> 16) & 0x3fff;

	/* payload[] is a fixed staging buffer; reject oversized packets. */
	if ((count + 1) > MAX_ARRAY_PACKET) {
		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
			  count);
		return -EINVAL;
	}
	memset(payload, 0, MAX_ARRAY_PACKET * 4);
	memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);

	/* carefully check packet contents */

	narrays = payload[0];
	k = 0;			/* arrays validated so far */
	i = 1;			/* current payload dword */
	while ((k < narrays) && (i < (count + 1))) {
		i++;		/* skip attribute field */
		if (!radeon_check_offset(dev_priv, payload[i])) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
		if (k == narrays)
			break;
		/* have one more to process, they come in pairs */
		if (!radeon_check_offset(dev_priv, payload[i])) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
	}
	/* do the counts match what we expect ? */
	if ((k != narrays) || (i != (count + 1))) {
		DRM_ERROR
		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
		     k, i, narrays, count + 1);
		return -EINVAL;
	}

	/* all clear, output packet */

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_TABLE(payload, count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}
500
/*
 * Validate and emit a CNTL_BITBLT_MULTI packet.  When the header requests
 * host-provided pitch/offset (bit 15 of cmd[0]), the source and/or
 * destination offsets embedded in the packet are range-checked before
 * emission.  Caller has already bounds-checked cmdbuf->bufsz.
 */
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
					     drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd = (u32 *) cmdbuf->buf;
	int count, ret;
	RING_LOCALS;

	count=(cmd[0]>>16) & 0x3fff;

	if (cmd[0] & 0x8000) {
		u32 offset;

		/* One pitch/offset control bit set: first offset is cmd[2]. */
		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[2] << 10;	/* stored in 1KB units */
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
				return -EINVAL;
			}
		}

		/* Both src and dst controlled: second offset is cmd[3]. */
		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[3] << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
				return -EINVAL;
			}

		}
	}

	BEGIN_RING(count+2);
	OUT_RING(cmd[0]);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count+2)*4;
	cmdbuf->bufsz -= (count+2)*4;

	return 0;
}
545
/*
 * Validate and emit a CP_INDX_BUFFER packet.  cmd[1] must address the
 * index-buffer register (0x...0810 with the expected flag bits) and the
 * buffer offset in cmd[2] must pass radeon_check_offset().
 * Caller has already bounds-checked cmdbuf->bufsz.
 */
static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd = (u32 *) cmdbuf->buf;
	int count, ret;
	RING_LOCALS;

	count=(cmd[0]>>16) & 0x3fff;

	if ((cmd[1] & 0x8000ffff) != 0x80000810) {
		DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
		return -EINVAL;
	}
	ret = !radeon_check_offset(dev_priv, cmd[2]);
	if (ret) {
		DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
		return -EINVAL;
	}

	BEGIN_RING(count+2);
	OUT_RING(cmd[0]);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count+2)*4;
	cmdbuf->bufsz -= (count+2)*4;

	return 0;
}
575
/*
 * Parse a raw packet3 from the command buffer: verify it really is a
 * packet3, that its declared length fits in the remaining buffer, and
 * that its opcode is one we know how to handle.  Opcodes that embed GPU
 * offsets are dispatched to dedicated validators; a short whitelist is
 * passed through unchanged.  Consumes (count + 2) * 4 bytes on success.
 */
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 header;
	int count;
	RING_LOCALS;

	if (4 > cmdbuf->bufsz)
		return -EINVAL;

	/* Fixme !! This simply emits a packet without much checking.
	   We need to be smarter. */

	/* obtain first word - actual packet3 header */
	header = *(u32 *) cmdbuf->buf;

	/* Is it packet 3 ? */
	if ((header >> 30) != 0x3) {
		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
		return -EINVAL;
	}

	count = (header >> 16) & 0x3fff;

	/* Check again now that we know how much data to expect */
	if ((count + 2) * 4 > cmdbuf->bufsz) {
		DRM_ERROR
		    ("Expected packet3 of length %d but have only %d bytes left\n",
		     (count + 2) * 4, cmdbuf->bufsz);
		return -EINVAL;
	}

	/* Is it a packet type we know about ? */
	switch (header & 0xff00) {
	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);

	case RADEON_CNTL_BITBLT_MULTI:
		return r300_emit_bitblt_multi(dev_priv, cmdbuf);

	case RADEON_CP_INDX_BUFFER:	/* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
		return r300_emit_indx_buffer(dev_priv, cmdbuf);
	case RADEON_CP_3D_DRAW_IMMD_2:	/* triggers drawing using in-packet vertex data */
	case RADEON_CP_3D_DRAW_VBUF_2:	/* triggers drawing of vertex buffers setup elsewhere */
	case RADEON_CP_3D_DRAW_INDX_2:	/* triggers drawing using indices to vertex buffer */
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
		/* these packets are safe */
		break;
	default:
		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
		return -EINVAL;
	}

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}
640
/**
 * Emit a rendering packet3 from userspace.
 * Called by r300_do_cp_cmdbuf.
 *
 * When more than R300_SIMULTANEOUS_CLIPRECTS cliprects are active the
 * same packet is re-emitted once per cliprect batch (rewinding cmdbuf
 * each iteration), since the hardware can only apply four at a time.
 *
 * Returns 0 on success or a negative errno from the packet emitters.
 */
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int n;
	int ret;
	char *orig_buf = cmdbuf->buf;
	int orig_bufsz = cmdbuf->bufsz;

	/* This is a do-while-loop so that we run the interior at least once,
	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
	 */
	n = 0;
	do {
		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
			if (ret)
				return ret;

			/* Rewind so the packet body is re-read for this
			 * cliprect batch. */
			cmdbuf->buf = orig_buf;
			cmdbuf->bufsz = orig_bufsz;
		}

		switch (header.packet3.packet) {
		case R300_CMD_PACKET3_CLEAR:
			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
			ret = r300_emit_clear(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_clear failed\n");
				return ret;
			}
			break;

		case R300_CMD_PACKET3_RAW:
			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_raw_packet3 failed\n");
				return ret;
			}
			break;

		default:
			DRM_ERROR("bad packet3 type %i at %p\n",
				  header.packet3.packet,
				  cmdbuf->buf - sizeof(header));
			return -EINVAL;
		}

		n += R300_SIMULTANEOUS_CLIPRECTS;
	} while (n < cmdbuf->nbox);

	return 0;
}
699
700/* Some of the R300 chips seem to be extremely touchy about the two registers
701 * that are configured in r300_pacify.
702 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
703 * sends a command buffer that contains only state setting commands and a
704 * vertex program/parameter upload sequence, this will eventually lead to a
705 * lockup, unless the sequence is bracketed by calls to r300_pacify.
706 * So we should take great care to *always* call r300_pacify before
707 * *anything* 3D related, and again afterwards. This is what the
708 * call bracket in r300_do_cp_cmdbuf is for.
709 */
710
/**
 * Emit the sequence to pacify R300.
 *
 * Flushes the destination and Z caches and emits a NOP; see the block
 * comment above for why this must bracket all 3D work.
 */
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;

	BEGIN_RING(6);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
	OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE|
		 R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
	OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
	OUT_RING(0x0);
	ADVANCE_RING();
}
728
/**
 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
 * be careful about how this function is called.
 *
 * Marks @buf pending with the next dispatch age and resets its used count
 * so it can be reclaimed once the GPU passes that age.
 */
static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
	buf->pending = 1;
	buf->used = 0;
}
743
744static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
745 drm_r300_cmd_header_t header)
746{
747 u32 wait_until;
748 RING_LOCALS;
749
750 if (!header.wait.flags)
751 return;
752
753 wait_until = 0;
754
755 switch(header.wait.flags) {
756 case R300_WAIT_2D:
757 wait_until = RADEON_WAIT_2D_IDLE;
758 break;
759 case R300_WAIT_3D:
760 wait_until = RADEON_WAIT_3D_IDLE;
761 break;
762 case R300_NEW_WAIT_2D_3D:
763 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
764 break;
765 case R300_NEW_WAIT_2D_2D_CLEAN:
766 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
767 break;
768 case R300_NEW_WAIT_3D_3D_CLEAN:
769 wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
770 break;
771 case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
772 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
773 wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
774 break;
775 default:
776 return;
777 }
778
779 BEGIN_RING(2);
780 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
781 OUT_RING(wait_until);
782 ADVANCE_RING();
783}
784
/*
 * Handle an R300_CMD_SCRATCH command: bump the kernel-side age counter
 * for one of the five scratch registers, mirror the new age into each
 * listed userspace buffer slot (8 bytes per buffer: age dword followed
 * by a pending count that is decremented), then emit a scratch-register
 * write so the CP records the age when it reaches this point.
 *
 * ref_age_base is a userspace pointer taken from the command stream; all
 * accesses to it go through DRM_COPY_TO/FROM_USER, which fault-check.
 */
static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
{
	u32 *ref_age_base;
	u32 i, buf_idx, h_pending;
	RING_LOCALS;

	/* Need the 64-bit base pointer plus one u32 index per buffer. */
	if (cmdbuf->bufsz <
	    (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
		return -EINVAL;
	}

	/* Only scratch registers 0..4 exist. */
	if (header.scratch.reg >= 5) {
		return -EINVAL;
	}

	dev_priv->scratch_ages[header.scratch.reg]++;

	ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);

	cmdbuf->buf += sizeof(u64);
	cmdbuf->bufsz -= sizeof(u64);

	for (i=0; i < header.scratch.n_bufs; i++) {
		buf_idx = *(u32 *)cmdbuf->buf;
		buf_idx *= 2; /* 8 bytes per buf */

		/* Publish the new age to the buffer's age slot. */
		if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
			return -EINVAL;
		}

		if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
			return -EINVAL;
		}

		/* A zero pending count means userspace handed us a buffer
		 * that was never marked in-flight: malformed request. */
		if (h_pending == 0) {
			return -EINVAL;
		}

		h_pending--;

		if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
			return -EINVAL;
		}

		cmdbuf->buf += sizeof(buf_idx);
		cmdbuf->bufsz -= sizeof(buf_idx);
	}

	/* Have the CP write the new age into the scratch register when it
	 * reaches this point in the stream. */
	BEGIN_RING(2);
	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
	ADVANCE_RING();

	return 0;
}
842
/**
 * Uploads user-supplied constants or instructions into the R5xx unified
 * shader (US) vector memory via the GA_US_VECTOR_INDEX/DATA registers.
 * Called by r300_do_cp_cmdbuf.
 *
 * The header encodes a 9-bit start address plus type and clamp flags;
 * each of the sz units is 'stride' dwords (4 for constants, 6 for
 * instructions).  Consumes sz * stride * 4 bytes of cmdbuf on success.
 */
static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
				   drm_radeon_kcmd_buffer_t *cmdbuf,
				   drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	int type;
	int clamp;
	int stride;
	RING_LOCALS;

	sz = header.r500fp.count;
	/* address is 9 bits 0 - 8, bit 1 of flags is part of address */
	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;

	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
	clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);

	/* Type and clamp selectors live above the address bits in the
	 * vector-index register. */
	addr |= (type << 16);
	addr |= (clamp << 17);

	stride = type ? 4 : 6;

	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
	if (!sz)
		return 0;
	if (sz * stride * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(3 + sz * stride);
	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);

	ADVANCE_RING();

	cmdbuf->buf += sz * stride * 4;
	cmdbuf->bufsz -= sz * stride * 4;

	return 0;
}
889
890
/**
 * Parses and validates a user-supplied command buffer and emits appropriate
 * commands on the DMA ring buffer.
 * Called by the ioctl handler function radeon_cp_cmdbuf.
 *
 * Walks cmdbuf header by header, dispatching each command type to its
 * validator/emitter.  All work is bracketed by r300_pacify() calls (see
 * the comment block above r300_pacify for why), and the dispatch age for
 * any discarded DMA buffers is emitted outside that bracket.
 *
 * Returns 0 on success or a negative errno; on error the buffer may have
 * been partially emitted, but the closing pacify still runs via cleanup.
 */
int r300_do_cp_cmdbuf(struct drm_device *dev,
		      struct drm_file *file_priv,
		      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	int emit_dispatch_age = 0;
	int ret = 0;

	DRM_DEBUG("\n");

	/* See the comment block above r300_pacify for why this call must be
	 * here, and what the cleanup gotos are for. */
	r300_pacify(dev_priv);

	/* Few enough cliprects to emit once up front; otherwise
	 * r300_emit_packet3 re-emits them per batch. */
	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
		if (ret)
			goto cleanup;
	}

	while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
		int idx;
		drm_r300_cmd_header_t header;

		header.u = *(unsigned int *)cmdbuf->buf;

		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case R300_CMD_PACKET0:
			DRM_DEBUG("R300_CMD_PACKET0\n");
			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_packet0 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_VPU:
			DRM_DEBUG("R300_CMD_VPU\n");
			ret = r300_emit_vpu(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_vpu failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_PACKET3:
			DRM_DEBUG("R300_CMD_PACKET3\n");
			ret = r300_emit_packet3(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_packet3 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_END3D:
			DRM_DEBUG("R300_CMD_END3D\n");
			/* TODO:
			   Ideally userspace driver should not need to issue this call,
			   i.e. the drm driver should issue it automatically and prevent
			   lockups.

			   In practice, we do not understand why this call is needed and what
			   it does (except for some vague guesses that it has to do with cache
			   coherence) and so the user space driver does it.

			   Once we are sure which uses prevent lockups the code could be moved
			   into the kernel and the userspace driver will not
			   need to use this command.

			   Note that issuing this command does not hurt anything
			   except, possibly, performance */
			r300_pacify(dev_priv);
			break;

		case R300_CMD_CP_DELAY:
			/* simple enough, we can do it here */
			DRM_DEBUG("R300_CMD_CP_DELAY\n");
			{
				int i;
				RING_LOCALS;

				/* Pad the ring with NOP-equivalent packets. */
				BEGIN_RING(header.delay.count);
				for (i = 0; i < header.delay.count; i++)
					OUT_RING(RADEON_CP_PACKET2);
				ADVANCE_RING();
			}
			break;

		case R300_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				ret = -EINVAL;
				goto cleanup;
			}

			/* A client may only discard its own, unfinished
			 * buffers. */
			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				ret = -EINVAL;
				goto cleanup;
			}

			emit_dispatch_age = 1;
			r300_discard_buffer(dev, buf);
			break;

		case R300_CMD_WAIT:
			DRM_DEBUG("R300_CMD_WAIT\n");
			r300_cmd_wait(dev_priv, header);
			break;

		case R300_CMD_SCRATCH:
			DRM_DEBUG("R300_CMD_SCRATCH\n");
			ret = r300_scratch(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_scratch failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_R500FP:
			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
				DRM_ERROR("Calling r500 command on r300 card\n");
				ret = -EINVAL;
				goto cleanup;
			}
			DRM_DEBUG("R300_CMD_R500FP\n");
			ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_r500fp failed\n");
				goto cleanup;
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %i at %p\n",
				  header.header.cmd_type,
				  cmdbuf->buf - sizeof(header));
			ret = -EINVAL;
			goto cleanup;
		}
	}

	DRM_DEBUG("END\n");

      cleanup:
	r300_pacify(dev_priv);

	/* We emit the vertex buffer age here, outside the pacifier "brackets"
	 * for two reasons:
	 * (1) This may coalesce multiple age emissions into a single one and
	 * (2) more importantly, some chips lock up hard when scratch registers
	 *     are written inside the pacifier bracket.
	 */
	if (emit_dispatch_age) {
		RING_LOCALS;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);
		RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
		ADVANCE_RING();
	}

	COMMIT_RING();

	return ret;
}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
new file mode 100644
index 000000000000..a6802f26afc4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -0,0 +1,1772 @@
1/**************************************************************************
2
3Copyright (C) 2004-2005 Nicolai Haehnle et al.
4
5Permission is hereby granted, free of charge, to any person obtaining a
6copy of this software and associated documentation files (the "Software"),
7to deal in the Software without restriction, including without limitation
8on the rights to use, copy, modify, merge, publish, distribute, sub
9license, and/or sell copies of the Software, and to permit persons to whom
10the Software is furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice (including the next
13paragraph) shall be included in all copies or substantial portions of the
14Software.
15
16THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22USE OR OTHER DEALINGS IN THE SOFTWARE.
23
24**************************************************************************/
25
26#ifndef _R300_REG_H
27#define _R300_REG_H
28
29#define R300_MC_INIT_MISC_LAT_TIMER 0x180
30# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
31# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4
32# define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT 8
33# define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT 12
34# define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT 16
35# define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT 20
36# define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24
37# define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28
38
39#define R300_MC_INIT_GFX_LAT_TIMER 0x154
40# define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0
41# define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4
42# define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT 8
43# define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT 12
44# define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT 16
45# define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT 20
46# define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT 24
47# define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT 28
48
49/*
50 * This file contains registers and constants for the R300. They have been
51 * found mostly by examining command buffers captured using glxtest, as well
52 * as by extrapolating some known registers and constants from the R200.
53 * I am fairly certain that they are correct unless stated otherwise
54 * in comments.
55 */
56
57#define R300_SE_VPORT_XSCALE 0x1D98
58#define R300_SE_VPORT_XOFFSET 0x1D9C
59#define R300_SE_VPORT_YSCALE 0x1DA0
60#define R300_SE_VPORT_YOFFSET 0x1DA4
61#define R300_SE_VPORT_ZSCALE 0x1DA8
62#define R300_SE_VPORT_ZOFFSET 0x1DAC
63
64
65/*
66 * Vertex Array Processing (VAP) Control
67 * Stolen from r200 code from Christoph Brill (It's a guess!)
68 */
69#define R300_VAP_CNTL 0x2080
70
71/* This register is written directly and also starts data section
72 * in many 3d CP_PACKET3's
73 */
74#define R300_VAP_VF_CNTL 0x2084
75# define R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT 0
76# define R300_VAP_VF_CNTL__PRIM_NONE (0<<0)
77# define R300_VAP_VF_CNTL__PRIM_POINTS (1<<0)
78# define R300_VAP_VF_CNTL__PRIM_LINES (2<<0)
79# define R300_VAP_VF_CNTL__PRIM_LINE_STRIP (3<<0)
80# define R300_VAP_VF_CNTL__PRIM_TRIANGLES (4<<0)
81# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN (5<<0)
82# define R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP (6<<0)
83# define R300_VAP_VF_CNTL__PRIM_LINE_LOOP (12<<0)
84# define R300_VAP_VF_CNTL__PRIM_QUADS (13<<0)
85# define R300_VAP_VF_CNTL__PRIM_QUAD_STRIP (14<<0)
86# define R300_VAP_VF_CNTL__PRIM_POLYGON (15<<0)
87
88# define R300_VAP_VF_CNTL__PRIM_WALK__SHIFT 4
89 /* State based - direct writes to registers trigger vertex
90 generation */
91# define R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED (0<<4)
92# define R300_VAP_VF_CNTL__PRIM_WALK_INDICES (1<<4)
93# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST (2<<4)
94# define R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED (3<<4)
95
96 /* I don't think I saw these three used.. */
97# define R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT 6
98# define R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT 9
99# define R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT 10
100
101 /* index size - when not set the indices are assumed to be 16 bit */
102# define R300_VAP_VF_CNTL__INDEX_SIZE_32bit (1<<11)
103 /* number of vertices */
104# define R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT 16
105
106/* BEGIN: Wild guesses */
107#define R300_VAP_OUTPUT_VTX_FMT_0 0x2090
108# define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT (1<<0)
109# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT (1<<1)
110# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
111# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
112# define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
113# define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
114
115#define R300_VAP_OUTPUT_VTX_FMT_1 0x2094
116 /* each of the following is 3 bits wide, specifies number
117 of components */
118# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
119# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
120# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
121# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
122# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
123# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
124# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
125# define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
126/* END: Wild guesses */
127
128#define R300_SE_VTE_CNTL 0x20b0
129# define R300_VPORT_X_SCALE_ENA 0x00000001
130# define R300_VPORT_X_OFFSET_ENA 0x00000002
131# define R300_VPORT_Y_SCALE_ENA 0x00000004
132# define R300_VPORT_Y_OFFSET_ENA 0x00000008
133# define R300_VPORT_Z_SCALE_ENA 0x00000010
134# define R300_VPORT_Z_OFFSET_ENA 0x00000020
135# define R300_VTX_XY_FMT 0x00000100
136# define R300_VTX_Z_FMT 0x00000200
137# define R300_VTX_W0_FMT 0x00000400
138# define R300_VTX_W0_NORMALIZE 0x00000800
139# define R300_VTX_ST_DENORMALIZED 0x00001000
140
141/* BEGIN: Vertex data assembly - lots of uncertainties */
142
143/* gap */
144
145#define R300_VAP_CNTL_STATUS 0x2140
146# define R300_VC_NO_SWAP (0 << 0)
147# define R300_VC_16BIT_SWAP (1 << 0)
148# define R300_VC_32BIT_SWAP (2 << 0)
149# define R300_VAP_TCL_BYPASS (1 << 8)
150
151/* gap */
152
153/* Where do we get our vertex data?
154 *
155 * Vertex data either comes either from immediate mode registers or from
156 * vertex arrays.
157 * There appears to be no mixed mode (though we can force the pitch of
158 * vertex arrays to 0, effectively reusing the same element over and over
159 * again).
160 *
161 * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
162 * if these registers influence vertex array processing.
163 *
164 * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
165 *
166 * In both cases, vertex attributes are then passed through INPUT_ROUTE.
167 *
168 * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
169 * into the vertex processor's input registers.
170 * The first word routes the first input, the second word the second, etc.
171 * The corresponding input is routed into the register with the given index.
172 * The list is ended by a word with INPUT_ROUTE_END set.
173 *
174 * Always set COMPONENTS_4 in immediate mode.
175 */
176
177#define R300_VAP_INPUT_ROUTE_0_0 0x2150
178# define R300_INPUT_ROUTE_COMPONENTS_1 (0 << 0)
179# define R300_INPUT_ROUTE_COMPONENTS_2 (1 << 0)
180# define R300_INPUT_ROUTE_COMPONENTS_3 (2 << 0)
181# define R300_INPUT_ROUTE_COMPONENTS_4 (3 << 0)
182# define R300_INPUT_ROUTE_COMPONENTS_RGBA (4 << 0) /* GUESS */
183# define R300_VAP_INPUT_ROUTE_IDX_SHIFT 8
184# define R300_VAP_INPUT_ROUTE_IDX_MASK (31 << 8) /* GUESS */
185# define R300_VAP_INPUT_ROUTE_END (1 << 13)
186# define R300_INPUT_ROUTE_IMMEDIATE_MODE (0 << 14) /* GUESS */
187# define R300_INPUT_ROUTE_FLOAT (1 << 14) /* GUESS */
188# define R300_INPUT_ROUTE_UNSIGNED_BYTE (2 << 14) /* GUESS */
189# define R300_INPUT_ROUTE_FLOAT_COLOR (3 << 14) /* GUESS */
190#define R300_VAP_INPUT_ROUTE_0_1 0x2154
191#define R300_VAP_INPUT_ROUTE_0_2 0x2158
192#define R300_VAP_INPUT_ROUTE_0_3 0x215C
193#define R300_VAP_INPUT_ROUTE_0_4 0x2160
194#define R300_VAP_INPUT_ROUTE_0_5 0x2164
195#define R300_VAP_INPUT_ROUTE_0_6 0x2168
196#define R300_VAP_INPUT_ROUTE_0_7 0x216C
197
198/* gap */
199
200/* Notes:
201 * - always set up to produce at least two attributes:
202 * if vertex program uses only position, fglrx will set normal, too
203 * - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
204 */
205#define R300_VAP_INPUT_CNTL_0 0x2180
206# define R300_INPUT_CNTL_0_COLOR 0x00000001
207#define R300_VAP_INPUT_CNTL_1 0x2184
208# define R300_INPUT_CNTL_POS 0x00000001
209# define R300_INPUT_CNTL_NORMAL 0x00000002
210# define R300_INPUT_CNTL_COLOR 0x00000004
211# define R300_INPUT_CNTL_TC0 0x00000400
212# define R300_INPUT_CNTL_TC1 0x00000800
213# define R300_INPUT_CNTL_TC2 0x00001000 /* GUESS */
214# define R300_INPUT_CNTL_TC3 0x00002000 /* GUESS */
215# define R300_INPUT_CNTL_TC4 0x00004000 /* GUESS */
216# define R300_INPUT_CNTL_TC5 0x00008000 /* GUESS */
217# define R300_INPUT_CNTL_TC6 0x00010000 /* GUESS */
218# define R300_INPUT_CNTL_TC7 0x00020000 /* GUESS */
219
220/* gap */
221
222/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
223 * are set to a swizzling bit pattern, other words are 0.
224 *
225 * In immediate mode, the pattern is always set to xyzw. In vertex array
226 * mode, the swizzling pattern is e.g. used to set zw components in texture
 227 * coordinates with only two components.
228 */
229#define R300_VAP_INPUT_ROUTE_1_0 0x21E0
230# define R300_INPUT_ROUTE_SELECT_X 0
231# define R300_INPUT_ROUTE_SELECT_Y 1
232# define R300_INPUT_ROUTE_SELECT_Z 2
233# define R300_INPUT_ROUTE_SELECT_W 3
234# define R300_INPUT_ROUTE_SELECT_ZERO 4
235# define R300_INPUT_ROUTE_SELECT_ONE 5
236# define R300_INPUT_ROUTE_SELECT_MASK 7
237# define R300_INPUT_ROUTE_X_SHIFT 0
238# define R300_INPUT_ROUTE_Y_SHIFT 3
239# define R300_INPUT_ROUTE_Z_SHIFT 6
240# define R300_INPUT_ROUTE_W_SHIFT 9
241# define R300_INPUT_ROUTE_ENABLE (15 << 12)
242#define R300_VAP_INPUT_ROUTE_1_1 0x21E4
243#define R300_VAP_INPUT_ROUTE_1_2 0x21E8
244#define R300_VAP_INPUT_ROUTE_1_3 0x21EC
245#define R300_VAP_INPUT_ROUTE_1_4 0x21F0
246#define R300_VAP_INPUT_ROUTE_1_5 0x21F4
247#define R300_VAP_INPUT_ROUTE_1_6 0x21F8
248#define R300_VAP_INPUT_ROUTE_1_7 0x21FC
249
250/* END: Vertex data assembly */
251
252/* gap */
253
254/* BEGIN: Upload vertex program and data */
255
256/*
257 * The programmable vertex shader unit has a memory bank of unknown size
258 * that can be written to in 16 byte units by writing the address into
259 * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
260 *
261 * Pointers into the memory bank are always in multiples of 16 bytes.
262 *
263 * The memory bank is divided into areas with fixed meaning.
264 *
265 * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
266 * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
267 * whereas the difference between known addresses suggests size 512.
268 *
269 * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
270 * Native reported limits and the VPI layout suggest size 256, whereas
271 * difference between known addresses suggests size 512.
272 *
273 * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
274 * floating point pointsize. The exact purpose of this state is uncertain,
275 * as there is also the R300_RE_POINTSIZE register.
276 *
277 * Multiple vertex programs and parameter sets can be loaded at once,
278 * which could explain the size discrepancy.
279 */
280#define R300_VAP_PVS_UPLOAD_ADDRESS 0x2200
281# define R300_PVS_UPLOAD_PROGRAM 0x00000000
282# define R300_PVS_UPLOAD_PARAMETERS 0x00000200
283# define R300_PVS_UPLOAD_POINTSIZE 0x00000406
284
285/* gap */
286
287#define R300_VAP_PVS_UPLOAD_DATA 0x2208
288
289/* END: Upload vertex program and data */
290
291/* gap */
292
293/* I do not know the purpose of this register. However, I do know that
294 * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
295 * for normal rendering.
296 */
297#define R300_VAP_UNKNOWN_221C 0x221C
298# define R300_221C_NORMAL 0x00000000
299# define R300_221C_CLEAR 0x0001C000
300
301/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
302 * plane is per-pixel and the second plane is per-vertex.
303 *
304 * This was determined by experimentation alone but I believe it is correct.
305 *
306 * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
307 */
308#define R300_VAP_CLIP_X_0 0x2220
309#define R300_VAP_CLIP_X_1 0x2224
310#define R300_VAP_CLIP_Y_0 0x2228
311#define R300_VAP_CLIP_Y_1 0x2230
312
313/* gap */
314
315/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
316 * rendering commands and overwriting vertex program parameters.
317 * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
318 * avoids bugs caused by still running shaders reading bad data from memory.
319 */
320#define R300_VAP_PVS_WAITIDLE 0x2284 /* GUESS */
321
322/* Absolutely no clue what this register is about. */
323#define R300_VAP_UNKNOWN_2288 0x2288
324# define R300_2288_R300 0x00750000 /* -- nh */
325# define R300_2288_RV350 0x0000FFFF /* -- Vladimir */
326
327/* gap */
328
329/* Addresses are relative to the vertex program instruction area of the
330 * memory bank. PROGRAM_END points to the last instruction of the active
331 * program
332 *
333 * The meaning of the two UNKNOWN fields is obviously not known. However,
334 * experiments so far have shown that both *must* point to an instruction
335 * inside the vertex program, otherwise the GPU locks up.
336 *
337 * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
338 * R300_PVS_CNTL_1_POS_END_SHIFT points to instruction where last write to
339 * position takes place.
340 *
 341 * Most likely this is used to ignore the rest of the program in cases
 342 * where a group of verts isn't visible. For some reason this "section"
 343 * sometimes accepts other instructions that have no relationship with
 344 * position calculations.
345 */
346#define R300_VAP_PVS_CNTL_1 0x22D0
347# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
348# define R300_PVS_CNTL_1_POS_END_SHIFT 10
349# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
 350/* Addresses are relative to the vertex program parameters area. */
351#define R300_VAP_PVS_CNTL_2 0x22D4
352# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
353# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
354#define R300_VAP_PVS_CNTL_3 0x22D8
355# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
356# define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
357
 358/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
 359 * immediate vertices (0x2AC in the original comment looks like a typo:
 360 * the registers below end at R300_VAP_VTX_END_OF_PKT, 0x24AC)
 361 */
361#define R300_VAP_VTX_COLOR_R 0x2464
362#define R300_VAP_VTX_COLOR_G 0x2468
363#define R300_VAP_VTX_COLOR_B 0x246C
364#define R300_VAP_VTX_POS_0_X_1 0x2490 /* used for glVertex2*() */
365#define R300_VAP_VTX_POS_0_Y_1 0x2494
366#define R300_VAP_VTX_COLOR_PKD 0x249C /* RGBA */
367#define R300_VAP_VTX_POS_0_X_2 0x24A0 /* used for glVertex3*() */
368#define R300_VAP_VTX_POS_0_Y_2 0x24A4
369#define R300_VAP_VTX_POS_0_Z_2 0x24A8
370/* write 0 to indicate end of packet? */
371#define R300_VAP_VTX_END_OF_PKT 0x24AC
372
373/* gap */
374
375/* These are values from r300_reg/r300_reg.h - they are known to be correct
376 * and are here so we can use one register file instead of several
377 * - Vladimir
378 */
379#define R300_GB_VAP_RASTER_VTX_FMT_0 0x4000
380# define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT (1<<0)
381# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT (1<<1)
382# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT (1<<2)
383# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT (1<<3)
384# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT (1<<4)
385# define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE (0xf<<5)
386# define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT (0x1<<16)
387
388#define R300_GB_VAP_RASTER_VTX_FMT_1 0x4004
389 /* each of the following is 3 bits wide, specifies number
390 of components */
391# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
392# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
393# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
394# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
395# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
396# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
397# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
398# define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
399
 400/* UNK30 seems to enable point-to-quad transformation on textures
 401 * (or something closely related to that).
 402 * This bit is rather fatal at the time being due to shortcomings on the
 403 * pixel shader side
 404 */
404 */
405#define R300_GB_ENABLE 0x4008
406# define R300_GB_POINT_STUFF_ENABLE (1<<0)
407# define R300_GB_LINE_STUFF_ENABLE (1<<1)
408# define R300_GB_TRIANGLE_STUFF_ENABLE (1<<2)
409# define R300_GB_STENCIL_AUTO_ENABLE (1<<4)
410# define R300_GB_UNK31 (1<<31)
411 /* each of the following is 2 bits wide */
412#define R300_GB_TEX_REPLICATE 0
413#define R300_GB_TEX_ST 1
414#define R300_GB_TEX_STR 2
415# define R300_GB_TEX0_SOURCE_SHIFT 16
416# define R300_GB_TEX1_SOURCE_SHIFT 18
417# define R300_GB_TEX2_SOURCE_SHIFT 20
418# define R300_GB_TEX3_SOURCE_SHIFT 22
419# define R300_GB_TEX4_SOURCE_SHIFT 24
420# define R300_GB_TEX5_SOURCE_SHIFT 26
421# define R300_GB_TEX6_SOURCE_SHIFT 28
422# define R300_GB_TEX7_SOURCE_SHIFT 30
423
424/* MSPOS - positions for multisample antialiasing (?) */
425#define R300_GB_MSPOS0 0x4010
426 /* shifts - each of the fields is 4 bits */
427# define R300_GB_MSPOS0__MS_X0_SHIFT 0
428# define R300_GB_MSPOS0__MS_Y0_SHIFT 4
429# define R300_GB_MSPOS0__MS_X1_SHIFT 8
430# define R300_GB_MSPOS0__MS_Y1_SHIFT 12
431# define R300_GB_MSPOS0__MS_X2_SHIFT 16
432# define R300_GB_MSPOS0__MS_Y2_SHIFT 20
433# define R300_GB_MSPOS0__MSBD0_Y 24
434# define R300_GB_MSPOS0__MSBD0_X 28
435
436#define R300_GB_MSPOS1 0x4014
437# define R300_GB_MSPOS1__MS_X3_SHIFT 0
438# define R300_GB_MSPOS1__MS_Y3_SHIFT 4
439# define R300_GB_MSPOS1__MS_X4_SHIFT 8
440# define R300_GB_MSPOS1__MS_Y4_SHIFT 12
441# define R300_GB_MSPOS1__MS_X5_SHIFT 16
442# define R300_GB_MSPOS1__MS_Y5_SHIFT 20
443# define R300_GB_MSPOS1__MSBD1 24
444
445
446#define R300_GB_TILE_CONFIG 0x4018
447# define R300_GB_TILE_ENABLE (1<<0)
448# define R300_GB_TILE_PIPE_COUNT_RV300 0
449# define R300_GB_TILE_PIPE_COUNT_R300 (3<<1)
450# define R300_GB_TILE_PIPE_COUNT_R420 (7<<1)
451# define R300_GB_TILE_PIPE_COUNT_RV410 (3<<1)
452# define R300_GB_TILE_SIZE_8 0
453# define R300_GB_TILE_SIZE_16 (1<<4)
454# define R300_GB_TILE_SIZE_32 (2<<4)
455# define R300_GB_SUPER_SIZE_1 (0<<6)
456# define R300_GB_SUPER_SIZE_2 (1<<6)
457# define R300_GB_SUPER_SIZE_4 (2<<6)
458# define R300_GB_SUPER_SIZE_8 (3<<6)
459# define R300_GB_SUPER_SIZE_16 (4<<6)
460# define R300_GB_SUPER_SIZE_32 (5<<6)
461# define R300_GB_SUPER_SIZE_64 (6<<6)
462# define R300_GB_SUPER_SIZE_128 (7<<6)
463# define R300_GB_SUPER_X_SHIFT 9 /* 3 bits wide */
464# define R300_GB_SUPER_Y_SHIFT 12 /* 3 bits wide */
465# define R300_GB_SUPER_TILE_A 0
466# define R300_GB_SUPER_TILE_B (1<<15)
467# define R300_GB_SUBPIXEL_1_12 0
468# define R300_GB_SUBPIXEL_1_16 (1<<16)
469
470#define R300_GB_FIFO_SIZE 0x4024
471 /* each of the following is 2 bits wide */
472#define R300_GB_FIFO_SIZE_32 0
473#define R300_GB_FIFO_SIZE_64 1
474#define R300_GB_FIFO_SIZE_128 2
475#define R300_GB_FIFO_SIZE_256 3
476# define R300_SC_IFIFO_SIZE_SHIFT 0
477# define R300_SC_TZFIFO_SIZE_SHIFT 2
478# define R300_SC_BFIFO_SIZE_SHIFT 4
479
480# define R300_US_OFIFO_SIZE_SHIFT 12
481# define R300_US_WFIFO_SIZE_SHIFT 14
 482 /* the following use the same constants as above, but the meaning
 483 is times 2 (i.e. instead of 32 words it means 64) */
484# define R300_RS_TFIFO_SIZE_SHIFT 6
485# define R300_RS_CFIFO_SIZE_SHIFT 8
486# define R300_US_RAM_SIZE_SHIFT 10
487 /* watermarks, 3 bits wide */
488# define R300_RS_HIGHWATER_COL_SHIFT 16
489# define R300_RS_HIGHWATER_TEX_SHIFT 19
490# define R300_OFIFO_HIGHWATER_SHIFT 22 /* two bits only */
491# define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT 24
492
493#define R300_GB_SELECT 0x401C
494# define R300_GB_FOG_SELECT_C0A 0
495# define R300_GB_FOG_SELECT_C1A 1
496# define R300_GB_FOG_SELECT_C2A 2
497# define R300_GB_FOG_SELECT_C3A 3
498# define R300_GB_FOG_SELECT_1_1_W 4
499# define R300_GB_FOG_SELECT_Z 5
500# define R300_GB_DEPTH_SELECT_Z 0
501# define R300_GB_DEPTH_SELECT_1_1_W (1<<3)
502# define R300_GB_W_SELECT_1_W 0
503# define R300_GB_W_SELECT_1 (1<<4)
504
505#define R300_GB_AA_CONFIG 0x4020
506# define R300_AA_DISABLE 0x00
507# define R300_AA_ENABLE 0x01
508# define R300_AA_SUBSAMPLES_2 0
509# define R300_AA_SUBSAMPLES_3 (1<<1)
510# define R300_AA_SUBSAMPLES_4 (2<<1)
511# define R300_AA_SUBSAMPLES_6 (3<<1)
512
513/* gap */
514
515/* Zero to flush caches. */
516#define R300_TX_CNTL 0x4100
517#define R300_TX_FLUSH 0x0
518
519/* The upper enable bits are guessed, based on fglrx reported limits. */
520#define R300_TX_ENABLE 0x4104
521# define R300_TX_ENABLE_0 (1 << 0)
522# define R300_TX_ENABLE_1 (1 << 1)
523# define R300_TX_ENABLE_2 (1 << 2)
524# define R300_TX_ENABLE_3 (1 << 3)
525# define R300_TX_ENABLE_4 (1 << 4)
526# define R300_TX_ENABLE_5 (1 << 5)
527# define R300_TX_ENABLE_6 (1 << 6)
528# define R300_TX_ENABLE_7 (1 << 7)
529# define R300_TX_ENABLE_8 (1 << 8)
530# define R300_TX_ENABLE_9 (1 << 9)
531# define R300_TX_ENABLE_10 (1 << 10)
532# define R300_TX_ENABLE_11 (1 << 11)
533# define R300_TX_ENABLE_12 (1 << 12)
534# define R300_TX_ENABLE_13 (1 << 13)
535# define R300_TX_ENABLE_14 (1 << 14)
536# define R300_TX_ENABLE_15 (1 << 15)
537
538/* The pointsize is given in multiples of 6. The pointsize can be
539 * enormous: Clear() renders a single point that fills the entire
540 * framebuffer.
541 */
542#define R300_RE_POINTSIZE 0x421C
543# define R300_POINTSIZE_Y_SHIFT 0
544# define R300_POINTSIZE_Y_MASK (0xFFFF << 0) /* GUESS */
545# define R300_POINTSIZE_X_SHIFT 16
546# define R300_POINTSIZE_X_MASK (0xFFFF << 16) /* GUESS */
547# define R300_POINTSIZE_MAX (R300_POINTSIZE_Y_MASK / 6)
548
549/* The line width is given in multiples of 6.
550 * In default mode lines are classified as vertical lines.
551 * HO: horizontal
552 * VE: vertical or horizontal
553 * HO & VE: no classification
554 */
555#define R300_RE_LINE_CNT 0x4234
556# define R300_LINESIZE_SHIFT 0
557# define R300_LINESIZE_MASK (0xFFFF << 0) /* GUESS */
558# define R300_LINESIZE_MAX (R300_LINESIZE_MASK / 6)
559# define R300_LINE_CNT_HO (1 << 16)
560# define R300_LINE_CNT_VE (1 << 17)
561
562/* Some sort of scale or clamp value for texcoordless textures. */
563#define R300_RE_UNK4238 0x4238
564
565/* Something shade related */
566#define R300_RE_SHADE 0x4274
567
568#define R300_RE_SHADE_MODEL 0x4278
569# define R300_RE_SHADE_MODEL_SMOOTH 0x3aaaa
570# define R300_RE_SHADE_MODEL_FLAT 0x39595
571
572/* Dangerous */
573#define R300_RE_POLYGON_MODE 0x4288
574# define R300_PM_ENABLED (1 << 0)
575# define R300_PM_FRONT_POINT (0 << 0)
576# define R300_PM_BACK_POINT (0 << 0)
577# define R300_PM_FRONT_LINE (1 << 4)
578# define R300_PM_FRONT_FILL (1 << 5)
579# define R300_PM_BACK_LINE (1 << 7)
580# define R300_PM_BACK_FILL (1 << 8)
581
582/* Fog parameters */
583#define R300_RE_FOG_SCALE 0x4294
584#define R300_RE_FOG_START 0x4298
585
586/* Not sure why there are duplicate of factor and constant values.
587 * My best guess so far is that there are separate zbiases for test and write.
588 * Ordering might be wrong.
589 * Some of the tests indicate that fgl has a fallback implementation of zbias
590 * via pixel shaders.
591 */
592#define R300_RE_ZBIAS_CNTL 0x42A0 /* GUESS */
593#define R300_RE_ZBIAS_T_FACTOR 0x42A4
594#define R300_RE_ZBIAS_T_CONSTANT 0x42A8
595#define R300_RE_ZBIAS_W_FACTOR 0x42AC
596#define R300_RE_ZBIAS_W_CONSTANT 0x42B0
597
598/* This register needs to be set to (1<<1) for RV350 to correctly
599 * perform depth test (see --vb-triangles in r300_demo)
600 * Don't know about other chips. - Vladimir
601 * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
602 * My guess is that there are two bits for each zbias primitive
603 * (FILL, LINE, POINT).
604 * One to enable depth test and one for depth write.
 605 * Yet this doesn't explain why depth writes work ...
606 */
607#define R300_RE_OCCLUSION_CNTL 0x42B4
608# define R300_OCCLUSION_ON (1<<1)
609
610#define R300_RE_CULL_CNTL 0x42B8
611# define R300_CULL_FRONT (1 << 0)
612# define R300_CULL_BACK (1 << 1)
613# define R300_FRONT_FACE_CCW (0 << 2)
614# define R300_FRONT_FACE_CW (1 << 2)
615
616
617/* BEGIN: Rasterization / Interpolators - many guesses */
618
619/* 0_UNKNOWN_18 has always been set except for clear operations.
620 * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
621 * on the vertex program, *not* the fragment program)
622 */
623#define R300_RS_CNTL_0 0x4300
624# define R300_RS_CNTL_TC_CNT_SHIFT 2
625# define R300_RS_CNTL_TC_CNT_MASK (7 << 2)
626 /* number of color interpolators used */
627# define R300_RS_CNTL_CI_CNT_SHIFT 7
628# define R300_RS_CNTL_0_UNKNOWN_18 (1 << 18)
629 /* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
630 register. */
631#define R300_RS_CNTL_1 0x4304
632
633/* gap */
634
635/* Only used for texture coordinates.
636 * Use the source field to route texture coordinate input from the
637 * vertex program to the desired interpolator. Note that the source
638 * field is relative to the outputs the vertex program *actually*
639 * writes. If a vertex program only writes texcoord[1], this will
640 * be source index 0.
641 * Set INTERP_USED on all interpolators that produce data used by
642 * the fragment program. INTERP_USED looks like a swizzling mask,
643 * but I haven't seen it used that way.
644 *
645 * Note: The _UNKNOWN constants are always set in their respective
646 * register. I don't know if this is necessary.
647 */
648#define R300_RS_INTERP_0 0x4310
649#define R300_RS_INTERP_1 0x4314
650# define R300_RS_INTERP_1_UNKNOWN 0x40
651#define R300_RS_INTERP_2 0x4318
652# define R300_RS_INTERP_2_UNKNOWN 0x80
653#define R300_RS_INTERP_3 0x431C
654# define R300_RS_INTERP_3_UNKNOWN 0xC0
655#define R300_RS_INTERP_4 0x4320
656#define R300_RS_INTERP_5 0x4324
657#define R300_RS_INTERP_6 0x4328
658#define R300_RS_INTERP_7 0x432C
659# define R300_RS_INTERP_SRC_SHIFT 2
660# define R300_RS_INTERP_SRC_MASK (7 << 2)
661# define R300_RS_INTERP_USED 0x00D10000
662
663/* These DWORDs control how vertex data is routed into fragment program
664 * registers, after interpolators.
665 */
666#define R300_RS_ROUTE_0 0x4330
667#define R300_RS_ROUTE_1 0x4334
668#define R300_RS_ROUTE_2 0x4338
669#define R300_RS_ROUTE_3 0x433C /* GUESS */
670#define R300_RS_ROUTE_4 0x4340 /* GUESS */
671#define R300_RS_ROUTE_5 0x4344 /* GUESS */
672#define R300_RS_ROUTE_6 0x4348 /* GUESS */
673#define R300_RS_ROUTE_7 0x434C /* GUESS */
674# define R300_RS_ROUTE_SOURCE_INTERP_0 0
675# define R300_RS_ROUTE_SOURCE_INTERP_1 1
676# define R300_RS_ROUTE_SOURCE_INTERP_2 2
677# define R300_RS_ROUTE_SOURCE_INTERP_3 3
678# define R300_RS_ROUTE_SOURCE_INTERP_4 4
679# define R300_RS_ROUTE_SOURCE_INTERP_5 5 /* GUESS */
680# define R300_RS_ROUTE_SOURCE_INTERP_6 6 /* GUESS */
681# define R300_RS_ROUTE_SOURCE_INTERP_7 7 /* GUESS */
682# define R300_RS_ROUTE_ENABLE (1 << 3) /* GUESS */
683# define R300_RS_ROUTE_DEST_SHIFT 6
684# define R300_RS_ROUTE_DEST_MASK (31 << 6) /* GUESS */
685
686/* Special handling for color: When the fragment program uses color,
687 * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
688 * color register index.
689 *
 690 * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
691 * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
692 * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
693 * correct or not. - Oliver.
694 */
695# define R300_RS_ROUTE_0_COLOR (1 << 14)
696# define R300_RS_ROUTE_0_COLOR_DEST_SHIFT 17
697# define R300_RS_ROUTE_0_COLOR_DEST_MASK (31 << 17) /* GUESS */
698/* As above, but for secondary color */
699# define R300_RS_ROUTE_1_COLOR1 (1 << 14)
700# define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
701# define R300_RS_ROUTE_1_COLOR1_DEST_MASK (31 << 17)
702# define R300_RS_ROUTE_1_UNKNOWN11 (1 << 11)
703/* END: Rasterization / Interpolators - many guesses */
704
705/* Hierarchical Z Enable */
706#define R300_SC_HYPERZ 0x43a4
707# define R300_SC_HYPERZ_DISABLE (0 << 0)
708# define R300_SC_HYPERZ_ENABLE (1 << 0)
709# define R300_SC_HYPERZ_MIN (0 << 1)
710# define R300_SC_HYPERZ_MAX (1 << 1)
711# define R300_SC_HYPERZ_ADJ_256 (0 << 2)
712# define R300_SC_HYPERZ_ADJ_128 (1 << 2)
713# define R300_SC_HYPERZ_ADJ_64 (2 << 2)
714# define R300_SC_HYPERZ_ADJ_32 (3 << 2)
715# define R300_SC_HYPERZ_ADJ_16 (4 << 2)
716# define R300_SC_HYPERZ_ADJ_8 (5 << 2)
717# define R300_SC_HYPERZ_ADJ_4 (6 << 2)
718# define R300_SC_HYPERZ_ADJ_2 (7 << 2)
719# define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
720# define R300_SC_HYPERZ_HZ_Z0MIN (1 << 5)
721# define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
722# define R300_SC_HYPERZ_HZ_Z0MAX (1 << 6)
723
724#define R300_SC_EDGERULE 0x43a8
725
726/* BEGIN: Scissors and cliprects */
727
728/* There are four clipping rectangles. Their corner coordinates are inclusive.
729 * Every pixel is assigned a number from 0 and 15 by setting bits 0-3 depending
730 * on whether the pixel is inside cliprects 0-3, respectively. For example,
731 * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
732 * the number 3 (binary 0011).
733 * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
734 * the pixel is rasterized.
735 *
736 * In addition to this, there is a scissors rectangle. Only pixels inside the
737 * scissors rectangle are drawn. (coordinates are inclusive)
738 *
739 * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
740 * for the purpose of clipping and scissors.
741 */
742#define R300_RE_CLIPRECT_TL_0 0x43B0
743#define R300_RE_CLIPRECT_BR_0 0x43B4
744#define R300_RE_CLIPRECT_TL_1 0x43B8
745#define R300_RE_CLIPRECT_BR_1 0x43BC
746#define R300_RE_CLIPRECT_TL_2 0x43C0
747#define R300_RE_CLIPRECT_BR_2 0x43C4
748#define R300_RE_CLIPRECT_TL_3 0x43C8
749#define R300_RE_CLIPRECT_BR_3 0x43CC
750# define R300_CLIPRECT_OFFSET 1440
751# define R300_CLIPRECT_MASK 0x1FFF
752# define R300_CLIPRECT_X_SHIFT 0
753# define R300_CLIPRECT_X_MASK (0x1FFF << 0)
754# define R300_CLIPRECT_Y_SHIFT 13
755# define R300_CLIPRECT_Y_MASK (0x1FFF << 13)
756#define R300_RE_CLIPRECT_CNTL 0x43D0
757# define R300_CLIP_OUT (1 << 0)
758# define R300_CLIP_0 (1 << 1)
759# define R300_CLIP_1 (1 << 2)
760# define R300_CLIP_10 (1 << 3)
761# define R300_CLIP_2 (1 << 4)
762# define R300_CLIP_20 (1 << 5)
763# define R300_CLIP_21 (1 << 6)
764# define R300_CLIP_210 (1 << 7)
765# define R300_CLIP_3 (1 << 8)
766# define R300_CLIP_30 (1 << 9)
767# define R300_CLIP_31 (1 << 10)
768# define R300_CLIP_310 (1 << 11)
769# define R300_CLIP_32 (1 << 12)
770# define R300_CLIP_320 (1 << 13)
771# define R300_CLIP_321 (1 << 14)
772# define R300_CLIP_3210 (1 << 15)
773
774/* gap */
775
776#define R300_RE_SCISSORS_TL 0x43E0
777#define R300_RE_SCISSORS_BR 0x43E4
778# define R300_SCISSORS_OFFSET 1440
779# define R300_SCISSORS_X_SHIFT 0
780# define R300_SCISSORS_X_MASK (0x1FFF << 0)
781# define R300_SCISSORS_Y_SHIFT 13
782# define R300_SCISSORS_Y_MASK (0x1FFF << 13)
783/* END: Scissors and cliprects */
784
/* BEGIN: Texture specification */

/*
 * The texture specification dwords are grouped by meaning and not by texture
 * unit. This means that e.g. the offset for texture image unit N is found in
 * register TX_OFFSET_0 + (4*N)
 */
#define R300_TX_FILTER_0 0x4400
	/* Wrap-mode values, used in the S/T/Q fields below. */
# define R300_TX_REPEAT 0
# define R300_TX_MIRRORED 1
# define R300_TX_CLAMP 4
# define R300_TX_CLAMP_TO_EDGE 2
# define R300_TX_CLAMP_TO_BORDER 6
# define R300_TX_WRAP_S_SHIFT 0
# define R300_TX_WRAP_S_MASK (7 << 0)
# define R300_TX_WRAP_T_SHIFT 3
# define R300_TX_WRAP_T_MASK (7 << 3)
# define R300_TX_WRAP_Q_SHIFT 6
# define R300_TX_WRAP_Q_MASK (7 << 6)
# define R300_TX_MAG_FILTER_NEAREST (1 << 9)
# define R300_TX_MAG_FILTER_LINEAR (2 << 9)
# define R300_TX_MAG_FILTER_MASK (3 << 9)
# define R300_TX_MIN_FILTER_NEAREST (1 << 11)
# define R300_TX_MIN_FILTER_LINEAR (2 << 11)
# define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST (5 << 11)
# define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR (9 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 11)
# define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR (10 << 11)

/* NOTE: a plain anisotropic NEAREST mode doesn't seem to exist.
 * MAG_FILTER_MASK and (3 << 11) are deliberately not folded into all
 * anisotropy modes because that would clobber the selected mag filter.
 */
# define R300_TX_MIN_FILTER_ANISO_NEAREST (0 << 13)
# define R300_TX_MIN_FILTER_ANISO_LINEAR (0 << 13)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
# define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (2 << 13)
# define R300_TX_MIN_FILTER_MASK ( (15 << 11) | (3 << 13) )
# define R300_TX_MAX_ANISO_1_TO_1 (0 << 21)
# define R300_TX_MAX_ANISO_2_TO_1 (2 << 21)
# define R300_TX_MAX_ANISO_4_TO_1 (4 << 21)
# define R300_TX_MAX_ANISO_8_TO_1 (6 << 21)
# define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
# define R300_TX_MAX_ANISO_MASK (14 << 21)

#define R300_TX_FILTER1_0 0x4440
# define R300_CHROMA_KEY_MODE_DISABLE 0
# define R300_CHROMA_KEY_FORCE 1
# define R300_CHROMA_KEY_BLEND 2
# define R300_MC_ROUND_NORMAL (0<<2)
# define R300_MC_ROUND_MPEG4 (1<<2)
# define R300_LOD_BIAS_MASK 0x1fff
# define R300_EDGE_ANISO_EDGE_DIAG (0<<13)
# define R300_EDGE_ANISO_EDGE_ONLY (1<<13)
# define R300_MC_COORD_TRUNCATE_DISABLE (0<<14)
# define R300_MC_COORD_TRUNCATE_MPEG (1<<14)
# define R300_TX_TRI_PERF_0_8 (0<<15)
# define R300_TX_TRI_PERF_1_8 (1<<15)
# define R300_TX_TRI_PERF_1_4 (2<<15)
# define R300_TX_TRI_PERF_3_8 (3<<15)
# define R300_ANISO_THRESHOLD_MASK (7<<17)

#define R300_TX_SIZE_0 0x4480
# define R300_TX_WIDTHMASK_SHIFT 0
# define R300_TX_WIDTHMASK_MASK (2047 << 0)
# define R300_TX_HEIGHTMASK_SHIFT 11
# define R300_TX_HEIGHTMASK_MASK (2047 << 11)
# define R300_TX_UNK23 (1 << 23)
# define R300_TX_MAX_MIP_LEVEL_SHIFT 26
# define R300_TX_MAX_MIP_LEVEL_MASK (0xf << 26)
# define R300_TX_SIZE_PROJECTED (1<<30)
# define R300_TX_SIZE_TXPITCH_EN (1<<31)
#define R300_TX_FORMAT_0 0x44C0
	/* The interpretation of the format word by Wladimir van der Laan */
	/* The X, Y, Z and W refer to the layout of the components.
	   They are given meanings as R, G, B and Alpha by the swizzle
	   specification */
# define R300_TX_FORMAT_X8 0x0
# define R300_TX_FORMAT_X16 0x1
# define R300_TX_FORMAT_Y4X4 0x2
# define R300_TX_FORMAT_Y8X8 0x3
# define R300_TX_FORMAT_Y16X16 0x4
# define R300_TX_FORMAT_Z3Y3X2 0x5
# define R300_TX_FORMAT_Z5Y6X5 0x6
# define R300_TX_FORMAT_Z6Y5X5 0x7
# define R300_TX_FORMAT_Z11Y11X10 0x8
# define R300_TX_FORMAT_Z10Y11X11 0x9
# define R300_TX_FORMAT_W4Z4Y4X4 0xA
# define R300_TX_FORMAT_W1Z5Y5X5 0xB
# define R300_TX_FORMAT_W8Z8Y8X8 0xC
# define R300_TX_FORMAT_W2Z10Y10X10 0xD
# define R300_TX_FORMAT_W16Z16Y16X16 0xE
# define R300_TX_FORMAT_DXT1 0xF
# define R300_TX_FORMAT_DXT3 0x10
# define R300_TX_FORMAT_DXT5 0x11
# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
	/* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)

	/* gap */
	/* Floating point formats */
	/* Note - hardware supports both 16 and 32 bit floating point */
# define R300_TX_FORMAT_FL_I16 0x18
# define R300_TX_FORMAT_FL_I16A16 0x19
# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
	/* alpha modes, convenience mostly */
	/* if you have alpha, pick the constant appropriate to the
	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
# define R300_TX_FORMAT_ALPHA_1CH 0x000
# define R300_TX_FORMAT_ALPHA_2CH 0x200
# define R300_TX_FORMAT_ALPHA_4CH 0x600
# define R300_TX_FORMAT_ALPHA_NONE 0xA00
	/* Swizzling */
	/* constants */
# define R300_TX_FORMAT_X 0
# define R300_TX_FORMAT_Y 1
# define R300_TX_FORMAT_Z 2
# define R300_TX_FORMAT_W 3
# define R300_TX_FORMAT_ZERO 4
# define R300_TX_FORMAT_ONE 5
	/* 2.0*Z, everything above 1.0 is set to 0.0 */
# define R300_TX_FORMAT_CUT_Z 6
	/* 2.0*W, everything above 1.0 is set to 0.0 */
# define R300_TX_FORMAT_CUT_W 7

# define R300_TX_FORMAT_B_SHIFT 18
# define R300_TX_FORMAT_G_SHIFT 15
# define R300_TX_FORMAT_R_SHIFT 12
# define R300_TX_FORMAT_A_SHIFT 9
	/* Convenience macro to take care of layout and swizzling.
	   B/G/R/A are swizzle selectors (X/Y/Z/W/ZERO/ONE/CUT_Z/CUT_W),
	   FMT is one of the layout formats above. */
# define R300_EASY_TX_FORMAT(B, G, R, A, FMT) ( \
 ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
 | ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
 | ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
 | ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
 | (R300_TX_FORMAT_##FMT) \
 )
	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
	   We don't really know what they do. Take values from a
	   constant color? */
# define R300_TX_FORMAT_CONST_X (1<<5)
# define R300_TX_FORMAT_CONST_Y (2<<5)
# define R300_TX_FORMAT_CONST_Z (4<<5)
# define R300_TX_FORMAT_CONST_W (8<<5)

# define R300_TX_FORMAT_YUV_MODE 0x00800000

#define R300_TX_PITCH_0 0x4500 /* obvious missing in gap */
#define R300_TX_OFFSET_0 0x4540
	/* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
# define R300_TXO_ENDIAN_BYTE_SWAP (1 << 0)
# define R300_TXO_ENDIAN_WORD_SWAP (2 << 0)
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_MACRO_TILE (1 << 2)
# define R300_TXO_MICRO_TILE (1 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
	/* END: Guess from R200 */

/* 32 bit chroma key */
#define R300_TX_CHROMA_KEY_0 0x4580
/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
#define R300_TX_BORDER_COLOR_0 0x45C0

/* END: Texture specification */
958
/* BEGIN: Fragment program instruction set */

/* Fragment programs are written directly into register space.
 * There are separate instruction streams for texture instructions and ALU
 * instructions.
 * In order to synchronize these streams, the program is divided into up
 * to 4 nodes. Each node begins with a number of TEX operations, followed
 * by a number of ALU operations.
 * The first node can have zero TEX ops, all subsequent nodes must have at
 * least one TEX op.
 * All nodes must have at least one ALU op.
 *
 * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
 * 1 node, a value of 3 means 4 nodes.
 * The total amount of instructions is defined in PFS_CNTL_2. The offsets are
 * offsets into the respective instruction streams, while *_END points to the
 * last instruction relative to this offset.
 */
#define R300_PFS_CNTL_0 0x4600
# define R300_PFS_CNTL_LAST_NODES_SHIFT 0
# define R300_PFS_CNTL_LAST_NODES_MASK (3 << 0)
# define R300_PFS_CNTL_FIRST_NODE_HAS_TEX (1 << 3)
#define R300_PFS_CNTL_1 0x4604
/* There is an unshifted value here which has so far always been equal to the
 * index of the highest used temporary register.
 */
#define R300_PFS_CNTL_2 0x4608
# define R300_PFS_CNTL_ALU_OFFSET_SHIFT 0
# define R300_PFS_CNTL_ALU_OFFSET_MASK (63 << 0)
# define R300_PFS_CNTL_ALU_END_SHIFT 6
# define R300_PFS_CNTL_ALU_END_MASK (63 << 6)
# define R300_PFS_CNTL_TEX_OFFSET_SHIFT 12
# define R300_PFS_CNTL_TEX_OFFSET_MASK (31 << 12) /* GUESS */
# define R300_PFS_CNTL_TEX_END_SHIFT 18
# define R300_PFS_CNTL_TEX_END_MASK (31 << 18) /* GUESS */

/* gap */

/* Nodes are stored backwards. The last active node is always stored in
 * PFS_NODE_3.
 * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
 * first node is stored in NODE_2, the second node is stored in NODE_3.
 *
 * Offsets are relative to the master offset from PFS_CNTL_2.
 */
#define R300_PFS_NODE_0 0x4610
#define R300_PFS_NODE_1 0x4614
#define R300_PFS_NODE_2 0x4618
#define R300_PFS_NODE_3 0x461C
# define R300_PFS_NODE_ALU_OFFSET_SHIFT 0
# define R300_PFS_NODE_ALU_OFFSET_MASK (63 << 0)
# define R300_PFS_NODE_ALU_END_SHIFT 6
# define R300_PFS_NODE_ALU_END_MASK (63 << 6)
# define R300_PFS_NODE_TEX_OFFSET_SHIFT 12
# define R300_PFS_NODE_TEX_OFFSET_MASK (31 << 12)
# define R300_PFS_NODE_TEX_END_SHIFT 17
# define R300_PFS_NODE_TEX_END_MASK (31 << 17)
# define R300_PFS_NODE_OUTPUT_COLOR (1 << 22)
# define R300_PFS_NODE_OUTPUT_DEPTH (1 << 23)

/* TEX
 * As far as I can tell, texture instructions cannot write into output
 * registers directly. A subsequent ALU instruction is always necessary,
 * even if it's just MAD o0, r0, 1, 0
 */
#define R300_PFS_TEXI_0 0x4620
# define R300_FPITX_SRC_SHIFT 0
# define R300_FPITX_SRC_MASK (31 << 0)
	/* GUESS */
# define R300_FPITX_SRC_CONST (1 << 5)
# define R300_FPITX_DST_SHIFT 6
# define R300_FPITX_DST_MASK (31 << 6)
# define R300_FPITX_IMAGE_SHIFT 11
	/* GUESS based on layout and native limits */
# define R300_FPITX_IMAGE_MASK (15 << 11)
/* Unsure if these are opcodes, or some kind of bitfield, but this is how
 * they were set when I checked
 */
# define R300_FPITX_OPCODE_SHIFT 15
# define R300_FPITX_OP_TEX 1
# define R300_FPITX_OP_KIL 2
# define R300_FPITX_OP_TXP 3
# define R300_FPITX_OP_TXB 4
# define R300_FPITX_OPCODE_MASK (7 << 15)

/* ALU
 * The ALU instruction register blocks are enumerated according to the order
 * in which fglrx programs them. (NOTE: the original sentence here was
 * truncated.) I assume there is space for 64 instructions, since
 * each block has space for a maximum of 64 DWORDs, and this matches reported
 * native limits.
 *
 * The basic functional block seems to be one MAD for each color and alpha,
 * and an adder that adds all components after the MUL.
 * - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
 * - DP4: Use OUTC_DP4, OUTA_DP4
 * - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
 * - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
 * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
 * - CMP: If ARG2 < 0, return ARG1, else return ARG0
 * - FLR: use FRC+MAD
 * - XPD: use MAD+MAD
 * - SGE, SLT: use MAD+CMP
 * - RSQ: use ABS modifier for argument
 * - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
 *   (e.g. RCP) into color register
 * - apparently, there's no quick DST operation
 * - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
 * - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
 * - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
 *
 * Operand selection
 * First stage selects three sources from the available registers and
 * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
 * fglrx sorts the three source fields: Registers before constants,
 * lower indices before higher indices; I do not know whether this is
 * necessary.
 *
 * fglrx fills unused sources with "read constant 0"
 * According to specs, you cannot select more than two different constants.
 *
 * Second stage selects the operands from the sources. This is defined in
 * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
 * zero and one.
 * Swizzling and negation happens in this stage, as well.
 *
 * Important: Color and alpha seem to be mostly separate, i.e. their sources
 * selection appears to be fully independent (the register storage is probably
 * physically split into a color and an alpha section).
 * However (because of the apparent physical split), there is some interaction
 * WRT swizzling. If, for example, you want to load an R component into an
 * Alpha operand, this R component is taken from a *color* source, not from
 * an alpha source. The corresponding register doesn't even have to appear in
 * the alpha sources list. (I hope this all makes sense to you)
 *
 * Destination selection
 * The destination register index is in FPI1 (color) and FPI3 (alpha)
 * together with enable bits.
 * There are separate enable bits for writing into temporary registers
 * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
 * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
 * same index must be used for both).
 *
 * Note: There is a special form for LRP
 *  - Argument order is the same as in ARB_fragment_program.
 *  - Operation is MAD
 *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
 *  - Set FPI0/FPI2_SPECIAL_LRP
 * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
 */
#define R300_PFS_INSTR1_0 0x46C0
# define R300_FPI1_SRC0C_SHIFT 0
# define R300_FPI1_SRC0C_MASK (31 << 0)
# define R300_FPI1_SRC0C_CONST (1 << 5)
# define R300_FPI1_SRC1C_SHIFT 6
# define R300_FPI1_SRC1C_MASK (31 << 6)
# define R300_FPI1_SRC1C_CONST (1 << 11)
# define R300_FPI1_SRC2C_SHIFT 12
# define R300_FPI1_SRC2C_MASK (31 << 12)
# define R300_FPI1_SRC2C_CONST (1 << 17)
# define R300_FPI1_SRC_MASK 0x0003ffff
# define R300_FPI1_DSTC_SHIFT 18
# define R300_FPI1_DSTC_MASK (31 << 18)
# define R300_FPI1_DSTC_REG_MASK_SHIFT 23
# define R300_FPI1_DSTC_REG_X (1 << 23)
# define R300_FPI1_DSTC_REG_Y (1 << 24)
# define R300_FPI1_DSTC_REG_Z (1 << 25)
# define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT 26
# define R300_FPI1_DSTC_OUTPUT_X (1 << 26)
# define R300_FPI1_DSTC_OUTPUT_Y (1 << 27)
# define R300_FPI1_DSTC_OUTPUT_Z (1 << 28)

#define R300_PFS_INSTR3_0 0x47C0
# define R300_FPI3_SRC0A_SHIFT 0
# define R300_FPI3_SRC0A_MASK (31 << 0)
# define R300_FPI3_SRC0A_CONST (1 << 5)
# define R300_FPI3_SRC1A_SHIFT 6
# define R300_FPI3_SRC1A_MASK (31 << 6)
# define R300_FPI3_SRC1A_CONST (1 << 11)
# define R300_FPI3_SRC2A_SHIFT 12
# define R300_FPI3_SRC2A_MASK (31 << 12)
# define R300_FPI3_SRC2A_CONST (1 << 17)
# define R300_FPI3_SRC_MASK 0x0003ffff
# define R300_FPI3_DSTA_SHIFT 18
# define R300_FPI3_DSTA_MASK (31 << 18)
# define R300_FPI3_DSTA_REG (1 << 23)
# define R300_FPI3_DSTA_OUTPUT (1 << 24)
# define R300_FPI3_DSTA_DEPTH (1 << 27)

#define R300_PFS_INSTR0_0 0x48C0
# define R300_FPI0_ARGC_SRC0C_XYZ 0
# define R300_FPI0_ARGC_SRC0C_XXX 1
# define R300_FPI0_ARGC_SRC0C_YYY 2
# define R300_FPI0_ARGC_SRC0C_ZZZ 3
# define R300_FPI0_ARGC_SRC1C_XYZ 4
# define R300_FPI0_ARGC_SRC1C_XXX 5
# define R300_FPI0_ARGC_SRC1C_YYY 6
# define R300_FPI0_ARGC_SRC1C_ZZZ 7
# define R300_FPI0_ARGC_SRC2C_XYZ 8
# define R300_FPI0_ARGC_SRC2C_XXX 9
# define R300_FPI0_ARGC_SRC2C_YYY 10
# define R300_FPI0_ARGC_SRC2C_ZZZ 11
# define R300_FPI0_ARGC_SRC0A 12
# define R300_FPI0_ARGC_SRC1A 13
# define R300_FPI0_ARGC_SRC2A 14
# define R300_FPI0_ARGC_SRC1C_LRP 15
# define R300_FPI0_ARGC_ZERO 20
# define R300_FPI0_ARGC_ONE 21
	/* GUESS */
# define R300_FPI0_ARGC_HALF 22
# define R300_FPI0_ARGC_SRC0C_YZX 23
# define R300_FPI0_ARGC_SRC1C_YZX 24
# define R300_FPI0_ARGC_SRC2C_YZX 25
# define R300_FPI0_ARGC_SRC0C_ZXY 26
# define R300_FPI0_ARGC_SRC1C_ZXY 27
# define R300_FPI0_ARGC_SRC2C_ZXY 28
# define R300_FPI0_ARGC_SRC0CA_WZY 29
# define R300_FPI0_ARGC_SRC1CA_WZY 30
# define R300_FPI0_ARGC_SRC2CA_WZY 31

# define R300_FPI0_ARG0C_SHIFT 0
# define R300_FPI0_ARG0C_MASK (31 << 0)
# define R300_FPI0_ARG0C_NEG (1 << 5)
# define R300_FPI0_ARG0C_ABS (1 << 6)
# define R300_FPI0_ARG1C_SHIFT 7
# define R300_FPI0_ARG1C_MASK (31 << 7)
# define R300_FPI0_ARG1C_NEG (1 << 12)
# define R300_FPI0_ARG1C_ABS (1 << 13)
# define R300_FPI0_ARG2C_SHIFT 14
# define R300_FPI0_ARG2C_MASK (31 << 14)
# define R300_FPI0_ARG2C_NEG (1 << 19)
# define R300_FPI0_ARG2C_ABS (1 << 20)
# define R300_FPI0_SPECIAL_LRP (1 << 21)
# define R300_FPI0_OUTC_MAD (0 << 23)
# define R300_FPI0_OUTC_DP3 (1 << 23)
# define R300_FPI0_OUTC_DP4 (2 << 23)
# define R300_FPI0_OUTC_MIN (4 << 23)
# define R300_FPI0_OUTC_MAX (5 << 23)
# define R300_FPI0_OUTC_CMPH (7 << 23)
# define R300_FPI0_OUTC_CMP (8 << 23)
# define R300_FPI0_OUTC_FRC (9 << 23)
# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
# define R300_FPI0_OUTC_SAT (1 << 30)
# define R300_FPI0_INSERT_NOP (1 << 31)

#define R300_PFS_INSTR2_0 0x49C0
# define R300_FPI2_ARGA_SRC0C_X 0
# define R300_FPI2_ARGA_SRC0C_Y 1
# define R300_FPI2_ARGA_SRC0C_Z 2
# define R300_FPI2_ARGA_SRC1C_X 3
# define R300_FPI2_ARGA_SRC1C_Y 4
# define R300_FPI2_ARGA_SRC1C_Z 5
# define R300_FPI2_ARGA_SRC2C_X 6
# define R300_FPI2_ARGA_SRC2C_Y 7
# define R300_FPI2_ARGA_SRC2C_Z 8
# define R300_FPI2_ARGA_SRC0A 9
# define R300_FPI2_ARGA_SRC1A 10
# define R300_FPI2_ARGA_SRC2A 11
# define R300_FPI2_ARGA_SRC1A_LRP 15
# define R300_FPI2_ARGA_ZERO 16
# define R300_FPI2_ARGA_ONE 17
	/* GUESS */
# define R300_FPI2_ARGA_HALF 18
# define R300_FPI2_ARG0A_SHIFT 0
# define R300_FPI2_ARG0A_MASK (31 << 0)
# define R300_FPI2_ARG0A_NEG (1 << 5)
	/* GUESS */
# define R300_FPI2_ARG0A_ABS (1 << 6)
# define R300_FPI2_ARG1A_SHIFT 7
# define R300_FPI2_ARG1A_MASK (31 << 7)
# define R300_FPI2_ARG1A_NEG (1 << 12)
	/* GUESS */
# define R300_FPI2_ARG1A_ABS (1 << 13)
# define R300_FPI2_ARG2A_SHIFT 14
# define R300_FPI2_ARG2A_MASK (31 << 14)
# define R300_FPI2_ARG2A_NEG (1 << 19)
	/* GUESS */
# define R300_FPI2_ARG2A_ABS (1 << 20)
# define R300_FPI2_SPECIAL_LRP (1 << 21)
# define R300_FPI2_OUTA_MAD (0 << 23)
# define R300_FPI2_OUTA_DP4 (1 << 23)
# define R300_FPI2_OUTA_MIN (2 << 23)
# define R300_FPI2_OUTA_MAX (3 << 23)
# define R300_FPI2_OUTA_CMP (6 << 23)
# define R300_FPI2_OUTA_FRC (7 << 23)
# define R300_FPI2_OUTA_EX2 (8 << 23)
# define R300_FPI2_OUTA_LG2 (9 << 23)
# define R300_FPI2_OUTA_RCP (10 << 23)
# define R300_FPI2_OUTA_RSQ (11 << 23)
# define R300_FPI2_OUTA_SAT (1 << 30)
# define R300_FPI2_UNKNOWN_31 (1 << 31)
/* END: Fragment program instruction set */
1251
/* Fog state and color */
#define R300_RE_FOG_STATE 0x4BC0
# define R300_FOG_ENABLE (1 << 0)
# define R300_FOG_MODE_LINEAR (0 << 1)
# define R300_FOG_MODE_EXP (1 << 1)
# define R300_FOG_MODE_EXP2 (2 << 1)
# define R300_FOG_MODE_MASK (3 << 1)
#define R300_FOG_COLOR_R 0x4BC8
#define R300_FOG_COLOR_G 0x4BCC
#define R300_FOG_COLOR_B 0x4BD0

#define R300_PP_ALPHA_TEST 0x4BD4
# define R300_REF_ALPHA_MASK 0x000000ff
	/* Compare functions; note the list is ordered by GL enum pairing,
	   not by hardware value. */
# define R300_ALPHA_TEST_FAIL (0 << 8)
# define R300_ALPHA_TEST_LESS (1 << 8)
# define R300_ALPHA_TEST_LEQUAL (3 << 8)
# define R300_ALPHA_TEST_EQUAL (2 << 8)
# define R300_ALPHA_TEST_GEQUAL (6 << 8)
# define R300_ALPHA_TEST_GREATER (4 << 8)
# define R300_ALPHA_TEST_NEQUAL (5 << 8)
# define R300_ALPHA_TEST_PASS (7 << 8)
# define R300_ALPHA_TEST_OP_MASK (7 << 8)
# define R300_ALPHA_TEST_ENABLE (1 << 11)

/* gap */

/* Fragment program parameters in 7.16 floating point */
#define R300_PFS_PARAM_0_X 0x4C00
#define R300_PFS_PARAM_0_Y 0x4C04
#define R300_PFS_PARAM_0_Z 0x4C08
#define R300_PFS_PARAM_0_W 0x4C0C
/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
#define R300_PFS_PARAM_31_X 0x4DF0
#define R300_PFS_PARAM_31_Y 0x4DF4
#define R300_PFS_PARAM_31_Z 0x4DF8
#define R300_PFS_PARAM_31_W 0x4DFC

/* Notes:
 * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
 *   the application
 * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
 *   are set to the same function (both registers are always set up
 *   completely in any case)
 * - Most blend flags are simply copied from R200 and not tested yet
 */
#define R300_RB3D_CBLEND 0x4E04
#define R300_RB3D_ABLEND 0x4E08
/* the following only appear in CBLEND */
# define R300_BLEND_ENABLE (1 << 0)
# define R300_BLEND_UNKNOWN (3 << 1)
# define R300_BLEND_NO_SEPARATE (1 << 3)
/* the following are shared between CBLEND and ABLEND */
# define R300_FCN_MASK (3 << 12)
# define R300_COMB_FCN_ADD_CLAMP (0 << 12)
# define R300_COMB_FCN_ADD_NOCLAMP (1 << 12)
# define R300_COMB_FCN_SUB_CLAMP (2 << 12)
# define R300_COMB_FCN_SUB_NOCLAMP (3 << 12)
# define R300_COMB_FCN_MIN (4 << 12)
# define R300_COMB_FCN_MAX (5 << 12)
# define R300_COMB_FCN_RSUB_CLAMP (6 << 12)
# define R300_COMB_FCN_RSUB_NOCLAMP (7 << 12)
	/* Blend factors; shift by SRC_BLEND_SHIFT or DST_BLEND_SHIFT
	   to place them in the source/destination factor field. */
# define R300_BLEND_GL_ZERO (32)
# define R300_BLEND_GL_ONE (33)
# define R300_BLEND_GL_SRC_COLOR (34)
# define R300_BLEND_GL_ONE_MINUS_SRC_COLOR (35)
# define R300_BLEND_GL_DST_COLOR (36)
# define R300_BLEND_GL_ONE_MINUS_DST_COLOR (37)
# define R300_BLEND_GL_SRC_ALPHA (38)
# define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA (39)
# define R300_BLEND_GL_DST_ALPHA (40)
# define R300_BLEND_GL_ONE_MINUS_DST_ALPHA (41)
# define R300_BLEND_GL_SRC_ALPHA_SATURATE (42)
# define R300_BLEND_GL_CONST_COLOR (43)
# define R300_BLEND_GL_ONE_MINUS_CONST_COLOR (44)
# define R300_BLEND_GL_CONST_ALPHA (45)
# define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA (46)
# define R300_BLEND_MASK (63)
# define R300_SRC_BLEND_SHIFT (16)
# define R300_DST_BLEND_SHIFT (24)
#define R300_RB3D_BLEND_COLOR 0x4E10
#define R300_RB3D_COLORMASK 0x4E0C
# define R300_COLORMASK0_B (1<<0)
# define R300_COLORMASK0_G (1<<1)
# define R300_COLORMASK0_R (1<<2)
# define R300_COLORMASK0_A (1<<3)
1337
1338/* gap */
1339
#define R300_RB3D_COLOROFFSET0 0x4E28
# define R300_COLOROFFSET_MASK 0xFFFFFFF0 /* GUESS */
#define R300_RB3D_COLOROFFSET1 0x4E2C /* GUESS */
#define R300_RB3D_COLOROFFSET2 0x4E30 /* GUESS */
#define R300_RB3D_COLOROFFSET3 0x4E34 /* GUESS */

/* gap */

/* Bit 16: Larger tiles
 * Bit 17: 4x2 tiles
 * Bit 18: Extremely weird tile like, but some pixels duplicated?
 */
#define R300_RB3D_COLORPITCH0 0x4E38
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
# define R300_COLOR_FORMAT_RGB565 (2 << 22)
# define R300_COLOR_FORMAT_ARGB8888 (3 << 22)
#define R300_RB3D_COLORPITCH1 0x4E3C /* GUESS */
#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */

/* gap */

/* Guess by Vladimir.
 * Set to 0A before 3D operations, set to 02 afterwards.
 */
/*#define R300_RB3D_DSTCACHE_CTLSTAT 0x4E4C*/
# define R300_RB3D_DSTCACHE_UNKNOWN_02 0x00000002
# define R300_RB3D_DSTCACHE_UNKNOWN_0A 0x0000000A

/* gap */
/* There seems to be no "write only" setting, so use Z-test = ALWAYS
 * for this.
 * Bit (1<<8) is the "test" bit. so plain write is 6 - vd
 */
#define R300_ZB_CNTL 0x4F00
# define R300_STENCIL_ENABLE (1 << 0)
# define R300_Z_ENABLE (1 << 1)
# define R300_Z_WRITE_ENABLE (1 << 2)
# define R300_Z_SIGNED_COMPARE (1 << 3)
# define R300_STENCIL_FRONT_BACK (1 << 4)

#define R300_ZB_ZSTENCILCNTL 0x4f04
	/* functions (Z compare and stencil compare) */
# define R300_ZS_NEVER 0
# define R300_ZS_LESS 1
# define R300_ZS_LEQUAL 2
# define R300_ZS_EQUAL 3
# define R300_ZS_GEQUAL 4
# define R300_ZS_GREATER 5
# define R300_ZS_NOTEQUAL 6
# define R300_ZS_ALWAYS 7
# define R300_ZS_MASK 7
	/* operations (stencil update) */
# define R300_ZS_KEEP 0
# define R300_ZS_ZERO 1
# define R300_ZS_REPLACE 2
# define R300_ZS_INCR 3
# define R300_ZS_DECR 4
# define R300_ZS_INVERT 5
# define R300_ZS_INCR_WRAP 6
# define R300_ZS_DECR_WRAP 7
# define R300_Z_FUNC_SHIFT 0
	/* front and back refer to operations done for front
	   and back faces, i.e. separate stencil function support */
# define R300_S_FRONT_FUNC_SHIFT 3
# define R300_S_FRONT_SFAIL_OP_SHIFT 6
# define R300_S_FRONT_ZPASS_OP_SHIFT 9
# define R300_S_FRONT_ZFAIL_OP_SHIFT 12
# define R300_S_BACK_FUNC_SHIFT 15
# define R300_S_BACK_SFAIL_OP_SHIFT 18
# define R300_S_BACK_ZPASS_OP_SHIFT 21
# define R300_S_BACK_ZFAIL_OP_SHIFT 24

/* Front-face stencil reference value, compare mask and write mask.
 * The backface equivalent for R5xx is R500_ZB_STENCILREFMASK_BF below.
 */
#define R300_ZB_STENCILREFMASK 0x4f08
# define R300_STENCILREF_SHIFT 0
# define R300_STENCILREF_MASK 0x000000ff
# define R300_STENCILMASK_SHIFT 8
# define R300_STENCILMASK_MASK 0x0000ff00
# define R300_STENCILWRITEMASK_SHIFT 16
# define R300_STENCILWRITEMASK_MASK 0x00ff0000

/* gap */

#define R300_ZB_FORMAT 0x4f10
# define R300_DEPTHFORMAT_16BIT_INT_Z (0 << 0)
# define R300_DEPTHFORMAT_16BIT_13E3 (1 << 0)
# define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL (2 << 0)
/* reserved up to (15 << 0) */
# define R300_INVERT_13E3_LEADING_ONES (0 << 4)
# define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)

#define R300_ZB_ZTOP 0x4F14
# define R300_ZTOP_DISABLE (0 << 0)
# define R300_ZTOP_ENABLE (1 << 0)

/* gap */

#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT (0 << 0)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT (0 << 1)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE (1 << 1)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE (0 << 31)
# define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY (1 << 31)

#define R300_ZB_BW_CNTL 0x4f1c
# define R300_HIZ_DISABLE (0 << 0)
# define R300_HIZ_ENABLE (1 << 0)
# define R300_HIZ_MIN (0 << 1)
# define R300_HIZ_MAX (1 << 1)
# define R300_FAST_FILL_DISABLE (0 << 2)
# define R300_FAST_FILL_ENABLE (1 << 2)
# define R300_RD_COMP_DISABLE (0 << 3)
# define R300_RD_COMP_ENABLE (1 << 3)
# define R300_WR_COMP_DISABLE (0 << 4)
# define R300_WR_COMP_ENABLE (1 << 4)
# define R300_ZB_CB_CLEAR_RMW (0 << 5)
# define R300_ZB_CB_CLEAR_CACHE_LINEAR (1 << 5)
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE (0 << 6)
# define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE (1 << 6)

/* R5xx-only bits in ZB_BW_CNTL. */
# define R500_ZEQUAL_OPTIMIZE_ENABLE (0 << 7)
# define R500_ZEQUAL_OPTIMIZE_DISABLE (1 << 7)
# define R500_SEQUAL_OPTIMIZE_ENABLE (0 << 8)
# define R500_SEQUAL_OPTIMIZE_DISABLE (1 << 8)

# define R500_BMASK_ENABLE (0 << 10)
# define R500_BMASK_DISABLE (1 << 10)
# define R500_HIZ_EQUAL_REJECT_DISABLE (0 << 11)
# define R500_HIZ_EQUAL_REJECT_ENABLE (1 << 11)
# define R500_HIZ_FP_EXP_BITS_DISABLE (0 << 12)
# define R500_HIZ_FP_EXP_BITS_1 (1 << 12)
# define R500_HIZ_FP_EXP_BITS_2 (2 << 12)
# define R500_HIZ_FP_EXP_BITS_3 (3 << 12)
# define R500_HIZ_FP_EXP_BITS_4 (4 << 12)
# define R500_HIZ_FP_EXP_BITS_5 (5 << 12)
# define R500_HIZ_FP_INVERT_LEADING_ONES (0 << 15)
# define R500_HIZ_FP_INVERT_LEADING_ZEROS (1 << 15)
# define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE (0 << 16)
# define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE (1 << 16)
# define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE (0 << 17)
# define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE (1 << 17)
# define R500_PEQ_PACKING_DISABLE (0 << 18)
# define R500_PEQ_PACKING_ENABLE (1 << 18)
	/* NOTE(review): these use the same bit (18) as PEQ_PACKING above,
	   which looks like a copy-paste shift error — verify against the
	   R5xx register documentation before relying on them. */
# define R500_COVERED_PTR_MASKING_DISABLE (0 << 18)
# define R500_COVERED_PTR_MASKING_ENABLE (1 << 18)
1491
1492
1493/* gap */
1494
/* Z Buffer Address Offset.
 * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
 */
#define R300_ZB_DEPTHOFFSET 0x4f20

/* Z Buffer Pitch and Endian Control */
#define R300_ZB_DEPTHPITCH 0x4f24
# define R300_DEPTHPITCH_MASK 0x00003FFC
# define R300_DEPTHMACROTILE_DISABLE (0 << 16)
# define R300_DEPTHMACROTILE_ENABLE (1 << 16)
# define R300_DEPTHMICROTILE_LINEAR (0 << 17)
# define R300_DEPTHMICROTILE_TILED (1 << 17)
# define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
# define R300_DEPTHENDIAN_NO_SWAP (0 << 18)
# define R300_DEPTHENDIAN_WORD_SWAP (1 << 18)
# define R300_DEPTHENDIAN_DWORD_SWAP (2 << 18)
# define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)

/* Z Buffer Clear Value */
#define R300_ZB_DEPTHCLEARVALUE 0x4f28

/* Z-mask (compressed Z) buffer access registers. */
#define R300_ZB_ZMASK_OFFSET 0x4f30
#define R300_ZB_ZMASK_PITCH 0x4f34
#define R300_ZB_ZMASK_WRINDEX 0x4f38
#define R300_ZB_ZMASK_DWORD 0x4f3c
#define R300_ZB_ZMASK_RDINDEX 0x4f40

/* Hierarchical Z Memory Offset */
#define R300_ZB_HIZ_OFFSET 0x4f44

/* Hierarchical Z Write Index */
#define R300_ZB_HIZ_WRINDEX 0x4f48

/* Hierarchical Z Data */
#define R300_ZB_HIZ_DWORD 0x4f4c

/* Hierarchical Z Read Index */
#define R300_ZB_HIZ_RDINDEX 0x4f50

/* Hierarchical Z Pitch */
#define R300_ZB_HIZ_PITCH 0x4f54

/* Z Buffer Z Pass Counter Data */
#define R300_ZB_ZPASS_DATA 0x4f58

/* Z Buffer Z Pass Counter Address */
#define R300_ZB_ZPASS_ADDR 0x4f5c

/* Depth buffer X and Y coordinate offset */
#define R300_ZB_DEPTHXY_OFFSET 0x4f60
# define R300_DEPTHX_OFFSET_SHIFT 1
# define R300_DEPTHX_OFFSET_MASK 0x000007FE
# define R300_DEPTHY_OFFSET_SHIFT 17
# define R300_DEPTHY_OFFSET_MASK 0x07FE0000

/* Sets the fifo sizes.
 * ("QUATER"/"EIGTHS" misspellings are long-standing names; keep them
 * for source compatibility.)
 */
#define R500_ZB_FIFO_SIZE 0x4fd0
# define R500_OP_FIFO_SIZE_FULL (0 << 0)
# define R500_OP_FIFO_SIZE_HALF (1 << 0)
# define R500_OP_FIFO_SIZE_QUATER (2 << 0)
# define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)

/* Stencil Reference Value and Mask for backfacing quads */
/* R300_ZB_STENCILREFMASK handles front face */
#define R500_ZB_STENCILREFMASK_BF 0x4fd4
# define R500_STENCILREF_SHIFT 0
# define R500_STENCILREF_MASK 0x000000ff
# define R500_STENCILMASK_SHIFT 8
# define R500_STENCILMASK_MASK 0x0000ff00
# define R500_STENCILWRITEMASK_SHIFT 16
# define R500_STENCILWRITEMASK_MASK 0x00ff0000
1566
1567/* BEGIN: Vertex program instruction set */
1568
1569/* Every instruction is four dwords long:
1570 * DWORD 0: output and opcode
1571 * DWORD 1: first argument
1572 * DWORD 2: second argument
1573 * DWORD 3: third argument
1574 *
1575 * Notes:
1576 * - ABS r, a is implemented as MAX r, a, -a
1577 * - MOV is implemented as ADD to zero
1578 * - XPD is implemented as MUL + MAD
1579 * - FLR is implemented as FRC + ADD
1580 * - apparently, fglrx tries to schedule instructions so that there is at
1581 * least one instruction between the write to a temporary and the first
1582 * read from said temporary; however, violations of this scheduling are
1583 * allowed
1584 * - register indices seem to be unrelated with OpenGL aliasing to
1585 * conventional state
1586 * - only one attribute and one parameter can be loaded at a time; however,
1587 * the same attribute/parameter can be used for more than one argument
1588 * - the second software argument for POW is the third hardware argument
1589 * (no idea why)
1590 * - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
1591 *
1592 * There is some magic surrounding LIT:
1593 * The single argument is replicated across all three inputs, but swizzled:
1594 * First argument: xyzy
1595 * Second argument: xyzx
1596 * Third argument: xyzw
1597 * Whenever the result is used later in the fragment program, fglrx forces
1598 * x and w to be 1.0 in the input selection; I don't know whether this is
1599 * strictly necessary
1600 */
1601#define R300_VPI_OUT_OP_DOT (1 << 0)
1602#define R300_VPI_OUT_OP_MUL (2 << 0)
1603#define R300_VPI_OUT_OP_ADD (3 << 0)
1604#define R300_VPI_OUT_OP_MAD (4 << 0)
1605#define R300_VPI_OUT_OP_DST (5 << 0)
1606#define R300_VPI_OUT_OP_FRC (6 << 0)
1607#define R300_VPI_OUT_OP_MAX (7 << 0)
1608#define R300_VPI_OUT_OP_MIN (8 << 0)
1609#define R300_VPI_OUT_OP_SGE (9 << 0)
1610#define R300_VPI_OUT_OP_SLT (10 << 0)
1611 /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
1612#define R300_VPI_OUT_OP_UNK12 (12 << 0)
1613#define R300_VPI_OUT_OP_ARL (13 << 0)
1614#define R300_VPI_OUT_OP_EXP (65 << 0)
1615#define R300_VPI_OUT_OP_LOG (66 << 0)
1616 /* Used in fog computations, scalar(scalar) */
1617#define R300_VPI_OUT_OP_UNK67 (67 << 0)
1618#define R300_VPI_OUT_OP_LIT (68 << 0)
1619#define R300_VPI_OUT_OP_POW (69 << 0)
1620#define R300_VPI_OUT_OP_RCP (70 << 0)
1621#define R300_VPI_OUT_OP_RSQ (72 << 0)
1622 /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
1623#define R300_VPI_OUT_OP_UNK73 (73 << 0)
1624#define R300_VPI_OUT_OP_EX2 (75 << 0)
1625#define R300_VPI_OUT_OP_LG2 (76 << 0)
1626#define R300_VPI_OUT_OP_MAD_2 (128 << 0)
1627 /* all temps, vector(scalar, vector, vector) */
1628#define R300_VPI_OUT_OP_UNK129 (129 << 0)
1629
1630#define R300_VPI_OUT_REG_CLASS_TEMPORARY (0 << 8)
1631#define R300_VPI_OUT_REG_CLASS_ADDR (1 << 8)
1632#define R300_VPI_OUT_REG_CLASS_RESULT (2 << 8)
1633#define R300_VPI_OUT_REG_CLASS_MASK (31 << 8)
1634
1635#define R300_VPI_OUT_REG_INDEX_SHIFT 13
1636 /* GUESS based on fglrx native limits */
1637#define R300_VPI_OUT_REG_INDEX_MASK (31 << 13)
1638
1639#define R300_VPI_OUT_WRITE_X (1 << 20)
1640#define R300_VPI_OUT_WRITE_Y (1 << 21)
1641#define R300_VPI_OUT_WRITE_Z (1 << 22)
1642#define R300_VPI_OUT_WRITE_W (1 << 23)
1643
1644#define R300_VPI_IN_REG_CLASS_TEMPORARY (0 << 0)
1645#define R300_VPI_IN_REG_CLASS_ATTRIBUTE (1 << 0)
1646#define R300_VPI_IN_REG_CLASS_PARAMETER (2 << 0)
1647#define R300_VPI_IN_REG_CLASS_NONE (9 << 0)
1648#define R300_VPI_IN_REG_CLASS_MASK (31 << 0)
1649
1650#define R300_VPI_IN_REG_INDEX_SHIFT 5
1651 /* GUESS based on fglrx native limits */
1652#define R300_VPI_IN_REG_INDEX_MASK (255 << 5)
1653
1654/* The R300 can select components from the input register arbitrarily.
1655 * Use the following constants, shifted by the component shift you
1656 * want to select
1657 */
1658#define R300_VPI_IN_SELECT_X 0
1659#define R300_VPI_IN_SELECT_Y 1
1660#define R300_VPI_IN_SELECT_Z 2
1661#define R300_VPI_IN_SELECT_W 3
1662#define R300_VPI_IN_SELECT_ZERO 4
1663#define R300_VPI_IN_SELECT_ONE 5
1664#define R300_VPI_IN_SELECT_MASK 7
1665
1666#define R300_VPI_IN_X_SHIFT 13
1667#define R300_VPI_IN_Y_SHIFT 16
1668#define R300_VPI_IN_Z_SHIFT 19
1669#define R300_VPI_IN_W_SHIFT 22
1670
1671#define R300_VPI_IN_NEG_X (1 << 25)
1672#define R300_VPI_IN_NEG_Y (1 << 26)
1673#define R300_VPI_IN_NEG_Z (1 << 27)
1674#define R300_VPI_IN_NEG_W (1 << 28)
1675/* END: Vertex program instruction set */
1676
1677/* BEGIN: Packet 3 commands */
1678
1679/* A primitive emission dword. */
1680#define R300_PRIM_TYPE_NONE (0 << 0)
1681#define R300_PRIM_TYPE_POINT (1 << 0)
1682#define R300_PRIM_TYPE_LINE (2 << 0)
1683#define R300_PRIM_TYPE_LINE_STRIP (3 << 0)
1684#define R300_PRIM_TYPE_TRI_LIST (4 << 0)
1685#define R300_PRIM_TYPE_TRI_FAN (5 << 0)
1686#define R300_PRIM_TYPE_TRI_STRIP (6 << 0)
1687#define R300_PRIM_TYPE_TRI_TYPE2 (7 << 0)
1688#define R300_PRIM_TYPE_RECT_LIST (8 << 0)
1689#define R300_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
1690#define R300_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
1691 /* GUESS (based on r200) */
1692#define R300_PRIM_TYPE_POINT_SPRITES (11 << 0)
1693#define R300_PRIM_TYPE_LINE_LOOP (12 << 0)
1694#define R300_PRIM_TYPE_QUADS (13 << 0)
1695#define R300_PRIM_TYPE_QUAD_STRIP (14 << 0)
1696#define R300_PRIM_TYPE_POLYGON (15 << 0)
1697#define R300_PRIM_TYPE_MASK 0xF
1698#define R300_PRIM_WALK_IND (1 << 4)
1699#define R300_PRIM_WALK_LIST (2 << 4)
1700#define R300_PRIM_WALK_RING (3 << 4)
1701#define R300_PRIM_WALK_MASK (3 << 4)
1702 /* GUESS (based on r200) */
1703#define R300_PRIM_COLOR_ORDER_BGRA (0 << 6)
1704#define R300_PRIM_COLOR_ORDER_RGBA (1 << 6)
1705#define R300_PRIM_NUM_VERTICES_SHIFT 16
1706#define R300_PRIM_NUM_VERTICES_MASK 0xffff
1707
1708/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
1709 * Two parameter dwords:
1710 * 0. The first parameter appears to be always 0
1711 * 1. The second parameter is a standard primitive emission dword.
1712 */
1713#define R300_PACKET3_3D_DRAW_VBUF 0x00002800
1714
1715/* Specify the full set of vertex arrays as (address, stride).
1716 * The first parameter is the number of vertex arrays specified.
1717 * The rest of the command is a variable length list of blocks, where
1718 * each block is three dwords long and specifies two arrays.
1719 * The first dword of a block is split into two words, the lower significant
1720 * word refers to the first array, the more significant word to the second
1721 * array in the block.
1722 * The low byte of each word contains the size of an array entry in dwords,
1723 * the high byte contains the stride of the array.
1724 * The second dword of a block contains the pointer to the first array,
1725 * the third dword of a block contains the pointer to the second array.
1726 * Note that if the total number of arrays is odd, the third dword of
1727 * the last block is omitted.
1728 */
1729#define R300_PACKET3_3D_LOAD_VBPNTR 0x00002F00
1730
1731#define R300_PACKET3_INDX_BUFFER 0x00003300
1732# define R300_EB_UNK1_SHIFT 24
1733# define R300_EB_UNK1 (0x80<<24)
1734# define R300_EB_UNK2 0x0810
1735#define R300_PACKET3_3D_DRAW_VBUF_2 0x00003400
1736#define R300_PACKET3_3D_DRAW_INDX_2 0x00003600
1737
1738/* END: Packet 3 commands */
1739
1740
1741/* Color formats for 2d packets
1742 */
1743#define R300_CP_COLOR_FORMAT_CI8 2
1744#define R300_CP_COLOR_FORMAT_ARGB1555 3
1745#define R300_CP_COLOR_FORMAT_RGB565 4
1746#define R300_CP_COLOR_FORMAT_ARGB8888 6
1747#define R300_CP_COLOR_FORMAT_RGB332 7
1748#define R300_CP_COLOR_FORMAT_RGB8 9
1749#define R300_CP_COLOR_FORMAT_ARGB4444 15
1750
1751/*
1752 * CP type-3 packets
1753 */
1754#define R300_CP_CMD_BITBLT_MULTI 0xC0009B00
1755
1756#define R500_VAP_INDEX_OFFSET 0x208c
1757
1758#define R500_GA_US_VECTOR_INDEX 0x4250
1759#define R500_GA_US_VECTOR_DATA 0x4254
1760
1761#define R500_RS_IP_0 0x4074
1762#define R500_RS_INST_0 0x4320
1763
1764#define R500_US_CONFIG 0x4600
1765
1766#define R500_US_FC_CTRL 0x4624
1767#define R500_US_CODE_ADDR 0x4630
1768
1769#define R500_RB3D_COLOR_CLEAR_VALUE_AR 0x46c0
1770#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8
1771
1772#endif /* _R300_REG_H */
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
new file mode 100644
index 000000000000..e53158f0ecb5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -0,0 +1,1773 @@
1/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
2/*
3 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * Copyright 2007 Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Kevin E. Martin <martin@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
30 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "radeon_drm.h"
35#include "radeon_drv.h"
36#include "r300_reg.h"
37
38#include "radeon_microcode.h"
39
40#define RADEON_FIFO_DEBUG 0
41
42static int radeon_do_cleanup_cp(struct drm_device * dev);
43
44static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
45{
46 u32 ret;
47 RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
48 ret = RADEON_READ(R520_MC_IND_DATA);
49 RADEON_WRITE(R520_MC_IND_INDEX, 0);
50 return ret;
51}
52
53static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
54{
55 u32 ret;
56 RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
57 ret = RADEON_READ(RS480_NB_MC_DATA);
58 RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
59 return ret;
60}
61
62static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
63{
64 u32 ret;
65 RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
66 ret = RADEON_READ(RS690_MC_DATA);
67 RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
68 return ret;
69}
70
71static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
72{
73 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
74 return RS690_READ_MCIND(dev_priv, addr);
75 else
76 return RS480_READ_MCIND(dev_priv, addr);
77}
78
79u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
80{
81
82 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
83 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
84 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
85 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
86 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
87 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
88 else
89 return RADEON_READ(RADEON_MC_FB_LOCATION);
90}
91
92static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
93{
94 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
95 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
96 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
97 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
98 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
99 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
100 else
101 RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
102}
103
104static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
105{
106 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
107 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
108 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
109 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
110 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
111 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
112 else
113 RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
114}
115
116static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
117{
118 u32 agp_base_hi = upper_32_bits(agp_base);
119 u32 agp_base_lo = agp_base & 0xffffffff;
120
121 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
122 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
123 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
124 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
125 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
126 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
127 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
128 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
129 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
130 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
131 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
132 RADEON_WRITE(RS480_AGP_BASE_2, 0);
133 } else {
134 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
135 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
136 RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
137 }
138}
139
140static int RADEON_READ_PLL(struct drm_device * dev, int addr)
141{
142 drm_radeon_private_t *dev_priv = dev->dev_private;
143
144 RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
145 return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
146}
147
148static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
149{
150 RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
151 return RADEON_READ(RADEON_PCIE_DATA);
152}
153
#if RADEON_FIFO_DEBUG
/* Dump engine/FIFO status registers for debugging (compiled only when
 * RADEON_FIFO_DEBUG is enabled).
 *
 * Fix: the RPTR/WPTR labels were misspelled "RTPR"/"WTPR", which made
 * the dump hard to grep against the real register names.
 */
static void radeon_status(drm_radeon_private_t * dev_priv)
{
	printk("%s:\n", __func__);
	printk("RBBM_STATUS = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
	printk("CP_RB_RPTR = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
	printk("CP_RB_WPTR = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
	printk("AIC_CNTL = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
	printk("AIC_STAT = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_STAT));
	printk("AIC_PT_BASE = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
	printk("TLB_ADDR = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
	printk("TLB_DATA = 0x%08x\n",
	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
}
#endif
176
177/* ================================================================
178 * Engine, FIFO control
179 */
180
/* Flush the destination (pixel) caches and poll until the flush
 * completes.  Pre-R300 parts (<= RV280) have a single 3D destination
 * cache; R300 and newer have separate 3D and 2D caches, both flushed
 * here.  Returns 0 on success, -EBUSY if the cache stays busy for the
 * whole usec_timeout polling window.
 */
static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
{
	u32 tmp;
	int i;

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
		tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
		RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);

		for (i = 0; i < dev_priv->usec_timeout; i++) {
			if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
			      & RADEON_RB3D_DC_BUSY)) {
				return 0;
			}
			DRM_UDELAY(1);
		}
	} else {
		/* 3D */
		tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
		RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);

		/* 2D */
		tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT);
		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
		RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp);

		/* NOTE(review): only the 2D cache status is polled below;
		 * presumably the 3D flush completes within the same window.
		 */
		for (i = 0; i < dev_priv->usec_timeout; i++) {
			if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT)
			      & RADEON_RB3D_DC_BUSY)) {
				return 0;
			}
			DRM_UDELAY(1);
		}
	}

#if RADEON_FIFO_DEBUG
	DRM_ERROR("failed!\n");
	radeon_status(dev_priv);
#endif
	return -EBUSY;
}
226
227static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
228{
229 int i;
230
231 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
232
233 for (i = 0; i < dev_priv->usec_timeout; i++) {
234 int slots = (RADEON_READ(RADEON_RBBM_STATUS)
235 & RADEON_RBBM_FIFOCNT_MASK);
236 if (slots >= entries)
237 return 0;
238 DRM_UDELAY(1);
239 }
240
241#if RADEON_FIFO_DEBUG
242 DRM_ERROR("failed!\n");
243 radeon_status(dev_priv);
244#endif
245 return -EBUSY;
246}
247
/* Wait for the graphics engine to go fully idle.
 *
 * First drains the command FIFO (64 free slots), then polls RBBM_STATUS
 * until the engine-active bit clears; on success the destination caches
 * are flushed as well.  Returns 0 when idle, -EBUSY on timeout.
 */
static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
{
	int i, ret;

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	ret = radeon_do_wait_for_fifo(dev_priv, 64);
	if (ret)
		return ret;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(RADEON_READ(RADEON_RBBM_STATUS)
		      & RADEON_RBBM_ACTIVE)) {
			radeon_do_pixcache_flush(dev_priv);
			return 0;
		}
		DRM_UDELAY(1);
	}

#if RADEON_FIFO_DEBUG
	DRM_ERROR("failed!\n");
	radeon_status(dev_priv);
#endif
	return -EBUSY;
}
273
/* Determine the number of graphics backend (GB) pipes and program the
 * tile/pipe configuration accordingly (called for R300 and newer).
 */
static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
{
	uint32_t gb_tile_config, gb_pipe_sel = 0;

	/* RS4xx/RS6xx/R4xx/R5xx report the pipe count in GB_PIPE_SELECT */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
		gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
		dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
	} else {
		/* R3xx: pipe count is fixed per chip */
		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
			dev_priv->num_gb_pipes = 2;
		} else {
			/* R3Vxx */
			dev_priv->num_gb_pipes = 1;
		}
	}
	DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);

	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);

	/* Translate the pipe count into the TILE_CONFIG pipe-count field */
	switch (dev_priv->num_gb_pipes) {
	case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
	case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
	case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
	default:
	case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
	}

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
		RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
	}
	RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
	radeon_do_wait_for_idle(dev_priv);
	RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
	RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
					       R300_DC_AUTOFLUSH_ENABLE |
					       R300_DC_DC_DISABLE_IGNORE_PE));
}
317
318/* ================================================================
319 * CP control, initialization
320 */
321
322/* Load the microcode for the CP */
323static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
324{
325 int i;
326 DRM_DEBUG("\n");
327
328 radeon_do_wait_for_idle(dev_priv);
329
330 RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
331 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
332 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
333 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
334 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
335 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
336 DRM_INFO("Loading R100 Microcode\n");
337 for (i = 0; i < 256; i++) {
338 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
339 R100_cp_microcode[i][1]);
340 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
341 R100_cp_microcode[i][0]);
342 }
343 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
344 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
345 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
346 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
347 DRM_INFO("Loading R200 Microcode\n");
348 for (i = 0; i < 256; i++) {
349 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
350 R200_cp_microcode[i][1]);
351 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
352 R200_cp_microcode[i][0]);
353 }
354 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
355 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
356 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
357 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
358 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
359 DRM_INFO("Loading R300 Microcode\n");
360 for (i = 0; i < 256; i++) {
361 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
362 R300_cp_microcode[i][1]);
363 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
364 R300_cp_microcode[i][0]);
365 }
366 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
367 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
368 DRM_INFO("Loading R400 Microcode\n");
369 for (i = 0; i < 256; i++) {
370 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
371 R420_cp_microcode[i][1]);
372 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
373 R420_cp_microcode[i][0]);
374 }
375 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
376 DRM_INFO("Loading RS690 Microcode\n");
377 for (i = 0; i < 256; i++) {
378 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
379 RS690_cp_microcode[i][1]);
380 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
381 RS690_cp_microcode[i][0]);
382 }
383 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
384 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
385 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
386 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
387 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
388 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
389 DRM_INFO("Loading R500 Microcode\n");
390 for (i = 0; i < 256; i++) {
391 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
392 R520_cp_microcode[i][1]);
393 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
394 R520_cp_microcode[i][0]);
395 }
396 }
397}
398
/* Flush any pending commands to the CP. This should only be used just
 * prior to a wait for idle, as it informs the engine that the command
 * stream is ending.
 *
 * Currently a no-op aside from the debug trace: the write-pointer kick
 * below has been disabled (#if 0) and is kept for reference.
 */
static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
{
	DRM_DEBUG("\n");
#if 0
	u32 tmp;

	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
	RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
#endif
}
413
/* Wait for the CP to go idle.
 *
 * Emits cache/z-cache purges and a wait-until-idle into the ring, kicks
 * the ring, then busy-waits on the engine itself.  Returns 0 when idle,
 * or the -EBUSY timeout status from radeon_do_wait_for_idle().
 */
int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(6);

	RADEON_PURGE_CACHE();
	RADEON_PURGE_ZCACHE();
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
	COMMIT_RING();

	return radeon_do_wait_for_idle(dev_priv);
}
432
/* Start the Command Processor.
 *
 * Waits for the engine to idle, enables the CP by programming the
 * configured queue mode into CSQ_CNTL, then primes the ring with cache
 * purges and a wait-until-idle.
 */
static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	radeon_do_wait_for_idle(dev_priv);

	RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);

	dev_priv->cp_running = 1;

	BEGIN_RING(6);

	RADEON_PURGE_CACHE();
	RADEON_PURGE_ZCACHE();
	RADEON_WAIT_UNTIL_IDLE();

	ADVANCE_RING();
	COMMIT_RING();
}
455
/* Reset the Command Processor. This will not flush any pending
 * commands, so you must wait for the CP command stream to complete
 * before calling this routine.
 *
 * Empties the ring by pointing the write pointer (and the software
 * head/tail mirrors) at the current hardware read pointer.
 */
static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
{
	u32 cur_read_ptr;
	DRM_DEBUG("\n");

	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
	SET_RING_HEAD(dev_priv, cur_read_ptr);
	dev_priv->ring.tail = cur_read_ptr;
}
470
471/* Stop the Command Processor. This will not flush any pending
472 * commands, so you must flush the command stream and wait for the CP
473 * to go idle before calling this routine.
474 */
475static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
476{
477 DRM_DEBUG("\n");
478
479 RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
480
481 dev_priv->cp_running = 0;
482}
483
/* Reset the engine. This will stop the CP if it is running.
 *
 * On pre-R500 parts (<= RV410) the memory-related clocks are forced on
 * around the soft reset and the clock/PLL state restored afterwards.
 * Always returns 0.
 */
static int radeon_do_engine_reset(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
	DRM_DEBUG("\n");

	radeon_do_pixcache_flush(dev_priv);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* may need something similar for newer chips */
		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);

		/* Force the memory clocks on while the blocks are reset */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
						    RADEON_FORCEON_MCLKA |
						    RADEON_FORCEON_MCLKB |
						    RADEON_FORCEON_YCLKA |
						    RADEON_FORCEON_YCLKB |
						    RADEON_FORCEON_MC |
						    RADEON_FORCEON_AIC));
	}

	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);

	/* Pulse the soft-reset bits for the engine blocks: assert, read
	 * back (presumably to flush the posted write), deassert, read back.
	 */
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
					      RADEON_SOFT_RESET_CP |
					      RADEON_SOFT_RESET_HI |
					      RADEON_SOFT_RESET_SE |
					      RADEON_SOFT_RESET_RE |
					      RADEON_SOFT_RESET_PP |
					      RADEON_SOFT_RESET_E2 |
					      RADEON_SOFT_RESET_RB));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);
	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
					      ~(RADEON_SOFT_RESET_CP |
						RADEON_SOFT_RESET_HI |
						RADEON_SOFT_RESET_SE |
						RADEON_SOFT_RESET_RE |
						RADEON_SOFT_RESET_PP |
						RADEON_SOFT_RESET_E2 |
						RADEON_SOFT_RESET_RB)));
	RADEON_READ(RADEON_RBBM_SOFT_RESET);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
		/* Restore the clock/PLL state saved above */
		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
	}

	/* setup the raster pipes */
	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
		radeon_init_pipes(dev_priv);

	/* Reset the CP ring */
	radeon_do_cp_reset(dev_priv);

	/* The CP is no longer running after an engine reset */
	dev_priv->cp_running = 0;

	/* Reset any pending vertex, indirect buffers */
	radeon_freelist_reset(dev);

	return 0;
}
550
/* Program the CP ring buffer: base address, read/write pointers, ring
 * size, scratch-register writeback, and bus mastering.  Handles both
 * the AGP and the scatter/gather (PCI GART) configurations.
 */
static void radeon_cp_init_ring_buffer(struct drm_device * dev,
				       drm_radeon_private_t * dev_priv)
{
	u32 ring_start, cur_read_ptr;
	u32 tmp;

	/* Initialize the memory controller. With new memory map, the fb location
	 * is not changed, it should have been properly initialized already. Part
	 * of the problem is that the code below is bogus, assuming the GART is
	 * always appended to the fb which is not necessarily the case
	 */
	if (!dev_priv->new_memmap)
		radeon_write_fb_location(dev_priv,
					 ((dev_priv->gart_vm_start - 1) & 0xffff0000)
					 | (dev_priv->fb_location >> 16));

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		radeon_write_agp_base(dev_priv, dev->agp->base);

		radeon_write_agp_location(dev_priv,
					  (((dev_priv->gart_vm_start - 1 +
					     dev_priv->gart_size) & 0xffff0000) |
					   (dev_priv->gart_vm_start >> 16)));

		/* Ring base expressed in the card's GART address space */
		ring_start = (dev_priv->cp_ring->offset
			      - dev->agp->base
			      + dev_priv->gart_vm_start);
	} else
#endif
		ring_start = (dev_priv->cp_ring->offset
			      - (unsigned long)dev->sg->virtual
			      + dev_priv->gart_vm_start);

	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);

	/* Set the write pointer delay */
	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
	SET_RING_HEAD(dev_priv, cur_read_ptr);
	dev_priv->ring.tail = cur_read_ptr;

	/* Point the hardware at the memory the read pointer is written
	 * back to: directly in GART space for AGP, or via the SG bus
	 * address of the page holding the rptr area for PCI.
	 */
#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
			     dev_priv->ring_rptr->offset
			     - dev->agp->base + dev_priv->gart_vm_start);
	} else
#endif
	{
		struct drm_sg_mem *entry = dev->sg;
		unsigned long tmp_ofs, page_ofs;

		tmp_ofs = dev_priv->ring_rptr->offset -
			  (unsigned long)dev->sg->virtual;
		page_ofs = tmp_ofs >> PAGE_SHIFT;

		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
		DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
			  (unsigned long)entry->busaddr[page_ofs],
			  entry->handle + tmp_ofs);
	}

	/* Set ring buffer size */
#ifdef __BIG_ENDIAN
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     RADEON_BUF_SWAP_32BIT |
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#else
	RADEON_WRITE(RADEON_CP_RB_CNTL,
		     (dev_priv->ring.fetch_size_l2ow << 18) |
		     (dev_priv->ring.rptr_update_l2qw << 8) |
		     dev_priv->ring.size_l2qw);
#endif

	/* Start with assuming that writeback doesn't work */
	dev_priv->writeback_works = 0;

	/* Initialize the scratch register pointer. This will cause
	 * the scratch register values to be written out to memory
	 * whenever they are updated.
	 *
	 * We simply put this behind the ring read pointer, this works
	 * with PCI GART as well as (whatever kind of) AGP GART
	 */
	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
		     + RADEON_SCRATCH_REG_OFFSET);

	dev_priv->scratch = ((__volatile__ u32 *)
			     dev_priv->ring_rptr->handle +
			     (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));

	/* Mask 0x7: writeback enabled for scratch registers 0-2 (the age
	 * counters zeroed below).
	 */
	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);

	/* Turn on bus mastering */
	tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	RADEON_WRITE(RADEON_BUS_CNTL, tmp);

	/* Zero the frame/dispatch/clear age counters shared via the SAREA */
	dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
	RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);

	dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
	RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
		     dev_priv->sarea_priv->last_dispatch);

	dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
	RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);

	radeon_do_wait_for_idle(dev_priv);

	/* Sync everything up */
	RADEON_WRITE(RADEON_ISYNC_CNTL,
		     (RADEON_ISYNC_ANY2D_IDLE3D |
		      RADEON_ISYNC_ANY3D_IDLE2D |
		      RADEON_ISYNC_WAIT_IDLEGUI |
		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
}
674
675static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
676{
677 u32 tmp;
678
679 /* Writeback doesn't seem to work everywhere, test it here and possibly
680 * enable it if it appears to work
681 */
682 DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
683 RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
684
685 for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
686 if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
687 0xdeadbeef)
688 break;
689 DRM_UDELAY(1);
690 }
691
692 if (tmp < dev_priv->usec_timeout) {
693 dev_priv->writeback_works = 1;
694 DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
695 } else {
696 dev_priv->writeback_works = 0;
697 DRM_INFO("writeback test failed\n");
698 }
699 if (radeon_no_wb == 1) {
700 dev_priv->writeback_works = 0;
701 DRM_INFO("writeback forced off\n");
702 }
703
704 if (!dev_priv->writeback_works) {
705 /* Disable writeback to avoid unnecessary bus master transfer */
706 RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
707 RADEON_RB_NO_UPDATE);
708 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
709 }
710}
711
/* Enable or disable IGP GART on the chip.
 *
 * When enabling: programs the GART page-table base, a fixed 32MB
 * aperture, snoop mode and TLB settings through the indirect MC
 * registers, then invalidates the GART cache.  When disabling: zeroes
 * the GART address-space size.
 */
static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
{
	u32 temp;

	if (on) {
		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);

		/* NOTE(review): several MC registers are read before being
		 * overwritten and the read values discarded; presumably
		 * ordering/flush reads — confirm against the IGP docs.
		 */
		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
							     RS690_BLOCK_GFX_D3_EN));
		else
			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);

		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
							RS480_TLB_ENABLE |
							RS480_GTW_LAC_EN |
							RS480_1LEVEL_GART));

		/* Page-table base: low bits of the bus address, plus the
		 * upper-dword byte packed into bits 4-11.
		 */
		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
		IGP_WRITE_MCIND(RS480_GART_BASE, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
						      RS480_REQ_TYPE_SNOOP_DIS));

		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);

		/* Aperture is fixed at 32MB (matches VA_SIZE_32MB above) */
		dev_priv->gart_size = 32*1024*1024;
		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
			 0xffff0000) | (dev_priv->gart_vm_start >> 16));

		radeon_write_agp_location(dev_priv, temp);

		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
							       RS480_VA_SIZE_32MB));

		/* Wait for any pending GART cache invalidate to clear, kick a
		 * new one, wait again, then clear the control register.
		 * NOTE(review): these polls have no timeout and would spin
		 * forever on wedged hardware.
		 */
		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while (1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
				RS480_GART_CACHE_INVALIDATE);

		do {
			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
				break;
			DRM_UDELAY(1);
		} while (1);

		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
	} else {
		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
	}
}
781
/* Enable or disable the PCIE translation-table GART.  When enabling,
 * programs the discard address, table base and the translated address
 * window before switching the enable bit on; when disabling, only the
 * enable bit is cleared (other registers are left as-is).
 */
static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
{
	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
	if (on) {

		DRM_DEBUG("programming pcie %08X %08lX %08X\n",
			  dev_priv->gart_vm_start,
			  (long)dev_priv->gart_info.bus_addr,
			  dev_priv->gart_size);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
				  dev_priv->gart_vm_start);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
				  dev_priv->gart_info.bus_addr);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
				  dev_priv->gart_vm_start);
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
				  dev_priv->gart_vm_start +
				  dev_priv->gart_size - 1);

		/* Effectively disable the AGP aperture; the magic value
		 * predates this driver's history (original comment: ??).
		 */
		radeon_write_agp_location(dev_priv, 0xffffffc0);	/* ?? */

		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
				  RADEON_PCIE_TX_GART_EN);
	} else {
		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
				  tmp & ~RADEON_PCIE_TX_GART_EN);
	}
}
810
/* Enable or disable PCI GART on the chip */
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
	u32 tmp;

	/* RS690 and IGP-GART parts use the MC-indirect IGP path. */
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
	    (dev_priv->flags & RADEON_IS_IGPGART)) {
		radeon_set_igpgart(dev_priv, on);
		return;
	}

	if (dev_priv->flags & RADEON_IS_PCIE) {
		radeon_set_pciegart(dev_priv, on);
		return;
	}

	/* Plain PCI GART: toggle the translate enable bit in AIC_CNTL and,
	 * when enabling, program the page table base and address window.
	 */
	tmp = RADEON_READ(RADEON_AIC_CNTL);

	if (on) {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp | RADEON_PCIGART_TRANSLATE_EN);

		/* set PCI GART page-table base address
		 */
		RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);

		/* set address range for PCI address translate
		 */
		RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
		RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
			     + dev_priv->gart_size - 1);

		/* Turn off AGP aperture -- is this required for PCI GART?
		 */
		radeon_write_agp_location(dev_priv, 0xffffffc0);
		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
	} else {
		RADEON_WRITE(RADEON_AIC_CNTL,
			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	}
}
852
/* One-time CP initialisation for the RADEON_INIT_*_CP ioctls.
 *
 * Validates the userspace-supplied parameters, resolves all required maps
 * (SAREA, ring, ring read pointer, DMA buffers, optional GART textures),
 * computes the card's memory map, sets up the GART, loads the CP microcode
 * and starts the ring.  On any failure the partially constructed state is
 * torn down via radeon_do_cleanup_cp() before returning a negative errno.
 */
static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	/* if we require new memory map but we don't have it fail */
	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Reconcile the bus-type flags with what userspace asked for:
	 * an AGP card may be forced into PCI mode, and the AGP flag is
	 * restored when no bus flag is set and PCI was not requested.
	 */
	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
		DRM_DEBUG("Forcing AGP card to PCI mode\n");
		dev_priv->flags &= ~RADEON_IS_AGP;
	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
		   && !init->is_pci) {
		DRM_DEBUG("Restoring AGP flag\n");
		dev_priv->flags |= RADEON_IS_AGP;
	}

	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
		DRM_ERROR("PCI GART memory not allocated!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->usec_timeout = init->usec_timeout;
	if (dev_priv->usec_timeout < 1 ||
	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
		DRM_DEBUG("TIMEOUT problem!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Enable vblank on CRTC1 for older X servers
	 */
	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;

	/* Pick the microcode family from the init ioctl variant. */
	switch(init->func) {
	case RADEON_INIT_R200_CP:
		dev_priv->microcode_version = UCODE_R200;
		break;
	case RADEON_INIT_R300_CP:
		dev_priv->microcode_version = UCODE_R300;
		break;
	default:
		dev_priv->microcode_version = UCODE_R100;
	}

	dev_priv->do_boxes = 0;
	dev_priv->cp_mode = init->cp_mode;

	/* We don't support anything other than bus-mastering ring mode,
	 * but the ring can be in either AGP or PCI space for the ring
	 * read pointer.
	 */
	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	/* Colour and depth formats: anything other than 16bpp falls back
	 * to the 32-bit format.
	 */
	switch (init->fb_bpp) {
	case 16:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
		break;
	case 32:
	default:
		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
		break;
	}
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	switch (init->depth_bpp) {
	case 16:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
		break;
	case 32:
	default:
		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
		break;
	}
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* Hardware state for depth clears.  Remove this if/when we no
	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
	 * all values to prevent unwanted 3D state from slipping through
	 * and screwing with the clear operation.
	 */
	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
					   (dev_priv->color_fmt << 10) |
					   (dev_priv->microcode_version ==
					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));

	dev_priv->depth_clear.rb3d_zstencilcntl =
	    (dev_priv->depth_fmt |
	     RADEON_Z_TEST_ALWAYS |
	     RADEON_STENCIL_TEST_ALWAYS |
	     RADEON_STENCIL_S_FAIL_REPLACE |
	     RADEON_STENCIL_ZPASS_REPLACE |
	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);

	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
					 RADEON_BFACE_SOLID |
					 RADEON_FFACE_SOLID |
					 RADEON_FLAT_SHADE_VTX_LAST |
					 RADEON_DIFFUSE_SHADE_FLAT |
					 RADEON_ALPHA_SHADE_FLAT |
					 RADEON_SPECULAR_SHADE_FLAT |
					 RADEON_FOG_SHADE_FLAT |
					 RADEON_VTX_PIX_CENTER_OGL |
					 RADEON_ROUND_MODE_TRUNC |
					 RADEON_ROUND_PREC_8TH_PIX);


	dev_priv->ring_offset = init->ring_offset;
	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
	dev_priv->buffers_offset = init->buffers_offset;
	dev_priv->gart_textures_offset = init->gart_textures_offset;

	/* Resolve every map handle userspace gave us; each failure tears
	 * down whatever was set up so far.
	 */
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
	if (!dev_priv->cp_ring) {
		DRM_ERROR("could not find cp ring region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
	if (!dev_priv->ring_rptr) {
		DRM_ERROR("could not find ring read pointer!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
		radeon_do_cleanup_cp(dev);
		return -EINVAL;
	}

	if (init->gart_textures_offset) {
		dev_priv->gart_textures =
		    drm_core_findmap(dev, init->gart_textures_offset);
		if (!dev_priv->gart_textures) {
			DRM_ERROR("could not find GART texture region!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	}

	dev_priv->sarea_priv =
	    (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* AGP maps must be ioremapped into kernel space. */
		drm_core_ioremap(dev_priv->cp_ring, dev);
		drm_core_ioremap(dev_priv->ring_rptr, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev_priv->cp_ring->handle ||
		    !dev_priv->ring_rptr->handle ||
		    !dev->agp_buffer_map->handle) {
			DRM_ERROR("could not find ioremap agp regions!\n");
			radeon_do_cleanup_cp(dev);
			return -EINVAL;
		}
	} else
#endif
	{
		/* Non-AGP maps are already addressable; the map offset is
		 * used directly as the kernel handle.
		 */
		dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
		dev_priv->ring_rptr->handle =
		    (void *)dev_priv->ring_rptr->offset;
		dev->agp_buffer_map->handle =
		    (void *)dev->agp_buffer_map->offset;

		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
			  dev_priv->cp_ring->handle);
		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
			  dev_priv->ring_rptr->handle);
		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
			  dev->agp_buffer_map->handle);
	}

	/* Framebuffer location/size come from the MC registers: low 16
	 * bits are the start, high 16 bits the end, both in 64K units.
	 */
	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
	dev_priv->fb_size =
		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
		- dev_priv->fb_location;

	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
					((dev_priv->front_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
				       ((dev_priv->back_offset
					 + dev_priv->fb_location) >> 10));

	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
					((dev_priv->depth_offset
					  + dev_priv->fb_location) >> 10));

	dev_priv->gart_size = init->gart_size;

	/* New let's set the memory map ... */
	if (dev_priv->new_memmap) {
		u32 base = 0;

		DRM_INFO("Setting GART location based on new memory map\n");

		/* If using AGP, try to locate the AGP aperture at the same
		 * location in the card and on the bus, though we have to
		 * align it down.
		 */
#if __OS_HAS_AGP
		if (dev_priv->flags & RADEON_IS_AGP) {
			base = dev->agp->base;
			/* Check if valid */
			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
					 dev->agp->base);
				base = 0;
			}
		}
#endif
		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
		if (base == 0) {
			base = dev_priv->fb_location + dev_priv->fb_size;
			/* On 32-bit wrap-around, fall back to placing the
			 * GART below the framebuffer instead of above it.
			 */
			if (base < dev_priv->fb_location ||
			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
				base = dev_priv->fb_location
					- dev_priv->gart_size;
		}
		dev_priv->gart_vm_start = base & 0xffc00000u;
		if (dev_priv->gart_vm_start != base)
			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
				 base, dev_priv->gart_vm_start);
	} else {
		DRM_INFO("Setting GART location based on old memory map\n");
		dev_priv->gart_vm_start = dev_priv->fb_location +
			RADEON_READ(RADEON_CONFIG_APER_SIZE);
	}

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP)
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - dev->agp->base
						 + dev_priv->gart_vm_start);
	else
#endif
		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
						 - (unsigned long)dev->sg->virtual
						 + dev_priv->gart_vm_start);

	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
		  dev_priv->gart_buffers_offset);

	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
			      + init->ring_size / sizeof(u32));
	dev_priv->ring.size = init->ring_size;
	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);

	/* rptr_update and fetch_size are hard-coded; the init-struct
	 * fields of the same name are deliberately ignored.
	 */
	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);

	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;

	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
		/* if we have an offset set from userspace */
		if (dev_priv->pcigart_offset_set) {
			/* GART table lives in framebuffer memory at the
			 * userspace-provided offset.
			 */
			dev_priv->gart_info.bus_addr =
				dev_priv->pcigart_offset + dev_priv->fb_location;
			dev_priv->gart_info.mapping.offset =
				dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
			dev_priv->gart_info.mapping.size =
				dev_priv->gart_info.table_size;

			drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr =
				dev_priv->gart_info.mapping.handle;

			if (dev_priv->flags & RADEON_IS_PCIE)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
				DRM_ATI_GART_FB;

			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
				  dev_priv->gart_info.addr,
				  dev_priv->pcigart_offset);
		} else {
			/* GART table in main (system) memory; PCIE parts
			 * require the table in framebuffer memory.
			 */
			if (dev_priv->flags & RADEON_IS_IGPGART)
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
			else
				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
			dev_priv->gart_info.gart_table_location =
				DRM_ATI_GART_MAIN;
			dev_priv->gart_info.addr = NULL;
			dev_priv->gart_info.bus_addr = 0;
			if (dev_priv->flags & RADEON_IS_PCIE) {
				DRM_ERROR
				    ("Cannot use PCI Express without GART in FB memory\n");
				radeon_do_cleanup_cp(dev);
				return -EINVAL;
			}
		}

		/* drm_ati_pcigart_init() returns nonzero on success in
		 * this DRM version, hence the negated check.
		 */
		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
			DRM_ERROR("failed to init PCI GART!\n");
			radeon_do_cleanup_cp(dev);
			return -ENOMEM;
		}

		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	dev_priv->last_buf = 0;

	radeon_do_engine_reset(dev);
	radeon_test_writeback(dev_priv);

	return 0;
}
1208
/* Tear down all CP state built by radeon_do_init_cp(): uninstall the IRQ
 * handler, release AGP ioremaps (or shut down and free the PCI GART), and
 * wipe the private structure up to (but not including) the flags field so
 * the bus-type flags survive for a later re-init.  Safe to call on a
 * partially initialised device; always returns 0.
 */
static int radeon_do_cleanup_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		if (dev_priv->cp_ring != NULL) {
			drm_core_ioremapfree(dev_priv->cp_ring, dev);
			dev_priv->cp_ring = NULL;
		}
		if (dev_priv->ring_rptr != NULL) {
			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
			dev_priv->ring_rptr = NULL;
		}
		if (dev->agp_buffer_map != NULL) {
			drm_core_ioremapfree(dev->agp_buffer_map, dev);
			dev->agp_buffer_map = NULL;
		}
	} else
#endif
	{

		if (dev_priv->gart_info.bus_addr) {
			/* Turn off PCI GART */
			radeon_set_pcigart(dev_priv, 0);
			/* Nonzero return means success in this DRM version. */
			if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
				DRM_ERROR("failed to cleanup PCI GART!\n");
		}

		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
		{
			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
			dev_priv->gart_info.addr = 0;
		}
	}
	/* only clear to the start of flags */
	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));

	return 0;
}
1257
/* This code will reinit the Radeon CP hardware after a resume from disc.
 * AFAIK, it would be very difficult to pickle the state at suspend time, so
 * here we make sure that all Radeon hardware initialisation is re-done without
 * affecting running applications.
 *
 * Charl P. Botha <http://cpbotha.net>
 */
static int radeon_do_resume_cp(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	/* Nothing to resume if the CP was never initialised. */
	if (!dev_priv) {
		DRM_ERROR("Called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("Starting radeon_do_resume_cp()\n");

#if __OS_HAS_AGP
	if (dev_priv->flags & RADEON_IS_AGP) {
		/* Turn off PCI GART */
		radeon_set_pcigart(dev_priv, 0);
	} else
#endif
	{
		/* Turn on PCI GART */
		radeon_set_pcigart(dev_priv, 1);
	}

	/* Same hardware bring-up sequence as the init path. */
	radeon_cp_load_microcode(dev_priv);
	radeon_cp_init_ring_buffer(dev, dev_priv);

	radeon_do_engine_reset(dev);
	radeon_enable_interrupt(dev);

	DRM_DEBUG("radeon_do_resume_cp() complete\n");

	return 0;
}
1297
1298int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
1299{
1300 drm_radeon_init_t *init = data;
1301
1302 LOCK_TEST_WITH_RETURN(dev, file_priv);
1303
1304 if (init->func == RADEON_INIT_R300_CP)
1305 r300_init_reg_flags(dev);
1306
1307 switch (init->func) {
1308 case RADEON_INIT_CP:
1309 case RADEON_INIT_R200_CP:
1310 case RADEON_INIT_R300_CP:
1311 return radeon_do_init_cp(dev, init);
1312 case RADEON_CLEANUP_CP:
1313 return radeon_do_cleanup_cp(dev);
1314 }
1315
1316 return -EINVAL;
1317}
1318
1319int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
1320{
1321 drm_radeon_private_t *dev_priv = dev->dev_private;
1322 DRM_DEBUG("\n");
1323
1324 LOCK_TEST_WITH_RETURN(dev, file_priv);
1325
1326 if (dev_priv->cp_running) {
1327 DRM_DEBUG("while CP running\n");
1328 return 0;
1329 }
1330 if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
1331 DRM_DEBUG("called with bogus CP mode (%d)\n",
1332 dev_priv->cp_mode);
1333 return 0;
1334 }
1335
1336 radeon_do_cp_start(dev_priv);
1337
1338 return 0;
1339}
1340
1341/* Stop the CP. The engine must have been idled before calling this
1342 * routine.
1343 */
1344int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
1345{
1346 drm_radeon_private_t *dev_priv = dev->dev_private;
1347 drm_radeon_cp_stop_t *stop = data;
1348 int ret;
1349 DRM_DEBUG("\n");
1350
1351 LOCK_TEST_WITH_RETURN(dev, file_priv);
1352
1353 if (!dev_priv->cp_running)
1354 return 0;
1355
1356 /* Flush any pending CP commands. This ensures any outstanding
1357 * commands are exectuted by the engine before we turn it off.
1358 */
1359 if (stop->flush) {
1360 radeon_do_cp_flush(dev_priv);
1361 }
1362
1363 /* If we fail to make the engine go idle, we return an error
1364 * code so that the DRM ioctl wrapper can try again.
1365 */
1366 if (stop->idle) {
1367 ret = radeon_do_cp_idle(dev_priv);
1368 if (ret)
1369 return ret;
1370 }
1371
1372 /* Finally, we can turn off the CP. If the engine isn't idle,
1373 * we will get some dropped triangles as they won't be fully
1374 * rendered before the CP is shut down.
1375 */
1376 radeon_do_cp_stop(dev_priv);
1377
1378 /* Reset the engine */
1379 radeon_do_engine_reset(dev);
1380
1381 return 0;
1382}
1383
/* Last-close teardown: idle and stop a running CP, mask all interrupts,
 * clear every surface register, release the memory heaps and finally
 * free all kernel-side CP state via radeon_do_cleanup_cp().  Tolerates
 * being called before any initialisation (dev_priv may be NULL).
 */
void radeon_do_release(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (dev_priv) {
		if (dev_priv->cp_running) {
			/* Stop the cp */
			while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
				DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
				/* Yield and retry until the engine idles;
				 * the BSD branch sleeps one tick instead.
				 */
#ifdef __linux__
				schedule();
#else
				tsleep(&ret, PZERO, "rdnrel", 1);
#endif
			}
			radeon_do_cp_stop(dev_priv);
			radeon_do_engine_reset(dev);
		}

		/* Disable *all* interrupts */
		if (dev_priv->mmio)	/* remove this after permanent addmaps */
			RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

		if (dev_priv->mmio) {	/* remove all surfaces */
			/* Surface registers sit 16 bytes apart per surface. */
			for (i = 0; i < RADEON_MAX_SURFACES; i++) {
				RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
					     16 * i, 0);
				RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
					     16 * i, 0);
			}
		}

		/* Free memory heap structures */
		radeon_mem_takedown(&(dev_priv->gart_heap));
		radeon_mem_takedown(&(dev_priv->fb_heap));

		/* deallocate kernel resources */
		radeon_do_cleanup_cp(dev);
	}
}
1426
1427/* Just reset the CP ring. Called as part of an X Server engine reset.
1428 */
1429int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
1430{
1431 drm_radeon_private_t *dev_priv = dev->dev_private;
1432 DRM_DEBUG("\n");
1433
1434 LOCK_TEST_WITH_RETURN(dev, file_priv);
1435
1436 if (!dev_priv) {
1437 DRM_DEBUG("called before init done\n");
1438 return -EINVAL;
1439 }
1440
1441 radeon_do_cp_reset(dev_priv);
1442
1443 /* The CP is no longer running after an engine reset */
1444 dev_priv->cp_running = 0;
1445
1446 return 0;
1447}
1448
1449int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
1450{
1451 drm_radeon_private_t *dev_priv = dev->dev_private;
1452 DRM_DEBUG("\n");
1453
1454 LOCK_TEST_WITH_RETURN(dev, file_priv);
1455
1456 return radeon_do_cp_idle(dev_priv);
1457}
1458
/* Added by Charl P. Botha to call radeon_do_resume_cp().
 * Thin ioctl wrapper; all the work happens in radeon_do_resume_cp().
 */
int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return radeon_do_resume_cp(dev);
}
1466
/* DRM_RADEON_ENGINE_RESET ioctl: reset the graphics engine, forwarding
 * the result of radeon_do_engine_reset() to the caller.
 */
int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return radeon_do_engine_reset(dev);
}
1475
1476/* ================================================================
1477 * Fullscreen mode
1478 */
1479
/* KW: Deprecated to say the least:
 * Kept only so the ioctl table entry remains valid; always succeeds.
 */
int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return 0;
}
1486
1487/* ================================================================
1488 * Freelist management
1489 */
1490
/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
 * bufs until freelist code is used. Note this hides a problem with
 * the scratch register (used to keep track of last buffer
 * completed) being written to before the last buffer has actually
 * completed rendering.
1496 *
1497 * KW: It's also a good way to find free buffers quickly.
1498 *
1499 * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
1500 * sleep. However, bugs in older versions of radeon_accel.c mean that
1501 * we essentially have to do this, else old clients will break.
1502 *
1503 * However, it does leave open a potential deadlock where all the
1504 * buffers are held by other clients, which can't release them because
1505 * they can't get the lock.
1506 */
1507
/* Find a free DMA buffer, scanning round-robin from just past the last
 * buffer handed out.  A buffer is free when it has no owner, or when it
 * is pending and its age is at or below the engine's completion age read
 * from scratch register 1.  Retries with a 1us delay for up to
 * usec_timeout iterations; returns NULL if nothing frees up.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;

	/* Advance the rotation point, wrapping at the buffer count. */
	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;

	for (t = 0; t < dev_priv->usec_timeout; t++) {
		u32 done_age = GET_SCRATCH(1);
		DRM_DEBUG("done_age = %d\n", done_age);
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == NULL || (buf->pending &&
						       buf_priv->age <=
						       done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
			/* After the first partial pass, scan from index 0. */
			start = 0;
		}

		if (t) {
			DRM_UDELAY(1);
			dev_priv->stats.freelist_loops++;
		}
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}
1547
#if 0
/* Dead code: an alternate non-blocking radeon_freelist_get that reads the
 * completion age once from the writeback area instead of polling, and
 * makes at most two passes.  Kept for reference; note it compares
 * file_priv against 0 rather than NULL and resets `start` outside the
 * inner loop, unlike the live version above.
 */
struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;
	int start;
	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));

	if (++dev_priv->last_buf >= dma->buf_count)
		dev_priv->last_buf = 0;

	start = dev_priv->last_buf;
	dev_priv->stats.freelist_loops++;

	for (t = 0; t < 2; t++) {
		for (i = start; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->file_priv == 0 || (buf->pending &&
						    buf_priv->age <=
						    done_age)) {
				dev_priv->stats.requested_bufs++;
				buf->pending = 0;
				return buf;
			}
		}
		start = 0;
	}

	return NULL;
}
#endif
1583
1584void radeon_freelist_reset(struct drm_device * dev)
1585{
1586 struct drm_device_dma *dma = dev->dma;
1587 drm_radeon_private_t *dev_priv = dev->dev_private;
1588 int i;
1589
1590 dev_priv->last_buf = 0;
1591 for (i = 0; i < dma->buf_count; i++) {
1592 struct drm_buf *buf = dma->buflist[i];
1593 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1594 buf_priv->age = 0;
1595 }
1596}
1597
1598/* ================================================================
1599 * CP command submission
1600 */
1601
/* Wait until at least n bytes of ring space are free, polling the ring
 * head for up to usec_timeout microseconds.  The timeout counter is
 * restarted whenever the head advances, so a slowly-draining ring keeps
 * the wait alive.  Returns 0 on success, -EBUSY on timeout.
 */
int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
{
	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
	int i;
	u32 last_head = GET_RING_HEAD(dev_priv);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		u32 head = GET_RING_HEAD(dev_priv);

		/* Free space is head minus tail (in bytes), wrapped into
		 * the ring size when the tail is ahead of the head.
		 */
		ring->space = (head - ring->tail) * sizeof(u32);
		if (ring->space <= 0)
			ring->space += ring->size;
		if (ring->space > n)
			return 0;

		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

		/* Any forward progress resets the timeout. */
		if (head != last_head)
			i = 0;
		last_head = head;

		DRM_UDELAY(1);
	}

	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
#if RADEON_FIFO_DEBUG
	radeon_status(dev_priv);
	DRM_ERROR("failed!\n");
#endif
	return -EBUSY;
}
1633
1634static int radeon_cp_get_buffers(struct drm_device *dev,
1635 struct drm_file *file_priv,
1636 struct drm_dma * d)
1637{
1638 int i;
1639 struct drm_buf *buf;
1640
1641 for (i = d->granted_count; i < d->request_count; i++) {
1642 buf = radeon_freelist_get(dev);
1643 if (!buf)
1644 return -EBUSY; /* NOTE: broken client */
1645
1646 buf->file_priv = file_priv;
1647
1648 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1649 sizeof(buf->idx)))
1650 return -EFAULT;
1651 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1652 sizeof(buf->total)))
1653 return -EFAULT;
1654
1655 d->granted_count++;
1656 }
1657 return 0;
1658}
1659
1660int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1661{
1662 struct drm_device_dma *dma = dev->dma;
1663 int ret = 0;
1664 struct drm_dma *d = data;
1665
1666 LOCK_TEST_WITH_RETURN(dev, file_priv);
1667
1668 /* Please don't send us buffers.
1669 */
1670 if (d->send_count != 0) {
1671 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1672 DRM_CURRENTPID, d->send_count);
1673 return -EINVAL;
1674 }
1675
1676 /* We'll send you buffers.
1677 */
1678 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1679 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1680 DRM_CURRENTPID, d->request_count, dma->buf_count);
1681 return -EINVAL;
1682 }
1683
1684 d->granted_count = 0;
1685
1686 if (d->request_count) {
1687 ret = radeon_cp_get_buffers(dev, file_priv, d);
1688 }
1689
1690 return ret;
1691}
1692
/* Driver load hook: allocate and zero the per-device private structure,
 * record the chip-family flags passed in from the PCI ID table, mark
 * chips with hierarchical-Z support, and detect the bus type (AGP, PCIE
 * or plain PCI).  Returns 0 on success or -ENOMEM.
 */
int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
	drm_radeon_private_t *dev_priv;
	int ret = 0;

	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
	dev->dev_private = (void *)dev_priv;
	dev_priv->flags = flags;

	switch (flags & RADEON_FAMILY_MASK) {
	case CHIP_R100:
	case CHIP_RV200:
	case CHIP_R200:
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_R420:
	case CHIP_RV410:
	case CHIP_RV515:
	case CHIP_R520:
	case CHIP_RV570:
	case CHIP_R580:
		dev_priv->flags |= RADEON_HAS_HIERZ;
		break;
	default:
		/* all other chips have no hierarchical z buffer */
		break;
	}

	/* Bus-type detection order matters: AGP takes precedence over
	 * PCIE; anything else is treated as plain PCI.
	 */
	if (drm_device_is_agp(dev))
		dev_priv->flags |= RADEON_IS_AGP;
	else if (drm_device_is_pcie(dev))
		dev_priv->flags |= RADEON_IS_PCIE;
	else
		dev_priv->flags |= RADEON_IS_PCI;

	DRM_DEBUG("%s card detected\n",
		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
	return ret;
}
1736
/* Create mappings for registers and framebuffer so userland doesn't necessarily
 * have to find them.
 */
int radeon_driver_firstopen(struct drm_device *dev)
{
	int ret;
	drm_local_map_t *map;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;

	/* PCI BAR 2 holds the register aperture. */
	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret != 0)
		return ret;

	/* PCI BAR 0 holds the framebuffer aperture.  NOTE(review): if
	 * this second addmap fails the mmio map added above is not
	 * removed here — presumably the DRM core reclaims all maps at
	 * teardown; confirm before relying on it.
	 */
	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &map);
	if (ret != 0)
		return ret;

	return 0;
}
1763
1764int radeon_driver_unload(struct drm_device *dev)
1765{
1766 drm_radeon_private_t *dev_priv = dev->dev_private;
1767
1768 DRM_DEBUG("\n");
1769 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
1770
1771 dev->dev_private = NULL;
1772 return 0;
1773}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
new file mode 100644
index 000000000000..349ac3d3b848
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -0,0 +1,126 @@
1/**
2 * \file radeon_drv.c
3 * ATI Radeon driver
4 *
5 * \author Gareth Hughes <gareth@valinux.com>
6 */
7
8/*
9 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
10 * All Rights Reserved.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice (including the next
20 * paragraph) shall be included in all copies or substantial portions of the
21 * Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
27 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
28 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
29 * OTHER DEALINGS IN THE SOFTWARE.
30 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "radeon_drm.h"
35#include "radeon_drv.h"
36
37#include "drm_pciids.h"
38
39int radeon_no_wb;
40
41MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
42module_param_named(no_wb, radeon_no_wb, int, 0444);
43
44static int dri_library_name(struct drm_device *dev, char *buf)
45{
46 drm_radeon_private_t *dev_priv = dev->dev_private;
47 int family = dev_priv->flags & RADEON_FAMILY_MASK;
48
49 return snprintf(buf, PAGE_SIZE, "%s\n",
50 (family < CHIP_R200) ? "radeon" :
51 ((family < CHIP_R300) ? "r200" :
52 "r300"));
53}
54
/* PCI IDs this driver binds to; the list is expanded from the shared
 * radeon_PCI_IDS table in drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	radeon_PCI_IDS
};
58
/* DRM driver descriptor for the Radeon family.  The feature flags advertise
 * AGP, MTRR, PCI DMA, scatter-gather, IRQ/DMA handling and vblank waits on
 * both CRTCs; the hooks point at the driver entry points declared in
 * radeon_drv.h. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
	    DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
	/* Per-buffer private data is one drm_radeon_buf_priv_t (age field). */
	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
	.load = radeon_driver_load,
	.firstopen = radeon_driver_firstopen,
	.open = radeon_driver_open,
	.preclose = radeon_driver_preclose,
	.postclose = radeon_driver_postclose,
	.lastclose = radeon_driver_lastclose,
	.unload = radeon_driver_unload,
	.vblank_wait = radeon_driver_vblank_wait,
	.vblank_wait2 = radeon_driver_vblank_wait2,
	.dri_library_name = dri_library_name,
	.irq_preinstall = radeon_driver_irq_preinstall,
	.irq_postinstall = radeon_driver_irq_postinstall,
	.irq_uninstall = radeon_driver_irq_uninstall,
	.irq_handler = radeon_driver_irq_handler,
	/* Generic DRM-core implementations are sufficient for these. */
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = radeon_ioctls,
	.dma_ioctl = radeon_cp_buffers,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
		 /* 32-bit ioctl translation on 64-bit kernels. */
		 .compat_ioctl = radeon_compat_ioctl,
#endif
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
109
/* Module entry point: register with the DRM core.  num_ioctls is filled
 * in at runtime because the ioctl table size (radeon_max_ioctl) is defined
 * elsewhere in the driver. */
static int __init radeon_init(void)
{
	driver.num_ioctls = radeon_max_ioctl;
	return drm_init(&driver);
}
115
/* Module exit point: unregister the driver from the DRM core. */
static void __exit radeon_exit(void)
{
	drm_exit(&driver);
}
120
121module_init(radeon_init);
122module_exit(radeon_exit);
123
124MODULE_AUTHOR(DRIVER_AUTHOR);
125MODULE_DESCRIPTION(DRIVER_DESC);
126MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
new file mode 100644
index 000000000000..3f0eca957aa7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -0,0 +1,1406 @@
1/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
2 *
3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
5 * All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 *
26 * Authors:
27 * Kevin E. Martin <martin@valinux.com>
28 * Gareth Hughes <gareth@valinux.com>
29 */
30
31#ifndef __RADEON_DRV_H__
32#define __RADEON_DRV_H__
33
34/* General customization:
35 */
36
37#define DRIVER_AUTHOR "Gareth Hughes, Keith Whitwell, others."
38
39#define DRIVER_NAME "radeon"
40#define DRIVER_DESC "ATI Radeon"
41#define DRIVER_DATE "20080528"
42
43/* Interface history:
44 *
45 * 1.1 - ??
46 * 1.2 - Add vertex2 ioctl (keith)
47 * - Add stencil capability to clear ioctl (gareth, keith)
48 * - Increase MAX_TEXTURE_LEVELS (brian)
49 * 1.3 - Add cmdbuf ioctl (keith)
50 * - Add support for new radeon packets (keith)
51 * - Add getparam ioctl (keith)
52 * - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
53 * 1.4 - Add scratch registers to get_param ioctl.
54 * 1.5 - Add r200 packets to cmdbuf ioctl
55 * - Add r200 function to init ioctl
56 * - Add 'scalar2' instruction to cmdbuf
57 * 1.6 - Add static GART memory manager
58 * Add irq handler (won't be turned on unless X server knows to)
59 * Add irq ioctls and irq_active getparam.
60 * Add wait command for cmdbuf ioctl
61 * Add GART offset query for getparam
62 * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
63 * and R200_PP_CUBIC_OFFSET_F1_[0..5].
64 * Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
65 * R200_EMIT_PP_CUBIC_OFFSETS_[0..5]. (brian)
66 * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
67 * Add 'GET' queries for starting additional clients on different VT's.
68 * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
69 * Add texture rectangle support for r100.
70 * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
71 * clients use to tell the DRM where they think the framebuffer is
72 * located in the card's address space
73 * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
74 * and GL_EXT_blend_[func|equation]_separate on r200
75 * 1.12- Add R300 CP microcode support - this just loads the CP on r300
76 * (No 3D support yet - just microcode loading).
77 * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
78 * - Add hyperz support, add hyperz flags to clear ioctl.
79 * 1.14- Add support for color tiling
80 * - Add R100/R200 surface allocation/free support
81 * 1.15- Add support for texture micro tiling
82 * - Add support for r100 cube maps
83 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
84 * texture filtering on r200
85 * 1.17- Add initial support for R300 (3D).
86 * 1.18- Add support for GL_ATI_fragment_shader, new packets
87 * R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
88 * R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
89 * (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
90 * 1.19- Add support for gart table in FB memory and PCIE r300
91 * 1.20- Add support for r300 texrect
92 * 1.21- Add support for card type getparam
93 * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
94 * 1.23- Add new radeon memory map work from benh
95 * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
96 * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
97 * new packet type)
98 * 1.26- Add support for variable size PCI(E) gart aperture
99 * 1.27- Add support for IGP GART
100 * 1.28- Add support for VBL on CRTC2
101 * 1.29- R500 3D cmd buffer support
102 */
103#define DRIVER_MAJOR 1
104#define DRIVER_MINOR 29
105#define DRIVER_PATCHLEVEL 0
106
/*
 * Radeon chip families, ordered roughly by generation.  Code compares
 * family values with relational operators (e.g. dri_library_name uses
 * family < CHIP_R200), so the declaration order is significant — new
 * entries must keep generational ordering.
 */
enum radeon_family {
	CHIP_R100,
	CHIP_RV100,
	CHIP_RS100,
	CHIP_RV200,
	CHIP_RS200,
	CHIP_R200,
	CHIP_RV250,
	CHIP_RS300,
	CHIP_RV280,
	CHIP_R300,
	CHIP_R350,
	CHIP_RV350,
	CHIP_RV380,
	CHIP_R420,
	CHIP_RV410,
	CHIP_RS480,
	CHIP_RS690,
	CHIP_RV515,
	CHIP_R520,
	CHIP_RV530,
	CHIP_RV560,
	CHIP_RV570,
	CHIP_R580,
	CHIP_LAST,
};
136
/* Which generation of CP (command processor) microcode the chip takes. */
enum radeon_cp_microcode_version {
	UCODE_R100,
	UCODE_R200,
	UCODE_R300,
};
142
/*
 * Chip flags, stored in dev_priv->flags: the low 16 bits hold the
 * radeon_family value (RADEON_FAMILY_MASK), the high 16 bits are
 * feature/bus-type flag bits (RADEON_FLAGS_MASK).
 */
enum radeon_chip_flags {
	RADEON_FAMILY_MASK = 0x0000ffffUL,
	RADEON_FLAGS_MASK = 0xffff0000UL,
	RADEON_IS_MOBILITY = 0x00010000UL,
	RADEON_IS_IGP = 0x00020000UL,
	RADEON_SINGLE_CRTC = 0x00040000UL,
	RADEON_IS_AGP = 0x00080000UL,
	RADEON_HAS_HIERZ = 0x00100000UL,
	RADEON_IS_PCIE = 0x00200000UL,
	RADEON_NEW_MEMMAP = 0x00400000UL,
	RADEON_IS_PCI = 0x00800000UL,
	RADEON_IS_IGPGART = 0x01000000UL,
};
159
/* Read/write the CP ring-buffer head pointer: use the writeback page
 * (ring_rptr map, offset 0) when scratch writeback works, otherwise read
 * the RADEON_CP_RB_RPTR register directly. */
#define GET_RING_HEAD(dev_priv)	(dev_priv->writeback_works ? \
        DRM_READ32(  (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
#define SET_RING_HEAD(dev_priv,val)	DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
163
/* Doubly-linked free-list node tracking a DMA buffer and the engine "age"
 * (scratch-register counter) after which it may be reused. */
typedef struct drm_radeon_freelist {
	unsigned int age;
	struct drm_buf *buf;
	struct drm_radeon_freelist *next;
	struct drm_radeon_freelist *prev;
} drm_radeon_freelist_t;
170
/* Software state of the CP ring buffer (mapped via dev_priv->cp_ring). */
typedef struct drm_radeon_ring_buffer {
	u32 *start;		/* first dword of the ring mapping */
	u32 *end;		/* one past the last dword */
	int size;
	int size_l2qw;		/* log2 size in quad words (hw format) */

	int rptr_update;	/* Double Words */
	int rptr_update_l2qw;	/* log2 Quad Words */

	int fetch_size;		/* Double Words */
	int fetch_size_l2ow;	/* log2 Oct Words */

	u32 tail;		/* next write position, in dwords */
	u32 tail_mask;		/* size - 1, for wrap-around */
	int space;		/* dwords known free between head and tail */

	int high_mark;		/* refuse to fill the ring past this margin */
} drm_radeon_ring_buffer_t;
189
/* Register values pre-computed at init time for fast depth-buffer clears. */
typedef struct drm_radeon_depth_clear_t {
	u32 rb3d_cntl;
	u32 rb3d_zstencilcntl;
	u32 se_cntl;
} drm_radeon_depth_clear_t;
195
/* Per-open-file driver state. */
struct drm_radeon_driver_file_fields {
	/* Offset the client applies to framebuffer addresses it passes in. */
	int64_t radeon_fb_delta;
};
199
/* Node in the simple first-fit allocator used for the GART and FB heaps
 * (gart_heap / fb_heap in drm_radeon_private). */
struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;
	int size;
	struct drm_file *file_priv;	/* NULL: free, -1: heap, other: real files */
};
207
/* Hardware surface register slot (tiling setup), reference-counted because
 * several clients' virtual surfaces may share one hardware slot. */
struct radeon_surface {
	int refcount;
	u32 lower;
	u32 upper;
	u32 flags;
};
214
/* A client's view of a surface allocation, mapped onto a hardware slot by
 * surface_index; file_priv records the owning client for cleanup. */
struct radeon_virt_surface {
	int surface_index;
	u32 lower;
	u32 upper;
	u32 flags;
	struct drm_file *file_priv;
};
222
/* Per-device private state, allocated at driver load (dev->dev_private). */
typedef struct drm_radeon_private {
	drm_radeon_ring_buffer_t ring;
	drm_radeon_sarea_t *sarea_priv;

	/* Framebuffer placement in the card's address space. */
	u32 fb_location;
	u32 fb_size;
	int new_memmap;

	/* GART aperture placement and the DMA buffer region inside it. */
	int gart_size;
	u32 gart_vm_start;
	unsigned long gart_buffers_offset;

	int cp_mode;
	int cp_running;

	/* DMA buffer freelist (see drm_radeon_freelist_t). */
	drm_radeon_freelist_t *head;
	drm_radeon_freelist_t *tail;
	int last_buf;
	/* Scratch-register writeback area; writeback_works records whether
	 * the hardware actually updates it. */
	volatile u32 *scratch;
	int writeback_works;

	int usec_timeout;

	int microcode_version;	/* enum radeon_cp_microcode_version */

	/* Usage counters exposed for debugging/performance boxes. */
	struct {
		u32 boxes;
		int freelist_timeouts;
		int freelist_loops;
		int requested_bufs;
		int last_frame_reads;
		int last_clear_reads;
		int clears;
		int texture_uploads;
	} stats;

	int do_boxes;
	int page_flipping;

	/* 2D/3D buffer layout: color, back and depth buffer geometry. */
	u32 color_fmt;
	unsigned int front_offset;
	unsigned int front_pitch;
	unsigned int back_offset;
	unsigned int back_pitch;

	u32 depth_fmt;
	unsigned int depth_offset;
	unsigned int depth_pitch;

	/* Pre-combined pitch/offset register values. */
	u32 front_pitch_offset;
	u32 back_pitch_offset;
	u32 depth_pitch_offset;

	drm_radeon_depth_clear_t depth_clear;

	unsigned long ring_offset;
	unsigned long ring_rptr_offset;
	unsigned long buffers_offset;
	unsigned long gart_textures_offset;

	/* DRM mappings established at init/firstopen time. */
	drm_local_map_t *sarea;
	drm_local_map_t *mmio;
	drm_local_map_t *cp_ring;
	drm_local_map_t *ring_rptr;
	drm_local_map_t *gart_textures;

	/* First-fit heaps of mem_block nodes for GART and FB memory. */
	struct mem_block *gart_heap;
	struct mem_block *fb_heap;

	/* SW interrupt */
	wait_queue_head_t swi_queue;
	atomic_t swi_emitted;
	int vblank_crtc;
	uint32_t irq_enable_reg;
	int irq_enabled;
	uint32_t r500_disp_irq_reg;

	/* Surface register bookkeeping (tiling). */
	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];

	unsigned long pcigart_offset;
	unsigned int pcigart_offset_set;
	struct drm_ati_pcigart_info gart_info;

	u32 scratch_ages[5];

	/* starting from here on, data is preserved across an open */
	uint32_t flags;		/* see radeon_chip_flags */
	unsigned long fb_aper_offset;

	int num_gb_pipes;
} drm_radeon_private_t;
315
/* Per-DMA-buffer private data (drm_driver.dev_priv_size): the engine age
 * at which the buffer was dispatched, used by the freelist. */
typedef struct drm_radeon_buf_priv {
	u32 age;
} drm_radeon_buf_priv_t;
319
/* Kernel-side view of a command buffer submitted via the cmdbuf ioctl. */
typedef struct drm_radeon_kcmd_buffer {
	int bufsz;			/* bytes remaining in buf */
	char *buf;			/* command stream cursor */
	int nbox;			/* number of cliprects */
	struct drm_clip_rect __user *boxes;	/* userspace cliprect array */
} drm_radeon_kcmd_buffer_t;
326
327extern int radeon_no_wb;
328extern struct drm_ioctl_desc radeon_ioctls[];
329extern int radeon_max_ioctl;
330
331/* Check whether the given hardware address is inside the framebuffer or the
332 * GART area.
333 */
334static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
335 u64 off)
336{
337 u32 fb_start = dev_priv->fb_location;
338 u32 fb_end = fb_start + dev_priv->fb_size - 1;
339 u32 gart_start = dev_priv->gart_vm_start;
340 u32 gart_end = gart_start + dev_priv->gart_size - 1;
341
342 return ((off >= fb_start && off <= fb_end) ||
343 (off >= gart_start && off <= gart_end));
344}
345
346 /* radeon_cp.c */
347extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
348extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
349extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
350extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
351extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
352extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
353extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
354extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
355extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
356extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
357
358extern void radeon_freelist_reset(struct drm_device * dev);
359extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
360
361extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
362
363extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
364
365extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
366extern int radeon_presetup(struct drm_device *dev);
367extern int radeon_driver_postcleanup(struct drm_device *dev);
368
369extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
370extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
371extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
372extern void radeon_mem_takedown(struct mem_block **heap);
373extern void radeon_mem_release(struct drm_file *file_priv,
374 struct mem_block *heap);
375
376 /* radeon_irq.c */
377extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
378extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
379
380extern void radeon_do_release(struct drm_device * dev);
381extern int radeon_driver_vblank_wait(struct drm_device * dev,
382 unsigned int *sequence);
383extern int radeon_driver_vblank_wait2(struct drm_device * dev,
384 unsigned int *sequence);
385extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
386extern void radeon_driver_irq_preinstall(struct drm_device * dev);
387extern void radeon_driver_irq_postinstall(struct drm_device * dev);
388extern void radeon_driver_irq_uninstall(struct drm_device * dev);
389extern void radeon_enable_interrupt(struct drm_device *dev);
390extern int radeon_vblank_crtc_get(struct drm_device *dev);
391extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
392
393extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
394extern int radeon_driver_unload(struct drm_device *dev);
395extern int radeon_driver_firstopen(struct drm_device *dev);
396extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
397extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
398extern void radeon_driver_lastclose(struct drm_device * dev);
399extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
400extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
401 unsigned long arg);
402
403/* r300_cmdbuf.c */
404extern void r300_init_reg_flags(struct drm_device *dev);
405
406extern int r300_do_cp_cmdbuf(struct drm_device * dev,
407 struct drm_file *file_priv,
408 drm_radeon_kcmd_buffer_t * cmdbuf);
409
410/* Flags for stats.boxes
411 */
412#define RADEON_BOX_DMA_IDLE 0x1
413#define RADEON_BOX_RING_FULL 0x2
414#define RADEON_BOX_FLIP 0x4
415#define RADEON_BOX_WAIT_IDLE 0x8
416#define RADEON_BOX_TEXTURE_LOAD 0x10
417
418/* Register definitions, register access macros and drmAddMap constants
419 * for Radeon kernel driver.
420 */
421
422#define RADEON_AGP_COMMAND 0x0f60
423#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
424# define RADEON_AGP_ENABLE (1<<8)
425#define RADEON_AUX_SCISSOR_CNTL 0x26f0
426# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
427# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
428# define RADEON_EXCLUSIVE_SCISSOR_2 (1 << 26)
429# define RADEON_SCISSOR_0_ENABLE (1 << 28)
430# define RADEON_SCISSOR_1_ENABLE (1 << 29)
431# define RADEON_SCISSOR_2_ENABLE (1 << 30)
432
433#define RADEON_BUS_CNTL 0x0030
434# define RADEON_BUS_MASTER_DIS (1 << 6)
435
436#define RADEON_CLOCK_CNTL_DATA 0x000c
437# define RADEON_PLL_WR_EN (1 << 7)
438#define RADEON_CLOCK_CNTL_INDEX 0x0008
439#define RADEON_CONFIG_APER_SIZE 0x0108
440#define RADEON_CONFIG_MEMSIZE 0x00f8
441#define RADEON_CRTC_OFFSET 0x0224
442#define RADEON_CRTC_OFFSET_CNTL 0x0228
443# define RADEON_CRTC_TILE_EN (1 << 15)
444# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
445#define RADEON_CRTC2_OFFSET 0x0324
446#define RADEON_CRTC2_OFFSET_CNTL 0x0328
447
448#define RADEON_PCIE_INDEX 0x0030
449#define RADEON_PCIE_DATA 0x0034
450#define RADEON_PCIE_TX_GART_CNTL 0x10
451# define RADEON_PCIE_TX_GART_EN (1 << 0)
452# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
453# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
454# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
455# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
456# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
457# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
458# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
459#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
460#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
461#define RADEON_PCIE_TX_GART_BASE 0x13
462#define RADEON_PCIE_TX_GART_START_LO 0x14
463#define RADEON_PCIE_TX_GART_START_HI 0x15
464#define RADEON_PCIE_TX_GART_END_LO 0x16
465#define RADEON_PCIE_TX_GART_END_HI 0x17
466
467#define RS480_NB_MC_INDEX 0x168
468# define RS480_NB_MC_IND_WR_EN (1 << 8)
469#define RS480_NB_MC_DATA 0x16c
470
471#define RS690_MC_INDEX 0x78
472# define RS690_MC_INDEX_MASK 0x1ff
473# define RS690_MC_INDEX_WR_EN (1 << 9)
474# define RS690_MC_INDEX_WR_ACK 0x7f
475#define RS690_MC_DATA 0x7c
476
477/* MC indirect registers */
478#define RS480_MC_MISC_CNTL 0x18
479# define RS480_DISABLE_GTW (1 << 1)
480/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
481# define RS480_GART_INDEX_REG_EN (1 << 12)
482# define RS690_BLOCK_GFX_D3_EN (1 << 14)
483#define RS480_K8_FB_LOCATION 0x1e
484#define RS480_GART_FEATURE_ID 0x2b
485# define RS480_HANG_EN (1 << 11)
486# define RS480_TLB_ENABLE (1 << 18)
487# define RS480_P2P_ENABLE (1 << 19)
488# define RS480_GTW_LAC_EN (1 << 25)
489# define RS480_2LEVEL_GART (0 << 30)
490# define RS480_1LEVEL_GART (1 << 30)
491# define RS480_PDC_EN (1 << 31)
492#define RS480_GART_BASE 0x2c
493#define RS480_GART_CACHE_CNTRL 0x2e
494# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
495#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
496# define RS480_GART_EN (1 << 0)
497# define RS480_VA_SIZE_32MB (0 << 1)
498# define RS480_VA_SIZE_64MB (1 << 1)
499# define RS480_VA_SIZE_128MB (2 << 1)
500# define RS480_VA_SIZE_256MB (3 << 1)
501# define RS480_VA_SIZE_512MB (4 << 1)
502# define RS480_VA_SIZE_1GB (5 << 1)
503# define RS480_VA_SIZE_2GB (6 << 1)
504#define RS480_AGP_MODE_CNTL 0x39
505# define RS480_POST_GART_Q_SIZE (1 << 18)
506# define RS480_NONGART_SNOOP (1 << 19)
507# define RS480_AGP_RD_BUF_SIZE (1 << 20)
508# define RS480_REQ_TYPE_SNOOP_SHIFT 22
509# define RS480_REQ_TYPE_SNOOP_MASK 0x3
510# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
511#define RS480_MC_MISC_UMA_CNTL 0x5f
512#define RS480_MC_MCLK_CNTL 0x7a
513#define RS480_MC_UMA_DUALCH_CNTL 0x86
514
515#define RS690_MC_FB_LOCATION 0x100
516#define RS690_MC_AGP_LOCATION 0x101
517#define RS690_MC_AGP_BASE 0x102
518#define RS690_MC_AGP_BASE_2 0x103
519
520#define R520_MC_IND_INDEX 0x70
521#define R520_MC_IND_WR_EN (1 << 24)
522#define R520_MC_IND_DATA 0x74
523
524#define RV515_MC_FB_LOCATION 0x01
525#define RV515_MC_AGP_LOCATION 0x02
526#define RV515_MC_AGP_BASE 0x03
527#define RV515_MC_AGP_BASE_2 0x04
528
529#define R520_MC_FB_LOCATION 0x04
530#define R520_MC_AGP_LOCATION 0x05
531#define R520_MC_AGP_BASE 0x06
532#define R520_MC_AGP_BASE_2 0x07
533
534#define RADEON_MPP_TB_CONFIG 0x01c0
535#define RADEON_MEM_CNTL 0x0140
536#define RADEON_MEM_SDRAM_MODE_REG 0x0158
537#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
538#define RS480_AGP_BASE_2 0x0164
539#define RADEON_AGP_BASE 0x0170
540
541/* pipe config regs */
542#define R400_GB_PIPE_SELECT 0x402c
543#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
544#define R500_SU_REG_DEST 0x42c8
545#define R300_GB_TILE_CONFIG 0x4018
546# define R300_ENABLE_TILING (1 << 0)
547# define R300_PIPE_COUNT_RV350 (0 << 1)
548# define R300_PIPE_COUNT_R300 (3 << 1)
549# define R300_PIPE_COUNT_R420_3P (6 << 1)
550# define R300_PIPE_COUNT_R420 (7 << 1)
551# define R300_TILE_SIZE_8 (0 << 4)
552# define R300_TILE_SIZE_16 (1 << 4)
553# define R300_TILE_SIZE_32 (2 << 4)
554# define R300_SUBPIXEL_1_12 (0 << 16)
555# define R300_SUBPIXEL_1_16 (1 << 16)
556#define R300_DST_PIPE_CONFIG 0x170c
557# define R300_PIPE_AUTO_CONFIG (1 << 31)
558#define R300_RB2D_DSTCACHE_MODE 0x3428
559# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
560# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
561
562#define RADEON_RB3D_COLOROFFSET 0x1c40
563#define RADEON_RB3D_COLORPITCH 0x1c48
564
565#define RADEON_SRC_X_Y 0x1590
566
567#define RADEON_DP_GUI_MASTER_CNTL 0x146c
568# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
569# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
570# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
571# define RADEON_GMC_BRUSH_NONE (15 << 4)
572# define RADEON_GMC_DST_16BPP (4 << 8)
573# define RADEON_GMC_DST_24BPP (5 << 8)
574# define RADEON_GMC_DST_32BPP (6 << 8)
575# define RADEON_GMC_DST_DATATYPE_SHIFT 8
576# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
577# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
578# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
579# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
580# define RADEON_GMC_WR_MSK_DIS (1 << 30)
581# define RADEON_ROP3_S 0x00cc0000
582# define RADEON_ROP3_P 0x00f00000
583#define RADEON_DP_WRITE_MASK 0x16cc
584#define RADEON_SRC_PITCH_OFFSET 0x1428
585#define RADEON_DST_PITCH_OFFSET 0x142c
586#define RADEON_DST_PITCH_OFFSET_C 0x1c80
587# define RADEON_DST_TILE_LINEAR (0 << 30)
588# define RADEON_DST_TILE_MACRO (1 << 30)
589# define RADEON_DST_TILE_MICRO (2 << 30)
590# define RADEON_DST_TILE_BOTH (3 << 30)
591
592#define RADEON_SCRATCH_REG0 0x15e0
593#define RADEON_SCRATCH_REG1 0x15e4
594#define RADEON_SCRATCH_REG2 0x15e8
595#define RADEON_SCRATCH_REG3 0x15ec
596#define RADEON_SCRATCH_REG4 0x15f0
597#define RADEON_SCRATCH_REG5 0x15f4
598#define RADEON_SCRATCH_UMSK 0x0770
599#define RADEON_SCRATCH_ADDR 0x0774
600
601#define RADEON_SCRATCHOFF( x ) (RADEON_SCRATCH_REG_OFFSET + 4*(x))
602
603#define GET_SCRATCH( x ) (dev_priv->writeback_works \
604 ? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
605 : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
606
607#define RADEON_GEN_INT_CNTL 0x0040
608# define RADEON_CRTC_VBLANK_MASK (1 << 0)
609# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
610# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
611# define RADEON_SW_INT_ENABLE (1 << 25)
612
613#define RADEON_GEN_INT_STATUS 0x0044
614# define RADEON_CRTC_VBLANK_STAT (1 << 0)
615# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
616# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
617# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
618# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
619# define RADEON_SW_INT_TEST (1 << 25)
620# define RADEON_SW_INT_TEST_ACK (1 << 25)
621# define RADEON_SW_INT_FIRE (1 << 26)
622
623#define RADEON_HOST_PATH_CNTL 0x0130
624# define RADEON_HDP_SOFT_RESET (1 << 26)
625# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28)
626# define RADEON_HDP_WC_TIMEOUT_28BCLK (7 << 28)
627
628#define RADEON_ISYNC_CNTL 0x1724
629# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
630# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
631# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
632# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
633# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
634# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
635
636#define RADEON_RBBM_GUICNTL 0x172c
637# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
638# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
639# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
640# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
641
642#define RADEON_MC_AGP_LOCATION 0x014c
643#define RADEON_MC_FB_LOCATION 0x0148
644#define RADEON_MCLK_CNTL 0x0012
645# define RADEON_FORCEON_MCLKA (1 << 16)
646# define RADEON_FORCEON_MCLKB (1 << 17)
647# define RADEON_FORCEON_YCLKA (1 << 18)
648# define RADEON_FORCEON_YCLKB (1 << 19)
649# define RADEON_FORCEON_MC (1 << 20)
650# define RADEON_FORCEON_AIC (1 << 21)
651
652#define RADEON_PP_BORDER_COLOR_0 0x1d40
653#define RADEON_PP_BORDER_COLOR_1 0x1d44
654#define RADEON_PP_BORDER_COLOR_2 0x1d48
655#define RADEON_PP_CNTL 0x1c38
656# define RADEON_SCISSOR_ENABLE (1 << 1)
657#define RADEON_PP_LUM_MATRIX 0x1d00
658#define RADEON_PP_MISC 0x1c14
659#define RADEON_PP_ROT_MATRIX_0 0x1d58
660#define RADEON_PP_TXFILTER_0 0x1c54
661#define RADEON_PP_TXOFFSET_0 0x1c5c
662#define RADEON_PP_TXFILTER_1 0x1c6c
663#define RADEON_PP_TXFILTER_2 0x1c84
664
665#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */
666#define R300_DSTCACHE_CTLSTAT 0x1714
667# define R300_RB2D_DC_FLUSH (3 << 0)
668# define R300_RB2D_DC_FREE (3 << 2)
669# define R300_RB2D_DC_FLUSH_ALL 0xf
670# define R300_RB2D_DC_BUSY (1 << 31)
671#define RADEON_RB3D_CNTL 0x1c3c
672# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
673# define RADEON_PLANE_MASK_ENABLE (1 << 1)
674# define RADEON_DITHER_ENABLE (1 << 2)
675# define RADEON_ROUND_ENABLE (1 << 3)
676# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
677# define RADEON_DITHER_INIT (1 << 5)
678# define RADEON_ROP_ENABLE (1 << 6)
679# define RADEON_STENCIL_ENABLE (1 << 7)
680# define RADEON_Z_ENABLE (1 << 8)
681# define RADEON_ZBLOCK16 (1 << 15)
682#define RADEON_RB3D_DEPTHOFFSET 0x1c24
683#define RADEON_RB3D_DEPTHCLEARVALUE 0x3230
684#define RADEON_RB3D_DEPTHPITCH 0x1c28
685#define RADEON_RB3D_PLANEMASK 0x1d84
686#define RADEON_RB3D_STENCILREFMASK 0x1d7c
687#define RADEON_RB3D_ZCACHE_MODE 0x3250
688#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
689# define RADEON_RB3D_ZC_FLUSH (1 << 0)
690# define RADEON_RB3D_ZC_FREE (1 << 2)
691# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
692# define RADEON_RB3D_ZC_BUSY (1 << 31)
693#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
694# define R300_ZC_FLUSH (1 << 0)
695# define R300_ZC_FREE (1 << 1)
696# define R300_ZC_FLUSH_ALL 0x3
697# define R300_ZC_BUSY (1 << 31)
698#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
699# define RADEON_RB3D_DC_FLUSH (3 << 0)
700# define RADEON_RB3D_DC_FREE (3 << 2)
701# define RADEON_RB3D_DC_FLUSH_ALL 0xf
702# define RADEON_RB3D_DC_BUSY (1 << 31)
703#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
704# define R300_RB3D_DC_FINISH (1 << 4)
705#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
706# define RADEON_Z_TEST_MASK (7 << 4)
707# define RADEON_Z_TEST_ALWAYS (7 << 4)
708# define RADEON_Z_HIERARCHY_ENABLE (1 << 8)
709# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
710# define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16)
711# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
712# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
713# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
714# define RADEON_FORCE_Z_DIRTY (1 << 29)
715# define RADEON_Z_WRITE_ENABLE (1 << 30)
716# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31)
717#define RADEON_RBBM_SOFT_RESET 0x00f0
718# define RADEON_SOFT_RESET_CP (1 << 0)
719# define RADEON_SOFT_RESET_HI (1 << 1)
720# define RADEON_SOFT_RESET_SE (1 << 2)
721# define RADEON_SOFT_RESET_RE (1 << 3)
722# define RADEON_SOFT_RESET_PP (1 << 4)
723# define RADEON_SOFT_RESET_E2 (1 << 5)
724# define RADEON_SOFT_RESET_RB (1 << 6)
725# define RADEON_SOFT_RESET_HDP (1 << 7)
726/*
727 * 6:0 Available slots in the FIFO
728 * 8 Host Interface active
729 * 9 CP request active
730 * 10 FIFO request active
731 * 11 Host Interface retry active
732 * 12 CP retry active
733 * 13 FIFO retry active
734 * 14 FIFO pipeline busy
735 * 15 Event engine busy
736 * 16 CP command stream busy
737 * 17 2D engine busy
738 * 18 2D portion of render backend busy
739 * 20 3D setup engine busy
740 * 26 GA engine busy
741 * 27 CBA 2D engine busy
742 * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
743 * command stream queue not empty or Ring Buffer not empty
744 */
745#define RADEON_RBBM_STATUS 0x0e40
746/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
747/* #define RADEON_RBBM_STATUS 0x1740 */
748/* bits 6:0 are dword slots available in the cmd fifo */
749# define RADEON_RBBM_FIFOCNT_MASK 0x007f
750# define RADEON_HIRQ_ON_RBB (1 << 8)
751# define RADEON_CPRQ_ON_RBB (1 << 9)
752# define RADEON_CFRQ_ON_RBB (1 << 10)
753# define RADEON_HIRQ_IN_RTBUF (1 << 11)
754# define RADEON_CPRQ_IN_RTBUF (1 << 12)
755# define RADEON_CFRQ_IN_RTBUF (1 << 13)
756# define RADEON_PIPE_BUSY (1 << 14)
757# define RADEON_ENG_EV_BUSY (1 << 15)
758# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
759# define RADEON_E2_BUSY (1 << 17)
760# define RADEON_RB2D_BUSY (1 << 18)
761# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
762# define RADEON_VAP_BUSY (1 << 20)
763# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
764# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
765# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
766# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
767# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
768# define RADEON_GA_BUSY (1 << 26)
769# define RADEON_CBA2D_BUSY (1 << 27)
770# define RADEON_RBBM_ACTIVE (1 << 31)
771#define RADEON_RE_LINE_PATTERN 0x1cd0
772#define RADEON_RE_MISC 0x26c4
773#define RADEON_RE_TOP_LEFT 0x26c0
774#define RADEON_RE_WIDTH_HEIGHT 0x1c44
775#define RADEON_RE_STIPPLE_ADDR 0x1cc8
776#define RADEON_RE_STIPPLE_DATA 0x1ccc
777
778#define RADEON_SCISSOR_TL_0 0x1cd8
779#define RADEON_SCISSOR_BR_0 0x1cdc
780#define RADEON_SCISSOR_TL_1 0x1ce0
781#define RADEON_SCISSOR_BR_1 0x1ce4
782#define RADEON_SCISSOR_TL_2 0x1ce8
783#define RADEON_SCISSOR_BR_2 0x1cec
784#define RADEON_SE_COORD_FMT 0x1c50
785#define RADEON_SE_CNTL 0x1c4c
786# define RADEON_FFACE_CULL_CW (0 << 0)
787# define RADEON_BFACE_SOLID (3 << 1)
788# define RADEON_FFACE_SOLID (3 << 3)
789# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
790# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
791# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
792# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
793# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
794# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
795# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
796# define RADEON_FOG_SHADE_FLAT (1 << 14)
797# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
798# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
799# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
800# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
801# define RADEON_ROUND_MODE_TRUNC (0 << 28)
802# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
803#define RADEON_SE_CNTL_STATUS 0x2140
804#define RADEON_SE_LINE_WIDTH 0x1db8
805#define RADEON_SE_VPORT_XSCALE 0x1d98
806#define RADEON_SE_ZBIAS_FACTOR 0x1db0
807#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
808#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
809#define RADEON_SE_TCL_VECTOR_INDX_REG 0x2200
810# define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT 16
811# define RADEON_VEC_INDX_DWORD_COUNT_SHIFT 28
812#define RADEON_SE_TCL_VECTOR_DATA_REG 0x2204
813#define RADEON_SE_TCL_SCALAR_INDX_REG 0x2208
814# define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT 16
815#define RADEON_SE_TCL_SCALAR_DATA_REG 0x220C
816#define RADEON_SURFACE_ACCESS_FLAGS 0x0bf8
817#define RADEON_SURFACE_ACCESS_CLR 0x0bfc
818#define RADEON_SURFACE_CNTL 0x0b00
819# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
820# define RADEON_NONSURF_AP0_SWP_MASK (3 << 20)
821# define RADEON_NONSURF_AP0_SWP_LITTLE (0 << 20)
822# define RADEON_NONSURF_AP0_SWP_BIG16 (1 << 20)
823# define RADEON_NONSURF_AP0_SWP_BIG32 (2 << 20)
824# define RADEON_NONSURF_AP1_SWP_MASK (3 << 22)
825# define RADEON_NONSURF_AP1_SWP_LITTLE (0 << 22)
826# define RADEON_NONSURF_AP1_SWP_BIG16 (1 << 22)
827# define RADEON_NONSURF_AP1_SWP_BIG32 (2 << 22)
828#define RADEON_SURFACE0_INFO 0x0b0c
829# define RADEON_SURF_PITCHSEL_MASK (0x1ff << 0)
830# define RADEON_SURF_TILE_MODE_MASK (3 << 16)
831# define RADEON_SURF_TILE_MODE_MACRO (0 << 16)
832# define RADEON_SURF_TILE_MODE_MICRO (1 << 16)
833# define RADEON_SURF_TILE_MODE_32BIT_Z (2 << 16)
834# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16)
835#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
836#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
837# define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0)
838#define RADEON_SURFACE1_INFO 0x0b1c
839#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
840#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
841#define RADEON_SURFACE2_INFO 0x0b2c
842#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
843#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
844#define RADEON_SURFACE3_INFO 0x0b3c
845#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
846#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
847#define RADEON_SURFACE4_INFO 0x0b4c
848#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
849#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
850#define RADEON_SURFACE5_INFO 0x0b5c
851#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
852#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
853#define RADEON_SURFACE6_INFO 0x0b6c
854#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
855#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
856#define RADEON_SURFACE7_INFO 0x0b7c
857#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
858#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
859#define RADEON_SW_SEMAPHORE 0x013c
860
861#define RADEON_WAIT_UNTIL 0x1720
862# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
863# define RADEON_WAIT_2D_IDLE (1 << 14)
864# define RADEON_WAIT_3D_IDLE (1 << 15)
865# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
866# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
867# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
868
869#define RADEON_RB3D_ZMASKOFFSET 0x3234
870#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
871# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
872# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
873
874/* CP registers */
875#define RADEON_CP_ME_RAM_ADDR 0x07d4
876#define RADEON_CP_ME_RAM_RADDR 0x07d8
877#define RADEON_CP_ME_RAM_DATAH 0x07dc
878#define RADEON_CP_ME_RAM_DATAL 0x07e0
879
880#define RADEON_CP_RB_BASE 0x0700
881#define RADEON_CP_RB_CNTL 0x0704
882# define RADEON_BUF_SWAP_32BIT (2 << 16)
883# define RADEON_RB_NO_UPDATE (1 << 27)
884#define RADEON_CP_RB_RPTR_ADDR 0x070c
885#define RADEON_CP_RB_RPTR 0x0710
886#define RADEON_CP_RB_WPTR 0x0714
887
888#define RADEON_CP_RB_WPTR_DELAY 0x0718
889# define RADEON_PRE_WRITE_TIMER_SHIFT 0
890# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
891
892#define RADEON_CP_IB_BASE 0x0738
893
894#define RADEON_CP_CSQ_CNTL 0x0740
895# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
896# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
897# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
898# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
899# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
900# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
901# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
902
903#define RADEON_AIC_CNTL 0x01d0
904# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
905#define RADEON_AIC_STAT 0x01d4
906#define RADEON_AIC_PT_BASE 0x01d8
907#define RADEON_AIC_LO_ADDR 0x01dc
908#define RADEON_AIC_HI_ADDR 0x01e0
909#define RADEON_AIC_TLB_ADDR 0x01e4
910#define RADEON_AIC_TLB_DATA 0x01e8
911
912/* CP command packets */
913#define RADEON_CP_PACKET0 0x00000000
914# define RADEON_ONE_REG_WR (1 << 15)
915#define RADEON_CP_PACKET1 0x40000000
916#define RADEON_CP_PACKET2 0x80000000
917#define RADEON_CP_PACKET3 0xC0000000
918# define RADEON_CP_NOP 0x00001000
919# define RADEON_CP_NEXT_CHAR 0x00001900
920# define RADEON_CP_PLY_NEXTSCAN 0x00001D00
921# define RADEON_CP_SET_SCISSORS 0x00001E00
922 /* GEN_INDX_PRIM is unsupported starting with R300 */
923# define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300
924# define RADEON_WAIT_FOR_IDLE 0x00002600
925# define RADEON_3D_DRAW_VBUF 0x00002800
926# define RADEON_3D_DRAW_IMMD 0x00002900
927# define RADEON_3D_DRAW_INDX 0x00002A00
928# define RADEON_CP_LOAD_PALETTE 0x00002C00
929# define RADEON_3D_LOAD_VBPNTR 0x00002F00
930# define RADEON_MPEG_IDCT_MACROBLOCK 0x00003000
931# define RADEON_MPEG_IDCT_MACROBLOCK_REV 0x00003100
932# define RADEON_3D_CLEAR_ZMASK 0x00003200
933# define RADEON_CP_INDX_BUFFER 0x00003300
934# define RADEON_CP_3D_DRAW_VBUF_2 0x00003400
935# define RADEON_CP_3D_DRAW_IMMD_2 0x00003500
936# define RADEON_CP_3D_DRAW_INDX_2 0x00003600
937# define RADEON_3D_CLEAR_HIZ 0x00003700
938# define RADEON_CP_3D_CLEAR_CMASK 0x00003802
939# define RADEON_CNTL_HOSTDATA_BLT 0x00009400
940# define RADEON_CNTL_PAINT_MULTI 0x00009A00
941# define RADEON_CNTL_BITBLT_MULTI 0x00009B00
942# define RADEON_CNTL_SET_SCISSORS 0xC0001E00
943
944#define RADEON_CP_PACKET_MASK 0xC0000000
945#define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
946#define RADEON_CP_PACKET0_REG_MASK 0x000007ff
947#define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
948#define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
949
950#define RADEON_VTX_Z_PRESENT (1 << 31)
951#define RADEON_VTX_PKCOLOR_PRESENT (1 << 3)
952
953#define RADEON_PRIM_TYPE_NONE (0 << 0)
954#define RADEON_PRIM_TYPE_POINT (1 << 0)
955#define RADEON_PRIM_TYPE_LINE (2 << 0)
956#define RADEON_PRIM_TYPE_LINE_STRIP (3 << 0)
957#define RADEON_PRIM_TYPE_TRI_LIST (4 << 0)
958#define RADEON_PRIM_TYPE_TRI_FAN (5 << 0)
959#define RADEON_PRIM_TYPE_TRI_STRIP (6 << 0)
960#define RADEON_PRIM_TYPE_TRI_TYPE2 (7 << 0)
961#define RADEON_PRIM_TYPE_RECT_LIST (8 << 0)
962#define RADEON_PRIM_TYPE_3VRT_POINT_LIST (9 << 0)
963#define RADEON_PRIM_TYPE_3VRT_LINE_LIST (10 << 0)
964#define RADEON_PRIM_TYPE_MASK 0xf
965#define RADEON_PRIM_WALK_IND (1 << 4)
966#define RADEON_PRIM_WALK_LIST (2 << 4)
967#define RADEON_PRIM_WALK_RING (3 << 4)
968#define RADEON_COLOR_ORDER_BGRA (0 << 6)
969#define RADEON_COLOR_ORDER_RGBA (1 << 6)
970#define RADEON_MAOS_ENABLE (1 << 7)
971#define RADEON_VTX_FMT_R128_MODE (0 << 8)
972#define RADEON_VTX_FMT_RADEON_MODE (1 << 8)
973#define RADEON_NUM_VERTICES_SHIFT 16
974
975#define RADEON_COLOR_FORMAT_CI8 2
976#define RADEON_COLOR_FORMAT_ARGB1555 3
977#define RADEON_COLOR_FORMAT_RGB565 4
978#define RADEON_COLOR_FORMAT_ARGB8888 6
979#define RADEON_COLOR_FORMAT_RGB332 7
980#define RADEON_COLOR_FORMAT_RGB8 9
981#define RADEON_COLOR_FORMAT_ARGB4444 15
982
983#define RADEON_TXFORMAT_I8 0
984#define RADEON_TXFORMAT_AI88 1
985#define RADEON_TXFORMAT_RGB332 2
986#define RADEON_TXFORMAT_ARGB1555 3
987#define RADEON_TXFORMAT_RGB565 4
988#define RADEON_TXFORMAT_ARGB4444 5
989#define RADEON_TXFORMAT_ARGB8888 6
990#define RADEON_TXFORMAT_RGBA8888 7
991#define RADEON_TXFORMAT_Y8 8
992#define RADEON_TXFORMAT_VYUY422 10
993#define RADEON_TXFORMAT_YVYU422 11
994#define RADEON_TXFORMAT_DXT1 12
995#define RADEON_TXFORMAT_DXT23 14
996#define RADEON_TXFORMAT_DXT45 15
997
998#define R200_PP_TXCBLEND_0 0x2f00
999#define R200_PP_TXCBLEND_1 0x2f10
1000#define R200_PP_TXCBLEND_2 0x2f20
1001#define R200_PP_TXCBLEND_3 0x2f30
1002#define R200_PP_TXCBLEND_4 0x2f40
1003#define R200_PP_TXCBLEND_5 0x2f50
1004#define R200_PP_TXCBLEND_6 0x2f60
1005#define R200_PP_TXCBLEND_7 0x2f70
1006#define R200_SE_TCL_LIGHT_MODEL_CTL_0 0x2268
1007#define R200_PP_TFACTOR_0 0x2ee0
1008#define R200_SE_VTX_FMT_0 0x2088
1009#define R200_SE_VAP_CNTL 0x2080
1010#define R200_SE_TCL_MATRIX_SEL_0 0x2230
1011#define R200_SE_TCL_TEX_PROC_CTL_2 0x22a8
1012#define R200_SE_TCL_UCP_VERT_BLEND_CTL 0x22c0
1013#define R200_PP_TXFILTER_5 0x2ca0
1014#define R200_PP_TXFILTER_4 0x2c80
1015#define R200_PP_TXFILTER_3 0x2c60
1016#define R200_PP_TXFILTER_2 0x2c40
1017#define R200_PP_TXFILTER_1 0x2c20
1018#define R200_PP_TXFILTER_0 0x2c00
1019#define R200_PP_TXOFFSET_5 0x2d78
1020#define R200_PP_TXOFFSET_4 0x2d60
1021#define R200_PP_TXOFFSET_3 0x2d48
1022#define R200_PP_TXOFFSET_2 0x2d30
1023#define R200_PP_TXOFFSET_1 0x2d18
1024#define R200_PP_TXOFFSET_0 0x2d00
1025
1026#define R200_PP_CUBIC_FACES_0 0x2c18
1027#define R200_PP_CUBIC_FACES_1 0x2c38
1028#define R200_PP_CUBIC_FACES_2 0x2c58
1029#define R200_PP_CUBIC_FACES_3 0x2c78
1030#define R200_PP_CUBIC_FACES_4 0x2c98
1031#define R200_PP_CUBIC_FACES_5 0x2cb8
1032#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04
1033#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08
1034#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c
1035#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10
1036#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14
1037#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c
1038#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20
1039#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24
1040#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28
1041#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c
1042#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34
1043#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38
1044#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c
1045#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40
1046#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44
1047#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c
1048#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50
1049#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54
1050#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58
1051#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c
1052#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64
1053#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68
1054#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c
1055#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70
1056#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74
1057#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c
1058#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80
1059#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84
1060#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88
1061#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c
1062
1063#define R200_RE_AUX_SCISSOR_CNTL 0x26f0
1064#define R200_SE_VTE_CNTL 0x20b0
1065#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
1066#define R200_PP_TAM_DEBUG3 0x2d9c
1067#define R200_PP_CNTL_X 0x2cc4
1068#define R200_SE_VAP_CNTL_STATUS 0x2140
1069#define R200_RE_SCISSOR_TL_0 0x1cd8
1070#define R200_RE_SCISSOR_TL_1 0x1ce0
1071#define R200_RE_SCISSOR_TL_2 0x1ce8
1072#define R200_RB3D_DEPTHXY_OFFSET 0x1d60
1073#define R200_RE_AUX_SCISSOR_CNTL 0x26f0
1074#define R200_SE_VTX_STATE_CNTL 0x2180
1075#define R200_RE_POINTSIZE 0x2648
1076#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
1077
1078#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
1079#define RADEON_PP_TEX_SIZE_1 0x1d0c
1080#define RADEON_PP_TEX_SIZE_2 0x1d14
1081
1082#define RADEON_PP_CUBIC_FACES_0 0x1d24
1083#define RADEON_PP_CUBIC_FACES_1 0x1d28
1084#define RADEON_PP_CUBIC_FACES_2 0x1d2c
1085#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
1086#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
1087#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
1088
1089#define RADEON_SE_TCL_STATE_FLUSH 0x2284
1090
1091#define SE_VAP_CNTL__TCL_ENA_MASK 0x00000001
1092#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK 0x00010000
1093#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT 0x00000012
1094#define SE_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100
1095#define SE_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200
1096#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK 0x00000001
1097#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK 0x00000002
1098#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT 0x0000000b
1099#define R200_3D_DRAW_IMMD_2 0xC0003500
1100#define R200_SE_VTX_FMT_1 0x208c
1101#define R200_RE_CNTL 0x1c50
1102
1103#define R200_RB3D_BLENDCOLOR 0x3218
1104
1105#define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4
1106
1107#define R200_PP_TRI_PERF 0x2cf8
1108
1109#define R200_PP_AFS_0 0x2f80
1110#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */
1111
1112#define R200_VAP_PVS_CNTL_1 0x22D0
1113
1114#define R500_D1CRTC_STATUS 0x609c
1115#define R500_D2CRTC_STATUS 0x689c
1116#define R500_CRTC_V_BLANK (1<<0)
1117
1118#define R500_D1CRTC_FRAME_COUNT 0x60a4
1119#define R500_D2CRTC_FRAME_COUNT 0x68a4
1120
1121#define R500_D1MODE_V_COUNTER 0x6530
1122#define R500_D2MODE_V_COUNTER 0x6d30
1123
1124#define R500_D1MODE_VBLANK_STATUS 0x6534
1125#define R500_D2MODE_VBLANK_STATUS 0x6d34
1126#define R500_VBLANK_OCCURED (1<<0)
1127#define R500_VBLANK_ACK (1<<4)
1128#define R500_VBLANK_STAT (1<<12)
1129#define R500_VBLANK_INT (1<<16)
1130
1131#define R500_DxMODE_INT_MASK 0x6540
1132#define R500_D1MODE_INT_MASK (1<<0)
1133#define R500_D2MODE_INT_MASK (1<<8)
1134
1135#define R500_DISP_INTERRUPT_STATUS 0x7edc
1136#define R500_D1_VBLANK_INTERRUPT (1 << 4)
1137#define R500_D2_VBLANK_INTERRUPT (1 << 5)
1138
1139/* Constants */
1140#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
1141
1142#define RADEON_LAST_FRAME_REG RADEON_SCRATCH_REG0
1143#define RADEON_LAST_DISPATCH_REG RADEON_SCRATCH_REG1
1144#define RADEON_LAST_CLEAR_REG RADEON_SCRATCH_REG2
1145#define RADEON_LAST_SWI_REG RADEON_SCRATCH_REG3
1146#define RADEON_LAST_DISPATCH 1
1147
1148#define RADEON_MAX_VB_AGE 0x7fffffff
1149#define RADEON_MAX_VB_VERTS (0xffff)
1150
1151#define RADEON_RING_HIGH_MARK 128
1152
1153#define RADEON_PCIGART_TABLE_SIZE (32*1024)
1154
1155#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
1156#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
1157#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
1158#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
1159
/*
 * Write @val to PLL register @addr: select the register through the
 * CLOCK_CNTL index port with the write-enable bit set, then write the
 * data port.
 */
#define RADEON_WRITE_PLL(addr, val)					\
do {									\
	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX,				\
		      ((addr) & 0x1f) | RADEON_PLL_WR_EN);		\
	RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val));			\
} while (0)
1166
/* Write @val to PCIE indirect register @addr via the index/data pair. */
#define RADEON_WRITE_PCIE(addr, val)					\
do {									\
	RADEON_WRITE8(RADEON_PCIE_INDEX, (addr) & 0xff);		\
	RADEON_WRITE(RADEON_PCIE_DATA, (val));				\
} while (0)
1173
/*
 * Write @val to an R5xx memory-controller register @addr through the
 * MC_IND index/data pair.  The 0xff0000 bits presumably enable the
 * write (per R520 MC docs — TODO confirm); the index is cleared again
 * afterwards.
 */
#define R500_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\
	RADEON_WRITE(R520_MC_IND_DATA, (val));				\
	RADEON_WRITE(R520_MC_IND_INDEX, 0);				\
} while (0)
1180
/*
 * Write @val to RS480 northbridge memory-controller register @addr:
 * index with write-enable set, write the data, then reset the index.
 */
#define RS480_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(RS480_NB_MC_INDEX,					\
		     ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);		\
	RADEON_WRITE(RS480_NB_MC_DATA, (val));				\
	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);				\
} while (0)
1188
/*
 * Write @val to RS690 memory-controller register @addr through the
 * MC_INDEX/MC_DATA indirect pair, then issue the write acknowledge.
 *
 * Fix: @val is now parenthesized in the expansion (it was passed bare,
 * which breaks for comma or low-precedence operator arguments).
 */
#define RS690_WRITE_MCIND(addr, val)					\
do {									\
	RADEON_WRITE(RS690_MC_INDEX,					\
		     RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
	RADEON_WRITE(RS690_MC_DATA, (val));				\
	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);		\
} while (0)
1195
/*
 * Dispatch an IGP memory-controller write to the RS690 or RS480
 * flavour, selected by the chip family bits in dev_priv->flags.
 */
#define IGP_WRITE_MCIND(addr, val)					\
do {									\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)	\
		RS690_WRITE_MCIND(addr, val);				\
	else								\
		RS480_WRITE_MCIND(addr, val);				\
} while (0)
1203
/*
 * CP packet header builders.  Register offsets are byte offsets and
 * are encoded as dword indices (>> 2); @n goes into the count field
 * (bits 29:16) — see the CP packet format documentation for its exact
 * meaning per packet type.
 */
#define CP_PACKET0(reg, n)						\
	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE(reg, n)					\
	(RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET1(reg0, reg1)						\
	(RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
#define CP_PACKET2()							\
	(RADEON_CP_PACKET2)
#define CP_PACKET3(pkt, n)						\
	(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
1214
1215/* ================================================================
1216 * Engine control helper macros
1217 */
1218
/* Emit a WAIT_UNTIL packet stalling the CP until the 2D engine is idle. */
#define RADEON_WAIT_UNTIL_2D_IDLE() do {				\
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));			\
	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |				\
		 RADEON_WAIT_HOST_IDLECLEAN);				\
} while (0)

/* Emit a WAIT_UNTIL packet stalling the CP until the 3D engine is idle. */
#define RADEON_WAIT_UNTIL_3D_IDLE() do {				\
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));			\
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN |				\
		 RADEON_WAIT_HOST_IDLECLEAN);				\
} while (0)

/* Emit a WAIT_UNTIL packet stalling until both 2D and 3D are idle. */
#define RADEON_WAIT_UNTIL_IDLE() do {					\
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));			\
	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |				\
		 RADEON_WAIT_3D_IDLECLEAN |				\
		 RADEON_WAIT_HOST_IDLECLEAN);				\
} while (0)

/* Emit a WAIT_UNTIL packet stalling until the CRTC page flip happens. */
#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {				\
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));			\
	OUT_RING(RADEON_WAIT_CRTC_PFLIP);				\
} while (0)
1242
/*
 * Flush the 3D destination (color) cache.  R300+ parts moved the
 * register to a new offset; the flush bit is the same.
 */
#define RADEON_FLUSH_CACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
	} else {							\
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
	}								\
} while (0)
1252
/*
 * Flush and invalidate (purge) the 3D destination cache — like
 * RADEON_FLUSH_CACHE() but with the FLUSH_ALL bits.
 */
#define RADEON_PURGE_CACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);			\
	} else {							\
		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);			\
	}								\
} while (0)
1262
/*
 * Flush the Z (depth) cache.  Pre-R300 parts use the RB3D ZCACHE
 * register; R300+ moved it into the ZB block with its own flush bit.
 */
#define RADEON_FLUSH_ZCACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_ZC_FLUSH);				\
	} else {							\
		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_ZC_FLUSH);				\
	}								\
} while (0)
1272
/*
 * Flush and invalidate (purge) the Z (depth) cache.
 *
 * Fix: the R300+ branch wrote the Z flush-all bits to
 * R300_RB3D_DSTCACHE_CTLSTAT — the *color* destination cache control
 * register — instead of the Z cache register.  Use
 * R300_ZB_ZCACHE_CTLSTAT, matching RADEON_FLUSH_ZCACHE() above.
 */
#define RADEON_PURGE_ZCACHE() do {					\
	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL);			\
	} else {							\
		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
		OUT_RING(R300_ZC_FLUSH_ALL);				\
	}								\
} while (0)
1282
1283/* ================================================================
1284 * Misc helper macros
1285 */
1286
1287/* Perfbox functionality only.
1288 */
/*
 * Perfbox statistics only: despite the historical _WITH_RETURN name
 * this version never returns early — it just records that the ring has
 * drained (head caught up with tail) in the DMA-idle stats box.
 */
#define RING_SPACE_TEST_WITH_RETURN(dev_priv)				\
do {									\
	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) {		\
		u32 _head = GET_RING_HEAD(dev_priv);			\
		if (_head == dev_priv->ring.tail)			\
			dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE;	\
	}								\
} while (0)
1297
/*
 * Guard against vertex-buffer age counter overflow: when the sarea's
 * last_dispatch approaches the cap, idle the CP, reset the age and
 * recycle the buffer freelist.  Returns (from the enclosing function)
 * the idle error code on failure — relies on `dev` being in scope.
 */
#define VB_AGE_TEST_WITH_RETURN(dev_priv)				\
do {									\
	drm_radeon_sarea_t *_sarea = dev_priv->sarea_priv;		\
	if (_sarea->last_dispatch >= RADEON_MAX_VB_AGE) {		\
		int __ret = radeon_do_cp_idle(dev_priv);		\
		if (__ret)						\
			return __ret;					\
		_sarea->last_dispatch = 0;				\
		radeon_freelist_reset(dev);				\
	}								\
} while (0)
1308
/* Tag the last dispatch age via its scratch register. */
#define RADEON_DISPATCH_AGE(age) do {					\
	OUT_RING(CP_PACKET0(RADEON_LAST_DISPATCH_REG, 0));		\
	OUT_RING(age);							\
} while (0)

/* Tag the last frame age via its scratch register. */
#define RADEON_FRAME_AGE(age) do {					\
	OUT_RING(CP_PACKET0(RADEON_LAST_FRAME_REG, 0));			\
	OUT_RING(age);							\
} while (0)

/* Tag the last clear age via its scratch register. */
#define RADEON_CLEAR_AGE(age) do {					\
	OUT_RING(CP_PACKET0(RADEON_LAST_CLEAR_REG, 0));			\
	OUT_RING(age);							\
} while (0)
1323
1324/* ================================================================
1325 * Ring control
1326 */
1327
/* Set to 1 for chatty ring debugging via DRM_INFO. */
#define RADEON_VERBOSE 0

/* Locals required by BEGIN_RING()/OUT_RING()/ADVANCE_RING(). */
#define RING_LOCALS int write, _nr; unsigned int mask; u32 *ring;

/*
 * Reserve @n dwords in the ring.  If the cached space is insufficient,
 * commit pending writes and block in radeon_wait_ring(); then set up
 * the OUT_RING() write cursor (ring base, tail index, wrap mask).
 */
#define BEGIN_RING(n) do {						\
	if (RADEON_VERBOSE) {						\
		DRM_INFO("BEGIN_RING( %d )\n", (n));			\
	}								\
	if (dev_priv->ring.space <= (n) * sizeof(u32)) {		\
		COMMIT_RING();						\
		radeon_wait_ring(dev_priv, (n) * sizeof(u32));		\
	}								\
	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
	ring = dev_priv->ring.start;					\
	write = dev_priv->ring.tail;					\
	mask = dev_priv->ring.tail_mask;				\
} while (0)
1345
/*
 * Close a BEGIN_RING() section: verify that exactly _nr dwords were
 * emitted (log an error and drop the batch on mismatch), otherwise
 * publish the new software tail.  COMMIT_RING() pushes it to hardware.
 */
#define ADVANCE_RING() do {						\
	if (RADEON_VERBOSE) {						\
		DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
			 write, dev_priv->ring.tail);			\
	}								\
	if (((dev_priv->ring.tail + _nr) & mask) != write) {		\
		DRM_ERROR(						\
			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
			((dev_priv->ring.tail + _nr) & mask),		\
			write, __LINE__);				\
	} else								\
		dev_priv->ring.tail = write;				\
} while (0)
1359
/*
 * Hand buffered ring writes to the hardware: memory barrier, refresh
 * the cached head, write the new write pointer, then read RPTR back so
 * the posted PCI write is guaranteed to have landed.
 */
#define COMMIT_RING() do {						\
	/* Flush writes to ring */					\
	DRM_MEMORYBARRIER();						\
	GET_RING_HEAD(dev_priv);					\
	RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail);		\
	/* read from PCI bus to ensure correct posting */		\
	RADEON_READ(RADEON_CP_RB_RPTR);					\
} while (0)
1368
/* Store one dword at the write cursor and advance it, wrapping at mask. */
#define OUT_RING(x) do {						\
	if (RADEON_VERBOSE) {						\
		DRM_INFO("   OUT_RING( 0x%08x ) at 0x%x\n",		\
			 (unsigned int)(x), write);			\
	}								\
	ring[write++] = (x);						\
	write &= mask;							\
} while (0)

/* Emit a single register write as a type-0 packet. */
#define OUT_RING_REG(reg, val) do {					\
	OUT_RING(CP_PACKET0(reg, 0));					\
	OUT_RING(val);							\
} while (0)
1382
/*
 * Copy @sz dwords from @tab into the ring, handling the wrap at the
 * end of the ring buffer.  Assumes BEGIN_RING() already reserved
 * enough space for the whole table.
 *
 * Fix: removed the dead `_tab += _i;` after the wrap loop — the loop
 * only exits once _i has reached 0, and _tab is already advanced by
 * `*_tab++` inside it, so the statement was a guaranteed no-op that
 * only misled readers about the copy arithmetic.
 */
#define OUT_RING_TABLE(tab, sz) do {					\
	int _size = (sz);						\
	int *_tab = (int *)(tab);					\
									\
	if (write + _size > mask) {					\
		int _i = (mask + 1) - write;				\
		_size -= _i;						\
		while (_i > 0) {					\
			*(int *)(ring + write) = *_tab++;		\
			write++;					\
			_i--;						\
		}							\
		write = 0;						\
	}								\
	while (_size > 0) {						\
		*(ring + write) = *_tab++;				\
		write++;						\
		_size--;						\
	}								\
	write &= mask;							\
} while (0)
1405
1406#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
new file mode 100644
index 000000000000..56decda2a71f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -0,0 +1,424 @@
1/**
2 * \file radeon_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the Radeon DRM.
5 *
6 * \author Paul Mackerras <paulus@samba.org>
7 *
8 * Copyright (C) Paul Mackerras 2005
9 * All Rights Reserved.
10 *
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
17 *
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
20 * Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30#include <linux/compat.h>
31
32#include "drmP.h"
33#include "drm.h"
34#include "radeon_drm.h"
35#include "radeon_drv.h"
36
37typedef struct drm_radeon_init32 {
38 int func;
39 u32 sarea_priv_offset;
40 int is_pci;
41 int cp_mode;
42 int gart_size;
43 int ring_size;
44 int usec_timeout;
45
46 unsigned int fb_bpp;
47 unsigned int front_offset, front_pitch;
48 unsigned int back_offset, back_pitch;
49 unsigned int depth_bpp;
50 unsigned int depth_offset, depth_pitch;
51
52 u32 fb_offset;
53 u32 mmio_offset;
54 u32 ring_offset;
55 u32 ring_rptr_offset;
56 u32 buffers_offset;
57 u32 gart_textures_offset;
58} drm_radeon_init32_t;
59
60static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
61 unsigned long arg)
62{
63 drm_radeon_init32_t init32;
64 drm_radeon_init_t __user *init;
65
66 if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
67 return -EFAULT;
68
69 init = compat_alloc_user_space(sizeof(*init));
70 if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
71 || __put_user(init32.func, &init->func)
72 || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
73 || __put_user(init32.is_pci, &init->is_pci)
74 || __put_user(init32.cp_mode, &init->cp_mode)
75 || __put_user(init32.gart_size, &init->gart_size)
76 || __put_user(init32.ring_size, &init->ring_size)
77 || __put_user(init32.usec_timeout, &init->usec_timeout)
78 || __put_user(init32.fb_bpp, &init->fb_bpp)
79 || __put_user(init32.front_offset, &init->front_offset)
80 || __put_user(init32.front_pitch, &init->front_pitch)
81 || __put_user(init32.back_offset, &init->back_offset)
82 || __put_user(init32.back_pitch, &init->back_pitch)
83 || __put_user(init32.depth_bpp, &init->depth_bpp)
84 || __put_user(init32.depth_offset, &init->depth_offset)
85 || __put_user(init32.depth_pitch, &init->depth_pitch)
86 || __put_user(init32.fb_offset, &init->fb_offset)
87 || __put_user(init32.mmio_offset, &init->mmio_offset)
88 || __put_user(init32.ring_offset, &init->ring_offset)
89 || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
90 || __put_user(init32.buffers_offset, &init->buffers_offset)
91 || __put_user(init32.gart_textures_offset,
92 &init->gart_textures_offset))
93 return -EFAULT;
94
95 return drm_ioctl(file->f_path.dentry->d_inode, file,
96 DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
97}
98
99typedef struct drm_radeon_clear32 {
100 unsigned int flags;
101 unsigned int clear_color;
102 unsigned int clear_depth;
103 unsigned int color_mask;
104 unsigned int depth_mask; /* misnamed field: should be stencil */
105 u32 depth_boxes;
106} drm_radeon_clear32_t;
107
108static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
109 unsigned long arg)
110{
111 drm_radeon_clear32_t clr32;
112 drm_radeon_clear_t __user *clr;
113
114 if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
115 return -EFAULT;
116
117 clr = compat_alloc_user_space(sizeof(*clr));
118 if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
119 || __put_user(clr32.flags, &clr->flags)
120 || __put_user(clr32.clear_color, &clr->clear_color)
121 || __put_user(clr32.clear_depth, &clr->clear_depth)
122 || __put_user(clr32.color_mask, &clr->color_mask)
123 || __put_user(clr32.depth_mask, &clr->depth_mask)
124 || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
125 &clr->depth_boxes))
126 return -EFAULT;
127
128 return drm_ioctl(file->f_path.dentry->d_inode, file,
129 DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
130}
131
132typedef struct drm_radeon_stipple32 {
133 u32 mask;
134} drm_radeon_stipple32_t;
135
136static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
137 unsigned long arg)
138{
139 drm_radeon_stipple32_t __user *argp = (void __user *)arg;
140 drm_radeon_stipple_t __user *request;
141 u32 mask;
142
143 if (get_user(mask, &argp->mask))
144 return -EFAULT;
145
146 request = compat_alloc_user_space(sizeof(*request));
147 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
148 || __put_user((unsigned int __user *)(unsigned long)mask,
149 &request->mask))
150 return -EFAULT;
151
152 return drm_ioctl(file->f_path.dentry->d_inode, file,
153 DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
154}
155
156typedef struct drm_radeon_tex_image32 {
157 unsigned int x, y; /* Blit coordinates */
158 unsigned int width, height;
159 u32 data;
160} drm_radeon_tex_image32_t;
161
162typedef struct drm_radeon_texture32 {
163 unsigned int offset;
164 int pitch;
165 int format;
166 int width; /* Texture image coordinates */
167 int height;
168 u32 image;
169} drm_radeon_texture32_t;
170
171static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
172 unsigned long arg)
173{
174 drm_radeon_texture32_t req32;
175 drm_radeon_texture_t __user *request;
176 drm_radeon_tex_image32_t img32;
177 drm_radeon_tex_image_t __user *image;
178
179 if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
180 return -EFAULT;
181 if (req32.image == 0)
182 return -EINVAL;
183 if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
184 sizeof(img32)))
185 return -EFAULT;
186
187 request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
188 if (!access_ok(VERIFY_WRITE, request,
189 sizeof(*request) + sizeof(*image)))
190 return -EFAULT;
191 image = (drm_radeon_tex_image_t __user *) (request + 1);
192
193 if (__put_user(req32.offset, &request->offset)
194 || __put_user(req32.pitch, &request->pitch)
195 || __put_user(req32.format, &request->format)
196 || __put_user(req32.width, &request->width)
197 || __put_user(req32.height, &request->height)
198 || __put_user(image, &request->image)
199 || __put_user(img32.x, &image->x)
200 || __put_user(img32.y, &image->y)
201 || __put_user(img32.width, &image->width)
202 || __put_user(img32.height, &image->height)
203 || __put_user((const void __user *)(unsigned long)img32.data,
204 &image->data))
205 return -EFAULT;
206
207 return drm_ioctl(file->f_path.dentry->d_inode, file,
208 DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
209}
210
/* 32-bit layout of drm_radeon_vertex2_t: user-space pointers are
 * carried as u32 handles instead of native 64-bit pointers.
 */
typedef struct drm_radeon_vertex2_32 {
	int idx;		/* Index of vertex buffer */
	int discard;		/* Client finished with buffer? */
	int nr_states;
	u32 state;		/* 32-bit user pointer to state records */
	int nr_prims;
	u32 prim;		/* 32-bit user pointer to prim records */
} drm_radeon_vertex2_32_t;

/* Translate the 32-bit DRM_RADEON_VERTEX2 ioctl argument into the
 * native layout and forward it to the common ioctl path.
 * Returns 0 on success or -EFAULT on bad user memory.
 */
static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	drm_radeon_vertex2_32_t req32;
	drm_radeon_vertex2_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	/* Build a native-layout copy in compat user space, widening the
	 * 32-bit pointer handles as we go. */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.idx, &request->idx)
	    || __put_user(req32.discard, &request->discard)
	    || __put_user(req32.nr_states, &request->nr_states)
	    || __put_user((void __user *)(unsigned long)req32.state,
			  &request->state)
	    || __put_user(req32.nr_prims, &request->nr_prims)
	    || __put_user((void __user *)(unsigned long)req32.prim,
			  &request->prim))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
}
244
/* 32-bit layout of drm_radeon_cmd_buffer_t: user pointers carried as
 * u32 handles.
 */
typedef struct drm_radeon_cmd_buffer32 {
	int bufsz;
	u32 buf;		/* 32-bit user pointer to command buffer */
	int nbox;
	u32 boxes;		/* 32-bit user pointer to cliprect array */
} drm_radeon_cmd_buffer32_t;

/* Translate the 32-bit DRM_RADEON_CMDBUF ioctl argument into the
 * native layout and forward it to the common ioctl path.
 * Returns 0 on success or -EFAULT on bad user memory.
 */
static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	drm_radeon_cmd_buffer32_t req32;
	drm_radeon_cmd_buffer_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.bufsz, &request->bufsz)
	    || __put_user((void __user *)(unsigned long)req32.buf,
			  &request->buf)
	    || __put_user(req32.nbox, &request->nbox)
	    || __put_user((void __user *)(unsigned long)req32.boxes,
			  &request->boxes))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
}
274
/* 32-bit layout of drm_radeon_getparam_t: the result pointer is a
 * u32 handle.
 */
typedef struct drm_radeon_getparam32 {
	int param;
	u32 value;		/* 32-bit user pointer for the result */
} drm_radeon_getparam32_t;

/* Translate the 32-bit DRM_RADEON_GETPARAM ioctl argument into the
 * native layout and forward it to the common ioctl path.
 * Returns 0 on success or -EFAULT on bad user memory.
 */
static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	drm_radeon_getparam32_t req32;
	drm_radeon_getparam_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
}
299
/* 32-bit layout of drm_radeon_mem_alloc_t: the result pointer is a
 * u32 handle.
 */
typedef struct drm_radeon_mem_alloc32 {
	int region;
	int alignment;
	int size;
	u32 region_offset;	/* offset from start of fb or GART */
} drm_radeon_mem_alloc32_t;

/* Translate the 32-bit DRM_RADEON_ALLOC ioctl argument into the
 * native layout and forward it to the common ioctl path.
 * Returns 0 on success or -EFAULT on bad user memory.
 */
static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	drm_radeon_mem_alloc32_t req32;
	drm_radeon_mem_alloc_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.region, &request->region)
	    || __put_user(req32.alignment, &request->alignment)
	    || __put_user(req32.size, &request->size)
	    || __put_user((int __user *)(unsigned long)req32.region_offset,
			  &request->region_offset))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
}
328
/* 32-bit layout of drm_radeon_irq_emit_t: the sequence-number result
 * pointer is a u32 handle.
 */
typedef struct drm_radeon_irq_emit32 {
	u32 irq_seq;		/* 32-bit user pointer for the result */
} drm_radeon_irq_emit32_t;

/* Translate the 32-bit DRM_RADEON_IRQ_EMIT ioctl argument into the
 * native layout and forward it to the common ioctl path.
 * Returns 0 on success or -EFAULT on bad user memory.
 */
static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_radeon_irq_emit32_t req32;
	drm_radeon_irq_emit_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
			  &request->irq_seq))
		return -EFAULT;

	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
}
351
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
/* Packed 32-bit layout of drm_radeon_setparam_t: on 32-bit x86/ia64
 * a u64 member is only 4-byte aligned, so the native struct layout
 * differs from the 64-bit kernel's.
 */
typedef struct drm_radeon_setparam32 {
	int param;
	u64 value;
} __attribute__((packed)) drm_radeon_setparam32_t;

/* Translate the 32-bit DRM_RADEON_SETPARAM ioctl argument into the
 * native layout and forward it to the common ioctl path.
 * Returns 0 on success or -EFAULT on bad user memory.
 */
static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	drm_radeon_setparam32_t req32;
	drm_radeon_setparam_t __user *request;

	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	/* Use f_path.dentry like every other handler in this file
	 * (f_dentry is only a legacy alias for f_path.dentry). */
	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
}
#else
#define compat_radeon_cp_setparam NULL
#endif /* X86_64 || IA64 */
381
/* Compat conversion handlers, indexed by driver ioctl number.
 * Unlisted entries are NULL and fall through to drm_ioctl()
 * unconverted (their argument layouts match on 32 and 64 bit).
 */
drm_ioctl_compat_t *radeon_compat_ioctls[] = {
	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
	[DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
	[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
	[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
	[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
	[DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
	[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
	[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
};
394
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;
	int ret;

	/* Core (non-driver) ioctls are handled by the generic DRM
	 * compat layer. */
	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
		fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];

	lock_kernel();		/* XXX for now */
	if (fn != NULL)
		ret = (*fn) (filp, cmd, arg);
	else
		/* No conversion table entry: layouts already match. */
		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
	unlock_kernel();

	return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
new file mode 100644
index 000000000000..ee40d197deb7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -0,0 +1,320 @@
1/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
2/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Michel Dänzer <michel@daenzer.net>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "radeon_drm.h"
36#include "radeon_drv.h"
37
/* Read the pending interrupt bits selected by 'mask' from
 * GEN_INT_STATUS and write them back to acknowledge them.
 * Returns the bits that were pending (possibly 0).
 */
static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
					      u32 mask)
{
	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
	if (irqs)
		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
	return irqs;
}
46
47/* Interrupts - Used for device synchronization and flushing in the
48 * following circumstances:
49 *
50 * - Exclusive FB access with hw idle:
51 * - Wait for GUI Idle (?) interrupt, then do normal flush.
52 *
53 * - Frame throttling, NV_fence:
54 * - Drop marker irq's into command stream ahead of time.
55 * - Wait on irq's with lock *not held*
56 * - Check each for termination condition
57 *
58 * - Internally in cp_getbuffer, etc:
59 * - as above, but wait with lock held???
60 *
61 * NOTE: These functions are misleadingly named -- the irq's aren't
62 * tied to dma at all, this is just a hangover from dri prehistory.
63 */
64
/* Top-level interrupt handler: acknowledge the SW and VBLANK sources
 * we own, wake the SW-interrupt waiters, bump the vblank counters and
 * wake/signal vblank waiters.  Returns IRQ_NONE when none of our
 * sources were pending (shared IRQ line).
 */
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	u32 stat;

	/* Only consider the bits we're interested in - others could be used
	 * outside the DRM
	 */
	stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
						  RADEON_CRTC_VBLANK_STAT |
						  RADEON_CRTC2_VBLANK_STAT));
	if (!stat)
		return IRQ_NONE;

	/* Only react to sources we actually enabled. */
	stat &= dev_priv->irq_enable_reg;

	/* SW interrupt */
	if (stat & RADEON_SW_INT_TEST) {
		DRM_WAKEUP(&dev_priv->swi_queue);
	}

	/* VBLANK interrupt */
	if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
		int vblank_crtc = dev_priv->vblank_crtc;

		/* With both CRTCs selected, each keeps its own counter;
		 * otherwise the single selected CRTC feeds vbl_received. */
		if ((vblank_crtc &
		     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
		    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
			if (stat & RADEON_CRTC_VBLANK_STAT)
				atomic_inc(&dev->vbl_received);
			if (stat & RADEON_CRTC2_VBLANK_STAT)
				atomic_inc(&dev->vbl_received2);
		} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
			   (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
			   ((stat & RADEON_CRTC2_VBLANK_STAT) &&
			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
			atomic_inc(&dev->vbl_received);

		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);
	}

	return IRQ_HANDLED;
}
111
/* Emit the next software-interrupt sequence number into the command
 * ring and fire a SW interrupt.  Caller must hold the hardware lock
 * (touches the ring).  Returns the sequence number just emitted.
 */
static int radeon_emit_irq(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int ret;
	RING_LOCALS;

	atomic_inc(&dev_priv->swi_emitted);
	ret = atomic_read(&dev_priv->swi_emitted);

	/* Have the CP write the new sequence number, then raise the
	 * SW interrupt so waiters get woken once it has executed. */
	BEGIN_RING(4);
	OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
	OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
	ADVANCE_RING();
	COMMIT_RING();

	return ret;
}
129
/* Sleep until the hardware has executed software interrupt 'swi_nr'
 * (LAST_SWI_REG has caught up), with a 3 second timeout.
 * Returns 0 on success or a DRM_WAIT_ON error code.
 * NOTE(review): the >= comparison assumes the sequence counter does
 * not wrap while waits are outstanding -- confirm against emitter.
 */
static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
{
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	int ret = 0;

	/* Fast path: already completed, no need to sleep. */
	if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
		return 0;

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
		    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);

	return ret;
}
146
/* Common vblank-wait implementation for both CRTCs: acknowledge any
 * stale vblank status, then sleep (3 s timeout) until the selected
 * CRTC's vblank counter passes *sequence.  On return *sequence holds
 * the current counter value.  Returns 0 or a negative errno.
 */
static int radeon_driver_vblank_do_wait(struct drm_device * dev,
					unsigned int *sequence, int crtc)
{
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;
	int ack = 0;
	atomic_t *counter;
	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	/* Pick the counter and status bit for the requested CRTC. */
	if (crtc == DRM_RADEON_VBLANK_CRTC1) {
		counter = &dev->vbl_received;
		ack |= RADEON_CRTC_VBLANK_STAT;
	} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
		counter = &dev->vbl_received2;
		ack |= RADEON_CRTC2_VBLANK_STAT;
	} else
		return -EINVAL;

	radeon_acknowledge_irqs(dev_priv, ack);

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than she wants to wait for years
	 * using vertical blanks...
	 */
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1 << 23)));

	*sequence = cur_vblank;

	return ret;
}
186
/* DRM entry point: wait for a vblank on CRTC1. */
int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
}
191
/* DRM entry point: wait for a vblank on CRTC2. */
int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
}
196
/* Needs the lock as it touches the ring.
 */
/* DRM_RADEON_IRQ_EMIT ioctl: emit a SW interrupt and copy the new
 * sequence number back to user space via emit->irq_seq.
 * Returns 0, -EINVAL without initialization, or -EFAULT on a bad
 * result pointer.
 */
int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_irq_emit_t *emit = data;
	int result;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	result = radeon_emit_irq(dev);

	/* The emit has already happened; on copy failure the sequence
	 * number is simply lost to the caller. */
	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
221
/* Doesn't need the hardware lock.
 */
/* DRM_RADEON_IRQ_WAIT ioctl: block until the SW interrupt with the
 * given sequence number has been executed by the CP.
 */
int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return radeon_wait_irq(dev, irqwait->irq_seq);
}
236
/* Program GEN_INT_CNTL from scratch: always enable the SW interrupt,
 * plus vblank interrupts for whichever CRTCs are selected in
 * dev_priv->vblank_crtc.  Caches the mask in irq_enable_reg for the
 * handler and marks interrupts enabled.
 */
void radeon_enable_interrupt(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;

	dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
		dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;

	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
		dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;

	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
	dev_priv->irq_enabled = 1;
}
251
/* drm_dma.h hooks
*/
/* Called before the IRQ handler is installed: mask all interrupt
 * sources and clear any status bits left over from before, so the
 * shared IRQ line cannot fire for us prematurely.
 */
void radeon_driver_irq_preinstall(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;

	/* Disable *all* interrupts */
	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);

	/* Clear bits if they're already high */
	radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
					   RADEON_CRTC_VBLANK_STAT |
					   RADEON_CRTC2_VBLANK_STAT));
}
267
/* Called after the IRQ handler is installed: reset the SW-interrupt
 * sequence counter and wait queue, then enable the interrupt sources.
 */
void radeon_driver_irq_postinstall(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;

	atomic_set(&dev_priv->swi_emitted, 0);
	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);

	radeon_enable_interrupt(dev);
}
278
/* Called when the IRQ handler is being removed: mask all interrupt
 * sources.  Safe to call before initialization (dev_priv NULL).
 */
void radeon_driver_irq_uninstall(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv =
	    (drm_radeon_private_t *) dev->dev_private;
	if (!dev_priv)
		return;

	dev_priv->irq_enabled = 0;

	/* Disable *all* interrupts */
	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
291
292
/* Report which CRTCs currently have vblank interrupts enabled, read
 * back from the live GEN_INT_CNTL register.  Returns a mask of
 * DRM_RADEON_VBLANK_CRTC1/2 bits.
 */
int radeon_vblank_crtc_get(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
	u32 flag;
	u32 value;

	flag = RADEON_READ(RADEON_GEN_INT_CNTL);
	value = 0;

	if (flag & RADEON_CRTC_VBLANK_MASK)
		value |= DRM_RADEON_VBLANK_CRTC1;

	if (flag & RADEON_CRTC2_VBLANK_MASK)
		value |= DRM_RADEON_VBLANK_CRTC2;
	return value;
}
309
/* Select which CRTCs should generate vblank interrupts and reprogram
 * the interrupt enables accordingly.  'value' must be a mask of
 * DRM_RADEON_VBLANK_CRTC1/2 bits; anything else yields -EINVAL.
 */
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
{
	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
	if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
		DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
		return -EINVAL;
	}
	dev_priv->vblank_crtc = (unsigned int)value;
	radeon_enable_interrupt(dev);
	return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
new file mode 100644
index 000000000000..4af5286a36fb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -0,0 +1,302 @@
1/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
2/*
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32#include "drmP.h"
33#include "drm.h"
34#include "radeon_drm.h"
35#include "radeon_drv.h"
36
/* Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */

/* Carve a region of exactly 'size' bytes starting at 'start' out of
 * the free block 'p' (caller guarantees the region fits within 'p').
 * Up to two new free blocks are created for the unused head and tail;
 * on drm_alloc failure we simply hand back a larger block than was
 * asked for.  The returned block is marked as owned by 'file_priv'.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;	/* remainder stays free */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;	/* remainder stays free */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
82
/* First-fit allocation of 'size' bytes from 'heap' with 2^align2
 * alignment, owned by 'file_priv'.  Returns NULL when no free block
 * is large enough.
 */
static struct mem_block *alloc_block(struct mem_block *heap, int size,
				     int align2, struct drm_file *file_priv)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	list_for_each(p, heap) {
		/* Round the candidate start up to the alignment. */
		int start = (p->start + mask) & ~mask;
		if (p->file_priv == NULL && start + size <= p->start + p->size)
			return split_block(p, start, size, file_priv);
	}

	return NULL;
}
97
/* Look up the block whose start offset is exactly 'start'; NULL when
 * no such block exists.
 */
static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	list_for_each(p, heap)
	    if (p->start == start)
		return p;

	return NULL;
}
108
109static void free_block(struct mem_block *p)
110{
111 p->file_priv = NULL;
112
113 /* Assumes a single contiguous range. Needs a special file_priv in
114 * 'heap' to stop it being subsumed.
115 */
116 if (p->next->file_priv == NULL) {
117 struct mem_block *q = p->next;
118 p->size += q->size;
119 p->next = q->next;
120 p->next->prev = p;
121 drm_free(q, sizeof(*q), DRM_MEM_BUFS);
122 }
123
124 if (p->prev->file_priv == NULL) {
125 struct mem_block *q = p->prev;
126 q->size += p->size;
127 q->next = p->next;
128 q->next->prev = q;
129 drm_free(p, sizeof(*q), DRM_MEM_BUFS);
130 }
131}
132
/* Initialize. How to check for an uninitialized heap?
 */
/* Create a heap covering [start, start+size): one sentinel node
 * (*heap, file_priv set to a -1 poison value so it is never merged)
 * plus one free block for the whole range.  Returns 0 or -ENOMEM.
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

	if (!blocks)
		return -ENOMEM;

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;	/* the one free block */
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	/* sentinel: a never-NULL owner keeps it out of coalescing */
	(*heap)->file_priv = (struct drm_file *) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
158
159/* Free all blocks associated with the releasing file.
160 */
161void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
162{
163 struct mem_block *p;
164
165 if (!heap || !heap->next)
166 return;
167
168 list_for_each(p, heap) {
169 if (p->file_priv == file_priv)
170 p->file_priv = NULL;
171 }
172
173 /* Assumes a single contiguous range. Needs a special file_priv in
174 * 'heap' to stop it being subsumed.
175 */
176 list_for_each(p, heap) {
177 while (p->file_priv == NULL && p->next->file_priv == NULL) {
178 struct mem_block *q = p->next;
179 p->size += q->size;
180 p->next = q->next;
181 p->next->prev = p;
182 drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
183 }
184 }
185}
186
187/* Shutdown.
188 */
189void radeon_mem_takedown(struct mem_block **heap)
190{
191 struct mem_block *p;
192
193 if (!*heap)
194 return;
195
196 for (p = (*heap)->next; p != *heap;) {
197 struct mem_block *q = p;
198 p = p->next;
199 drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
200 }
201
202 drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
203 *heap = NULL;
204}
205
206/* IOCTL HANDLERS */
207
208static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
209{
210 switch (region) {
211 case RADEON_MEM_REGION_GART:
212 return &dev_priv->gart_heap;
213 case RADEON_MEM_REGION_FB:
214 return &dev_priv->fb_heap;
215 default:
216 return NULL;
217 }
218}
219
220int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
221{
222 drm_radeon_private_t *dev_priv = dev->dev_private;
223 drm_radeon_mem_alloc_t *alloc = data;
224 struct mem_block *block, **heap;
225
226 if (!dev_priv) {
227 DRM_ERROR("called with no initialization\n");
228 return -EINVAL;
229 }
230
231 heap = get_heap(dev_priv, alloc->region);
232 if (!heap || !*heap)
233 return -EFAULT;
234
235 /* Make things easier on ourselves: all allocations at least
236 * 4k aligned.
237 */
238 if (alloc->alignment < 12)
239 alloc->alignment = 12;
240
241 block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
242
243 if (!block)
244 return -ENOMEM;
245
246 if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
247 sizeof(int))) {
248 DRM_ERROR("copy_to_user\n");
249 return -EFAULT;
250 }
251
252 return 0;
253}
254
/* DRM_RADEON_FREE ioctl: free the block at the given region offset.
 * Only the owner (allocating file) may free a block; returns -EPERM
 * otherwise, -EFAULT for an unknown region or offset.
 */
int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_free_t *memfree = data;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, memfree->region);
	if (!heap || !*heap)
		return -EFAULT;

	block = find_block(*heap, memfree->region_offset);
	if (!block)
		return -EFAULT;

	/* Clients may only free their own allocations. */
	if (block->file_priv != file_priv)
		return -EPERM;

	free_block(block);
	return 0;
}
280
/* DRM_RADEON_INIT_HEAP ioctl: initialize the GART or FB heap over the
 * caller-supplied [start, start+size) range.  Fails with -EFAULT if
 * the region is unknown or the heap was already initialized.
 */
int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_mem_init_heap_t *initheap = data;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	heap = get_heap(dev_priv, initheap->region);
	if (!heap)
		return -EFAULT;

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return -EFAULT;
	}

	return init_heap(heap, initheap->start, initheap->size);
}
diff --git a/drivers/gpu/drm/radeon/radeon_microcode.h b/drivers/gpu/drm/radeon/radeon_microcode.h
new file mode 100644
index 000000000000..a348c9e7db1c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_microcode.h
@@ -0,0 +1,1844 @@
1/*
2 * Copyright 2007 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#ifndef RADEON_MICROCODE_H
27#define RADEON_MICROCODE_H
28
29/* production radeon ucode r1xx-r6xx */
/*
 * CP microcode image for R1xx-family chips, as a table of 32-bit word
 * pairs.  This is opaque firmware supplied by AMD -- do not edit the
 * values by hand.
 * NOTE(review): presumably uploaded to the command processor by the
 * driver at engine init -- confirm against the CP setup code.
 * Entries written as 0000000000 are octal literals; numerically zero.
 * The run of all-zero trailing entries looks like padding to a fixed
 * image size -- TODO confirm.
 */
static const u32 R100_cp_microcode[][2] = {
	{ 0x21007000, 0000000000 },
	{ 0x20007000, 0000000000 },
	{ 0x000000b4, 0x00000004 },
	{ 0x000000b8, 0x00000004 },
	{ 0x6f5b4d4c, 0000000000 },
	{ 0x4c4c427f, 0000000000 },
	{ 0x5b568a92, 0000000000 },
	{ 0x4ca09c6d, 0000000000 },
	{ 0xad4c4c4c, 0000000000 },
	{ 0x4ce1af3d, 0000000000 },
	{ 0xd8afafaf, 0000000000 },
	{ 0xd64c4cdc, 0000000000 },
	{ 0x4cd10d10, 0000000000 },
	{ 0x000f0000, 0x00000016 },
	{ 0x362f242d, 0000000000 },
	{ 0x00000012, 0x00000004 },
	{ 0x000f0000, 0x00000016 },
	{ 0x362f282d, 0000000000 },
	{ 0x000380e7, 0x00000002 },
	{ 0x04002c97, 0x00000002 },
	{ 0x000f0001, 0x00000016 },
	{ 0x333a3730, 0000000000 },
	{ 0x000077ef, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x00000021, 0x0000001a },
	{ 0x00004000, 0x0000001e },
	{ 0x00061000, 0x00000002 },
	{ 0x00000021, 0x0000001a },
	{ 0x00004000, 0x0000001e },
	{ 0x00061000, 0x00000002 },
	{ 0x00000021, 0x0000001a },
	{ 0x00004000, 0x0000001e },
	{ 0x00000017, 0x00000004 },
	{ 0x0003802b, 0x00000002 },
	{ 0x040067e0, 0x00000002 },
	{ 0x00000017, 0x00000004 },
	{ 0x000077e0, 0x00000002 },
	{ 0x00065000, 0x00000002 },
	{ 0x000037e1, 0x00000002 },
	{ 0x040067e1, 0x00000006 },
	{ 0x000077e0, 0x00000002 },
	{ 0x000077e1, 0x00000002 },
	{ 0x000077e1, 0x00000006 },
	{ 0xffffffff, 0000000000 },
	{ 0x10000000, 0000000000 },
	{ 0x0003802b, 0x00000002 },
	{ 0x040067e0, 0x00000006 },
	{ 0x00007675, 0x00000002 },
	{ 0x00007676, 0x00000002 },
	{ 0x00007677, 0x00000002 },
	{ 0x00007678, 0x00000006 },
	{ 0x0003802c, 0x00000002 },
	{ 0x04002676, 0x00000002 },
	{ 0x00007677, 0x00000002 },
	{ 0x00007678, 0x00000006 },
	{ 0x0000002f, 0x00000018 },
	{ 0x0000002f, 0x00000018 },
	{ 0000000000, 0x00000006 },
	{ 0x00000030, 0x00000018 },
	{ 0x00000030, 0x00000018 },
	{ 0000000000, 0x00000006 },
	{ 0x01605000, 0x00000002 },
	{ 0x00065000, 0x00000002 },
	{ 0x00098000, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x64c0603e, 0x00000004 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00080000, 0x00000016 },
	{ 0000000000, 0000000000 },
	{ 0x0400251d, 0x00000002 },
	{ 0x00007580, 0x00000002 },
	{ 0x00067581, 0x00000002 },
	{ 0x04002580, 0x00000002 },
	{ 0x00067581, 0x00000002 },
	{ 0x00000049, 0x00000004 },
	{ 0x00005000, 0000000000 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x0000750e, 0x00000002 },
	{ 0x00019000, 0x00000002 },
	{ 0x00011055, 0x00000014 },
	{ 0x00000055, 0x00000012 },
	{ 0x0400250f, 0x00000002 },
	{ 0x0000504f, 0x00000004 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00007565, 0x00000002 },
	{ 0x00007566, 0x00000002 },
	{ 0x00000058, 0x00000004 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x01e655b4, 0x00000002 },
	{ 0x4401b0e4, 0x00000002 },
	{ 0x01c110e4, 0x00000002 },
	{ 0x26667066, 0x00000018 },
	{ 0x040c2565, 0x00000002 },
	{ 0x00000066, 0x00000018 },
	{ 0x04002564, 0x00000002 },
	{ 0x00007566, 0x00000002 },
	{ 0x0000005d, 0x00000004 },
	{ 0x00401069, 0x00000008 },
	{ 0x00101000, 0x00000002 },
	{ 0x000d80ff, 0x00000002 },
	{ 0x0080006c, 0x00000008 },
	{ 0x000f9000, 0x00000002 },
	{ 0x000e00ff, 0x00000002 },
	{ 0000000000, 0x00000006 },
	{ 0x0000008f, 0x00000018 },
	{ 0x0000005b, 0x00000004 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00007576, 0x00000002 },
	{ 0x00065000, 0x00000002 },
	{ 0x00009000, 0x00000002 },
	{ 0x00041000, 0x00000002 },
	{ 0x0c00350e, 0x00000002 },
	{ 0x00049000, 0x00000002 },
	{ 0x00051000, 0x00000002 },
	{ 0x01e785f8, 0x00000002 },
	{ 0x00200000, 0x00000002 },
	{ 0x0060007e, 0x0000000c },
	{ 0x00007563, 0x00000002 },
	{ 0x006075f0, 0x00000021 },
	{ 0x20007073, 0x00000004 },
	{ 0x00005073, 0x00000004 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00007576, 0x00000002 },
	{ 0x00007577, 0x00000002 },
	{ 0x0000750e, 0x00000002 },
	{ 0x0000750f, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x00600083, 0x0000000c },
	{ 0x006075f0, 0x00000021 },
	{ 0x000075f8, 0x00000002 },
	{ 0x00000083, 0x00000004 },
	{ 0x000a750e, 0x00000002 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x0020750f, 0x00000002 },
	{ 0x00600086, 0x00000004 },
	{ 0x00007570, 0x00000002 },
	{ 0x00007571, 0x00000002 },
	{ 0x00007572, 0x00000006 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00005000, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x00007568, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x00000095, 0x0000000c },
	{ 0x00058000, 0x00000002 },
	{ 0x0c607562, 0x00000002 },
	{ 0x00000097, 0x00000004 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x00600096, 0x00000004 },
	{ 0x400070e5, 0000000000 },
	{ 0x000380e6, 0x00000002 },
	{ 0x040025c5, 0x00000002 },
	{ 0x000380e5, 0x00000002 },
	{ 0x000000a8, 0x0000001c },
	{ 0x000650aa, 0x00000018 },
	{ 0x040025bb, 0x00000002 },
	{ 0x000610ab, 0x00000018 },
	{ 0x040075bc, 0000000000 },
	{ 0x000075bb, 0x00000002 },
	{ 0x000075bc, 0000000000 },
	{ 0x00090000, 0x00000006 },
	{ 0x00090000, 0x00000002 },
	{ 0x000d8002, 0x00000006 },
	{ 0x00007832, 0x00000002 },
	{ 0x00005000, 0x00000002 },
	{ 0x000380e7, 0x00000002 },
	{ 0x04002c97, 0x00000002 },
	{ 0x00007820, 0x00000002 },
	{ 0x00007821, 0x00000002 },
	{ 0x00007800, 0000000000 },
	{ 0x01200000, 0x00000002 },
	{ 0x20077000, 0x00000002 },
	{ 0x01200000, 0x00000002 },
	{ 0x20007000, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x0120751b, 0x00000002 },
	{ 0x8040750a, 0x00000002 },
	{ 0x8040750b, 0x00000002 },
	{ 0x00110000, 0x00000002 },
	{ 0x000380e5, 0x00000002 },
	{ 0x000000c6, 0x0000001c },
	{ 0x000610ab, 0x00000018 },
	{ 0x844075bd, 0x00000002 },
	{ 0x000610aa, 0x00000018 },
	{ 0x840075bb, 0x00000002 },
	{ 0x000610ab, 0x00000018 },
	{ 0x844075bc, 0x00000002 },
	{ 0x000000c9, 0x00000004 },
	{ 0x804075bd, 0x00000002 },
	{ 0x800075bb, 0x00000002 },
	{ 0x804075bc, 0x00000002 },
	{ 0x00108000, 0x00000002 },
	{ 0x01400000, 0x00000002 },
	{ 0x006000cd, 0x0000000c },
	{ 0x20c07000, 0x00000020 },
	{ 0x000000cf, 0x00000012 },
	{ 0x00800000, 0x00000006 },
	{ 0x0080751d, 0x00000006 },
	{ 0000000000, 0000000000 },
	{ 0x0000775c, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x00661000, 0x00000002 },
	{ 0x0460275d, 0x00000020 },
	{ 0x00004000, 0000000000 },
	{ 0x01e00830, 0x00000002 },
	{ 0x21007000, 0000000000 },
	{ 0x6464614d, 0000000000 },
	{ 0x69687420, 0000000000 },
	{ 0x00000073, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0x00005000, 0x00000002 },
	{ 0x000380d0, 0x00000002 },
	{ 0x040025e0, 0x00000002 },
	{ 0x000075e1, 0000000000 },
	{ 0x00000001, 0000000000 },
	{ 0x000380e0, 0x00000002 },
	{ 0x04002394, 0x00000002 },
	{ 0x00005000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0x00000008, 0000000000 },
	{ 0x00000004, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
};
288
/*
 * CP microcode image for R2xx-family chips; same { word, word } layout
 * as the R100 table above.  Opaque AMD-supplied firmware -- do not edit
 * the values by hand.
 * NOTE(review): presumably uploaded to the command processor at engine
 * init -- confirm against the CP setup code.
 * Entries written as 0000000000 are octal literals; numerically zero.
 */
static const u32 R200_cp_microcode[][2] = {
	{ 0x21007000, 0000000000 },
	{ 0x20007000, 0000000000 },
	{ 0x000000bf, 0x00000004 },
	{ 0x000000c3, 0x00000004 },
	{ 0x7a685e5d, 0000000000 },
	{ 0x5d5d5588, 0000000000 },
	{ 0x68659197, 0000000000 },
	{ 0x5da19f78, 0000000000 },
	{ 0x5d5d5d5d, 0000000000 },
	{ 0x5dee5d50, 0000000000 },
	{ 0xf2acacac, 0000000000 },
	{ 0xe75df9e9, 0000000000 },
	{ 0xb1dd0e11, 0000000000 },
	{ 0xe2afafaf, 0000000000 },
	{ 0x000f0000, 0x00000016 },
	{ 0x452f232d, 0000000000 },
	{ 0x00000013, 0x00000004 },
	{ 0x000f0000, 0x00000016 },
	{ 0x452f272d, 0000000000 },
	{ 0x000f0001, 0x00000016 },
	{ 0x3e4d4a37, 0000000000 },
	{ 0x000077ef, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x00000020, 0x0000001a },
	{ 0x00004000, 0x0000001e },
	{ 0x00061000, 0x00000002 },
	{ 0x00000020, 0x0000001a },
	{ 0x00004000, 0x0000001e },
	{ 0x00061000, 0x00000002 },
	{ 0x00000020, 0x0000001a },
	{ 0x00004000, 0x0000001e },
	{ 0x00000016, 0x00000004 },
	{ 0x0003802a, 0x00000002 },
	{ 0x040067e0, 0x00000002 },
	{ 0x00000016, 0x00000004 },
	{ 0x000077e0, 0x00000002 },
	{ 0x00065000, 0x00000002 },
	{ 0x000037e1, 0x00000002 },
	{ 0x040067e1, 0x00000006 },
	{ 0x000077e0, 0x00000002 },
	{ 0x000077e1, 0x00000002 },
	{ 0x000077e1, 0x00000006 },
	{ 0xffffffff, 0000000000 },
	{ 0x10000000, 0000000000 },
	{ 0x07f007f0, 0000000000 },
	{ 0x0003802a, 0x00000002 },
	{ 0x040067e0, 0x00000006 },
	{ 0x0003802c, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002743, 0x00000002 },
	{ 0x00007675, 0x00000002 },
	{ 0x00007676, 0x00000002 },
	{ 0x00007677, 0x00000002 },
	{ 0x00007678, 0x00000006 },
	{ 0x0003802c, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002743, 0x00000002 },
	{ 0x00007676, 0x00000002 },
	{ 0x00007677, 0x00000002 },
	{ 0x00007678, 0x00000006 },
	{ 0x0003802b, 0x00000002 },
	{ 0x04002676, 0x00000002 },
	{ 0x00007677, 0x00000002 },
	{ 0x0003802c, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002743, 0x00000002 },
	{ 0x00007678, 0x00000006 },
	{ 0x0003802c, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002741, 0x00000002 },
	{ 0x04002743, 0x00000002 },
	{ 0x00007678, 0x00000006 },
	{ 0x0000002f, 0x00000018 },
	{ 0x0000002f, 0x00000018 },
	{ 0000000000, 0x00000006 },
	{ 0x00000037, 0x00000018 },
	{ 0x00000037, 0x00000018 },
	{ 0000000000, 0x00000006 },
	{ 0x01605000, 0x00000002 },
	{ 0x00065000, 0x00000002 },
	{ 0x00098000, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x64c06051, 0x00000004 },
	{ 0x00080000, 0x00000016 },
	{ 0000000000, 0000000000 },
	{ 0x0400251d, 0x00000002 },
	{ 0x00007580, 0x00000002 },
	{ 0x00067581, 0x00000002 },
	{ 0x04002580, 0x00000002 },
	{ 0x00067581, 0x00000002 },
	{ 0x0000005a, 0x00000004 },
	{ 0x00005000, 0000000000 },
	{ 0x00061000, 0x00000002 },
	{ 0x0000750e, 0x00000002 },
	{ 0x00019000, 0x00000002 },
	{ 0x00011064, 0x00000014 },
	{ 0x00000064, 0x00000012 },
	{ 0x0400250f, 0x00000002 },
	{ 0x0000505e, 0x00000004 },
	{ 0x00007565, 0x00000002 },
	{ 0x00007566, 0x00000002 },
	{ 0x00000065, 0x00000004 },
	{ 0x01e655b4, 0x00000002 },
	{ 0x4401b0f0, 0x00000002 },
	{ 0x01c110f0, 0x00000002 },
	{ 0x26667071, 0x00000018 },
	{ 0x040c2565, 0x00000002 },
	{ 0x00000071, 0x00000018 },
	{ 0x04002564, 0x00000002 },
	{ 0x00007566, 0x00000002 },
	{ 0x00000068, 0x00000004 },
	{ 0x00401074, 0x00000008 },
	{ 0x00101000, 0x00000002 },
	{ 0x000d80ff, 0x00000002 },
	{ 0x00800077, 0x00000008 },
	{ 0x000f9000, 0x00000002 },
	{ 0x000e00ff, 0x00000002 },
	{ 0000000000, 0x00000006 },
	{ 0x00000094, 0x00000018 },
	{ 0x00000068, 0x00000004 },
	{ 0x00007576, 0x00000002 },
	{ 0x00065000, 0x00000002 },
	{ 0x00009000, 0x00000002 },
	{ 0x00041000, 0x00000002 },
	{ 0x0c00350e, 0x00000002 },
	{ 0x00049000, 0x00000002 },
	{ 0x00051000, 0x00000002 },
	{ 0x01e785f8, 0x00000002 },
	{ 0x00200000, 0x00000002 },
	{ 0x00600087, 0x0000000c },
	{ 0x00007563, 0x00000002 },
	{ 0x006075f0, 0x00000021 },
	{ 0x2000707c, 0x00000004 },
	{ 0x0000507c, 0x00000004 },
	{ 0x00007576, 0x00000002 },
	{ 0x00007577, 0x00000002 },
	{ 0x0000750e, 0x00000002 },
	{ 0x0000750f, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x0060008a, 0x0000000c },
	{ 0x006075f0, 0x00000021 },
	{ 0x000075f8, 0x00000002 },
	{ 0x0000008a, 0x00000004 },
	{ 0x000a750e, 0x00000002 },
	{ 0x0020750f, 0x00000002 },
	{ 0x0060008d, 0x00000004 },
	{ 0x00007570, 0x00000002 },
	{ 0x00007571, 0x00000002 },
	{ 0x00007572, 0x00000006 },
	{ 0x00005000, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x00007568, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x00000098, 0x0000000c },
	{ 0x00058000, 0x00000002 },
	{ 0x0c607562, 0x00000002 },
	{ 0x0000009a, 0x00000004 },
	{ 0x00600099, 0x00000004 },
	{ 0x400070f1, 0000000000 },
	{ 0x000380f1, 0x00000002 },
	{ 0x000000a7, 0x0000001c },
	{ 0x000650a9, 0x00000018 },
	{ 0x040025bb, 0x00000002 },
	{ 0x000610aa, 0x00000018 },
	{ 0x040075bc, 0000000000 },
	{ 0x000075bb, 0x00000002 },
	{ 0x000075bc, 0000000000 },
	{ 0x00090000, 0x00000006 },
	{ 0x00090000, 0x00000002 },
	{ 0x000d8002, 0x00000006 },
	{ 0x00005000, 0x00000002 },
	{ 0x00007821, 0x00000002 },
	{ 0x00007800, 0000000000 },
	{ 0x00007821, 0x00000002 },
	{ 0x00007800, 0000000000 },
	{ 0x01665000, 0x00000002 },
	{ 0x000a0000, 0x00000002 },
	{ 0x000671cc, 0x00000002 },
	{ 0x0286f1cd, 0x00000002 },
	{ 0x000000b7, 0x00000010 },
	{ 0x21007000, 0000000000 },
	{ 0x000000be, 0x0000001c },
	{ 0x00065000, 0x00000002 },
	{ 0x000a0000, 0x00000002 },
	{ 0x00061000, 0x00000002 },
	{ 0x000b0000, 0x00000002 },
	{ 0x38067000, 0x00000002 },
	{ 0x000a00ba, 0x00000004 },
	{ 0x20007000, 0000000000 },
	{ 0x01200000, 0x00000002 },
	{ 0x20077000, 0x00000002 },
	{ 0x01200000, 0x00000002 },
	{ 0x20007000, 0000000000 },
	{ 0x00061000, 0x00000002 },
	{ 0x0120751b, 0x00000002 },
	{ 0x8040750a, 0x00000002 },
	{ 0x8040750b, 0x00000002 },
	{ 0x00110000, 0x00000002 },
	{ 0x000380f1, 0x00000002 },
	{ 0x000000d1, 0x0000001c },
	{ 0x000610aa, 0x00000018 },
	{ 0x844075bd, 0x00000002 },
	{ 0x000610a9, 0x00000018 },
	{ 0x840075bb, 0x00000002 },
	{ 0x000610aa, 0x00000018 },
	{ 0x844075bc, 0x00000002 },
	{ 0x000000d4, 0x00000004 },
	{ 0x804075bd, 0x00000002 },
	{ 0x800075bb, 0x00000002 },
	{ 0x804075bc, 0x00000002 },
	{ 0x00108000, 0x00000002 },
	{ 0x01400000, 0x00000002 },
	{ 0x006000d8, 0x0000000c },
	{ 0x20c07000, 0x00000020 },
	{ 0x000000da, 0x00000012 },
	{ 0x00800000, 0x00000006 },
	{ 0x0080751d, 0x00000006 },
	{ 0x000025bb, 0x00000002 },
	{ 0x000040d4, 0x00000004 },
	{ 0x0000775c, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x00661000, 0x00000002 },
	{ 0x0460275d, 0x00000020 },
	{ 0x00004000, 0000000000 },
	{ 0x00007999, 0x00000002 },
	{ 0x00a05000, 0x00000002 },
	{ 0x00661000, 0x00000002 },
	{ 0x0460299b, 0x00000020 },
	{ 0x00004000, 0000000000 },
	{ 0x01e00830, 0x00000002 },
	{ 0x21007000, 0000000000 },
	{ 0x00005000, 0x00000002 },
	{ 0x00038056, 0x00000002 },
	{ 0x040025e0, 0x00000002 },
	{ 0x000075e1, 0000000000 },
	{ 0x00000001, 0000000000 },
	{ 0x000380ed, 0x00000002 },
	{ 0x04007394, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0x000078c4, 0x00000002 },
	{ 0x000078c5, 0x00000002 },
	{ 0x000078c6, 0x00000002 },
	{ 0x00007924, 0x00000002 },
	{ 0x00007925, 0x00000002 },
	{ 0x00007926, 0x00000002 },
	{ 0x000000f2, 0x00000004 },
	{ 0x00007924, 0x00000002 },
	{ 0x00007925, 0x00000002 },
	{ 0x00007926, 0x00000002 },
	{ 0x000000f9, 0x00000004 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
};
547
/*
 * CP microcode image for R3xx-family chips; same { word, word } layout
 * as the tables above.  Opaque AMD-supplied firmware -- do not edit the
 * values by hand.
 * NOTE(review): presumably uploaded to the command processor at engine
 * init -- confirm against the CP setup code.
 * Entries written as 0000000000 are octal literals; numerically zero.
 */
static const u32 R300_cp_microcode[][2] = {
	{ 0x4200e000, 0000000000 },
	{ 0x4000e000, 0000000000 },
	{ 0x000000ae, 0x00000008 },
	{ 0x000000b2, 0x00000008 },
	{ 0x67554b4a, 0000000000 },
	{ 0x4a4a4475, 0000000000 },
	{ 0x55527d83, 0000000000 },
	{ 0x4a8c8b65, 0000000000 },
	{ 0x4aef4af6, 0000000000 },
	{ 0x4ae14a4a, 0000000000 },
	{ 0xe4979797, 0000000000 },
	{ 0xdb4aebdd, 0000000000 },
	{ 0x9ccc4a4a, 0000000000 },
	{ 0xd1989898, 0000000000 },
	{ 0x4a0f9ad6, 0000000000 },
	{ 0x000ca000, 0x00000004 },
	{ 0x000d0012, 0x00000038 },
	{ 0x0000e8b4, 0x00000004 },
	{ 0x000d0014, 0x00000038 },
	{ 0x0000e8b6, 0x00000004 },
	{ 0x000d0016, 0x00000038 },
	{ 0x0000e854, 0x00000004 },
	{ 0x000d0018, 0x00000038 },
	{ 0x0000e855, 0x00000004 },
	{ 0x000d001a, 0x00000038 },
	{ 0x0000e856, 0x00000004 },
	{ 0x000d001c, 0x00000038 },
	{ 0x0000e857, 0x00000004 },
	{ 0x000d001e, 0x00000038 },
	{ 0x0000e824, 0x00000004 },
	{ 0x000d0020, 0x00000038 },
	{ 0x0000e825, 0x00000004 },
	{ 0x000d0022, 0x00000038 },
	{ 0x0000e830, 0x00000004 },
	{ 0x000d0024, 0x00000038 },
	{ 0x0000f0c0, 0x00000004 },
	{ 0x000d0026, 0x00000038 },
	{ 0x0000f0c1, 0x00000004 },
	{ 0x000d0028, 0x00000038 },
	{ 0x0000f041, 0x00000004 },
	{ 0x000d002a, 0x00000038 },
	{ 0x0000f184, 0x00000004 },
	{ 0x000d002c, 0x00000038 },
	{ 0x0000f185, 0x00000004 },
	{ 0x000d002e, 0x00000038 },
	{ 0x0000f186, 0x00000004 },
	{ 0x000d0030, 0x00000038 },
	{ 0x0000f187, 0x00000004 },
	{ 0x000d0032, 0x00000038 },
	{ 0x0000f180, 0x00000004 },
	{ 0x000d0034, 0x00000038 },
	{ 0x0000f393, 0x00000004 },
	{ 0x000d0036, 0x00000038 },
	{ 0x0000f38a, 0x00000004 },
	{ 0x000d0038, 0x00000038 },
	{ 0x0000f38e, 0x00000004 },
	{ 0x0000e821, 0x00000004 },
	{ 0x0140a000, 0x00000004 },
	{ 0x00000043, 0x00000018 },
	{ 0x00cce800, 0x00000004 },
	{ 0x001b0001, 0x00000004 },
	{ 0x08004800, 0x00000004 },
	{ 0x001b0001, 0x00000004 },
	{ 0x08004800, 0x00000004 },
	{ 0x001b0001, 0x00000004 },
	{ 0x08004800, 0x00000004 },
	{ 0x0000003a, 0x00000008 },
	{ 0x0000a000, 0000000000 },
	{ 0x2000451d, 0x00000004 },
	{ 0x0000e580, 0x00000004 },
	{ 0x000ce581, 0x00000004 },
	{ 0x08004580, 0x00000004 },
	{ 0x000ce581, 0x00000004 },
	{ 0x00000047, 0x00000008 },
	{ 0x0000a000, 0000000000 },
	{ 0x000c2000, 0x00000004 },
	{ 0x0000e50e, 0x00000004 },
	{ 0x00032000, 0x00000004 },
	{ 0x00022051, 0x00000028 },
	{ 0x00000051, 0x00000024 },
	{ 0x0800450f, 0x00000004 },
	{ 0x0000a04b, 0x00000008 },
	{ 0x0000e565, 0x00000004 },
	{ 0x0000e566, 0x00000004 },
	{ 0x00000052, 0x00000008 },
	{ 0x03cca5b4, 0x00000004 },
	{ 0x05432000, 0x00000004 },
	{ 0x00022000, 0x00000004 },
	{ 0x4ccce05e, 0x00000030 },
	{ 0x08274565, 0x00000004 },
	{ 0x0000005e, 0x00000030 },
	{ 0x08004564, 0x00000004 },
	{ 0x0000e566, 0x00000004 },
	{ 0x00000055, 0x00000008 },
	{ 0x00802061, 0x00000010 },
	{ 0x00202000, 0x00000004 },
	{ 0x001b00ff, 0x00000004 },
	{ 0x01000064, 0x00000010 },
	{ 0x001f2000, 0x00000004 },
	{ 0x001c00ff, 0x00000004 },
	{ 0000000000, 0x0000000c },
	{ 0x00000080, 0x00000030 },
	{ 0x00000055, 0x00000008 },
	{ 0x0000e576, 0x00000004 },
	{ 0x000ca000, 0x00000004 },
	{ 0x00012000, 0x00000004 },
	{ 0x00082000, 0x00000004 },
	{ 0x1800650e, 0x00000004 },
	{ 0x00092000, 0x00000004 },
	{ 0x000a2000, 0x00000004 },
	{ 0x000f0000, 0x00000004 },
	{ 0x00400000, 0x00000004 },
	{ 0x00000074, 0x00000018 },
	{ 0x0000e563, 0x00000004 },
	{ 0x00c0e5f9, 0x000000c2 },
	{ 0x00000069, 0x00000008 },
	{ 0x0000a069, 0x00000008 },
	{ 0x0000e576, 0x00000004 },
	{ 0x0000e577, 0x00000004 },
	{ 0x0000e50e, 0x00000004 },
	{ 0x0000e50f, 0x00000004 },
	{ 0x0140a000, 0x00000004 },
	{ 0x00000077, 0x00000018 },
	{ 0x00c0e5f9, 0x000000c2 },
	{ 0x00000077, 0x00000008 },
	{ 0x0014e50e, 0x00000004 },
	{ 0x0040e50f, 0x00000004 },
	{ 0x00c0007a, 0x00000008 },
	{ 0x0000e570, 0x00000004 },
	{ 0x0000e571, 0x00000004 },
	{ 0x0000e572, 0x0000000c },
	{ 0x0000a000, 0x00000004 },
	{ 0x0140a000, 0x00000004 },
	{ 0x0000e568, 0x00000004 },
	{ 0x000c2000, 0x00000004 },
	{ 0x00000084, 0x00000018 },
	{ 0x000b0000, 0x00000004 },
	{ 0x18c0e562, 0x00000004 },
	{ 0x00000086, 0x00000008 },
	{ 0x00c00085, 0x00000008 },
	{ 0x000700e3, 0x00000004 },
	{ 0x00000092, 0x00000038 },
	{ 0x000ca094, 0x00000030 },
	{ 0x080045bb, 0x00000004 },
	{ 0x000c2095, 0x00000030 },
	{ 0x0800e5bc, 0000000000 },
	{ 0x0000e5bb, 0x00000004 },
	{ 0x0000e5bc, 0000000000 },
	{ 0x00120000, 0x0000000c },
	{ 0x00120000, 0x00000004 },
	{ 0x001b0002, 0x0000000c },
	{ 0x0000a000, 0x00000004 },
	{ 0x0000e821, 0x00000004 },
	{ 0x0000e800, 0000000000 },
	{ 0x0000e821, 0x00000004 },
	{ 0x0000e82e, 0000000000 },
	{ 0x02cca000, 0x00000004 },
	{ 0x00140000, 0x00000004 },
	{ 0x000ce1cc, 0x00000004 },
	{ 0x050de1cd, 0x00000004 },
	{ 0x00400000, 0x00000004 },
	{ 0x000000a4, 0x00000018 },
	{ 0x00c0a000, 0x00000004 },
	{ 0x000000a1, 0x00000008 },
	{ 0x000000a6, 0x00000020 },
	{ 0x4200e000, 0000000000 },
	{ 0x000000ad, 0x00000038 },
	{ 0x000ca000, 0x00000004 },
	{ 0x00140000, 0x00000004 },
	{ 0x000c2000, 0x00000004 },
	{ 0x00160000, 0x00000004 },
	{ 0x700ce000, 0x00000004 },
	{ 0x001400a9, 0x00000008 },
	{ 0x4000e000, 0000000000 },
	{ 0x02400000, 0x00000004 },
	{ 0x400ee000, 0x00000004 },
	{ 0x02400000, 0x00000004 },
	{ 0x4000e000, 0000000000 },
	{ 0x000c2000, 0x00000004 },
	{ 0x0240e51b, 0x00000004 },
	{ 0x0080e50a, 0x00000005 },
	{ 0x0080e50b, 0x00000005 },
	{ 0x00220000, 0x00000004 },
	{ 0x000700e3, 0x00000004 },
	{ 0x000000c0, 0x00000038 },
	{ 0x000c2095, 0x00000030 },
	{ 0x0880e5bd, 0x00000005 },
	{ 0x000c2094, 0x00000030 },
	{ 0x0800e5bb, 0x00000005 },
	{ 0x000c2095, 0x00000030 },
	{ 0x0880e5bc, 0x00000005 },
	{ 0x000000c3, 0x00000008 },
	{ 0x0080e5bd, 0x00000005 },
	{ 0x0000e5bb, 0x00000005 },
	{ 0x0080e5bc, 0x00000005 },
	{ 0x00210000, 0x00000004 },
	{ 0x02800000, 0x00000004 },
	{ 0x00c000c7, 0x00000018 },
	{ 0x4180e000, 0x00000040 },
	{ 0x000000c9, 0x00000024 },
	{ 0x01000000, 0x0000000c },
	{ 0x0100e51d, 0x0000000c },
	{ 0x000045bb, 0x00000004 },
	{ 0x000080c3, 0x00000008 },
	{ 0x0000f3ce, 0x00000004 },
	{ 0x0140a000, 0x00000004 },
	{ 0x00cc2000, 0x00000004 },
	{ 0x08c053cf, 0x00000040 },
	{ 0x00008000, 0000000000 },
	{ 0x0000f3d2, 0x00000004 },
	{ 0x0140a000, 0x00000004 },
	{ 0x00cc2000, 0x00000004 },
	{ 0x08c053d3, 0x00000040 },
	{ 0x00008000, 0000000000 },
	{ 0x0000f39d, 0x00000004 },
	{ 0x0140a000, 0x00000004 },
	{ 0x00cc2000, 0x00000004 },
	{ 0x08c0539e, 0x00000040 },
	{ 0x00008000, 0000000000 },
	{ 0x03c00830, 0x00000004 },
	{ 0x4200e000, 0000000000 },
	{ 0x0000a000, 0x00000004 },
	{ 0x200045e0, 0x00000004 },
	{ 0x0000e5e1, 0000000000 },
	{ 0x00000001, 0000000000 },
	{ 0x000700e0, 0x00000004 },
	{ 0x0800e394, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0x0000e8c4, 0x00000004 },
	{ 0x0000e8c5, 0x00000004 },
	{ 0x0000e8c6, 0x00000004 },
	{ 0x0000e928, 0x00000004 },
	{ 0x0000e929, 0x00000004 },
	{ 0x0000e92a, 0x00000004 },
	{ 0x000000e4, 0x00000008 },
	{ 0x0000e928, 0x00000004 },
	{ 0x0000e929, 0x00000004 },
	{ 0x0000e92a, 0x00000004 },
	{ 0x000000eb, 0x00000008 },
	{ 0x02c02000, 0x00000004 },
	{ 0x00060000, 0x00000004 },
	{ 0x000000f3, 0x00000034 },
	{ 0x000000f0, 0x00000008 },
	{ 0x00008000, 0x00000004 },
	{ 0xc000e000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0x000c2000, 0x00000004 },
	{ 0x001d0018, 0x00000004 },
	{ 0x001a0001, 0x00000004 },
	{ 0x000000fb, 0x00000034 },
	{ 0x0000004a, 0x00000008 },
	{ 0x0500a04a, 0x00000008 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
	{ 0000000000, 0000000000 },
};
806
807static const u32 R420_cp_microcode[][2] = {
808 { 0x4200e000, 0000000000 },
809 { 0x4000e000, 0000000000 },
810 { 0x00000099, 0x00000008 },
811 { 0x0000009d, 0x00000008 },
812 { 0x4a554b4a, 0000000000 },
813 { 0x4a4a4467, 0000000000 },
814 { 0x55526f75, 0000000000 },
815 { 0x4a7e7d65, 0000000000 },
816 { 0xd9d3dff6, 0000000000 },
817 { 0x4ac54a4a, 0000000000 },
818 { 0xc8828282, 0000000000 },
819 { 0xbf4acfc1, 0000000000 },
820 { 0x87b04a4a, 0000000000 },
821 { 0xb5838383, 0000000000 },
822 { 0x4a0f85ba, 0000000000 },
823 { 0x000ca000, 0x00000004 },
824 { 0x000d0012, 0x00000038 },
825 { 0x0000e8b4, 0x00000004 },
826 { 0x000d0014, 0x00000038 },
827 { 0x0000e8b6, 0x00000004 },
828 { 0x000d0016, 0x00000038 },
829 { 0x0000e854, 0x00000004 },
830 { 0x000d0018, 0x00000038 },
831 { 0x0000e855, 0x00000004 },
832 { 0x000d001a, 0x00000038 },
833 { 0x0000e856, 0x00000004 },
834 { 0x000d001c, 0x00000038 },
835 { 0x0000e857, 0x00000004 },
836 { 0x000d001e, 0x00000038 },
837 { 0x0000e824, 0x00000004 },
838 { 0x000d0020, 0x00000038 },
839 { 0x0000e825, 0x00000004 },
840 { 0x000d0022, 0x00000038 },
841 { 0x0000e830, 0x00000004 },
842 { 0x000d0024, 0x00000038 },
843 { 0x0000f0c0, 0x00000004 },
844 { 0x000d0026, 0x00000038 },
845 { 0x0000f0c1, 0x00000004 },
846 { 0x000d0028, 0x00000038 },
847 { 0x0000f041, 0x00000004 },
848 { 0x000d002a, 0x00000038 },
849 { 0x0000f184, 0x00000004 },
850 { 0x000d002c, 0x00000038 },
851 { 0x0000f185, 0x00000004 },
852 { 0x000d002e, 0x00000038 },
853 { 0x0000f186, 0x00000004 },
854 { 0x000d0030, 0x00000038 },
855 { 0x0000f187, 0x00000004 },
856 { 0x000d0032, 0x00000038 },
857 { 0x0000f180, 0x00000004 },
858 { 0x000d0034, 0x00000038 },
859 { 0x0000f393, 0x00000004 },
860 { 0x000d0036, 0x00000038 },
861 { 0x0000f38a, 0x00000004 },
862 { 0x000d0038, 0x00000038 },
863 { 0x0000f38e, 0x00000004 },
864 { 0x0000e821, 0x00000004 },
865 { 0x0140a000, 0x00000004 },
866 { 0x00000043, 0x00000018 },
867 { 0x00cce800, 0x00000004 },
868 { 0x001b0001, 0x00000004 },
869 { 0x08004800, 0x00000004 },
870 { 0x001b0001, 0x00000004 },
871 { 0x08004800, 0x00000004 },
872 { 0x001b0001, 0x00000004 },
873 { 0x08004800, 0x00000004 },
874 { 0x0000003a, 0x00000008 },
875 { 0x0000a000, 0000000000 },
876 { 0x2000451d, 0x00000004 },
877 { 0x0000e580, 0x00000004 },
878 { 0x000ce581, 0x00000004 },
879 { 0x08004580, 0x00000004 },
880 { 0x000ce581, 0x00000004 },
881 { 0x00000047, 0x00000008 },
882 { 0x0000a000, 0000000000 },
883 { 0x000c2000, 0x00000004 },
884 { 0x0000e50e, 0x00000004 },
885 { 0x00032000, 0x00000004 },
886 { 0x00022051, 0x00000028 },
887 { 0x00000051, 0x00000024 },
888 { 0x0800450f, 0x00000004 },
889 { 0x0000a04b, 0x00000008 },
890 { 0x0000e565, 0x00000004 },
891 { 0x0000e566, 0x00000004 },
892 { 0x00000052, 0x00000008 },
893 { 0x03cca5b4, 0x00000004 },
894 { 0x05432000, 0x00000004 },
895 { 0x00022000, 0x00000004 },
896 { 0x4ccce05e, 0x00000030 },
897 { 0x08274565, 0x00000004 },
898 { 0x0000005e, 0x00000030 },
899 { 0x08004564, 0x00000004 },
900 { 0x0000e566, 0x00000004 },
901 { 0x00000055, 0x00000008 },
902 { 0x00802061, 0x00000010 },
903 { 0x00202000, 0x00000004 },
904 { 0x001b00ff, 0x00000004 },
905 { 0x01000064, 0x00000010 },
906 { 0x001f2000, 0x00000004 },
907 { 0x001c00ff, 0x00000004 },
908 { 0000000000, 0x0000000c },
909 { 0x00000072, 0x00000030 },
910 { 0x00000055, 0x00000008 },
911 { 0x0000e576, 0x00000004 },
912 { 0x0000e577, 0x00000004 },
913 { 0x0000e50e, 0x00000004 },
914 { 0x0000e50f, 0x00000004 },
915 { 0x0140a000, 0x00000004 },
916 { 0x00000069, 0x00000018 },
917 { 0x00c0e5f9, 0x000000c2 },
918 { 0x00000069, 0x00000008 },
919 { 0x0014e50e, 0x00000004 },
920 { 0x0040e50f, 0x00000004 },
921 { 0x00c0006c, 0x00000008 },
922 { 0x0000e570, 0x00000004 },
923 { 0x0000e571, 0x00000004 },
924 { 0x0000e572, 0x0000000c },
925 { 0x0000a000, 0x00000004 },
926 { 0x0140a000, 0x00000004 },
927 { 0x0000e568, 0x00000004 },
928 { 0x000c2000, 0x00000004 },
929 { 0x00000076, 0x00000018 },
930 { 0x000b0000, 0x00000004 },
931 { 0x18c0e562, 0x00000004 },
932 { 0x00000078, 0x00000008 },
933 { 0x00c00077, 0x00000008 },
934 { 0x000700c7, 0x00000004 },
935 { 0x00000080, 0x00000038 },
936 { 0x0000e5bb, 0x00000004 },
937 { 0x0000e5bc, 0000000000 },
938 { 0x0000a000, 0x00000004 },
939 { 0x0000e821, 0x00000004 },
940 { 0x0000e800, 0000000000 },
941 { 0x0000e821, 0x00000004 },
942 { 0x0000e82e, 0000000000 },
943 { 0x02cca000, 0x00000004 },
944 { 0x00140000, 0x00000004 },
945 { 0x000ce1cc, 0x00000004 },
946 { 0x050de1cd, 0x00000004 },
947 { 0x00400000, 0x00000004 },
948 { 0x0000008f, 0x00000018 },
949 { 0x00c0a000, 0x00000004 },
950 { 0x0000008c, 0x00000008 },
951 { 0x00000091, 0x00000020 },
952 { 0x4200e000, 0000000000 },
953 { 0x00000098, 0x00000038 },
954 { 0x000ca000, 0x00000004 },
955 { 0x00140000, 0x00000004 },
956 { 0x000c2000, 0x00000004 },
957 { 0x00160000, 0x00000004 },
958 { 0x700ce000, 0x00000004 },
959 { 0x00140094, 0x00000008 },
960 { 0x4000e000, 0000000000 },
961 { 0x02400000, 0x00000004 },
962 { 0x400ee000, 0x00000004 },
963 { 0x02400000, 0x00000004 },
964 { 0x4000e000, 0000000000 },
965 { 0x000c2000, 0x00000004 },
966 { 0x0240e51b, 0x00000004 },
967 { 0x0080e50a, 0x00000005 },
968 { 0x0080e50b, 0x00000005 },
969 { 0x00220000, 0x00000004 },
970 { 0x000700c7, 0x00000004 },
971 { 0x000000a4, 0x00000038 },
972 { 0x0080e5bd, 0x00000005 },
973 { 0x0000e5bb, 0x00000005 },
974 { 0x0080e5bc, 0x00000005 },
975 { 0x00210000, 0x00000004 },
976 { 0x02800000, 0x00000004 },
977 { 0x00c000ab, 0x00000018 },
978 { 0x4180e000, 0x00000040 },
979 { 0x000000ad, 0x00000024 },
980 { 0x01000000, 0x0000000c },
981 { 0x0100e51d, 0x0000000c },
982 { 0x000045bb, 0x00000004 },
983 { 0x000080a7, 0x00000008 },
984 { 0x0000f3ce, 0x00000004 },
985 { 0x0140a000, 0x00000004 },
986 { 0x00cc2000, 0x00000004 },
987 { 0x08c053cf, 0x00000040 },
988 { 0x00008000, 0000000000 },
989 { 0x0000f3d2, 0x00000004 },
990 { 0x0140a000, 0x00000004 },
991 { 0x00cc2000, 0x00000004 },
992 { 0x08c053d3, 0x00000040 },
993 { 0x00008000, 0000000000 },
994 { 0x0000f39d, 0x00000004 },
995 { 0x0140a000, 0x00000004 },
996 { 0x00cc2000, 0x00000004 },
997 { 0x08c0539e, 0x00000040 },
998 { 0x00008000, 0000000000 },
999 { 0x03c00830, 0x00000004 },
1000 { 0x4200e000, 0000000000 },
1001 { 0x0000a000, 0x00000004 },
1002 { 0x200045e0, 0x00000004 },
1003 { 0x0000e5e1, 0000000000 },
1004 { 0x00000001, 0000000000 },
1005 { 0x000700c4, 0x00000004 },
1006 { 0x0800e394, 0000000000 },
1007 { 0000000000, 0000000000 },
1008 { 0x0000e8c4, 0x00000004 },
1009 { 0x0000e8c5, 0x00000004 },
1010 { 0x0000e8c6, 0x00000004 },
1011 { 0x0000e928, 0x00000004 },
1012 { 0x0000e929, 0x00000004 },
1013 { 0x0000e92a, 0x00000004 },
1014 { 0x000000c8, 0x00000008 },
1015 { 0x0000e928, 0x00000004 },
1016 { 0x0000e929, 0x00000004 },
1017 { 0x0000e92a, 0x00000004 },
1018 { 0x000000cf, 0x00000008 },
1019 { 0x02c02000, 0x00000004 },
1020 { 0x00060000, 0x00000004 },
1021 { 0x000000d7, 0x00000034 },
1022 { 0x000000d4, 0x00000008 },
1023 { 0x00008000, 0x00000004 },
1024 { 0xc000e000, 0000000000 },
1025 { 0x0000e1cc, 0x00000004 },
1026 { 0x0500e1cd, 0x00000004 },
1027 { 0x000ca000, 0x00000004 },
1028 { 0x000000de, 0x00000034 },
1029 { 0x000000da, 0x00000008 },
1030 { 0x0000a000, 0000000000 },
1031 { 0x0019e1cc, 0x00000004 },
1032 { 0x001b0001, 0x00000004 },
1033 { 0x0500a000, 0x00000004 },
1034 { 0x080041cd, 0x00000004 },
1035 { 0x000ca000, 0x00000004 },
1036 { 0x000000fb, 0x00000034 },
1037 { 0x0000004a, 0x00000008 },
1038 { 0000000000, 0000000000 },
1039 { 0000000000, 0000000000 },
1040 { 0000000000, 0000000000 },
1041 { 0000000000, 0000000000 },
1042 { 0000000000, 0000000000 },
1043 { 0000000000, 0000000000 },
1044 { 0000000000, 0000000000 },
1045 { 0000000000, 0000000000 },
1046 { 0000000000, 0000000000 },
1047 { 0000000000, 0000000000 },
1048 { 0000000000, 0000000000 },
1049 { 0000000000, 0000000000 },
1050 { 0000000000, 0000000000 },
1051 { 0000000000, 0000000000 },
1052 { 0000000000, 0000000000 },
1053 { 0000000000, 0000000000 },
1054 { 0x000c2000, 0x00000004 },
1055 { 0x001d0018, 0x00000004 },
1056 { 0x001a0001, 0x00000004 },
1057 { 0x000000fb, 0x00000034 },
1058 { 0x0000004a, 0x00000008 },
1059 { 0x0500a04a, 0x00000008 },
1060 { 0000000000, 0000000000 },
1061 { 0000000000, 0000000000 },
1062 { 0000000000, 0000000000 },
1063 { 0000000000, 0000000000 },
1064};
1065
1066static const u32 RS600_cp_microcode[][2] = {
1067 { 0x4200e000, 0000000000 },
1068 { 0x4000e000, 0000000000 },
1069 { 0x000000a0, 0x00000008 },
1070 { 0x000000a4, 0x00000008 },
1071 { 0x4a554b4a, 0000000000 },
1072 { 0x4a4a4467, 0000000000 },
1073 { 0x55526f75, 0000000000 },
1074 { 0x4a7e7d65, 0000000000 },
1075 { 0x4ae74af6, 0000000000 },
1076 { 0x4ad34a4a, 0000000000 },
1077 { 0xd6898989, 0000000000 },
1078 { 0xcd4addcf, 0000000000 },
1079 { 0x8ebe4ae2, 0000000000 },
1080 { 0xc38a8a8a, 0000000000 },
1081 { 0x4a0f8cc8, 0000000000 },
1082 { 0x000ca000, 0x00000004 },
1083 { 0x000d0012, 0x00000038 },
1084 { 0x0000e8b4, 0x00000004 },
1085 { 0x000d0014, 0x00000038 },
1086 { 0x0000e8b6, 0x00000004 },
1087 { 0x000d0016, 0x00000038 },
1088 { 0x0000e854, 0x00000004 },
1089 { 0x000d0018, 0x00000038 },
1090 { 0x0000e855, 0x00000004 },
1091 { 0x000d001a, 0x00000038 },
1092 { 0x0000e856, 0x00000004 },
1093 { 0x000d001c, 0x00000038 },
1094 { 0x0000e857, 0x00000004 },
1095 { 0x000d001e, 0x00000038 },
1096 { 0x0000e824, 0x00000004 },
1097 { 0x000d0020, 0x00000038 },
1098 { 0x0000e825, 0x00000004 },
1099 { 0x000d0022, 0x00000038 },
1100 { 0x0000e830, 0x00000004 },
1101 { 0x000d0024, 0x00000038 },
1102 { 0x0000f0c0, 0x00000004 },
1103 { 0x000d0026, 0x00000038 },
1104 { 0x0000f0c1, 0x00000004 },
1105 { 0x000d0028, 0x00000038 },
1106 { 0x0000f041, 0x00000004 },
1107 { 0x000d002a, 0x00000038 },
1108 { 0x0000f184, 0x00000004 },
1109 { 0x000d002c, 0x00000038 },
1110 { 0x0000f185, 0x00000004 },
1111 { 0x000d002e, 0x00000038 },
1112 { 0x0000f186, 0x00000004 },
1113 { 0x000d0030, 0x00000038 },
1114 { 0x0000f187, 0x00000004 },
1115 { 0x000d0032, 0x00000038 },
1116 { 0x0000f180, 0x00000004 },
1117 { 0x000d0034, 0x00000038 },
1118 { 0x0000f393, 0x00000004 },
1119 { 0x000d0036, 0x00000038 },
1120 { 0x0000f38a, 0x00000004 },
1121 { 0x000d0038, 0x00000038 },
1122 { 0x0000f38e, 0x00000004 },
1123 { 0x0000e821, 0x00000004 },
1124 { 0x0140a000, 0x00000004 },
1125 { 0x00000043, 0x00000018 },
1126 { 0x00cce800, 0x00000004 },
1127 { 0x001b0001, 0x00000004 },
1128 { 0x08004800, 0x00000004 },
1129 { 0x001b0001, 0x00000004 },
1130 { 0x08004800, 0x00000004 },
1131 { 0x001b0001, 0x00000004 },
1132 { 0x08004800, 0x00000004 },
1133 { 0x0000003a, 0x00000008 },
1134 { 0x0000a000, 0000000000 },
1135 { 0x2000451d, 0x00000004 },
1136 { 0x0000e580, 0x00000004 },
1137 { 0x000ce581, 0x00000004 },
1138 { 0x08004580, 0x00000004 },
1139 { 0x000ce581, 0x00000004 },
1140 { 0x00000047, 0x00000008 },
1141 { 0x0000a000, 0000000000 },
1142 { 0x000c2000, 0x00000004 },
1143 { 0x0000e50e, 0x00000004 },
1144 { 0x00032000, 0x00000004 },
1145 { 0x00022051, 0x00000028 },
1146 { 0x00000051, 0x00000024 },
1147 { 0x0800450f, 0x00000004 },
1148 { 0x0000a04b, 0x00000008 },
1149 { 0x0000e565, 0x00000004 },
1150 { 0x0000e566, 0x00000004 },
1151 { 0x00000052, 0x00000008 },
1152 { 0x03cca5b4, 0x00000004 },
1153 { 0x05432000, 0x00000004 },
1154 { 0x00022000, 0x00000004 },
1155 { 0x4ccce05e, 0x00000030 },
1156 { 0x08274565, 0x00000004 },
1157 { 0x0000005e, 0x00000030 },
1158 { 0x08004564, 0x00000004 },
1159 { 0x0000e566, 0x00000004 },
1160 { 0x00000055, 0x00000008 },
1161 { 0x00802061, 0x00000010 },
1162 { 0x00202000, 0x00000004 },
1163 { 0x001b00ff, 0x00000004 },
1164 { 0x01000064, 0x00000010 },
1165 { 0x001f2000, 0x00000004 },
1166 { 0x001c00ff, 0x00000004 },
1167 { 0000000000, 0x0000000c },
1168 { 0x00000072, 0x00000030 },
1169 { 0x00000055, 0x00000008 },
1170 { 0x0000e576, 0x00000004 },
1171 { 0x0000e577, 0x00000004 },
1172 { 0x0000e50e, 0x00000004 },
1173 { 0x0000e50f, 0x00000004 },
1174 { 0x0140a000, 0x00000004 },
1175 { 0x00000069, 0x00000018 },
1176 { 0x00c0e5f9, 0x000000c2 },
1177 { 0x00000069, 0x00000008 },
1178 { 0x0014e50e, 0x00000004 },
1179 { 0x0040e50f, 0x00000004 },
1180 { 0x00c0006c, 0x00000008 },
1181 { 0x0000e570, 0x00000004 },
1182 { 0x0000e571, 0x00000004 },
1183 { 0x0000e572, 0x0000000c },
1184 { 0x0000a000, 0x00000004 },
1185 { 0x0140a000, 0x00000004 },
1186 { 0x0000e568, 0x00000004 },
1187 { 0x000c2000, 0x00000004 },
1188 { 0x00000076, 0x00000018 },
1189 { 0x000b0000, 0x00000004 },
1190 { 0x18c0e562, 0x00000004 },
1191 { 0x00000078, 0x00000008 },
1192 { 0x00c00077, 0x00000008 },
1193 { 0x000700d5, 0x00000004 },
1194 { 0x00000084, 0x00000038 },
1195 { 0x000ca086, 0x00000030 },
1196 { 0x080045bb, 0x00000004 },
1197 { 0x000c2087, 0x00000030 },
1198 { 0x0800e5bc, 0000000000 },
1199 { 0x0000e5bb, 0x00000004 },
1200 { 0x0000e5bc, 0000000000 },
1201 { 0x00120000, 0x0000000c },
1202 { 0x00120000, 0x00000004 },
1203 { 0x001b0002, 0x0000000c },
1204 { 0x0000a000, 0x00000004 },
1205 { 0x0000e821, 0x00000004 },
1206 { 0x0000e800, 0000000000 },
1207 { 0x0000e821, 0x00000004 },
1208 { 0x0000e82e, 0000000000 },
1209 { 0x02cca000, 0x00000004 },
1210 { 0x00140000, 0x00000004 },
1211 { 0x000ce1cc, 0x00000004 },
1212 { 0x050de1cd, 0x00000004 },
1213 { 0x00400000, 0x00000004 },
1214 { 0x00000096, 0x00000018 },
1215 { 0x00c0a000, 0x00000004 },
1216 { 0x00000093, 0x00000008 },
1217 { 0x00000098, 0x00000020 },
1218 { 0x4200e000, 0000000000 },
1219 { 0x0000009f, 0x00000038 },
1220 { 0x000ca000, 0x00000004 },
1221 { 0x00140000, 0x00000004 },
1222 { 0x000c2000, 0x00000004 },
1223 { 0x00160000, 0x00000004 },
1224 { 0x700ce000, 0x00000004 },
1225 { 0x0014009b, 0x00000008 },
1226 { 0x4000e000, 0000000000 },
1227 { 0x02400000, 0x00000004 },
1228 { 0x400ee000, 0x00000004 },
1229 { 0x02400000, 0x00000004 },
1230 { 0x4000e000, 0000000000 },
1231 { 0x000c2000, 0x00000004 },
1232 { 0x0240e51b, 0x00000004 },
1233 { 0x0080e50a, 0x00000005 },
1234 { 0x0080e50b, 0x00000005 },
1235 { 0x00220000, 0x00000004 },
1236 { 0x000700d5, 0x00000004 },
1237 { 0x000000b2, 0x00000038 },
1238 { 0x000c2087, 0x00000030 },
1239 { 0x0880e5bd, 0x00000005 },
1240 { 0x000c2086, 0x00000030 },
1241 { 0x0800e5bb, 0x00000005 },
1242 { 0x000c2087, 0x00000030 },
1243 { 0x0880e5bc, 0x00000005 },
1244 { 0x000000b5, 0x00000008 },
1245 { 0x0080e5bd, 0x00000005 },
1246 { 0x0000e5bb, 0x00000005 },
1247 { 0x0080e5bc, 0x00000005 },
1248 { 0x00210000, 0x00000004 },
1249 { 0x02800000, 0x00000004 },
1250 { 0x00c000b9, 0x00000018 },
1251 { 0x4180e000, 0x00000040 },
1252 { 0x000000bb, 0x00000024 },
1253 { 0x01000000, 0x0000000c },
1254 { 0x0100e51d, 0x0000000c },
1255 { 0x000045bb, 0x00000004 },
1256 { 0x000080b5, 0x00000008 },
1257 { 0x0000f3ce, 0x00000004 },
1258 { 0x0140a000, 0x00000004 },
1259 { 0x00cc2000, 0x00000004 },
1260 { 0x08c053cf, 0x00000040 },
1261 { 0x00008000, 0000000000 },
1262 { 0x0000f3d2, 0x00000004 },
1263 { 0x0140a000, 0x00000004 },
1264 { 0x00cc2000, 0x00000004 },
1265 { 0x08c053d3, 0x00000040 },
1266 { 0x00008000, 0000000000 },
1267 { 0x0000f39d, 0x00000004 },
1268 { 0x0140a000, 0x00000004 },
1269 { 0x00cc2000, 0x00000004 },
1270 { 0x08c0539e, 0x00000040 },
1271 { 0x00008000, 0000000000 },
1272 { 0x03c00830, 0x00000004 },
1273 { 0x4200e000, 0000000000 },
1274 { 0x0000a000, 0x00000004 },
1275 { 0x200045e0, 0x00000004 },
1276 { 0x0000e5e1, 0000000000 },
1277 { 0x00000001, 0000000000 },
1278 { 0x000700d2, 0x00000004 },
1279 { 0x0800e394, 0000000000 },
1280 { 0000000000, 0000000000 },
1281 { 0x0000e8c4, 0x00000004 },
1282 { 0x0000e8c5, 0x00000004 },
1283 { 0x0000e8c6, 0x00000004 },
1284 { 0x0000e928, 0x00000004 },
1285 { 0x0000e929, 0x00000004 },
1286 { 0x0000e92a, 0x00000004 },
1287 { 0x000000d6, 0x00000008 },
1288 { 0x0000e928, 0x00000004 },
1289 { 0x0000e929, 0x00000004 },
1290 { 0x0000e92a, 0x00000004 },
1291 { 0x000000dd, 0x00000008 },
1292 { 0x00e00116, 0000000000 },
1293 { 0x000700e1, 0x00000004 },
1294 { 0x0800401c, 0x00000004 },
1295 { 0x200050e7, 0x00000004 },
1296 { 0x0000e01d, 0x00000004 },
1297 { 0x000000e4, 0x00000008 },
1298 { 0x02c02000, 0x00000004 },
1299 { 0x00060000, 0x00000004 },
1300 { 0x000000eb, 0x00000034 },
1301 { 0x000000e8, 0x00000008 },
1302 { 0x00008000, 0x00000004 },
1303 { 0xc000e000, 0000000000 },
1304 { 0000000000, 0000000000 },
1305 { 0000000000, 0000000000 },
1306 { 0000000000, 0000000000 },
1307 { 0000000000, 0000000000 },
1308 { 0000000000, 0000000000 },
1309 { 0000000000, 0000000000 },
1310 { 0000000000, 0000000000 },
1311 { 0000000000, 0000000000 },
1312 { 0000000000, 0000000000 },
1313 { 0x000c2000, 0x00000004 },
1314 { 0x001d0018, 0x00000004 },
1315 { 0x001a0001, 0x00000004 },
1316 { 0x000000fb, 0x00000034 },
1317 { 0x0000004a, 0x00000008 },
1318 { 0x0500a04a, 0x00000008 },
1319 { 0000000000, 0000000000 },
1320 { 0000000000, 0000000000 },
1321 { 0000000000, 0000000000 },
1322 { 0000000000, 0000000000 },
1323};
1324
1325static const u32 RS690_cp_microcode[][2] = {
1326 { 0x000000dd, 0x00000008 },
1327 { 0x000000df, 0x00000008 },
1328 { 0x000000a0, 0x00000008 },
1329 { 0x000000a4, 0x00000008 },
1330 { 0x4a554b4a, 0000000000 },
1331 { 0x4a4a4467, 0000000000 },
1332 { 0x55526f75, 0000000000 },
1333 { 0x4a7e7d65, 0000000000 },
1334 { 0x4ad74af6, 0000000000 },
1335 { 0x4ac94a4a, 0000000000 },
1336 { 0xcc898989, 0000000000 },
1337 { 0xc34ad3c5, 0000000000 },
1338 { 0x8e4a4a4a, 0000000000 },
1339 { 0x4a8a8a8a, 0000000000 },
1340 { 0x4a0f8c4a, 0000000000 },
1341 { 0x000ca000, 0x00000004 },
1342 { 0x000d0012, 0x00000038 },
1343 { 0x0000e8b4, 0x00000004 },
1344 { 0x000d0014, 0x00000038 },
1345 { 0x0000e8b6, 0x00000004 },
1346 { 0x000d0016, 0x00000038 },
1347 { 0x0000e854, 0x00000004 },
1348 { 0x000d0018, 0x00000038 },
1349 { 0x0000e855, 0x00000004 },
1350 { 0x000d001a, 0x00000038 },
1351 { 0x0000e856, 0x00000004 },
1352 { 0x000d001c, 0x00000038 },
1353 { 0x0000e857, 0x00000004 },
1354 { 0x000d001e, 0x00000038 },
1355 { 0x0000e824, 0x00000004 },
1356 { 0x000d0020, 0x00000038 },
1357 { 0x0000e825, 0x00000004 },
1358 { 0x000d0022, 0x00000038 },
1359 { 0x0000e830, 0x00000004 },
1360 { 0x000d0024, 0x00000038 },
1361 { 0x0000f0c0, 0x00000004 },
1362 { 0x000d0026, 0x00000038 },
1363 { 0x0000f0c1, 0x00000004 },
1364 { 0x000d0028, 0x00000038 },
1365 { 0x0000f041, 0x00000004 },
1366 { 0x000d002a, 0x00000038 },
1367 { 0x0000f184, 0x00000004 },
1368 { 0x000d002c, 0x00000038 },
1369 { 0x0000f185, 0x00000004 },
1370 { 0x000d002e, 0x00000038 },
1371 { 0x0000f186, 0x00000004 },
1372 { 0x000d0030, 0x00000038 },
1373 { 0x0000f187, 0x00000004 },
1374 { 0x000d0032, 0x00000038 },
1375 { 0x0000f180, 0x00000004 },
1376 { 0x000d0034, 0x00000038 },
1377 { 0x0000f393, 0x00000004 },
1378 { 0x000d0036, 0x00000038 },
1379 { 0x0000f38a, 0x00000004 },
1380 { 0x000d0038, 0x00000038 },
1381 { 0x0000f38e, 0x00000004 },
1382 { 0x0000e821, 0x00000004 },
1383 { 0x0140a000, 0x00000004 },
1384 { 0x00000043, 0x00000018 },
1385 { 0x00cce800, 0x00000004 },
1386 { 0x001b0001, 0x00000004 },
1387 { 0x08004800, 0x00000004 },
1388 { 0x001b0001, 0x00000004 },
1389 { 0x08004800, 0x00000004 },
1390 { 0x001b0001, 0x00000004 },
1391 { 0x08004800, 0x00000004 },
1392 { 0x0000003a, 0x00000008 },
1393 { 0x0000a000, 0000000000 },
1394 { 0x2000451d, 0x00000004 },
1395 { 0x0000e580, 0x00000004 },
1396 { 0x000ce581, 0x00000004 },
1397 { 0x08004580, 0x00000004 },
1398 { 0x000ce581, 0x00000004 },
1399 { 0x00000047, 0x00000008 },
1400 { 0x0000a000, 0000000000 },
1401 { 0x000c2000, 0x00000004 },
1402 { 0x0000e50e, 0x00000004 },
1403 { 0x00032000, 0x00000004 },
1404 { 0x00022051, 0x00000028 },
1405 { 0x00000051, 0x00000024 },
1406 { 0x0800450f, 0x00000004 },
1407 { 0x0000a04b, 0x00000008 },
1408 { 0x0000e565, 0x00000004 },
1409 { 0x0000e566, 0x00000004 },
1410 { 0x00000052, 0x00000008 },
1411 { 0x03cca5b4, 0x00000004 },
1412 { 0x05432000, 0x00000004 },
1413 { 0x00022000, 0x00000004 },
1414 { 0x4ccce05e, 0x00000030 },
1415 { 0x08274565, 0x00000004 },
1416 { 0x0000005e, 0x00000030 },
1417 { 0x08004564, 0x00000004 },
1418 { 0x0000e566, 0x00000004 },
1419 { 0x00000055, 0x00000008 },
1420 { 0x00802061, 0x00000010 },
1421 { 0x00202000, 0x00000004 },
1422 { 0x001b00ff, 0x00000004 },
1423 { 0x01000064, 0x00000010 },
1424 { 0x001f2000, 0x00000004 },
1425 { 0x001c00ff, 0x00000004 },
1426 { 0000000000, 0x0000000c },
1427 { 0x00000072, 0x00000030 },
1428 { 0x00000055, 0x00000008 },
1429 { 0x0000e576, 0x00000004 },
1430 { 0x0000e577, 0x00000004 },
1431 { 0x0000e50e, 0x00000004 },
1432 { 0x0000e50f, 0x00000004 },
1433 { 0x0140a000, 0x00000004 },
1434 { 0x00000069, 0x00000018 },
1435 { 0x00c0e5f9, 0x000000c2 },
1436 { 0x00000069, 0x00000008 },
1437 { 0x0014e50e, 0x00000004 },
1438 { 0x0040e50f, 0x00000004 },
1439 { 0x00c0006c, 0x00000008 },
1440 { 0x0000e570, 0x00000004 },
1441 { 0x0000e571, 0x00000004 },
1442 { 0x0000e572, 0x0000000c },
1443 { 0x0000a000, 0x00000004 },
1444 { 0x0140a000, 0x00000004 },
1445 { 0x0000e568, 0x00000004 },
1446 { 0x000c2000, 0x00000004 },
1447 { 0x00000076, 0x00000018 },
1448 { 0x000b0000, 0x00000004 },
1449 { 0x18c0e562, 0x00000004 },
1450 { 0x00000078, 0x00000008 },
1451 { 0x00c00077, 0x00000008 },
1452 { 0x000700cb, 0x00000004 },
1453 { 0x00000084, 0x00000038 },
1454 { 0x000ca086, 0x00000030 },
1455 { 0x080045bb, 0x00000004 },
1456 { 0x000c2087, 0x00000030 },
1457 { 0x0800e5bc, 0000000000 },
1458 { 0x0000e5bb, 0x00000004 },
1459 { 0x0000e5bc, 0000000000 },
1460 { 0x00120000, 0x0000000c },
1461 { 0x00120000, 0x00000004 },
1462 { 0x001b0002, 0x0000000c },
1463 { 0x0000a000, 0x00000004 },
1464 { 0x0000e821, 0x00000004 },
1465 { 0x0000e800, 0000000000 },
1466 { 0x0000e821, 0x00000004 },
1467 { 0x0000e82e, 0000000000 },
1468 { 0x02cca000, 0x00000004 },
1469 { 0x00140000, 0x00000004 },
1470 { 0x000ce1cc, 0x00000004 },
1471 { 0x050de1cd, 0x00000004 },
1472 { 0x00400000, 0x00000004 },
1473 { 0x00000096, 0x00000018 },
1474 { 0x00c0a000, 0x00000004 },
1475 { 0x00000093, 0x00000008 },
1476 { 0x00000098, 0x00000020 },
1477 { 0x4200e000, 0000000000 },
1478 { 0x0000009f, 0x00000038 },
1479 { 0x000ca000, 0x00000004 },
1480 { 0x00140000, 0x00000004 },
1481 { 0x000c2000, 0x00000004 },
1482 { 0x00160000, 0x00000004 },
1483 { 0x700ce000, 0x00000004 },
1484 { 0x0014009b, 0x00000008 },
1485 { 0x4000e000, 0000000000 },
1486 { 0x02400000, 0x00000004 },
1487 { 0x400ee000, 0x00000004 },
1488 { 0x02400000, 0x00000004 },
1489 { 0x4000e000, 0000000000 },
1490 { 0x00100000, 0x0000002c },
1491 { 0x00004000, 0000000000 },
1492 { 0x080045c8, 0x00000004 },
1493 { 0x00240005, 0x00000004 },
1494 { 0x08004d0b, 0x00000004 },
1495 { 0x000c2000, 0x00000004 },
1496 { 0x0240e51b, 0x00000004 },
1497 { 0x0080e50a, 0x00000005 },
1498 { 0x0080e50b, 0x00000005 },
1499 { 0x00220000, 0x00000004 },
1500 { 0x000700cb, 0x00000004 },
1501 { 0x000000b7, 0x00000038 },
1502 { 0x000c2087, 0x00000030 },
1503 { 0x0880e5bd, 0x00000005 },
1504 { 0x000c2086, 0x00000030 },
1505 { 0x0800e5bb, 0x00000005 },
1506 { 0x000c2087, 0x00000030 },
1507 { 0x0880e5bc, 0x00000005 },
1508 { 0x000000ba, 0x00000008 },
1509 { 0x0080e5bd, 0x00000005 },
1510 { 0x0000e5bb, 0x00000005 },
1511 { 0x0080e5bc, 0x00000005 },
1512 { 0x00210000, 0x00000004 },
1513 { 0x02800000, 0x00000004 },
1514 { 0x00c000be, 0x00000018 },
1515 { 0x4180e000, 0x00000040 },
1516 { 0x000000c0, 0x00000024 },
1517 { 0x01000000, 0x0000000c },
1518 { 0x0100e51d, 0x0000000c },
1519 { 0x000045bb, 0x00000004 },
1520 { 0x000080ba, 0x00000008 },
1521 { 0x03c00830, 0x00000004 },
1522 { 0x4200e000, 0000000000 },
1523 { 0x0000a000, 0x00000004 },
1524 { 0x200045e0, 0x00000004 },
1525 { 0x0000e5e1, 0000000000 },
1526 { 0x00000001, 0000000000 },
1527 { 0x000700c8, 0x00000004 },
1528 { 0x0800e394, 0000000000 },
1529 { 0000000000, 0000000000 },
1530 { 0x0000e8c4, 0x00000004 },
1531 { 0x0000e8c5, 0x00000004 },
1532 { 0x0000e8c6, 0x00000004 },
1533 { 0x0000e928, 0x00000004 },
1534 { 0x0000e929, 0x00000004 },
1535 { 0x0000e92a, 0x00000004 },
1536 { 0x000000cc, 0x00000008 },
1537 { 0x0000e928, 0x00000004 },
1538 { 0x0000e929, 0x00000004 },
1539 { 0x0000e92a, 0x00000004 },
1540 { 0x000000d3, 0x00000008 },
1541 { 0x02c02000, 0x00000004 },
1542 { 0x00060000, 0x00000004 },
1543 { 0x000000db, 0x00000034 },
1544 { 0x000000d8, 0x00000008 },
1545 { 0x00008000, 0x00000004 },
1546 { 0xc000e000, 0000000000 },
1547 { 0x000000e1, 0x00000030 },
1548 { 0x4200e000, 0000000000 },
1549 { 0x000000e1, 0x00000030 },
1550 { 0x4000e000, 0000000000 },
1551 { 0x0025001b, 0x00000004 },
1552 { 0x00230000, 0x00000004 },
1553 { 0x00250005, 0x00000004 },
1554 { 0x000000e6, 0x00000034 },
1555 { 0000000000, 0x0000000c },
1556 { 0x00244000, 0x00000004 },
1557 { 0x080045c8, 0x00000004 },
1558 { 0x00240005, 0x00000004 },
1559 { 0x08004d0b, 0x0000000c },
1560 { 0000000000, 0000000000 },
1561 { 0000000000, 0000000000 },
1562 { 0000000000, 0000000000 },
1563 { 0000000000, 0000000000 },
1564 { 0000000000, 0000000000 },
1565 { 0000000000, 0000000000 },
1566 { 0000000000, 0000000000 },
1567 { 0000000000, 0000000000 },
1568 { 0000000000, 0000000000 },
1569 { 0000000000, 0000000000 },
1570 { 0000000000, 0000000000 },
1571 { 0000000000, 0000000000 },
1572 { 0x000c2000, 0x00000004 },
1573 { 0x001d0018, 0x00000004 },
1574 { 0x001a0001, 0x00000004 },
1575 { 0x000000fb, 0x00000034 },
1576 { 0x0000004a, 0x00000008 },
1577 { 0x0500a04a, 0x00000008 },
1578 { 0000000000, 0000000000 },
1579 { 0000000000, 0000000000 },
1580 { 0000000000, 0000000000 },
1581 { 0000000000, 0000000000 },
1582};
1583
1584static const u32 R520_cp_microcode[][2] = {
1585 { 0x4200e000, 0000000000 },
1586 { 0x4000e000, 0000000000 },
1587 { 0x00000099, 0x00000008 },
1588 { 0x0000009d, 0x00000008 },
1589 { 0x4a554b4a, 0000000000 },
1590 { 0x4a4a4467, 0000000000 },
1591 { 0x55526f75, 0000000000 },
1592 { 0x4a7e7d65, 0000000000 },
1593 { 0xe0dae6f6, 0000000000 },
1594 { 0x4ac54a4a, 0000000000 },
1595 { 0xc8828282, 0000000000 },
1596 { 0xbf4acfc1, 0000000000 },
1597 { 0x87b04ad5, 0000000000 },
1598 { 0xb5838383, 0000000000 },
1599 { 0x4a0f85ba, 0000000000 },
1600 { 0x000ca000, 0x00000004 },
1601 { 0x000d0012, 0x00000038 },
1602 { 0x0000e8b4, 0x00000004 },
1603 { 0x000d0014, 0x00000038 },
1604 { 0x0000e8b6, 0x00000004 },
1605 { 0x000d0016, 0x00000038 },
1606 { 0x0000e854, 0x00000004 },
1607 { 0x000d0018, 0x00000038 },
1608 { 0x0000e855, 0x00000004 },
1609 { 0x000d001a, 0x00000038 },
1610 { 0x0000e856, 0x00000004 },
1611 { 0x000d001c, 0x00000038 },
1612 { 0x0000e857, 0x00000004 },
1613 { 0x000d001e, 0x00000038 },
1614 { 0x0000e824, 0x00000004 },
1615 { 0x000d0020, 0x00000038 },
1616 { 0x0000e825, 0x00000004 },
1617 { 0x000d0022, 0x00000038 },
1618 { 0x0000e830, 0x00000004 },
1619 { 0x000d0024, 0x00000038 },
1620 { 0x0000f0c0, 0x00000004 },
1621 { 0x000d0026, 0x00000038 },
1622 { 0x0000f0c1, 0x00000004 },
1623 { 0x000d0028, 0x00000038 },
1624 { 0x0000e000, 0x00000004 },
1625 { 0x000d002a, 0x00000038 },
1626 { 0x0000e000, 0x00000004 },
1627 { 0x000d002c, 0x00000038 },
1628 { 0x0000e000, 0x00000004 },
1629 { 0x000d002e, 0x00000038 },
1630 { 0x0000e000, 0x00000004 },
1631 { 0x000d0030, 0x00000038 },
1632 { 0x0000e000, 0x00000004 },
1633 { 0x000d0032, 0x00000038 },
1634 { 0x0000f180, 0x00000004 },
1635 { 0x000d0034, 0x00000038 },
1636 { 0x0000f393, 0x00000004 },
1637 { 0x000d0036, 0x00000038 },
1638 { 0x0000f38a, 0x00000004 },
1639 { 0x000d0038, 0x00000038 },
1640 { 0x0000f38e, 0x00000004 },
1641 { 0x0000e821, 0x00000004 },
1642 { 0x0140a000, 0x00000004 },
1643 { 0x00000043, 0x00000018 },
1644 { 0x00cce800, 0x00000004 },
1645 { 0x001b0001, 0x00000004 },
1646 { 0x08004800, 0x00000004 },
1647 { 0x001b0001, 0x00000004 },
1648 { 0x08004800, 0x00000004 },
1649 { 0x001b0001, 0x00000004 },
1650 { 0x08004800, 0x00000004 },
1651 { 0x0000003a, 0x00000008 },
1652 { 0x0000a000, 0000000000 },
1653 { 0x2000451d, 0x00000004 },
1654 { 0x0000e580, 0x00000004 },
1655 { 0x000ce581, 0x00000004 },
1656 { 0x08004580, 0x00000004 },
1657 { 0x000ce581, 0x00000004 },
1658 { 0x00000047, 0x00000008 },
1659 { 0x0000a000, 0000000000 },
1660 { 0x000c2000, 0x00000004 },
1661 { 0x0000e50e, 0x00000004 },
1662 { 0x00032000, 0x00000004 },
1663 { 0x00022051, 0x00000028 },
1664 { 0x00000051, 0x00000024 },
1665 { 0x0800450f, 0x00000004 },
1666 { 0x0000a04b, 0x00000008 },
1667 { 0x0000e565, 0x00000004 },
1668 { 0x0000e566, 0x00000004 },
1669 { 0x00000052, 0x00000008 },
1670 { 0x03cca5b4, 0x00000004 },
1671 { 0x05432000, 0x00000004 },
1672 { 0x00022000, 0x00000004 },
1673 { 0x4ccce05e, 0x00000030 },
1674 { 0x08274565, 0x00000004 },
1675 { 0x0000005e, 0x00000030 },
1676 { 0x08004564, 0x00000004 },
1677 { 0x0000e566, 0x00000004 },
1678 { 0x00000055, 0x00000008 },
1679 { 0x00802061, 0x00000010 },
1680 { 0x00202000, 0x00000004 },
1681 { 0x001b00ff, 0x00000004 },
1682 { 0x01000064, 0x00000010 },
1683 { 0x001f2000, 0x00000004 },
1684 { 0x001c00ff, 0x00000004 },
1685 { 0000000000, 0x0000000c },
1686 { 0x00000072, 0x00000030 },
1687 { 0x00000055, 0x00000008 },
1688 { 0x0000e576, 0x00000004 },
1689 { 0x0000e577, 0x00000004 },
1690 { 0x0000e50e, 0x00000004 },
1691 { 0x0000e50f, 0x00000004 },
1692 { 0x0140a000, 0x00000004 },
1693 { 0x00000069, 0x00000018 },
1694 { 0x00c0e5f9, 0x000000c2 },
1695 { 0x00000069, 0x00000008 },
1696 { 0x0014e50e, 0x00000004 },
1697 { 0x0040e50f, 0x00000004 },
1698 { 0x00c0006c, 0x00000008 },
1699 { 0x0000e570, 0x00000004 },
1700 { 0x0000e571, 0x00000004 },
1701 { 0x0000e572, 0x0000000c },
1702 { 0x0000a000, 0x00000004 },
1703 { 0x0140a000, 0x00000004 },
1704 { 0x0000e568, 0x00000004 },
1705 { 0x000c2000, 0x00000004 },
1706 { 0x00000076, 0x00000018 },
1707 { 0x000b0000, 0x00000004 },
1708 { 0x18c0e562, 0x00000004 },
1709 { 0x00000078, 0x00000008 },
1710 { 0x00c00077, 0x00000008 },
1711 { 0x000700c7, 0x00000004 },
1712 { 0x00000080, 0x00000038 },
1713 { 0x0000e5bb, 0x00000004 },
1714 { 0x0000e5bc, 0000000000 },
1715 { 0x0000a000, 0x00000004 },
1716 { 0x0000e821, 0x00000004 },
1717 { 0x0000e800, 0000000000 },
1718 { 0x0000e821, 0x00000004 },
1719 { 0x0000e82e, 0000000000 },
1720 { 0x02cca000, 0x00000004 },
1721 { 0x00140000, 0x00000004 },
1722 { 0x000ce1cc, 0x00000004 },
1723 { 0x050de1cd, 0x00000004 },
1724 { 0x00400000, 0x00000004 },
1725 { 0x0000008f, 0x00000018 },
1726 { 0x00c0a000, 0x00000004 },
1727 { 0x0000008c, 0x00000008 },
1728 { 0x00000091, 0x00000020 },
1729 { 0x4200e000, 0000000000 },
1730 { 0x00000098, 0x00000038 },
1731 { 0x000ca000, 0x00000004 },
1732 { 0x00140000, 0x00000004 },
1733 { 0x000c2000, 0x00000004 },
1734 { 0x00160000, 0x00000004 },
1735 { 0x700ce000, 0x00000004 },
1736 { 0x00140094, 0x00000008 },
1737 { 0x4000e000, 0000000000 },
1738 { 0x02400000, 0x00000004 },
1739 { 0x400ee000, 0x00000004 },
1740 { 0x02400000, 0x00000004 },
1741 { 0x4000e000, 0000000000 },
1742 { 0x000c2000, 0x00000004 },
1743 { 0x0240e51b, 0x00000004 },
1744 { 0x0080e50a, 0x00000005 },
1745 { 0x0080e50b, 0x00000005 },
1746 { 0x00220000, 0x00000004 },
1747 { 0x000700c7, 0x00000004 },
1748 { 0x000000a4, 0x00000038 },
1749 { 0x0080e5bd, 0x00000005 },
1750 { 0x0000e5bb, 0x00000005 },
1751 { 0x0080e5bc, 0x00000005 },
1752 { 0x00210000, 0x00000004 },
1753 { 0x02800000, 0x00000004 },
1754 { 0x00c000ab, 0x00000018 },
1755 { 0x4180e000, 0x00000040 },
1756 { 0x000000ad, 0x00000024 },
1757 { 0x01000000, 0x0000000c },
1758 { 0x0100e51d, 0x0000000c },
1759 { 0x000045bb, 0x00000004 },
1760 { 0x000080a7, 0x00000008 },
1761 { 0x0000f3ce, 0x00000004 },
1762 { 0x0140a000, 0x00000004 },
1763 { 0x00cc2000, 0x00000004 },
1764 { 0x08c053cf, 0x00000040 },
1765 { 0x00008000, 0000000000 },
1766 { 0x0000f3d2, 0x00000004 },
1767 { 0x0140a000, 0x00000004 },
1768 { 0x00cc2000, 0x00000004 },
1769 { 0x08c053d3, 0x00000040 },
1770 { 0x00008000, 0000000000 },
1771 { 0x0000f39d, 0x00000004 },
1772 { 0x0140a000, 0x00000004 },
1773 { 0x00cc2000, 0x00000004 },
1774 { 0x08c0539e, 0x00000040 },
1775 { 0x00008000, 0000000000 },
1776 { 0x03c00830, 0x00000004 },
1777 { 0x4200e000, 0000000000 },
1778 { 0x0000a000, 0x00000004 },
1779 { 0x200045e0, 0x00000004 },
1780 { 0x0000e5e1, 0000000000 },
1781 { 0x00000001, 0000000000 },
1782 { 0x000700c4, 0x00000004 },
1783 { 0x0800e394, 0000000000 },
1784 { 0000000000, 0000000000 },
1785 { 0x0000e8c4, 0x00000004 },
1786 { 0x0000e8c5, 0x00000004 },
1787 { 0x0000e8c6, 0x00000004 },
1788 { 0x0000e928, 0x00000004 },
1789 { 0x0000e929, 0x00000004 },
1790 { 0x0000e92a, 0x00000004 },
1791 { 0x000000c8, 0x00000008 },
1792 { 0x0000e928, 0x00000004 },
1793 { 0x0000e929, 0x00000004 },
1794 { 0x0000e92a, 0x00000004 },
1795 { 0x000000cf, 0x00000008 },
1796 { 0xdeadbeef, 0000000000 },
1797 { 0x00000116, 0000000000 },
1798 { 0x000700d3, 0x00000004 },
1799 { 0x080050e7, 0x00000004 },
1800 { 0x000700d4, 0x00000004 },
1801 { 0x0800401c, 0x00000004 },
1802 { 0x0000e01d, 0000000000 },
1803 { 0x02c02000, 0x00000004 },
1804 { 0x00060000, 0x00000004 },
1805 { 0x000000de, 0x00000034 },
1806 { 0x000000db, 0x00000008 },
1807 { 0x00008000, 0x00000004 },
1808 { 0xc000e000, 0000000000 },
1809 { 0x0000e1cc, 0x00000004 },
1810 { 0x0500e1cd, 0x00000004 },
1811 { 0x000ca000, 0x00000004 },
1812 { 0x000000e5, 0x00000034 },
1813 { 0x000000e1, 0x00000008 },
1814 { 0x0000a000, 0000000000 },
1815 { 0x0019e1cc, 0x00000004 },
1816 { 0x001b0001, 0x00000004 },
1817 { 0x0500a000, 0x00000004 },
1818 { 0x080041cd, 0x00000004 },
1819 { 0x000ca000, 0x00000004 },
1820 { 0x000000fb, 0x00000034 },
1821 { 0x0000004a, 0x00000008 },
1822 { 0000000000, 0000000000 },
1823 { 0000000000, 0000000000 },
1824 { 0000000000, 0000000000 },
1825 { 0000000000, 0000000000 },
1826 { 0000000000, 0000000000 },
1827 { 0000000000, 0000000000 },
1828 { 0000000000, 0000000000 },
1829 { 0000000000, 0000000000 },
1830 { 0000000000, 0000000000 },
1831 { 0x000c2000, 0x00000004 },
1832 { 0x001d0018, 0x00000004 },
1833 { 0x001a0001, 0x00000004 },
1834 { 0x000000fb, 0x00000034 },
1835 { 0x0000004a, 0x00000008 },
1836 { 0x0500a04a, 0x00000008 },
1837 { 0000000000, 0000000000 },
1838 { 0000000000, 0000000000 },
1839 { 0000000000, 0000000000 },
1840 { 0000000000, 0000000000 },
1841};
1842
1843
1844#endif
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
new file mode 100644
index 000000000000..11c146b49211
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -0,0 +1,3203 @@
1/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
2/*
3 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Gareth Hughes <gareth@valinux.com>
27 * Kevin E. Martin <martin@valinux.com>
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "drm_sarea.h"
33#include "radeon_drm.h"
34#include "radeon_drv.h"
35
36/* ================================================================
37 * Helper functions for client state checking and fixup
38 */
39
40static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
41 dev_priv,
42 struct drm_file * file_priv,
43 u32 *offset)
44{
45 u64 off = *offset;
46 u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
47 struct drm_radeon_driver_file_fields *radeon_priv;
48
49 /* Hrm ... the story of the offset ... So this function converts
50 * the various ideas of what userland clients might have for an
51 * offset in the card address space into an offset into the card
52 * address space :) So with a sane client, it should just keep
53 * the value intact and just do some boundary checking. However,
54 * not all clients are sane. Some older clients pass us 0 based
55 * offsets relative to the start of the framebuffer and some may
56 * assume the AGP aperture it appended to the framebuffer, so we
57 * try to detect those cases and fix them up.
58 *
59 * Note: It might be a good idea here to make sure the offset lands
60 * in some "allowed" area to protect things like the PCIE GART...
61 */
62
63 /* First, the best case, the offset already lands in either the
64 * framebuffer or the GART mapped space
65 */
66 if (radeon_check_offset(dev_priv, off))
67 return 0;
68
69 /* Ok, that didn't happen... now check if we have a zero based
70 * offset that fits in the framebuffer + gart space, apply the
71 * magic offset we get from SETPARAM or calculated from fb_location
72 */
73 if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
74 radeon_priv = file_priv->driver_priv;
75 off += radeon_priv->radeon_fb_delta;
76 }
77
78 /* Finally, assume we aimed at a GART offset if beyond the fb */
79 if (off > fb_end)
80 off = off - fb_end - 1 + dev_priv->gart_vm_start;
81
82 /* Now recheck and fail if out of bounds */
83 if (radeon_check_offset(dev_priv, off)) {
84 DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
85 *offset = off;
86 return 0;
87 }
88 return -EINVAL;
89}
90
/*
 * Validate (and fix up in place) the register payload of one type-0
 * state packet before it is passed to the hardware.
 *
 * @id:   packet identifier (RADEON_EMIT_* / R200_EMIT_*)
 * @data: the packet's register values; any embedded buffer offsets are
 *        rewritten in place via radeon_check_and_fixup_offset().
 *
 * Returns 0 on success, -EINVAL for an unknown packet ID or an offset
 * that cannot be fixed up.
 */
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
						     dev_priv,
						     struct drm_file *file_priv,
						     int id, u32 *data)
{
	switch (id) {

	/* Offsets are located at a fixed dword index inside the packet,
	 * computed from the distance between the register and the packet's
	 * base register (4 bytes per dword).
	 */
	case RADEON_EMIT_PP_MISC:
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
		    &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
			DRM_ERROR("Invalid depth buffer offset\n");
			return -EINVAL;
		}
		break;

	case RADEON_EMIT_PP_CNTL:
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
		    &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
			DRM_ERROR("Invalid colour buffer offset\n");
			return -EINVAL;
		}
		break;

	/* R200 texture offset packets carry the offset as their first dword. */
	case R200_EMIT_PP_TXOFFSET_0:
	case R200_EMIT_PP_TXOFFSET_1:
	case R200_EMIT_PP_TXOFFSET_2:
	case R200_EMIT_PP_TXOFFSET_3:
	case R200_EMIT_PP_TXOFFSET_4:
	case R200_EMIT_PP_TXOFFSET_5:
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
						  &data[0])) {
			DRM_ERROR("Invalid R200 texture offset\n");
			return -EINVAL;
		}
		break;

	case RADEON_EMIT_PP_TXFILTER_0:
	case RADEON_EMIT_PP_TXFILTER_1:
	case RADEON_EMIT_PP_TXFILTER_2:
		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
		    &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
			DRM_ERROR("Invalid R100 texture offset\n");
			return -EINVAL;
		}
		break;

	/* Cubic texture packets carry five face offsets back to back. */
	case R200_EMIT_PP_CUBIC_OFFSETS_0:
	case R200_EMIT_PP_CUBIC_OFFSETS_1:
	case R200_EMIT_PP_CUBIC_OFFSETS_2:
	case R200_EMIT_PP_CUBIC_OFFSETS_3:
	case R200_EMIT_PP_CUBIC_OFFSETS_4:
	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
			int i;
			for (i = 0; i < 5; i++) {
				if (radeon_check_and_fixup_offset(dev_priv,
								  file_priv,
								  &data[i])) {
					DRM_ERROR
					    ("Invalid R200 cubic texture offset\n");
					return -EINVAL;
				}
			}
			break;
		}

	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
			int i;
			for (i = 0; i < 5; i++) {
				if (radeon_check_and_fixup_offset(dev_priv,
								  file_priv,
								  &data[i])) {
					DRM_ERROR
					    ("Invalid R100 cubic texture offset\n");
					return -EINVAL;
				}
			}
		}
		break;

	/* No offset to check here, but a TCL state flush is emitted before
	 * the VAP control update.
	 */
	case R200_EMIT_VAP_CTL:{
			RING_LOCALS;
			BEGIN_RING(2);
			OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
			ADVANCE_RING();
		}
		break;

	case RADEON_EMIT_RB3D_COLORPITCH:
	case RADEON_EMIT_RE_LINE_PATTERN:
	case RADEON_EMIT_SE_LINE_WIDTH:
	case RADEON_EMIT_PP_LUM_MATRIX:
	case RADEON_EMIT_PP_ROT_MATRIX_0:
	case RADEON_EMIT_RB3D_STENCILREFMASK:
	case RADEON_EMIT_SE_VPORT_XSCALE:
	case RADEON_EMIT_SE_CNTL:
	case RADEON_EMIT_SE_CNTL_STATUS:
	case RADEON_EMIT_RE_MISC:
	case RADEON_EMIT_PP_BORDER_COLOR_0:
	case RADEON_EMIT_PP_BORDER_COLOR_1:
	case RADEON_EMIT_PP_BORDER_COLOR_2:
	case RADEON_EMIT_SE_ZBIAS_FACTOR:
	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
	case R200_EMIT_PP_TXCBLEND_0:
	case R200_EMIT_PP_TXCBLEND_1:
	case R200_EMIT_PP_TXCBLEND_2:
	case R200_EMIT_PP_TXCBLEND_3:
	case R200_EMIT_PP_TXCBLEND_4:
	case R200_EMIT_PP_TXCBLEND_5:
	case R200_EMIT_PP_TXCBLEND_6:
	case R200_EMIT_PP_TXCBLEND_7:
	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
	case R200_EMIT_TFACTOR_0:
	case R200_EMIT_VTX_FMT_0:
	case R200_EMIT_MATRIX_SELECT_0:
	case R200_EMIT_TEX_PROC_CTL_2:
	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
	case R200_EMIT_PP_TXFILTER_0:
	case R200_EMIT_PP_TXFILTER_1:
	case R200_EMIT_PP_TXFILTER_2:
	case R200_EMIT_PP_TXFILTER_3:
	case R200_EMIT_PP_TXFILTER_4:
	case R200_EMIT_PP_TXFILTER_5:
	case R200_EMIT_VTE_CNTL:
	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
	case R200_EMIT_PP_TAM_DEBUG3:
	case R200_EMIT_PP_CNTL_X:
	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
	case R200_EMIT_RE_SCISSOR_TL_0:
	case R200_EMIT_RE_SCISSOR_TL_1:
	case R200_EMIT_RE_SCISSOR_TL_2:
	case R200_EMIT_SE_VAP_CNTL_STATUS:
	case R200_EMIT_SE_VTX_STATE_CNTL:
	case R200_EMIT_RE_POINTSIZE:
	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
	case R200_EMIT_PP_CUBIC_FACES_0:
	case R200_EMIT_PP_CUBIC_FACES_1:
	case R200_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_CUBIC_FACES_3:
	case R200_EMIT_PP_CUBIC_FACES_4:
	case R200_EMIT_PP_CUBIC_FACES_5:
	case RADEON_EMIT_PP_TEX_SIZE_0:
	case RADEON_EMIT_PP_TEX_SIZE_1:
	case RADEON_EMIT_PP_TEX_SIZE_2:
	case R200_EMIT_RB3D_BLENDCOLOR:
	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
	case RADEON_EMIT_PP_CUBIC_FACES_0:
	case RADEON_EMIT_PP_CUBIC_FACES_1:
	case RADEON_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_TRI_PERF_CNTL:
	case R200_EMIT_PP_AFS_0:
	case R200_EMIT_PP_AFS_1:
	case R200_EMIT_ATF_TFACTOR:
	case R200_EMIT_PP_TXCTLALL_0:
	case R200_EMIT_PP_TXCTLALL_1:
	case R200_EMIT_PP_TXCTLALL_2:
	case R200_EMIT_PP_TXCTLALL_3:
	case R200_EMIT_PP_TXCTLALL_4:
	case R200_EMIT_PP_TXCTLALL_5:
	case R200_EMIT_VAP_PVS_CNTL:
		/* These packets don't contain memory offsets */
		break;

	default:
		DRM_ERROR("Unknown state packet ID %d\n", id);
		return -EINVAL;
	}

	return 0;
}
264
/*
 * Validate one type-3 (CP_PACKET3) command from a client command buffer
 * and fix up any buffer offsets it contains, in place.
 *
 * @cmdbuf: client command buffer; cmd[0] is the packet header.
 * @cmdsz:  out parameter, set to the packet's total size in dwords
 *          (header + count field), before any other validation.
 *
 * Returns 0 on success, -EINVAL for a malformed header, a packet that
 * overruns the buffer, a wrong-chip packet, or an unfixable offset.
 */
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
						     dev_priv,
						     struct drm_file *file_priv,
						     drm_radeon_kcmd_buffer_t *
						     cmdbuf,
						     unsigned int *cmdsz)
{
	u32 *cmd = (u32 *) cmdbuf->buf;
	u32 offset, narrays;
	int count, i, k;

	/* Size in dwords: header dword + opcode dword + payload count
	 * encoded in bits 16..29 of the header.
	 */
	*cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);

	if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
		DRM_ERROR("Not a type 3 packet\n");
		return -EINVAL;
	}

	/* The full packet must fit inside the supplied buffer (bufsz is
	 * in bytes, *cmdsz in dwords).
	 */
	if (4 * *cmdsz > cmdbuf->bufsz) {
		DRM_ERROR("Packet size larger than size of data provided\n");
		return -EINVAL;
	}

	switch(cmd[0] & 0xff00) {
	/* XXX Are there old drivers needing other packets? */

	case RADEON_3D_DRAW_IMMD:
	case RADEON_3D_DRAW_VBUF:
	case RADEON_3D_DRAW_INDX:
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
	case RADEON_3D_CLEAR_ZMASK:
/* case RADEON_CP_NEXT_CHAR:
	case RADEON_CP_PLY_NEXTSCAN:
	case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
		/* these packets are safe */
		break;

	case RADEON_CP_3D_DRAW_IMMD_2:
	case RADEON_CP_3D_DRAW_VBUF_2:
	case RADEON_CP_3D_DRAW_INDX_2:
	case RADEON_3D_CLEAR_HIZ:
		/* safe but r200 only */
		if (dev_priv->microcode_version != UCODE_R200) {
			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
			return -EINVAL;
		}
		break;

	case RADEON_3D_LOAD_VBPNTR:
		count = (cmd[0] >> 16) & 0x3fff;

		if (count > 18) { /* 12 arrays max */
			DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
				  count);
			return -EINVAL;
		}

		/* carefully check packet contents: arrays come in pairs,
		 * each pair packed as (attr, offset, offset) = 3 dwords,
		 * a trailing odd array as (attr, offset) = 2 dwords.
		 */
		narrays = cmd[1] & ~0xc000;
		k = 0;
		i = 2;
		while ((k < narrays) && (i < (count + 2))) {
			i++; /* skip attribute field */
			if (radeon_check_and_fixup_offset(dev_priv, file_priv,
							  &cmd[i])) {
				DRM_ERROR
				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
				     k, i);
				return -EINVAL;
			}
			k++;
			i++;
			if (k == narrays)
				break;
			/* have one more to process, they come in pairs */
			if (radeon_check_and_fixup_offset(dev_priv,
							  file_priv, &cmd[i]))
			{
				DRM_ERROR
				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
				     k, i);
				return -EINVAL;
			}
			k++;
			i++;
		}
		/* do the counts match what we expect ? */
		/* NOTE(review): the check uses count + 2 but the message
		 * prints count + 1 — message looks off by one; verify
		 * against the packet spec before changing either.
		 */
		if ((k != narrays) || (i != (count + 2))) {
			DRM_ERROR
			    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
			     k, i, narrays, count + 1);
			return -EINVAL;
		}
		break;

	case RADEON_3D_RNDR_GEN_INDX_PRIM:
		if (dev_priv->microcode_version != UCODE_R100) {
			DRM_ERROR("Invalid 3d packet for r200-class chip\n");
			return -EINVAL;
		}
		/* cmd[1] is the index buffer offset */
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
			DRM_ERROR("Invalid rndr_gen_indx offset\n");
			return -EINVAL;
		}
		break;

	case RADEON_CP_INDX_BUFFER:
		if (dev_priv->microcode_version != UCODE_R200) {
			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
			return -EINVAL;
		}
		/* cmd[1] must address the expected register; cmd[2] is the
		 * buffer offset.
		 */
		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
			return -EINVAL;
		}
		if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
			return -EINVAL;
		}
		break;

	case RADEON_CNTL_HOSTDATA_BLT:
	case RADEON_CNTL_PAINT_MULTI:
	case RADEON_CNTL_BITBLT_MULTI:
		/* MSB of opcode: next DWORD GUI_CNTL */
		/* Pitch/offset dwords pack the offset in the low 22 bits,
		 * in units of 1024 bytes — hence the << 10 / >> 10 below.
		 */
		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[2] << 10;
			if (radeon_check_and_fixup_offset
			    (dev_priv, file_priv, &offset)) {
				DRM_ERROR("Invalid first packet offset\n");
				return -EINVAL;
			}
			cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
		}

		/* With both SRC and DST pitch/offset control set there is a
		 * second pitch/offset dword to fix up.
		 */
		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[3] << 10;
			if (radeon_check_and_fixup_offset
			    (dev_priv, file_priv, &offset)) {
				DRM_ERROR("Invalid second packet offset\n");
				return -EINVAL;
			}
			cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
		}
		break;

	default:
		DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
		return -EINVAL;
	}

	return 0;
}
421
422/* ================================================================
423 * CP hardware state programming functions
424 */
425
/*
 * Program the hardware scissor to one clip rectangle by writing the
 * top-left corner and the (inclusive, hence the -1) bottom-right corner
 * registers.
 */
static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
					     struct drm_clip_rect * box)
{
	RING_LOCALS;

	DRM_DEBUG(" box: x1=%d y1=%d x2=%d y2=%d\n",
		  box->x1, box->y1, box->x2, box->y2);

	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
	OUT_RING((box->y1 << 16) | box->x1);
	OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
	/* x2/y2 are exclusive in the clip rect, inclusive in the register */
	OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
	ADVANCE_RING();
}
441
442/* Emit 1.1 state
443 */
444static int radeon_emit_state(drm_radeon_private_t * dev_priv,
445 struct drm_file *file_priv,
446 drm_radeon_context_regs_t * ctx,
447 drm_radeon_texture_regs_t * tex,
448 unsigned int dirty)
449{
450 RING_LOCALS;
451 DRM_DEBUG("dirty=0x%08x\n", dirty);
452
453 if (dirty & RADEON_UPLOAD_CONTEXT) {
454 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
455 &ctx->rb3d_depthoffset)) {
456 DRM_ERROR("Invalid depth buffer offset\n");
457 return -EINVAL;
458 }
459
460 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
461 &ctx->rb3d_coloroffset)) {
462 DRM_ERROR("Invalid depth buffer offset\n");
463 return -EINVAL;
464 }
465
466 BEGIN_RING(14);
467 OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
468 OUT_RING(ctx->pp_misc);
469 OUT_RING(ctx->pp_fog_color);
470 OUT_RING(ctx->re_solid_color);
471 OUT_RING(ctx->rb3d_blendcntl);
472 OUT_RING(ctx->rb3d_depthoffset);
473 OUT_RING(ctx->rb3d_depthpitch);
474 OUT_RING(ctx->rb3d_zstencilcntl);
475 OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
476 OUT_RING(ctx->pp_cntl);
477 OUT_RING(ctx->rb3d_cntl);
478 OUT_RING(ctx->rb3d_coloroffset);
479 OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
480 OUT_RING(ctx->rb3d_colorpitch);
481 ADVANCE_RING();
482 }
483
484 if (dirty & RADEON_UPLOAD_VERTFMT) {
485 BEGIN_RING(2);
486 OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
487 OUT_RING(ctx->se_coord_fmt);
488 ADVANCE_RING();
489 }
490
491 if (dirty & RADEON_UPLOAD_LINE) {
492 BEGIN_RING(5);
493 OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
494 OUT_RING(ctx->re_line_pattern);
495 OUT_RING(ctx->re_line_state);
496 OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
497 OUT_RING(ctx->se_line_width);
498 ADVANCE_RING();
499 }
500
501 if (dirty & RADEON_UPLOAD_BUMPMAP) {
502 BEGIN_RING(5);
503 OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
504 OUT_RING(ctx->pp_lum_matrix);
505 OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
506 OUT_RING(ctx->pp_rot_matrix_0);
507 OUT_RING(ctx->pp_rot_matrix_1);
508 ADVANCE_RING();
509 }
510
511 if (dirty & RADEON_UPLOAD_MASKS) {
512 BEGIN_RING(4);
513 OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
514 OUT_RING(ctx->rb3d_stencilrefmask);
515 OUT_RING(ctx->rb3d_ropcntl);
516 OUT_RING(ctx->rb3d_planemask);
517 ADVANCE_RING();
518 }
519
520 if (dirty & RADEON_UPLOAD_VIEWPORT) {
521 BEGIN_RING(7);
522 OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
523 OUT_RING(ctx->se_vport_xscale);
524 OUT_RING(ctx->se_vport_xoffset);
525 OUT_RING(ctx->se_vport_yscale);
526 OUT_RING(ctx->se_vport_yoffset);
527 OUT_RING(ctx->se_vport_zscale);
528 OUT_RING(ctx->se_vport_zoffset);
529 ADVANCE_RING();
530 }
531
532 if (dirty & RADEON_UPLOAD_SETUP) {
533 BEGIN_RING(4);
534 OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
535 OUT_RING(ctx->se_cntl);
536 OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
537 OUT_RING(ctx->se_cntl_status);
538 ADVANCE_RING();
539 }
540
541 if (dirty & RADEON_UPLOAD_MISC) {
542 BEGIN_RING(2);
543 OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
544 OUT_RING(ctx->re_misc);
545 ADVANCE_RING();
546 }
547
548 if (dirty & RADEON_UPLOAD_TEX0) {
549 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
550 &tex[0].pp_txoffset)) {
551 DRM_ERROR("Invalid texture offset for unit 0\n");
552 return -EINVAL;
553 }
554
555 BEGIN_RING(9);
556 OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
557 OUT_RING(tex[0].pp_txfilter);
558 OUT_RING(tex[0].pp_txformat);
559 OUT_RING(tex[0].pp_txoffset);
560 OUT_RING(tex[0].pp_txcblend);
561 OUT_RING(tex[0].pp_txablend);
562 OUT_RING(tex[0].pp_tfactor);
563 OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
564 OUT_RING(tex[0].pp_border_color);
565 ADVANCE_RING();
566 }
567
568 if (dirty & RADEON_UPLOAD_TEX1) {
569 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
570 &tex[1].pp_txoffset)) {
571 DRM_ERROR("Invalid texture offset for unit 1\n");
572 return -EINVAL;
573 }
574
575 BEGIN_RING(9);
576 OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
577 OUT_RING(tex[1].pp_txfilter);
578 OUT_RING(tex[1].pp_txformat);
579 OUT_RING(tex[1].pp_txoffset);
580 OUT_RING(tex[1].pp_txcblend);
581 OUT_RING(tex[1].pp_txablend);
582 OUT_RING(tex[1].pp_tfactor);
583 OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
584 OUT_RING(tex[1].pp_border_color);
585 ADVANCE_RING();
586 }
587
588 if (dirty & RADEON_UPLOAD_TEX2) {
589 if (radeon_check_and_fixup_offset(dev_priv, file_priv,
590 &tex[2].pp_txoffset)) {
591 DRM_ERROR("Invalid texture offset for unit 2\n");
592 return -EINVAL;
593 }
594
595 BEGIN_RING(9);
596 OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
597 OUT_RING(tex[2].pp_txfilter);
598 OUT_RING(tex[2].pp_txformat);
599 OUT_RING(tex[2].pp_txoffset);
600 OUT_RING(tex[2].pp_txcblend);
601 OUT_RING(tex[2].pp_txablend);
602 OUT_RING(tex[2].pp_tfactor);
603 OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
604 OUT_RING(tex[2].pp_border_color);
605 ADVANCE_RING();
606 }
607
608 return 0;
609}
610
/* Emit 1.2 state
 *
 * Same as radeon_emit_state() but additionally handles the z-bias
 * registers introduced with the 1.2 state layout, then delegates the
 * rest of the dirty state to radeon_emit_state().
 */
static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
			      struct drm_file *file_priv,
			      drm_radeon_state_t * state)
{
	RING_LOCALS;

	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
		BEGIN_RING(3);
		OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
		OUT_RING(state->context2.se_zbias_factor);
		OUT_RING(state->context2.se_zbias_constant);
		ADVANCE_RING();
	}

	/* Propagate the return value of the 1.1 emit (0 or -EINVAL). */
	return radeon_emit_state(dev_priv, file_priv, &state->context,
				 state->tex, state->dirty);
}
630
/* New (1.3) state mechanism. 3 commands (packet, scalar, vector) in
 * 1.3 cmdbuffers allow all previous state to be updated as well as
 * the tcl scalar and vector areas.
 *
 * Table indexed by the RADEON_EMIT_* / R200_EMIT_* packet IDs:
 * start = base register of the packet, len = number of registers,
 * name = human-readable label for diagnostics.  Entry order must match
 * the packet ID enumeration (see the numeric markers below).
 */
static struct {
	int start;
	int len;
	const char *name;
} packet[RADEON_MAX_STATE_PACKETS] = {
	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
	 "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},	/* 85 */
	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
};
739
740/* ================================================================
741 * Performance monitoring functions
742 */
743
/*
 * Fill a small solid-colour rectangle via a 2D PAINT_MULTI blit —
 * used by the performance-box overlay below.  Coordinates are relative
 * to the first clip rectangle of the current sarea.
 */
static void radeon_clear_box(drm_radeon_private_t * dev_priv,
			     int x, int y, int w, int h, int r, int g, int b)
{
	u32 color;
	RING_LOCALS;

	/* Translate into absolute coordinates via clip rect 0. */
	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	/* Pack 8-bit r/g/b into the surface's colour format. */
	switch (dev_priv->color_fmt) {
	case RADEON_COLOR_FORMAT_RGB565:
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case RADEON_COLOR_FORMAT_ARGB8888:
	default:
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	}

	/* Wait for 3D idle and open the full write mask before blitting. */
	BEGIN_RING(4);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
	OUT_RING(0xffffffff);
	ADVANCE_RING();

	BEGIN_RING(6);

	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		 RADEON_GMC_BRUSH_SOLID_COLOR |
		 (dev_priv->color_fmt << 8) |
		 RADEON_GMC_SRC_DATATYPE_COLOR |
		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);

	/* Pick the buffer currently being rendered to (page flip state). */
	if (dev_priv->sarea_priv->pfCurrentPage == 1) {
		OUT_RING(dev_priv->front_pitch_offset);
	} else {
		OUT_RING(dev_priv->back_pitch_offset);
	}

	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}
792
/*
 * Draw the on-screen performance/diagnostic boxes from the per-frame
 * stats counters, then reset the counters.  Each box colour encodes one
 * condition (flip, wait-for-idle, texture load, never-idle, buffer use).
 */
static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
{
	/* Collapse various things into a wait flag -- trying to
	 * guess if userspace slept -- better just to have them tell us.
	 */
	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	if (dev_priv->stats.freelist_loops) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	/* Purple box for page flipping
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
		radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);

	/* Red box if we have to wait for idle at any point
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
		radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);

	/* Blue box: lost context?
	 */

	/* Yellow box for texture swaps
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
		radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
		radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);

	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box(dev_priv, 4, 16,
				 dev_priv->stats.requested_bufs, 4,
				 196, 128, 128);
	}

	/* Counters are per-frame: clear them once the boxes are drawn. */
	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));

}
845
846/* ================================================================
847 * CP command dispatch functions
848 */
849
850static void radeon_cp_dispatch_clear(struct drm_device * dev,
851 drm_radeon_clear_t * clear,
852 drm_radeon_clear_rect_t * depth_boxes)
853{
854 drm_radeon_private_t *dev_priv = dev->dev_private;
855 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
856 drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
857 int nbox = sarea_priv->nbox;
858 struct drm_clip_rect *pbox = sarea_priv->boxes;
859 unsigned int flags = clear->flags;
860 u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
861 int i;
862 RING_LOCALS;
863 DRM_DEBUG("flags = 0x%x\n", flags);
864
865 dev_priv->stats.clears++;
866
867 if (dev_priv->sarea_priv->pfCurrentPage == 1) {
868 unsigned int tmp = flags;
869
870 flags &= ~(RADEON_FRONT | RADEON_BACK);
871 if (tmp & RADEON_FRONT)
872 flags |= RADEON_BACK;
873 if (tmp & RADEON_BACK)
874 flags |= RADEON_FRONT;
875 }
876
877 if (flags & (RADEON_FRONT | RADEON_BACK)) {
878
879 BEGIN_RING(4);
880
881 /* Ensure the 3D stream is idle before doing a
882 * 2D fill to clear the front or back buffer.
883 */
884 RADEON_WAIT_UNTIL_3D_IDLE();
885
886 OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
887 OUT_RING(clear->color_mask);
888
889 ADVANCE_RING();
890
891 /* Make sure we restore the 3D state next time.
892 */
893 dev_priv->sarea_priv->ctx_owner = 0;
894
895 for (i = 0; i < nbox; i++) {
896 int x = pbox[i].x1;
897 int y = pbox[i].y1;
898 int w = pbox[i].x2 - x;
899 int h = pbox[i].y2 - y;
900
901 DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
902 x, y, w, h, flags);
903
904 if (flags & RADEON_FRONT) {
905 BEGIN_RING(6);
906
907 OUT_RING(CP_PACKET3
908 (RADEON_CNTL_PAINT_MULTI, 4));
909 OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
910 RADEON_GMC_BRUSH_SOLID_COLOR |
911 (dev_priv->
912 color_fmt << 8) |
913 RADEON_GMC_SRC_DATATYPE_COLOR |
914 RADEON_ROP3_P |
915 RADEON_GMC_CLR_CMP_CNTL_DIS);
916
917 OUT_RING(dev_priv->front_pitch_offset);
918 OUT_RING(clear->clear_color);
919
920 OUT_RING((x << 16) | y);
921 OUT_RING((w << 16) | h);
922
923 ADVANCE_RING();
924 }
925
926 if (flags & RADEON_BACK) {
927 BEGIN_RING(6);
928
929 OUT_RING(CP_PACKET3
930 (RADEON_CNTL_PAINT_MULTI, 4));
931 OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
932 RADEON_GMC_BRUSH_SOLID_COLOR |
933 (dev_priv->
934 color_fmt << 8) |
935 RADEON_GMC_SRC_DATATYPE_COLOR |
936 RADEON_ROP3_P |
937 RADEON_GMC_CLR_CMP_CNTL_DIS);
938
939 OUT_RING(dev_priv->back_pitch_offset);
940 OUT_RING(clear->clear_color);
941
942 OUT_RING((x << 16) | y);
943 OUT_RING((w << 16) | h);
944
945 ADVANCE_RING();
946 }
947 }
948 }
949
950 /* hyper z clear */
951 /* no docs available, based on reverse engeneering by Stephane Marchesin */
952 if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
953 && (flags & RADEON_CLEAR_FASTZ)) {
954
955 int i;
956 int depthpixperline =
957 dev_priv->depth_fmt ==
958 RADEON_DEPTH_FORMAT_16BIT_INT_Z ? (dev_priv->depth_pitch /
959 2) : (dev_priv->
960 depth_pitch / 4);
961
962 u32 clearmask;
963
964 u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
965 ((clear->depth_mask & 0xff) << 24);
966
967 /* Make sure we restore the 3D state next time.
968 * we haven't touched any "normal" state - still need this?
969 */
970 dev_priv->sarea_priv->ctx_owner = 0;
971
972 if ((dev_priv->flags & RADEON_HAS_HIERZ)
973 && (flags & RADEON_USE_HIERZ)) {
974 /* FIXME : reverse engineer that for Rx00 cards */
975 /* FIXME : the mask supposedly contains low-res z values. So can't set
976 just to the max (0xff? or actually 0x3fff?), need to take z clear
977 value into account? */
978 /* pattern seems to work for r100, though get slight
979 rendering errors with glxgears. If hierz is not enabled for r100,
980 only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
981 other ones are ignored, and the same clear mask can be used. That's
982 very different behaviour than R200 which needs different clear mask
983 and different number of tiles to clear if hierz is enabled or not !?!
984 */
985 clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
986 } else {
987 /* clear mask : chooses the clearing pattern.
988 rv250: could be used to clear only parts of macrotiles
989 (but that would get really complicated...)?
990 bit 0 and 1 (either or both of them ?!?!) are used to
991 not clear tile (or maybe one of the bits indicates if the tile is
992 compressed or not), bit 2 and 3 to not clear tile 1,...,.
993 Pattern is as follows:
994 | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
995 bits -------------------------------------------------
996 | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
997 rv100: clearmask covers 2x8 4x1 tiles, but one clear still
998 covers 256 pixels ?!?
999 */
1000 clearmask = 0x0;
1001 }
1002
1003 BEGIN_RING(8);
1004 RADEON_WAIT_UNTIL_2D_IDLE();
1005 OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
1006 tempRB3D_DEPTHCLEARVALUE);
1007 /* what offset is this exactly ? */
1008 OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
1009 /* need ctlstat, otherwise get some strange black flickering */
1010 OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
1011 RADEON_RB3D_ZC_FLUSH_ALL);
1012 ADVANCE_RING();
1013
1014 for (i = 0; i < nbox; i++) {
1015 int tileoffset, nrtilesx, nrtilesy, j;
1016 /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
1017 if ((dev_priv->flags & RADEON_HAS_HIERZ)
1018 && !(dev_priv->microcode_version == UCODE_R200)) {
1019 /* FIXME : figure this out for r200 (when hierz is enabled). Or
1020 maybe r200 actually doesn't need to put the low-res z value into
1021 the tile cache like r100, but just needs to clear the hi-level z-buffer?
1022 Works for R100, both with hierz and without.
1023 R100 seems to operate on 2x1 8x8 tiles, but...
1024 odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
1025 problematic with resolutions which are not 64 pix aligned? */
1026 tileoffset =
1027 ((pbox[i].y1 >> 3) * depthpixperline +
1028 pbox[i].x1) >> 6;
1029 nrtilesx =
1030 ((pbox[i].x2 & ~63) -
1031 (pbox[i].x1 & ~63)) >> 4;
1032 nrtilesy =
1033 (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
1034 for (j = 0; j <= nrtilesy; j++) {
1035 BEGIN_RING(4);
1036 OUT_RING(CP_PACKET3
1037 (RADEON_3D_CLEAR_ZMASK, 2));
1038 /* first tile */
1039 OUT_RING(tileoffset * 8);
1040 /* the number of tiles to clear */
1041 OUT_RING(nrtilesx + 4);
1042 /* clear mask : chooses the clearing pattern. */
1043 OUT_RING(clearmask);
1044 ADVANCE_RING();
1045 tileoffset += depthpixperline >> 6;
1046 }
1047 } else if (dev_priv->microcode_version == UCODE_R200) {
1048 /* works for rv250. */
1049 /* find first macro tile (8x2 4x4 z-pixels on rv250) */
1050 tileoffset =
1051 ((pbox[i].y1 >> 3) * depthpixperline +
1052 pbox[i].x1) >> 5;
1053 nrtilesx =
1054 (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
1055 nrtilesy =
1056 (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
1057 for (j = 0; j <= nrtilesy; j++) {
1058 BEGIN_RING(4);
1059 OUT_RING(CP_PACKET3
1060 (RADEON_3D_CLEAR_ZMASK, 2));
1061 /* first tile */
1062 /* judging by the first tile offset needed, could possibly
1063 directly address/clear 4x4 tiles instead of 8x2 * 4x4
1064 macro tiles, though would still need clear mask for
1065 right/bottom if truely 4x4 granularity is desired ? */
1066 OUT_RING(tileoffset * 16);
1067 /* the number of tiles to clear */
1068 OUT_RING(nrtilesx + 1);
1069 /* clear mask : chooses the clearing pattern. */
1070 OUT_RING(clearmask);
1071 ADVANCE_RING();
1072 tileoffset += depthpixperline >> 5;
1073 }
1074 } else { /* rv 100 */
1075 /* rv100 might not need 64 pix alignment, who knows */
1076 /* offsets are, hmm, weird */
1077 tileoffset =
1078 ((pbox[i].y1 >> 4) * depthpixperline +
1079 pbox[i].x1) >> 6;
1080 nrtilesx =
1081 ((pbox[i].x2 & ~63) -
1082 (pbox[i].x1 & ~63)) >> 4;
1083 nrtilesy =
1084 (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
1085 for (j = 0; j <= nrtilesy; j++) {
1086 BEGIN_RING(4);
1087 OUT_RING(CP_PACKET3
1088 (RADEON_3D_CLEAR_ZMASK, 2));
1089 OUT_RING(tileoffset * 128);
1090 /* the number of tiles to clear */
1091 OUT_RING(nrtilesx + 4);
1092 /* clear mask : chooses the clearing pattern. */
1093 OUT_RING(clearmask);
1094 ADVANCE_RING();
1095 tileoffset += depthpixperline >> 6;
1096 }
1097 }
1098 }
1099
1100 /* TODO don't always clear all hi-level z tiles */
1101 if ((dev_priv->flags & RADEON_HAS_HIERZ)
1102 && (dev_priv->microcode_version == UCODE_R200)
1103 && (flags & RADEON_USE_HIERZ))
1104 /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
1105 /* FIXME : the mask supposedly contains low-res z values. So can't set
1106 just to the max (0xff? or actually 0x3fff?), need to take z clear
1107 value into account? */
1108 {
1109 BEGIN_RING(4);
1110 OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
1111 OUT_RING(0x0); /* First tile */
1112 OUT_RING(0x3cc0);
1113 OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
1114 ADVANCE_RING();
1115 }
1116 }
1117
1118 /* We have to clear the depth and/or stencil buffers by
1119 * rendering a quad into just those buffers. Thus, we have to
1120 * make sure the 3D engine is configured correctly.
1121 */
1122 else if ((dev_priv->microcode_version == UCODE_R200) &&
1123 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
1124
1125 int tempPP_CNTL;
1126 int tempRE_CNTL;
1127 int tempRB3D_CNTL;
1128 int tempRB3D_ZSTENCILCNTL;
1129 int tempRB3D_STENCILREFMASK;
1130 int tempRB3D_PLANEMASK;
1131 int tempSE_CNTL;
1132 int tempSE_VTE_CNTL;
1133 int tempSE_VTX_FMT_0;
1134 int tempSE_VTX_FMT_1;
1135 int tempSE_VAP_CNTL;
1136 int tempRE_AUX_SCISSOR_CNTL;
1137
1138 tempPP_CNTL = 0;
1139 tempRE_CNTL = 0;
1140
1141 tempRB3D_CNTL = depth_clear->rb3d_cntl;
1142
1143 tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
1144 tempRB3D_STENCILREFMASK = 0x0;
1145
1146 tempSE_CNTL = depth_clear->se_cntl;
1147
1148 /* Disable TCL */
1149
1150 tempSE_VAP_CNTL = ( /* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
1151 (0x9 <<
1152 SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
1153
1154 tempRB3D_PLANEMASK = 0x0;
1155
1156 tempRE_AUX_SCISSOR_CNTL = 0x0;
1157
1158 tempSE_VTE_CNTL =
1159 SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
1160
1161 /* Vertex format (X, Y, Z, W) */
1162 tempSE_VTX_FMT_0 =
1163 SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
1164 SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
1165 tempSE_VTX_FMT_1 = 0x0;
1166
1167 /*
1168 * Depth buffer specific enables
1169 */
1170 if (flags & RADEON_DEPTH) {
1171 /* Enable depth buffer */
1172 tempRB3D_CNTL |= RADEON_Z_ENABLE;
1173 } else {
1174 /* Disable depth buffer */
1175 tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
1176 }
1177
1178 /*
1179 * Stencil buffer specific enables
1180 */
1181 if (flags & RADEON_STENCIL) {
1182 tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
1183 tempRB3D_STENCILREFMASK = clear->depth_mask;
1184 } else {
1185 tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
1186 tempRB3D_STENCILREFMASK = 0x00000000;
1187 }
1188
1189 if (flags & RADEON_USE_COMP_ZBUF) {
1190 tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
1191 RADEON_Z_DECOMPRESSION_ENABLE;
1192 }
1193 if (flags & RADEON_USE_HIERZ) {
1194 tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
1195 }
1196
1197 BEGIN_RING(26);
1198 RADEON_WAIT_UNTIL_2D_IDLE();
1199
1200 OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
1201 OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
1202 OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
1203 OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
1204 OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
1205 tempRB3D_STENCILREFMASK);
1206 OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
1207 OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
1208 OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
1209 OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
1210 OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
1211 OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
1212 OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
1213 ADVANCE_RING();
1214
1215 /* Make sure we restore the 3D state next time.
1216 */
1217 dev_priv->sarea_priv->ctx_owner = 0;
1218
1219 for (i = 0; i < nbox; i++) {
1220
1221 /* Funny that this should be required --
1222 * sets top-left?
1223 */
1224 radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
1225
1226 BEGIN_RING(14);
1227 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
1228 OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
1229 RADEON_PRIM_WALK_RING |
1230 (3 << RADEON_NUM_VERTICES_SHIFT)));
1231 OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
1232 OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
1233 OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
1234 OUT_RING(0x3f800000);
1235 OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
1236 OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
1237 OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
1238 OUT_RING(0x3f800000);
1239 OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
1240 OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
1241 OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
1242 OUT_RING(0x3f800000);
1243 ADVANCE_RING();
1244 }
1245 } else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
1246
1247 int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
1248
1249 rb3d_cntl = depth_clear->rb3d_cntl;
1250
1251 if (flags & RADEON_DEPTH) {
1252 rb3d_cntl |= RADEON_Z_ENABLE;
1253 } else {
1254 rb3d_cntl &= ~RADEON_Z_ENABLE;
1255 }
1256
1257 if (flags & RADEON_STENCIL) {
1258 rb3d_cntl |= RADEON_STENCIL_ENABLE;
1259 rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
1260 } else {
1261 rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
1262 rb3d_stencilrefmask = 0x00000000;
1263 }
1264
1265 if (flags & RADEON_USE_COMP_ZBUF) {
1266 tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
1267 RADEON_Z_DECOMPRESSION_ENABLE;
1268 }
1269 if (flags & RADEON_USE_HIERZ) {
1270 tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
1271 }
1272
1273 BEGIN_RING(13);
1274 RADEON_WAIT_UNTIL_2D_IDLE();
1275
1276 OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
1277 OUT_RING(0x00000000);
1278 OUT_RING(rb3d_cntl);
1279
1280 OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
1281 OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
1282 OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
1283 OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
1284 ADVANCE_RING();
1285
1286 /* Make sure we restore the 3D state next time.
1287 */
1288 dev_priv->sarea_priv->ctx_owner = 0;
1289
1290 for (i = 0; i < nbox; i++) {
1291
1292 /* Funny that this should be required --
1293 * sets top-left?
1294 */
1295 radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
1296
1297 BEGIN_RING(15);
1298
1299 OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
1300 OUT_RING(RADEON_VTX_Z_PRESENT |
1301 RADEON_VTX_PKCOLOR_PRESENT);
1302 OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
1303 RADEON_PRIM_WALK_RING |
1304 RADEON_MAOS_ENABLE |
1305 RADEON_VTX_FMT_RADEON_MODE |
1306 (3 << RADEON_NUM_VERTICES_SHIFT)));
1307
1308 OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
1309 OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
1310 OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
1311 OUT_RING(0x0);
1312
1313 OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
1314 OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
1315 OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
1316 OUT_RING(0x0);
1317
1318 OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
1319 OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
1320 OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
1321 OUT_RING(0x0);
1322
1323 ADVANCE_RING();
1324 }
1325 }
1326
1327 /* Increment the clear counter. The client-side 3D driver must
1328 * wait on this value before performing the clear ioctl. We
1329 * need this because the card's so damned fast...
1330 */
1331 dev_priv->sarea_priv->last_clear++;
1332
1333 BEGIN_RING(4);
1334
1335 RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
1336 RADEON_WAIT_UNTIL_IDLE();
1337
1338 ADVANCE_RING();
1339}
1340
1341static void radeon_cp_dispatch_swap(struct drm_device * dev)
1342{
1343 drm_radeon_private_t *dev_priv = dev->dev_private;
1344 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1345 int nbox = sarea_priv->nbox;
1346 struct drm_clip_rect *pbox = sarea_priv->boxes;
1347 int i;
1348 RING_LOCALS;
1349 DRM_DEBUG("\n");
1350
1351 /* Do some trivial performance monitoring...
1352 */
1353 if (dev_priv->do_boxes)
1354 radeon_cp_performance_boxes(dev_priv);
1355
1356 /* Wait for the 3D stream to idle before dispatching the bitblt.
1357 * This will prevent data corruption between the two streams.
1358 */
1359 BEGIN_RING(2);
1360
1361 RADEON_WAIT_UNTIL_3D_IDLE();
1362
1363 ADVANCE_RING();
1364
1365 for (i = 0; i < nbox; i++) {
1366 int x = pbox[i].x1;
1367 int y = pbox[i].y1;
1368 int w = pbox[i].x2 - x;
1369 int h = pbox[i].y2 - y;
1370
1371 DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
1372
1373 BEGIN_RING(9);
1374
1375 OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
1376 OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
1377 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1378 RADEON_GMC_BRUSH_NONE |
1379 (dev_priv->color_fmt << 8) |
1380 RADEON_GMC_SRC_DATATYPE_COLOR |
1381 RADEON_ROP3_S |
1382 RADEON_DP_SRC_SOURCE_MEMORY |
1383 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
1384
1385 /* Make this work even if front & back are flipped:
1386 */
1387 OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
1388 if (dev_priv->sarea_priv->pfCurrentPage == 0) {
1389 OUT_RING(dev_priv->back_pitch_offset);
1390 OUT_RING(dev_priv->front_pitch_offset);
1391 } else {
1392 OUT_RING(dev_priv->front_pitch_offset);
1393 OUT_RING(dev_priv->back_pitch_offset);
1394 }
1395
1396 OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
1397 OUT_RING((x << 16) | y);
1398 OUT_RING((x << 16) | y);
1399 OUT_RING((w << 16) | h);
1400
1401 ADVANCE_RING();
1402 }
1403
1404 /* Increment the frame counter. The client-side 3D driver must
1405 * throttle the framerate by waiting for this value before
1406 * performing the swapbuffer ioctl.
1407 */
1408 dev_priv->sarea_priv->last_frame++;
1409
1410 BEGIN_RING(4);
1411
1412 RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
1413 RADEON_WAIT_UNTIL_2D_IDLE();
1414
1415 ADVANCE_RING();
1416}
1417
1418static void radeon_cp_dispatch_flip(struct drm_device * dev)
1419{
1420 drm_radeon_private_t *dev_priv = dev->dev_private;
1421 struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
1422 int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
1423 ? dev_priv->front_offset : dev_priv->back_offset;
1424 RING_LOCALS;
1425 DRM_DEBUG("pfCurrentPage=%d\n",
1426 dev_priv->sarea_priv->pfCurrentPage);
1427
1428 /* Do some trivial performance monitoring...
1429 */
1430 if (dev_priv->do_boxes) {
1431 dev_priv->stats.boxes |= RADEON_BOX_FLIP;
1432 radeon_cp_performance_boxes(dev_priv);
1433 }
1434
1435 /* Update the frame offsets for both CRTCs
1436 */
1437 BEGIN_RING(6);
1438
1439 RADEON_WAIT_UNTIL_3D_IDLE();
1440 OUT_RING_REG(RADEON_CRTC_OFFSET,
1441 ((sarea->frame.y * dev_priv->front_pitch +
1442 sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
1443 + offset);
1444 OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
1445 + offset);
1446
1447 ADVANCE_RING();
1448
1449 /* Increment the frame counter. The client-side 3D driver must
1450 * throttle the framerate by waiting for this value before
1451 * performing the swapbuffer ioctl.
1452 */
1453 dev_priv->sarea_priv->last_frame++;
1454 dev_priv->sarea_priv->pfCurrentPage =
1455 1 - dev_priv->sarea_priv->pfCurrentPage;
1456
1457 BEGIN_RING(2);
1458
1459 RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
1460
1461 ADVANCE_RING();
1462}
1463
1464static int bad_prim_vertex_nr(int primitive, int nr)
1465{
1466 switch (primitive & RADEON_PRIM_TYPE_MASK) {
1467 case RADEON_PRIM_TYPE_NONE:
1468 case RADEON_PRIM_TYPE_POINT:
1469 return nr < 1;
1470 case RADEON_PRIM_TYPE_LINE:
1471 return (nr & 1) || nr == 0;
1472 case RADEON_PRIM_TYPE_LINE_STRIP:
1473 return nr < 2;
1474 case RADEON_PRIM_TYPE_TRI_LIST:
1475 case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
1476 case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
1477 case RADEON_PRIM_TYPE_RECT_LIST:
1478 return nr % 3 || nr == 0;
1479 case RADEON_PRIM_TYPE_TRI_FAN:
1480 case RADEON_PRIM_TYPE_TRI_STRIP:
1481 return nr < 3;
1482 default:
1483 return 1;
1484 }
1485}
1486
/* Parameters for one hardware primitive dispatched from a client
 * vertex or index buffer (see radeon_cp_dispatch_vertex/indices).
 */
typedef struct {
	unsigned int start;	/* byte offset of the prim within the buffer */
	unsigned int finish;	/* one past the last byte of the prim */
	unsigned int prim;	/* hw primitive type (RADEON_PRIM_TYPE_*) */
	unsigned int numverts;	/* vertex count emitted to the CP */
	unsigned int offset;	/* added to gart_buffers_offset for indices */
	unsigned int vc_format;	/* vertex format word written to the ring */
} drm_radeon_tcl_prim_t;
1495
1496static void radeon_cp_dispatch_vertex(struct drm_device * dev,
1497 struct drm_buf * buf,
1498 drm_radeon_tcl_prim_t * prim)
1499{
1500 drm_radeon_private_t *dev_priv = dev->dev_private;
1501 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1502 int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
1503 int numverts = (int)prim->numverts;
1504 int nbox = sarea_priv->nbox;
1505 int i = 0;
1506 RING_LOCALS;
1507
1508 DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
1509 prim->prim,
1510 prim->vc_format, prim->start, prim->finish, prim->numverts);
1511
1512 if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
1513 DRM_ERROR("bad prim %x numverts %d\n",
1514 prim->prim, prim->numverts);
1515 return;
1516 }
1517
1518 do {
1519 /* Emit the next cliprect */
1520 if (i < nbox) {
1521 radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
1522 }
1523
1524 /* Emit the vertex buffer rendering commands */
1525 BEGIN_RING(5);
1526
1527 OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
1528 OUT_RING(offset);
1529 OUT_RING(numverts);
1530 OUT_RING(prim->vc_format);
1531 OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
1532 RADEON_COLOR_ORDER_RGBA |
1533 RADEON_VTX_FMT_RADEON_MODE |
1534 (numverts << RADEON_NUM_VERTICES_SHIFT));
1535
1536 ADVANCE_RING();
1537
1538 i++;
1539 } while (i < nbox);
1540}
1541
1542static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
1543{
1544 drm_radeon_private_t *dev_priv = dev->dev_private;
1545 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
1546 RING_LOCALS;
1547
1548 buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
1549
1550 /* Emit the vertex buffer age */
1551 BEGIN_RING(2);
1552 RADEON_DISPATCH_AGE(buf_priv->age);
1553 ADVANCE_RING();
1554
1555 buf->pending = 1;
1556 buf->used = 0;
1557}
1558
1559static void radeon_cp_dispatch_indirect(struct drm_device * dev,
1560 struct drm_buf * buf, int start, int end)
1561{
1562 drm_radeon_private_t *dev_priv = dev->dev_private;
1563 RING_LOCALS;
1564 DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
1565
1566 if (start != end) {
1567 int offset = (dev_priv->gart_buffers_offset
1568 + buf->offset + start);
1569 int dwords = (end - start + 3) / sizeof(u32);
1570
1571 /* Indirect buffer data must be an even number of
1572 * dwords, so if we've been given an odd number we must
1573 * pad the data with a Type-2 CP packet.
1574 */
1575 if (dwords & 1) {
1576 u32 *data = (u32 *)
1577 ((char *)dev->agp_buffer_map->handle
1578 + buf->offset + start);
1579 data[dwords++] = RADEON_CP_PACKET2;
1580 }
1581
1582 /* Fire off the indirect buffer */
1583 BEGIN_RING(3);
1584
1585 OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
1586 OUT_RING(offset);
1587 OUT_RING(dwords);
1588
1589 ADVANCE_RING();
1590 }
1591}
1592
1593static void radeon_cp_dispatch_indices(struct drm_device * dev,
1594 struct drm_buf * elt_buf,
1595 drm_radeon_tcl_prim_t * prim)
1596{
1597 drm_radeon_private_t *dev_priv = dev->dev_private;
1598 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1599 int offset = dev_priv->gart_buffers_offset + prim->offset;
1600 u32 *data;
1601 int dwords;
1602 int i = 0;
1603 int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
1604 int count = (prim->finish - start) / sizeof(u16);
1605 int nbox = sarea_priv->nbox;
1606
1607 DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
1608 prim->prim,
1609 prim->vc_format,
1610 prim->start, prim->finish, prim->offset, prim->numverts);
1611
1612 if (bad_prim_vertex_nr(prim->prim, count)) {
1613 DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
1614 return;
1615 }
1616
1617 if (start >= prim->finish || (prim->start & 0x7)) {
1618 DRM_ERROR("buffer prim %d\n", prim->prim);
1619 return;
1620 }
1621
1622 dwords = (prim->finish - prim->start + 3) / sizeof(u32);
1623
1624 data = (u32 *) ((char *)dev->agp_buffer_map->handle +
1625 elt_buf->offset + prim->start);
1626
1627 data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
1628 data[1] = offset;
1629 data[2] = prim->numverts;
1630 data[3] = prim->vc_format;
1631 data[4] = (prim->prim |
1632 RADEON_PRIM_WALK_IND |
1633 RADEON_COLOR_ORDER_RGBA |
1634 RADEON_VTX_FMT_RADEON_MODE |
1635 (count << RADEON_NUM_VERTICES_SHIFT));
1636
1637 do {
1638 if (i < nbox)
1639 radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
1640
1641 radeon_cp_dispatch_indirect(dev, elt_buf,
1642 prim->start, prim->finish);
1643
1644 i++;
1645 } while (i < nbox);
1646
1647}
1648
1649#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
1650
/* Upload a texture image into video memory by streaming it through
 * indirect blit buffers.  Oversized images are split into multiple
 * passes; on freelist exhaustion the updated *image is copied back to
 * user space and -EAGAIN is returned so the ioctl can be restarted.
 *
 * Returns 0 on success or a negative errno (-EINVAL on bad
 * parameters, -EFAULT on copy failures, -EAGAIN as above).
 */
static int radeon_cp_dispatch_texture(struct drm_device * dev,
				      struct drm_file *file_priv,
				      drm_radeon_texture_t * tex,
				      drm_radeon_tex_image_t * image)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;
	u32 format;
	u32 *buffer;
	const u8 __user *data;
	int size, dwords, tex_width, blit_width, spitch;
	u32 height;
	int i;
	u32 texpitch, microtile;
	u32 offset, byte_offset;
	RING_LOCALS;

	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");
		return -EINVAL;
	}

	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (tex->format) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		/* 4 bytes per texel */
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		/* 2 bytes per texel */
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		/* 1 byte per texel */
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR("invalid texture format %d\n", tex->format);
		return -EINVAL;
	}
	/* source pitch in 64-byte units */
	spitch = blit_width >> 6;
	if (spitch == 0 && image->height > 1)
		return -EINVAL;

	texpitch = tex->pitch;
	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
		microtile = 1;
		if (tex_width < 64) {
			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
			/* we got tiled coordinates, untile them */
			image->x *= 2;
		}
	} else
		microtile = 0;

	/* this might fail for zero-sized uploads - are those illegal? */
	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
				blit_width - 1)) {
		DRM_ERROR("Invalid final destination offset\n");
		return -EINVAL;
	}

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);

	do {
		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			  tex->offset >> 10, tex->pitch, tex->format,
			  image->x, image->y, image->width, image->height);

		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 __user *)image->data;

		size = height * blit_width;

		if (size > RADEON_MAX_TEXTURE_SIZE) {
			/* clamp this pass to what fits; the loop handles
			 * the remainder */
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if (size < 4 && size > 0) {
			size = 4;
		} else if (size == 0) {
			return 0;
		}

		buf = radeon_freelist_get(dev);
		/* NOTE(review): this CP-idle retry is deliberately
		 * disabled ("0 &&"); freelist exhaustion falls through
		 * to the -EAGAIN path below instead.
		 */
		if (0 && !buf) {
			radeon_do_cp_idle(dev_priv);
			buf = radeon_freelist_get(dev);
		}
		if (!buf) {
			DRM_DEBUG("EAGAIN\n");
			/* hand the (possibly advanced) image state back so
			 * user space can retry the remainder */
			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
				return -EFAULT;
			return -EAGAIN;
		}

		/* Dispatch the indirect buffer.
		 */
		buffer =
		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
		dwords = size / 4;

		/* Bounded copy from user space; bails out of the whole
		 * function with -EFAULT on failure.
		 */
#define RADEON_COPY_MT(_buf, _data, _width) \
	do { \
		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
			return -EFAULT; \
		} \
	} while(0)

		if (microtile) {
			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
			   however, we cannot use blitter directly for texture width < 64 bytes,
			   since minimum tex pitch is 64 bytes and we need this to match
			   the texture width, otherwise the blitter will tile it wrong.
			   Thus, tiling manually in this case. Additionally, need to special
			   case tex height = 1, since our actual image will have height 2
			   and we need to ensure we don't read beyond the texture size
			   from user space. */
			if (tex->height == 1) {
				if (tex_width >= 64 || tex_width <= 16) {
					RADEON_COPY_MT(buffer, data,
						       (int)(tex_width * sizeof(u32)));
				} else if (tex_width == 32) {
					RADEON_COPY_MT(buffer, data, 16);
					RADEON_COPY_MT(buffer + 8,
						       data + 16, 16);
				}
			} else if (tex_width >= 64 || tex_width == 16) {
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else if (tex_width < 16) {
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 4;
					data += tex_width;
				}
			} else if (tex_width == 32) {
				/* TODO: make sure this works when not fitting in one buffer
				   (i.e. 32bytes x 2048...) */
				for (i = 0; i < tex->height; i += 2) {
					RADEON_COPY_MT(buffer, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 8, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 4, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 12, data, 16);
					data += 16;
					buffer += 16;
				}
			}
		} else {
			if (tex_width >= 32) {
				/* Texture image width is larger than the minimum, so we
				 * can upload it directly.
				 */
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else {
				/* Texture image width is less than the minimum, so we
				 * need to pad out each image scanline to the minimum
				 * width.
				 */
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 8;
					data += tex_width;
				}
			}
		}

#undef RADEON_COPY_MT
		byte_offset = (image->y & ~2047) * blit_width;
		buf->file_priv = file_priv;
		buf->used = size;
		offset = dev_priv->gart_buffers_offset + buf->offset;
		/* Emit the blit from the indirect buffer to the texture */
		BEGIN_RING(9);
		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		OUT_RING((spitch << 22) | (offset >> 10));
		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
		OUT_RING(0);
		OUT_RING((image->x << 16) | (image->y % 2048));
		OUT_RING((image->width << 16) | height);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		COMMIT_RING();

		radeon_cp_discard_buffer(dev, buf);

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (const u8 __user *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
	COMMIT_RING();

	return 0;
}
1893
1894static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
1895{
1896 drm_radeon_private_t *dev_priv = dev->dev_private;
1897 int i;
1898 RING_LOCALS;
1899 DRM_DEBUG("\n");
1900
1901 BEGIN_RING(35);
1902
1903 OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
1904 OUT_RING(0x00000000);
1905
1906 OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
1907 for (i = 0; i < 32; i++) {
1908 OUT_RING(stipple[i]);
1909 }
1910
1911 ADVANCE_RING();
1912}
1913
1914static void radeon_apply_surface_regs(int surf_index,
1915 drm_radeon_private_t *dev_priv)
1916{
1917 if (!dev_priv->mmio)
1918 return;
1919
1920 radeon_do_cp_idle(dev_priv);
1921
1922 RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
1923 dev_priv->surfaces[surf_index].flags);
1924 RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
1925 dev_priv->surfaces[surf_index].lower);
1926 RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
1927 dev_priv->surfaces[surf_index].upper);
1928}
1929
1930/* Allocates a virtual surface
1931 * doesn't always allocate a real surface, will stretch an existing
1932 * surface when possible.
1933 *
1934 * Note that refcount can be at most 2, since during a free refcount=3
1935 * might mean we have to allocate a new surface which might not always
1936 * be available.
1937 * For example : we allocate three contigous surfaces ABC. If B is
1938 * freed, we suddenly need two surfaces to store A and C, which might
1939 * not always be available.
1940 */
1941static int alloc_surface(drm_radeon_surface_alloc_t *new,
1942 drm_radeon_private_t *dev_priv,
1943 struct drm_file *file_priv)
1944{
1945 struct radeon_virt_surface *s;
1946 int i;
1947 int virt_surface_index;
1948 uint32_t new_upper, new_lower;
1949
1950 new_lower = new->address;
1951 new_upper = new_lower + new->size - 1;
1952
1953 /* sanity check */
1954 if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
1955 ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
1956 RADEON_SURF_ADDRESS_FIXED_MASK)
1957 || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
1958 return -1;
1959
1960 /* make sure there is no overlap with existing surfaces */
1961 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1962 if ((dev_priv->surfaces[i].refcount != 0) &&
1963 (((new_lower >= dev_priv->surfaces[i].lower) &&
1964 (new_lower < dev_priv->surfaces[i].upper)) ||
1965 ((new_lower < dev_priv->surfaces[i].lower) &&
1966 (new_upper > dev_priv->surfaces[i].lower)))) {
1967 return -1;
1968 }
1969 }
1970
1971 /* find a virtual surface */
1972 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
1973 if (dev_priv->virt_surfaces[i].file_priv == 0)
1974 break;
1975 if (i == 2 * RADEON_MAX_SURFACES) {
1976 return -1;
1977 }
1978 virt_surface_index = i;
1979
1980 /* try to reuse an existing surface */
1981 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
1982 /* extend before */
1983 if ((dev_priv->surfaces[i].refcount == 1) &&
1984 (new->flags == dev_priv->surfaces[i].flags) &&
1985 (new_upper + 1 == dev_priv->surfaces[i].lower)) {
1986 s = &(dev_priv->virt_surfaces[virt_surface_index]);
1987 s->surface_index = i;
1988 s->lower = new_lower;
1989 s->upper = new_upper;
1990 s->flags = new->flags;
1991 s->file_priv = file_priv;
1992 dev_priv->surfaces[i].refcount++;
1993 dev_priv->surfaces[i].lower = s->lower;
1994 radeon_apply_surface_regs(s->surface_index, dev_priv);
1995 return virt_surface_index;
1996 }
1997
1998 /* extend after */
1999 if ((dev_priv->surfaces[i].refcount == 1) &&
2000 (new->flags == dev_priv->surfaces[i].flags) &&
2001 (new_lower == dev_priv->surfaces[i].upper + 1)) {
2002 s = &(dev_priv->virt_surfaces[virt_surface_index]);
2003 s->surface_index = i;
2004 s->lower = new_lower;
2005 s->upper = new_upper;
2006 s->flags = new->flags;
2007 s->file_priv = file_priv;
2008 dev_priv->surfaces[i].refcount++;
2009 dev_priv->surfaces[i].upper = s->upper;
2010 radeon_apply_surface_regs(s->surface_index, dev_priv);
2011 return virt_surface_index;
2012 }
2013 }
2014
2015 /* okay, we need a new one */
2016 for (i = 0; i < RADEON_MAX_SURFACES; i++) {
2017 if (dev_priv->surfaces[i].refcount == 0) {
2018 s = &(dev_priv->virt_surfaces[virt_surface_index]);
2019 s->surface_index = i;
2020 s->lower = new_lower;
2021 s->upper = new_upper;
2022 s->flags = new->flags;
2023 s->file_priv = file_priv;
2024 dev_priv->surfaces[i].refcount = 1;
2025 dev_priv->surfaces[i].lower = s->lower;
2026 dev_priv->surfaces[i].upper = s->upper;
2027 dev_priv->surfaces[i].flags = s->flags;
2028 radeon_apply_surface_regs(s->surface_index, dev_priv);
2029 return virt_surface_index;
2030 }
2031 }
2032
2033 /* we didn't find anything */
2034 return -1;
2035}
2036
/* Release one virtual surface owned by @file_priv, identified by its
 * lower address.  The backing physical surface's range is trimmed at
 * whichever edge the virtual surface occupied, and its register state
 * is cleared when the last reference goes away.
 *
 * Returns 0 on success, 1 if no matching virtual surface was found.
 */
static int free_surface(struct drm_file *file_priv,
			drm_radeon_private_t * dev_priv,
			int lower)
{
	struct radeon_virt_surface *s;
	int i;
	/* find the virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		s = &(dev_priv->virt_surfaces[i]);
		if (s->file_priv) {
			if ((lower == s->lower) && (file_priv == s->file_priv))
			{
				/* Shrink the physical surface from the lower
				 * edge if this virtual surface defined it. */
				if (dev_priv->surfaces[s->surface_index].
				    lower == s->lower)
					dev_priv->surfaces[s->surface_index].
					    lower = s->upper;

				/* Likewise shrink from the upper edge. */
				if (dev_priv->surfaces[s->surface_index].
				    upper == s->upper)
					dev_priv->surfaces[s->surface_index].
					    upper = s->lower;

				dev_priv->surfaces[s->surface_index].refcount--;
				/* Last reference: clear flags so the slot is
				 * considered free and its regs are reset. */
				if (dev_priv->surfaces[s->surface_index].
				    refcount == 0)
					dev_priv->surfaces[s->surface_index].
					    flags = 0;
				s->file_priv = NULL;
				radeon_apply_surface_regs(s->surface_index,
							  dev_priv);
				return 0;
			}
		}
	}
	return 1;
}
2073
2074static void radeon_surfaces_release(struct drm_file *file_priv,
2075 drm_radeon_private_t * dev_priv)
2076{
2077 int i;
2078 for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
2079 if (dev_priv->virt_surfaces[i].file_priv == file_priv)
2080 free_surface(file_priv, dev_priv,
2081 dev_priv->virt_surfaces[i].lower);
2082 }
2083}
2084
2085/* ================================================================
2086 * IOCTL functions
2087 */
2088static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
2089{
2090 drm_radeon_private_t *dev_priv = dev->dev_private;
2091 drm_radeon_surface_alloc_t *alloc = data;
2092
2093 if (alloc_surface(alloc, dev_priv, file_priv) == -1)
2094 return -EINVAL;
2095 else
2096 return 0;
2097}
2098
2099static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
2100{
2101 drm_radeon_private_t *dev_priv = dev->dev_private;
2102 drm_radeon_surface_free_t *memfree = data;
2103
2104 if (free_surface(file_priv, dev_priv, memfree->address))
2105 return -EINVAL;
2106 else
2107 return 0;
2108}
2109
/* RADEON_CLEAR ioctl: clear color/depth buffers within the client's
 * SAREA cliprects.  Per-rect depth clear boxes are copied in from
 * user space.
 */
static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t *clear = data;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* Clamp nbox first so the user copy below can never overrun
	 * depth_boxes[] (nbox lives in the shared SAREA). */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
			       sarea_priv->nbox * sizeof(depth_boxes[0])))
		return -EFAULT;

	radeon_cp_dispatch_clear(dev, clear, depth_boxes);

	COMMIT_RING();
	return 0;
}
2134
/* Enable hardware page flipping on both CRTCs and record that flipping
 * is active.  (Original author note: not sure why this isn't set all
 * the time.)
 */
static int radeon_do_init_pageflip(struct drm_device * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("\n");

	/* Set FLIP_CNTL on both CRTC offset control registers so that
	 * CRTC offset updates take effect at vertical blank. */
	BEGIN_RING(6);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	ADVANCE_RING();

	dev_priv->page_flipping = 1;

	/* Normalise the SAREA page tracker: anything other than 1 is
	 * treated as page 0. */
	if (dev_priv->sarea_priv->pfCurrentPage != 1)
		dev_priv->sarea_priv->pfCurrentPage = 0;

	return 0;
}
2161
2162/* Swapping and flipping are different operations, need different ioctls.
2163 * They can & should be intermixed to support multiple 3d windows.
2164 */
/* RADEON_FLIP ioctl: flip the displayed page.  Lazily enables page
 * flipping on first use.
 */
static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* First flip request enables the hardware flip machinery. */
	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip(dev);

	radeon_cp_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}
2182
/* RADEON_SWAP ioctl: blit the back buffer to the front buffer within
 * the client's cliprects.
 */
static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	/* nbox comes from the shared SAREA; clamp before use. */
	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap(dev);
	/* The swap clobbers 3D context state; force a re-emit by the
	 * next context owner. */
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}
2202
/* RADEON_VERTEX ioctl: dispatch a non-indexed primitive from a DMA
 * buffer owned by the caller.  Validates the buffer index, primitive
 * type and ownership, flushes dirty state, then emits the primitive.
 */
static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex_t *vertex = data;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);

	/* All of idx/prim come straight from user space — validate. */
	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}
	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	/* Only the owner may submit a buffer, and never one that is
	 * already queued to the hardware. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	/* Build up a prim_t record:
	 */
	if (vertex->count) {
		buf->used = vertex->count;	/* not used? */

		/* Flush dirty state (except cliprects) before drawing. */
		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
			if (radeon_emit_state(dev_priv, file_priv,
					      &sarea_priv->context_state,
					      sarea_priv->tex_state,
					      sarea_priv->dirty)) {
				DRM_ERROR("radeon_emit_state failed\n");
				return -EINVAL;
			}

			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
					       RADEON_UPLOAD_TEX1IMAGES |
					       RADEON_UPLOAD_TEX2IMAGES |
					       RADEON_REQUIRE_QUIESCENCE);
		}

		prim.start = 0;
		prim.finish = vertex->count;	/* unused */
		prim.prim = vertex->prim;
		prim.numverts = vertex->count;
		prim.vc_format = dev_priv->sarea_priv->vc_format;

		radeon_cp_dispatch_vertex(dev, buf, &prim);
	}

	/* The client may hand the buffer back to the free pool. */
	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}
2278
2279static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
2280{
2281 drm_radeon_private_t *dev_priv = dev->dev_private;
2282 drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
2283 struct drm_device_dma *dma = dev->dma;
2284 struct drm_buf *buf;
2285 drm_radeon_indices_t *elts = data;
2286 drm_radeon_tcl_prim_t prim;
2287 int count;
2288
2289 LOCK_TEST_WITH_RETURN(dev, file_priv);
2290
2291 DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
2292 DRM_CURRENTPID, elts->idx, elts->start, elts->end,
2293 elts->discard);
2294
2295 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
2296 DRM_ERROR("buffer index %d (of %d max)\n",
2297 elts->idx, dma->buf_count - 1);
2298 return -EINVAL;
2299 }
2300 if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
2301 DRM_ERROR("buffer prim %d\n", elts->prim);
2302 return -EINVAL;
2303 }
2304
2305 RING_SPACE_TEST_WITH_RETURN(dev_priv);
2306 VB_AGE_TEST_WITH_RETURN(dev_priv);
2307
2308 buf = dma->buflist[elts->idx];
2309
2310 if (buf->file_priv != file_priv) {
2311 DRM_ERROR("process %d using buffer owned by %p\n",
2312 DRM_CURRENTPID, buf->file_priv);
2313 return -EINVAL;
2314 }
2315 if (buf->pending) {
2316 DRM_ERROR("sending pending buffer %d\n", elts->idx);
2317 return -EINVAL;
2318 }
2319
2320 count = (elts->end - elts->start) / sizeof(u16);
2321 elts->start -= RADEON_INDEX_PRIM_OFFSET;
2322
2323 if (elts->start & 0x7) {
2324 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
2325 return -EINVAL;
2326 }
2327 if (elts->start < buf->used) {
2328 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
2329 return -EINVAL;
2330 }
2331
2332 buf->used = elts->end;
2333
2334 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
2335 if (radeon_emit_state(dev_priv, file_priv,
2336 &sarea_priv->context_state,
2337 sarea_priv->tex_state,
2338 sarea_priv->dirty)) {
2339 DRM_ERROR("radeon_emit_state failed\n");
2340 return -EINVAL;
2341 }
2342
2343 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
2344 RADEON_UPLOAD_TEX1IMAGES |
2345 RADEON_UPLOAD_TEX2IMAGES |
2346 RADEON_REQUIRE_QUIESCENCE);
2347 }
2348
2349 /* Build up a prim_t record:
2350 */
2351 prim.start = elts->start;
2352 prim.finish = elts->end;
2353 prim.prim = elts->prim;
2354 prim.offset = 0; /* offset from start of dma buffers */
2355 prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
2356 prim.vc_format = dev_priv->sarea_priv->vc_format;
2357
2358 radeon_cp_dispatch_indices(dev, buf, &prim);
2359 if (elts->discard) {
2360 radeon_cp_discard_buffer(dev, buf);
2361 }
2362
2363 COMMIT_RING();
2364 return 0;
2365}
2366
/* RADEON_TEXTURE ioctl: upload a texture image via blit.  The image
 * descriptor is copied in from user space; the actual upload and its
 * error handling happen in radeon_cp_dispatch_texture().
 */
static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t *tex = data;
	drm_radeon_tex_image_t image;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (tex->image == NULL) {
		DRM_ERROR("null texture image!\n");
		return -EINVAL;
	}

	if (DRM_COPY_FROM_USER(&image,
			       (drm_radeon_tex_image_t __user *) tex->image,
			       sizeof(image)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);

	return ret;
}
2393
/* RADEON_STIPPLE ioctl: upload the 32x32 polygon stipple pattern
 * (32 dwords) from user space into the stipple registers.
 */
static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t *stipple = data;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
		return -EFAULT;

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	radeon_cp_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}
2412
/* RADEON_INDIRECT ioctl: submit a raw command buffer from a DMA buffer.
 * The contents are NOT verified, so this ioctl is restricted to
 * privileged clients (see the radeon_ioctls table).
 */
static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_indirect_t *indirect = data;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
		  indirect->idx, indirect->start, indirect->end,
		  indirect->discard);

	/* Validate the user-supplied buffer index. */
	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	buf = dma->buflist[indirect->idx];

	/* Only the owner may submit, never a buffer already queued. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
		return -EINVAL;
	}

	/* The submitted range must start at or past data already used. */
	if (indirect->start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect->start, buf->used);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect->end;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
	if (indirect->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}
2477
/* RADEON_VERTEX2 ioctl: dispatch multiple primitives from one DMA
 * buffer, each with its own (optional) state block.  State is only
 * re-emitted when a primitive's stateidx differs from the previous
 * one.
 */
static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_radeon_vertex2_t *vertex = data;
	int i;
	/* 0xff is an impossible stateidx, forcing state emit for the
	 * first primitive. */
	unsigned char laststate;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("pid=%d index=%d discard=%d\n",
		  DRM_CURRENTPID, vertex->idx, vertex->discard);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex->idx, dma->buf_count - 1);
		return -EINVAL;
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex->idx];

	/* Only the owner may submit, never a buffer already queued. */
	if (buf->file_priv != file_priv) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->file_priv);
		return -EINVAL;
	}

	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
		return -EINVAL;
	}

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		return -EINVAL;

	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
		drm_radeon_prim_t prim;
		drm_radeon_tcl_prim_t tclprim;

		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
			return -EFAULT;

		/* Re-emit state only when it changed since the last
		 * primitive.  NOTE(review): prim.stateidx is used to
		 * index the user-space state array via copy_from_user;
		 * no kernel-side bound is applied here — presumably the
		 * copy itself faults on bad pointers.  Confirm. */
		if (prim.stateidx != laststate) {
			drm_radeon_state_t state;

			if (DRM_COPY_FROM_USER(&state,
					       &vertex->state[prim.stateidx],
					       sizeof(state)))
				return -EFAULT;

			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
				DRM_ERROR("radeon_emit_state2 failed\n");
				return -EINVAL;
			}

			laststate = prim.stateidx;
		}

		tclprim.start = prim.start;
		tclprim.finish = prim.finish;
		tclprim.prim = prim.prim;
		tclprim.vc_format = prim.vc_format;

		/* Indexed and non-indexed primitives take different
		 * dispatch paths. */
		if (prim.prim & RADEON_PRIM_WALK_IND) {
			tclprim.offset = prim.numverts * 64;
			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */

			radeon_cp_dispatch_indices(dev, buf, &tclprim);
		} else {
			tclprim.numverts = prim.numverts;
			tclprim.offset = 0;	/* not used */

			radeon_cp_dispatch_vertex(dev, buf, &tclprim);
		}

		if (sarea_priv->nbox == 1)
			sarea_priv->nbox = 0;
	}

	if (vertex->discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}
2569
/* Emit a type-0 register-write packet from the command stream.  The
 * packet id indexes a static table ("packet[]") giving the register
 * start and dword count; the payload is verified/fixed up before being
 * copied to the ring, and the consumed bytes are advanced past.
 */
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
			       struct drm_file *file_priv,
			       drm_radeon_cmd_header_t header,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int id = (int)header.packet.packet_id;
	int sz, reg;
	int *data = (int *)cmdbuf->buf;
	RING_LOCALS;

	if (id >= RADEON_MAX_STATE_PACKETS)
		return -EINVAL;

	sz = packet[id].len;
	reg = packet[id].start;

	/* Guard against a header that claims more payload than remains
	 * in the copied-in command buffer. */
	if (sz * sizeof(int) > cmdbuf->bufsz) {
		DRM_ERROR("Packet size provided larger than data provided\n");
		return -EINVAL;
	}

	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
		DRM_ERROR("Packet verification failed\n");
		return -EINVAL;
	}

	BEGIN_RING(sz + 1);
	OUT_RING(CP_PACKET0(reg, (sz - 1)));
	OUT_RING_TABLE(data, sz);
	ADVANCE_RING();

	/* Consume the payload from the command buffer. */
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}
2605
2606static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
2607 drm_radeon_cmd_header_t header,
2608 drm_radeon_kcmd_buffer_t *cmdbuf)
2609{
2610 int sz = header.scalars.count;
2611 int start = header.scalars.offset;
2612 int stride = header.scalars.stride;
2613 RING_LOCALS;
2614
2615 BEGIN_RING(3 + sz);
2616 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2617 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2618 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2619 OUT_RING_TABLE(cmdbuf->buf, sz);
2620 ADVANCE_RING();
2621 cmdbuf->buf += sz * sizeof(int);
2622 cmdbuf->bufsz -= sz * sizeof(int);
2623 return 0;
2624}
2625
2626/* God this is ugly
2627 */
2628static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
2629 drm_radeon_cmd_header_t header,
2630 drm_radeon_kcmd_buffer_t *cmdbuf)
2631{
2632 int sz = header.scalars.count;
2633 int start = ((unsigned int)header.scalars.offset) + 0x100;
2634 int stride = header.scalars.stride;
2635 RING_LOCALS;
2636
2637 BEGIN_RING(3 + sz);
2638 OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
2639 OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
2640 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
2641 OUT_RING_TABLE(cmdbuf->buf, sz);
2642 ADVANCE_RING();
2643 cmdbuf->buf += sz * sizeof(int);
2644 cmdbuf->bufsz -= sz * sizeof(int);
2645 return 0;
2646}
2647
2648static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
2649 drm_radeon_cmd_header_t header,
2650 drm_radeon_kcmd_buffer_t *cmdbuf)
2651{
2652 int sz = header.vectors.count;
2653 int start = header.vectors.offset;
2654 int stride = header.vectors.stride;
2655 RING_LOCALS;
2656
2657 BEGIN_RING(5 + sz);
2658 OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
2659 OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
2660 OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
2661 OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
2662 OUT_RING_TABLE(cmdbuf->buf, sz);
2663 ADVANCE_RING();
2664
2665 cmdbuf->buf += sz * sizeof(int);
2666 cmdbuf->bufsz -= sz * sizeof(int);
2667 return 0;
2668}
2669
/* Emit a linear run of TCL vectors (stride 1) from the command stream.
 * count is in octwords (4 dwords each); the 16-bit start address is
 * assembled from the two header bytes.
 */
static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
					    drm_radeon_cmd_header_t header,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	/* sz is the dword count: octwords * 4. */
	int sz = header.veclinear.count * 4;
	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
	RING_LOCALS;

	if (!sz)
		return 0;
	/* sz dwords = sz*4 bytes of payload must remain in the buffer. */
	if (sz * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(5 + sz);
	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}
2695
/* Verify and emit a single type-3 packet from the command stream.
 * radeon_check_and_fixup_packet3() validates the packet and reports
 * its size in dwords, which is then consumed from the buffer.
 */
static int radeon_emit_packet3(struct drm_device * dev,
			       struct drm_file *file_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int cmdsz;
	int ret;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	BEGIN_RING(cmdsz);
	OUT_RING_TABLE(cmdbuf->buf, cmdsz);
	ADVANCE_RING();

	/* Consume the emitted packet (cmdsz dwords). */
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}
2721
/* Verify a type-3 packet and emit it once per cliprect, copying each
 * clip rect in from user space and programming it before the packet.
 * With no cliprects (orig_nbox == 0) the packet is skipped entirely,
 * but its bytes are still consumed from the buffer.
 */
static int radeon_emit_packet3_cliprect(struct drm_device *dev,
					struct drm_file *file_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					int orig_nbox)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	unsigned int cmdsz;
	int ret;
	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if (i < cmdbuf->nbox) {
			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
				return -EFAULT;
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect(). This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window. Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row. That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if (i) {
				BEGIN_RING(2);
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect(dev_priv, &box);
		}

		/* Re-emit the same packet for this cliprect. */
		BEGIN_RING(cmdsz);
		OUT_RING_TABLE(cmdbuf->buf, cmdsz);
		ADVANCE_RING();

	} while (++i < cmdbuf->nbox);
	/* A single cliprect applies to the whole command buffer; clear
	 * nbox so subsequent packets are emitted just once. */
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

      out:
	/* Consume the packet (cmdsz dwords) whether or not it was
	 * emitted. */
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}
2783
2784static int radeon_emit_wait(struct drm_device * dev, int flags)
2785{
2786 drm_radeon_private_t *dev_priv = dev->dev_private;
2787 RING_LOCALS;
2788
2789 DRM_DEBUG("%x\n", flags);
2790 switch (flags) {
2791 case RADEON_WAIT_2D:
2792 BEGIN_RING(2);
2793 RADEON_WAIT_UNTIL_2D_IDLE();
2794 ADVANCE_RING();
2795 break;
2796 case RADEON_WAIT_3D:
2797 BEGIN_RING(2);
2798 RADEON_WAIT_UNTIL_3D_IDLE();
2799 ADVANCE_RING();
2800 break;
2801 case RADEON_WAIT_2D | RADEON_WAIT_3D:
2802 BEGIN_RING(2);
2803 RADEON_WAIT_UNTIL_IDLE();
2804 ADVANCE_RING();
2805 break;
2806 default:
2807 return -EINVAL;
2808 }
2809
2810 return 0;
2811}
2812
/* RADEON_CMDBUF ioctl: parse and execute a stream of driver-defined
 * commands.  The whole buffer is first copied into kernel memory so
 * user space cannot change it between validation and use (TOCTOU).
 * R300-class chips take a separate parser (r300_do_cp_cmdbuf); older
 * chips are handled by the switch loop below, one header at a time.
 */
static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	int idx;
	drm_radeon_kcmd_buffer_t *cmdbuf = data;
	drm_radeon_cmd_header_t header;
	int orig_nbox, orig_bufsz;
	char *kbuf = NULL;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	/* Cap the buffer size and reject negative sizes. */
	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
		return -EINVAL;
	}

	/* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
	 * races between checking values and using those values in other code,
	 * and simply to avoid a lot of function calls to copy in data.
	 */
	orig_bufsz = cmdbuf->bufsz;
	if (orig_bufsz != 0) {
		kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
		if (kbuf == NULL)
			return -ENOMEM;
		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
				       cmdbuf->bufsz)) {
			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
			return -EFAULT;
		}
		cmdbuf->buf = kbuf;
	}

	orig_nbox = cmdbuf->nbox;

	/* R300+ command streams use their own verifying parser. */
	if (dev_priv->microcode_version == UCODE_R300) {
		int temp;
		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);

		if (orig_bufsz != 0)
			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

		return temp;
	}

	/* microcode_version != r300 */
	while (cmdbuf->bufsz >= sizeof(header)) {

		/* Peel one command header off the stream; each handler
		 * below consumes its own payload from cmdbuf. */
		header.i = *(int *)cmdbuf->buf;
		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets
			    (dev_priv, file_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_packets failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			/* Return a DMA buffer to the free pool after
			 * validating index and ownership. */
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				goto err;
			}

			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				goto err;
			}

			radeon_cp_discard_buffer(dev, buf);
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect
			    (dev, file_priv, cmdbuf, orig_nbox)) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait(dev, header.wait.flags)) {
				DRM_ERROR("radeon_emit_wait failed\n");
				goto err;
			}
			break;
		case RADEON_CMD_VECLINEAR:
			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
			if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
				DRM_ERROR("radeon_emit_veclinear failed\n");
				goto err;
			}
			break;

		default:
			DRM_ERROR("bad cmd_type %d at %p\n",
				  header.header.cmd_type,
				  cmdbuf->buf - sizeof(header));
			goto err;
		}
	}

	if (orig_bufsz != 0)
		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;

	/* Shared error exit: free the kernel copy of the buffer. */
      err:
	if (orig_bufsz != 0)
		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
	return -EINVAL;
}
2975
/* RADEON_GETPARAM ioctl: return a single int-valued driver parameter
 * to user space via the pointer in param->value.
 */
static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t *param = data;
	int value;

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		/* Track reads for the writeback heuristics. */
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH(0);
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH(1);
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH(2);
		break;
	case RADEON_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio->offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
	/*
	 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
	 * pointer which can't fit into an int-sized variable. According to
	 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
	 * not supporting it shouldn't be a problem. If the same functionality
	 * is needed on 64-bit platforms, a new ioctl() would have to be added,
	 * so backwards-compatibility for the embedded platforms can be
	 * maintained. --davidm 4-Feb-2004.
	 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		value = (long)dev->lock.hw_lock;
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;
	case RADEON_PARAM_SCRATCH_OFFSET:
		/* Scratch registers are only meaningful when writeback
		 * actually works on this board. */
		if (!dev_priv->writeback_works)
			return -EINVAL;
		value = RADEON_SCRATCH_REG_OFFSET;
		break;
	case RADEON_PARAM_CARD_TYPE:
		if (dev_priv->flags & RADEON_IS_PCIE)
			value = RADEON_CARD_PCIE;
		else if (dev_priv->flags & RADEON_IS_AGP)
			value = RADEON_CARD_AGP;
		else
			value = RADEON_CARD_PCI;
		break;
	case RADEON_PARAM_VBLANK_CRTC:
		value = radeon_vblank_crtc_get(dev);
		break;
	case RADEON_PARAM_FB_LOCATION:
		value = radeon_read_fb_location(dev_priv);
		break;
	case RADEON_PARAM_NUM_GB_PIPES:
		value = dev_priv->num_gb_pipes;
		break;
	default:
		DRM_DEBUG("Invalid parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
3063
3064static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
3065{
3066 drm_radeon_private_t *dev_priv = dev->dev_private;
3067 drm_radeon_setparam_t *sp = data;
3068 struct drm_radeon_driver_file_fields *radeon_priv;
3069
3070 switch (sp->param) {
3071 case RADEON_SETPARAM_FB_LOCATION:
3072 radeon_priv = file_priv->driver_priv;
3073 radeon_priv->radeon_fb_delta = dev_priv->fb_location -
3074 sp->value;
3075 break;
3076 case RADEON_SETPARAM_SWITCH_TILING:
3077 if (sp->value == 0) {
3078 DRM_DEBUG("color tiling disabled\n");
3079 dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3080 dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
3081 dev_priv->sarea_priv->tiling_enabled = 0;
3082 } else if (sp->value == 1) {
3083 DRM_DEBUG("color tiling enabled\n");
3084 dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
3085 dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
3086 dev_priv->sarea_priv->tiling_enabled = 1;
3087 }
3088 break;
3089 case RADEON_SETPARAM_PCIGART_LOCATION:
3090 dev_priv->pcigart_offset = sp->value;
3091 dev_priv->pcigart_offset_set = 1;
3092 break;
3093 case RADEON_SETPARAM_NEW_MEMMAP:
3094 dev_priv->new_memmap = sp->value;
3095 break;
3096 case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
3097 dev_priv->gart_info.table_size = sp->value;
3098 if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
3099 dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
3100 break;
3101 case RADEON_SETPARAM_VBLANK_CRTC:
3102 return radeon_vblank_crtc_set(dev, sp->value);
3103 break;
3104 default:
3105 DRM_DEBUG("Invalid parameter %d\n", sp->param);
3106 return -EINVAL;
3107 }
3108
3109 return 0;
3110}
3111
3112/* When a client dies:
3113 * - Check for and clean up flipped page state
3114 * - Free any alloced GART memory.
3115 * - Free any alloced radeon surfaces.
3116 *
3117 * DRM infrastructure takes care of reclaiming dma buffers.
3118 */
3119void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
3120{
3121 if (dev->dev_private) {
3122 drm_radeon_private_t *dev_priv = dev->dev_private;
3123 dev_priv->page_flipping = 0;
3124 radeon_mem_release(file_priv, dev_priv->gart_heap);
3125 radeon_mem_release(file_priv, dev_priv->fb_heap);
3126 radeon_surfaces_release(file_priv, dev_priv);
3127 }
3128}
3129
3130void radeon_driver_lastclose(struct drm_device *dev)
3131{
3132 if (dev->dev_private) {
3133 drm_radeon_private_t *dev_priv = dev->dev_private;
3134
3135 if (dev_priv->sarea_priv &&
3136 dev_priv->sarea_priv->pfCurrentPage != 0)
3137 radeon_cp_dispatch_flip(dev);
3138 }
3139
3140 radeon_do_release(dev);
3141}
3142
3143int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
3144{
3145 drm_radeon_private_t *dev_priv = dev->dev_private;
3146 struct drm_radeon_driver_file_fields *radeon_priv;
3147
3148 DRM_DEBUG("\n");
3149 radeon_priv =
3150 (struct drm_radeon_driver_file_fields *)
3151 drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
3152
3153 if (!radeon_priv)
3154 return -ENOMEM;
3155
3156 file_priv->driver_priv = radeon_priv;
3157
3158 if (dev_priv)
3159 radeon_priv->radeon_fb_delta = dev_priv->fb_location;
3160 else
3161 radeon_priv->radeon_fb_delta = 0;
3162 return 0;
3163}
3164
3165void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
3166{
3167 struct drm_radeon_driver_file_fields *radeon_priv =
3168 file_priv->driver_priv;
3169
3170 drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
3171}
3172
/* Ioctl dispatch table for the radeon driver.
 *
 * Entries flagged DRM_MASTER|DRM_ROOT_ONLY alter global engine state
 * (CP init/start/stop, heap init, indirect buffers) and are restricted
 * to the privileged master client; plain DRM_AUTH entries are available
 * to any authenticated client. */
struct drm_ioctl_desc radeon_ioctls[] = {
	DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
};

/* Number of entries in radeon_ioctls, exported for the DRM core. */
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
new file mode 100644
index 000000000000..d8f84ac7bb26
--- /dev/null
+++ b/drivers/gpu/drm/savage/Makefile
@@ -0,0 +1,9 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y = -Iinclude/drm
6savage-y := savage_drv.o savage_bci.o savage_state.o
7
8obj-$(CONFIG_DRM_SAVAGE)+= savage.o
9
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
new file mode 100644
index 000000000000..d465b2f9c1cd
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -0,0 +1,1095 @@
1/* savage_bci.c -- BCI support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29/* Need a long timeout for shadow status updates can take a while
30 * and so can waiting for events when the queue is full. */
31#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
32#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
33#define SAVAGE_FREELIST_DEBUG 0
34
35static int savage_do_cleanup_bci(struct drm_device *dev);
36
/* Wait until there is room for 'n' more words in the BCI FIFO, using
 * the shadow status page updated by the hardware via DMA.
 * Returns 0 on success, -EBUSY on timeout. */
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	/* A request larger than the guaranteed COB space can never be
	 * satisfied; warn loudly in debug builds. */
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		/* Barrier so we observe the DMA'd status update rather
		 * than a stale value. */
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}
65
66static int
67savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
68{
69 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
70 uint32_t status;
71 int i;
72
73 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
74 status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
75 if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
76 return 0;
77 DRM_UDELAY(1);
78 }
79
80#if SAVAGE_BCI_DEBUG
81 DRM_ERROR("failed!\n");
82 DRM_INFO(" status=0x%08x\n", status);
83#endif
84 return -EBUSY;
85}
86
87static int
88savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
89{
90 uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
91 uint32_t status;
92 int i;
93
94 for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
95 status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
96 if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
97 return 0;
98 DRM_UDELAY(1);
99 }
100
101#if SAVAGE_BCI_DEBUG
102 DRM_ERROR("failed!\n");
103 DRM_INFO(" status=0x%08x\n", status);
104#endif
105 return -EBUSY;
106}
107
108/*
109 * Waiting for events.
110 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
112 * never emit 0 to the event tag. If we find a 0 event tag we know the
113 * BIOS stomped on it and return success assuming that the BIOS waited
114 * for engine idle.
115 *
116 * Note: if the Xserver uses the event tag it has to follow the same
117 * rule. Otherwise there may be glitches every 2^16 events.
118 */
/* Wait until event tag 'e' has been retired, reading the tag from the
 * shadow status page.  Returns 0 on success, -EBUSY on timeout. */
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[1];
		/* Done when the hardware tag has reached or passed 'e'
		 * (16-bit wrap-safe difference <= 0x7fff), or when the
		 * tag is 0, meaning the BIOS stomped on it (see the
		 * comment block above). */
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}
141
142static int
143savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
144{
145 uint32_t status;
146 int i;
147
148 for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
149 status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
150 if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
151 (status & 0xffff) == 0)
152 return 0;
153 DRM_UDELAY(1);
154 }
155
156#if SAVAGE_BCI_DEBUG
157 DRM_ERROR("failed!\n");
158 DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
159#endif
160
161 return -EBUSY;
162}
163
/* Emit a new event tag through the BCI and return its value.
 *
 * The 16-bit tag wraps; wraps are counted in dev_priv->event_wrap.
 * Tag 0 is never emitted (see the comment above the wait_event
 * functions).  'flags' may request a 2D and/or 3D engine wait before
 * the tag is written. */
uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++;	/* See the comment above savage_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t) count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		/* Emit an engine-wait command ahead of the tag update. */
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

	return count;
}
202
203/*
204 * Freelist management
205 */
206static int savage_freelist_init(struct drm_device * dev)
207{
208 drm_savage_private_t *dev_priv = dev->dev_private;
209 struct drm_device_dma *dma = dev->dma;
210 struct drm_buf *buf;
211 drm_savage_buf_priv_t *entry;
212 int i;
213 DRM_DEBUG("count=%d\n", dma->buf_count);
214
215 dev_priv->head.next = &dev_priv->tail;
216 dev_priv->head.prev = NULL;
217 dev_priv->head.buf = NULL;
218
219 dev_priv->tail.next = NULL;
220 dev_priv->tail.prev = &dev_priv->head;
221 dev_priv->tail.buf = NULL;
222
223 for (i = 0; i < dma->buf_count; i++) {
224 buf = dma->buflist[i];
225 entry = buf->dev_private;
226
227 SET_AGE(&entry->age, 0, 0);
228 entry->buf = buf;
229
230 entry->next = dev_priv->head.next;
231 entry->prev = &dev_priv->head;
232 dev_priv->head.next->prev = entry;
233 dev_priv->head.next = entry;
234 }
235
236 return 0;
237}
238
/* Take the least recently used buffer off the tail of the freelist,
 * provided the hardware has already retired it (its age event has
 * passed).  Returns NULL if no retired buffer is available. */
static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	uint16_t event;
	unsigned int wrap;
	DRM_DEBUG("\n");

	/* Read the current hardware event tag and wrap count. */
	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;		/* hardware hasn't passed the last wrap yet */

	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);

	/* event == 0 means the BIOS reset the tag; treat as retired
	 * (see comment above the wait_event functions). */
	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}
271
272void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
273{
274 drm_savage_private_t *dev_priv = dev->dev_private;
275 drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
276
277 DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
278
279 if (entry->next != NULL || entry->prev != NULL) {
280 DRM_ERROR("entry already on freelist.\n");
281 return;
282 }
283
284 prev = &dev_priv->head;
285 next = prev->next;
286 prev->next = entry;
287 next->prev = entry;
288 entry->prev = prev;
289 entry->next = next;
290}
291
292/*
293 * Command DMA
294 */
295static int savage_dma_init(drm_savage_private_t * dev_priv)
296{
297 unsigned int i;
298
299 dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
300 (SAVAGE_DMA_PAGE_SIZE * 4);
301 dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
302 dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
303 if (dev_priv->dma_pages == NULL)
304 return -ENOMEM;
305
306 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
307 SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
308 dev_priv->dma_pages[i].used = 0;
309 dev_priv->dma_pages[i].flushed = 0;
310 }
311 SET_AGE(&dev_priv->last_dma_age, 0, 0);
312
313 dev_priv->first_dma_page = 0;
314 dev_priv->current_dma_page = 0;
315
316 return 0;
317}
318
319void savage_dma_reset(drm_savage_private_t * dev_priv)
320{
321 uint16_t event;
322 unsigned int wrap, i;
323 event = savage_bci_emit_event(dev_priv, 0);
324 wrap = dev_priv->event_wrap;
325 for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
326 SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
327 dev_priv->dma_pages[i].used = 0;
328 dev_priv->dma_pages[i].flushed = 0;
329 }
330 SET_AGE(&dev_priv->last_dma_age, event, wrap);
331 dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
332}
333
/* Block until the hardware has consumed the given DMA page, i.e. its
 * age event has been retired.  No-op for the faked DMA buffer, whose
 * contents are pushed through the BCI directly and never age. */
void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	/* Read the current hardware event tag and wrap count. */
	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;		/* hardware hasn't passed the last wrap yet */

	/* Wait only if the page's age is still in the future. */
	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}
361
/* Reserve space for 'n' 32-bit command words in the command DMA buffer
 * and return a pointer to the first word.
 *
 * If the request fits in the remaining pages the current page is
 * advanced; otherwise pending commands are flushed and allocation
 * restarts from page 0.  Waits until the hardware has released the new
 * current page before returning. */
uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
	    dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
	    SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		/* Fits in the remaining pages: consume the rest of the
		 * current page, then fall through to fill whole pages. */
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		/* Not enough room: flush and restart from page 0. */
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}
	/* Mark the remaining words as used across the following pages. */
	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
	/* i is one past the last page touched by the loop. */
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}
417
/* Kick off hardware DMA for all command words written since the last
 * flush (pages first..cur), then age the flushed pages with a fresh
 * event tag so they can be reused once the hardware retires it. */
static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	/* Nothing new to flush. */
	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	/* Make sure all command words are visible before the DMA kick. */
	DRM_MEMORYBARRIER();

	/* do flush ... */
	phys_addr = dev_priv->cmd_dma->offset +
	    (first * SAVAGE_DMA_PAGE_SIZE +
	     dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		/* after the loop above i == cur, so this records the
		 * current page's used count as flushed */
		dev_priv->first_dma_page = cur;
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}
500
/* Flush fallback used when there is no real command DMA: copy every
 * used word of the faked DMA pages through the BCI by programmed I/O,
 * then reset all pages to empty. */
static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	/* Nothing written since the last flush. */
	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
537
538int savage_driver_load(struct drm_device *dev, unsigned long chipset)
539{
540 drm_savage_private_t *dev_priv;
541
542 dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
543 if (dev_priv == NULL)
544 return -ENOMEM;
545
546 memset(dev_priv, 0, sizeof(drm_savage_private_t));
547 dev->dev_private = (void *)dev_priv;
548
549 dev_priv->chipset = (enum savage_family)chipset;
550
551 return 0;
552}
553
554
555/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
557 * and size of the aperture is not suitable for automatic MTRR setup
558 * in drm_addmap. Therefore we add them manually before the maps are
559 * initialized, and tear them down on last close.
560 */
/* First-open hook: determine MMIO/framebuffer/aperture layout for the
 * chipset, set up MTRRs manually where drm_addmap's automatic setup
 * doesn't fit the hardware (see the comment block above), and register
 * the three core maps.  Returns 0 or a negative errno from drm_addmap. */
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	/* -1 marks "no MTRR allocated" for the cleanup in lastclose. */
	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		/* Savage3D/MX/IX: everything lives in BAR 0; MMIO sits
		 * right after the framebuffer. */
		fb_rsrc = 0;
		fb_base = drm_get_resource_start(dev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
			dev_priv->mtrr[1].base = fb_base + 0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle =
			    drm_mtrr_add(dev_priv->mtrr[1].base,
					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
			dev_priv->mtrr[2].base = fb_base + 0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle =
			    drm_mtrr_add(dev_priv->mtrr[2].base,
					 dev_priv->mtrr[2].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		/* Savage4 family: MMIO in BAR 0, fb + aperture in BAR 1. */
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle =
			    drm_mtrr_add(dev_priv->mtrr[0].base,
					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 1));
		}
	} else {
		/* SuperSavage/Savage2000: separate BARs for MMIO, fb
		 * and aperture. */
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = drm_get_resource_len(dev, 1);
		aper_rsrc = 2;
		aperture_base = drm_get_resource_start(dev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	if (ret)
		return ret;

	return ret;
}
654
655/*
656 * Delete MTRRs and free device-private data.
657 */
658void savage_driver_lastclose(struct drm_device *dev)
659{
660 drm_savage_private_t *dev_priv = dev->dev_private;
661 int i;
662
663 for (i = 0; i < 3; ++i)
664 if (dev_priv->mtrr[i].handle >= 0)
665 drm_mtrr_del(dev_priv->mtrr[i].handle,
666 dev_priv->mtrr[i].base,
667 dev_priv->mtrr[i].size, DRM_MTRR_WC);
668}
669
670int savage_driver_unload(struct drm_device *dev)
671{
672 drm_savage_private_t *dev_priv = dev->dev_private;
673
674 drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
675
676 return 0;
677}
678
679static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
680{
681 drm_savage_private_t *dev_priv = dev->dev_private;
682
683 if (init->fb_bpp != 16 && init->fb_bpp != 32) {
684 DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
685 return -EINVAL;
686 }
687 if (init->depth_bpp != 16 && init->depth_bpp != 32) {
688 DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
689 return -EINVAL;
690 }
691 if (init->dma_type != SAVAGE_DMA_AGP &&
692 init->dma_type != SAVAGE_DMA_PCI) {
693 DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
694 return -EINVAL;
695 }
696
697 dev_priv->cob_size = init->cob_size;
698 dev_priv->bci_threshold_lo = init->bci_threshold_lo;
699 dev_priv->bci_threshold_hi = init->bci_threshold_hi;
700 dev_priv->dma_type = init->dma_type;
701
702 dev_priv->fb_bpp = init->fb_bpp;
703 dev_priv->front_offset = init->front_offset;
704 dev_priv->front_pitch = init->front_pitch;
705 dev_priv->back_offset = init->back_offset;
706 dev_priv->back_pitch = init->back_pitch;
707 dev_priv->depth_bpp = init->depth_bpp;
708 dev_priv->depth_offset = init->depth_offset;
709 dev_priv->depth_pitch = init->depth_pitch;
710
711 dev_priv->texture_offset = init->texture_offset;
712 dev_priv->texture_size = init->texture_size;
713
714 dev_priv->sarea = drm_getsarea(dev);
715 if (!dev_priv->sarea) {
716 DRM_ERROR("could not find sarea!\n");
717 savage_do_cleanup_bci(dev);
718 return -EINVAL;
719 }
720 if (init->status_offset != 0) {
721 dev_priv->status = drm_core_findmap(dev, init->status_offset);
722 if (!dev_priv->status) {
723 DRM_ERROR("could not find shadow status region!\n");
724 savage_do_cleanup_bci(dev);
725 return -EINVAL;
726 }
727 } else {
728 dev_priv->status = NULL;
729 }
730 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
731 dev->agp_buffer_token = init->buffers_offset;
732 dev->agp_buffer_map = drm_core_findmap(dev,
733 init->buffers_offset);
734 if (!dev->agp_buffer_map) {
735 DRM_ERROR("could not find DMA buffer region!\n");
736 savage_do_cleanup_bci(dev);
737 return -EINVAL;
738 }
739 drm_core_ioremap(dev->agp_buffer_map, dev);
740 if (!dev->agp_buffer_map) {
741 DRM_ERROR("failed to ioremap DMA buffer region!\n");
742 savage_do_cleanup_bci(dev);
743 return -ENOMEM;
744 }
745 }
746 if (init->agp_textures_offset) {
747 dev_priv->agp_textures =
748 drm_core_findmap(dev, init->agp_textures_offset);
749 if (!dev_priv->agp_textures) {
750 DRM_ERROR("could not find agp texture region!\n");
751 savage_do_cleanup_bci(dev);
752 return -EINVAL;
753 }
754 } else {
755 dev_priv->agp_textures = NULL;
756 }
757
758 if (init->cmd_dma_offset) {
759 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
760 DRM_ERROR("command DMA not supported on "
761 "Savage3D/MX/IX.\n");
762 savage_do_cleanup_bci(dev);
763 return -EINVAL;
764 }
765 if (dev->dma && dev->dma->buflist) {
766 DRM_ERROR("command and vertex DMA not supported "
767 "at the same time.\n");
768 savage_do_cleanup_bci(dev);
769 return -EINVAL;
770 }
771 dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
772 if (!dev_priv->cmd_dma) {
773 DRM_ERROR("could not find command DMA region!\n");
774 savage_do_cleanup_bci(dev);
775 return -EINVAL;
776 }
777 if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
778 if (dev_priv->cmd_dma->type != _DRM_AGP) {
779 DRM_ERROR("AGP command DMA region is not a "
780 "_DRM_AGP map!\n");
781 savage_do_cleanup_bci(dev);
782 return -EINVAL;
783 }
784 drm_core_ioremap(dev_priv->cmd_dma, dev);
785 if (!dev_priv->cmd_dma->handle) {
786 DRM_ERROR("failed to ioremap command "
787 "DMA region!\n");
788 savage_do_cleanup_bci(dev);
789 return -ENOMEM;
790 }
791 } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
792 DRM_ERROR("PCI command DMA region is not a "
793 "_DRM_CONSISTENT map!\n");
794 savage_do_cleanup_bci(dev);
795 return -EINVAL;
796 }
797 } else {
798 dev_priv->cmd_dma = NULL;
799 }
800
801 dev_priv->dma_flush = savage_dma_flush;
802 if (!dev_priv->cmd_dma) {
803 DRM_DEBUG("falling back to faked command DMA.\n");
804 dev_priv->fake_dma.offset = 0;
805 dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
806 dev_priv->fake_dma.type = _DRM_SHM;
807 dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
808 DRM_MEM_DRIVER);
809 if (!dev_priv->fake_dma.handle) {
810 DRM_ERROR("could not allocate faked DMA buffer!\n");
811 savage_do_cleanup_bci(dev);
812 return -ENOMEM;
813 }
814 dev_priv->cmd_dma = &dev_priv->fake_dma;
815 dev_priv->dma_flush = savage_fake_dma_flush;
816 }
817
818 dev_priv->sarea_priv =
819 (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
820 init->sarea_priv_offset);
821
822 /* setup bitmap descriptors */
823 {
824 unsigned int color_tile_format;
825 unsigned int depth_tile_format;
826 unsigned int front_stride, back_stride, depth_stride;
827 if (dev_priv->chipset <= S3_SAVAGE4) {
828 color_tile_format = dev_priv->fb_bpp == 16 ?
829 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
830 depth_tile_format = dev_priv->depth_bpp == 16 ?
831 SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
832 } else {
833 color_tile_format = SAVAGE_BD_TILE_DEST;
834 depth_tile_format = SAVAGE_BD_TILE_DEST;
835 }
836 front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
837 back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
838 depth_stride =
839 dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
840
841 dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
842 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
843 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
844
845 dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
846 (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
847 (color_tile_format << SAVAGE_BD_TILE_SHIFT);
848
849 dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
850 (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
851 (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
852 }
853
854 /* setup status and bci ptr */
855 dev_priv->event_counter = 0;
856 dev_priv->event_wrap = 0;
857 dev_priv->bci_ptr = (volatile uint32_t *)
858 ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
859 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
860 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
861 } else {
862 dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
863 }
864 if (dev_priv->status != NULL) {
865 dev_priv->status_ptr =
866 (volatile uint32_t *)dev_priv->status->handle;
867 dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
868 dev_priv->wait_evnt = savage_bci_wait_event_shadow;
869 dev_priv->status_ptr[1023] = dev_priv->event_counter;
870 } else {
871 dev_priv->status_ptr = NULL;
872 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
873 dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
874 } else {
875 dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
876 }
877 dev_priv->wait_evnt = savage_bci_wait_event_reg;
878 }
879
880 /* cliprect functions */
881 if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
882 dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
883 else
884 dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
885
886 if (savage_freelist_init(dev) < 0) {
887 DRM_ERROR("could not initialize freelist\n");
888 savage_do_cleanup_bci(dev);
889 return -ENOMEM;
890 }
891
892 if (savage_dma_init(dev_priv) < 0) {
893 DRM_ERROR("could not initialize command DMA\n");
894 savage_do_cleanup_bci(dev);
895 return -ENOMEM;
896 }
897
898 return 0;
899}
900
901static int savage_do_cleanup_bci(struct drm_device * dev)
902{
903 drm_savage_private_t *dev_priv = dev->dev_private;
904
905 if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
906 if (dev_priv->fake_dma.handle)
907 drm_free(dev_priv->fake_dma.handle,
908 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
909 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
910 dev_priv->cmd_dma->type == _DRM_AGP &&
911 dev_priv->dma_type == SAVAGE_DMA_AGP)
912 drm_core_ioremapfree(dev_priv->cmd_dma, dev);
913
914 if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
915 dev->agp_buffer_map && dev->agp_buffer_map->handle) {
916 drm_core_ioremapfree(dev->agp_buffer_map, dev);
917 /* make sure the next instance (which may be running
918 * in PCI mode) doesn't try to use an old
919 * agp_buffer_map. */
920 dev->agp_buffer_map = NULL;
921 }
922
923 if (dev_priv->dma_pages)
924 drm_free(dev_priv->dma_pages,
925 sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
926 DRM_MEM_DRIVER);
927
928 return 0;
929}
930
931static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
932{
933 drm_savage_init_t *init = data;
934
935 LOCK_TEST_WITH_RETURN(dev, file_priv);
936
937 switch (init->func) {
938 case SAVAGE_INIT_BCI:
939 return savage_do_init_bci(dev, init);
940 case SAVAGE_CLEANUP_BCI:
941 return savage_do_cleanup_bci(dev);
942 }
943
944 return -EINVAL;
945}
946
947static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
948{
949 drm_savage_private_t *dev_priv = dev->dev_private;
950 drm_savage_event_emit_t *event = data;
951
952 DRM_DEBUG("\n");
953
954 LOCK_TEST_WITH_RETURN(dev, file_priv);
955
956 event->count = savage_bci_emit_event(dev_priv, event->flags);
957 event->count |= dev_priv->event_wrap << 16;
958
959 return 0;
960}
961
/* DRM_SAVAGE_BCI_EVENT_WAIT ioctl: wait until the hardware has passed
 * the event identified by event->count — low 16 bits are the event
 * tag, high 16 bits the wrap count, as returned by EVENT_EMIT.
 * Returns 0 immediately if the event has already been reached,
 * otherwise defers to the chip-specific wait_evnt hook. */
static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t *event = data;
	unsigned int event_e, hw_e;	/* 16-bit event tags */
	unsigned int event_w, hw_w;	/* wrap counts */

	DRM_DEBUG("\n");

	/* Sync the software wrap counter, then read the tag the hardware
	 * has most recently processed (shadow status page if available,
	 * MMIO status word otherwise). */
	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--;		/* hardware hasn't passed the last wrap yet */

	event_e = event->count & 0xffff;
	event_w = event->count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}
992
993/*
994 * DMA buffer management
995 */
996
997static int savage_bci_get_buffers(struct drm_device *dev,
998 struct drm_file *file_priv,
999 struct drm_dma *d)
1000{
1001 struct drm_buf *buf;
1002 int i;
1003
1004 for (i = d->granted_count; i < d->request_count; i++) {
1005 buf = savage_freelist_get(dev);
1006 if (!buf)
1007 return -EAGAIN;
1008
1009 buf->file_priv = file_priv;
1010
1011 if (DRM_COPY_TO_USER(&d->request_indices[i],
1012 &buf->idx, sizeof(buf->idx)))
1013 return -EFAULT;
1014 if (DRM_COPY_TO_USER(&d->request_sizes[i],
1015 &buf->total, sizeof(buf->total)))
1016 return -EFAULT;
1017
1018 d->granted_count++;
1019 }
1020 return 0;
1021}
1022
1023int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
1024{
1025 struct drm_device_dma *dma = dev->dma;
1026 struct drm_dma *d = data;
1027 int ret = 0;
1028
1029 LOCK_TEST_WITH_RETURN(dev, file_priv);
1030
1031 /* Please don't send us buffers.
1032 */
1033 if (d->send_count != 0) {
1034 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1035 DRM_CURRENTPID, d->send_count);
1036 return -EINVAL;
1037 }
1038
1039 /* We'll send you buffers.
1040 */
1041 if (d->request_count < 0 || d->request_count > dma->buf_count) {
1042 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1043 DRM_CURRENTPID, d->request_count, dma->buf_count);
1044 return -EINVAL;
1045 }
1046
1047 d->granted_count = 0;
1048
1049 if (d->request_count) {
1050 ret = savage_bci_get_buffers(dev, file_priv, d);
1051 }
1052
1053 return ret;
1054}
1055
1056void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1057{
1058 struct drm_device_dma *dma = dev->dma;
1059 drm_savage_private_t *dev_priv = dev->dev_private;
1060 int i;
1061
1062 if (!dma)
1063 return;
1064 if (!dev_priv)
1065 return;
1066 if (!dma->buflist)
1067 return;
1068
1069 /*i830_flush_queue(dev); */
1070
1071 for (i = 0; i < dma->buf_count; i++) {
1072 struct drm_buf *buf = dma->buflist[i];
1073 drm_savage_buf_priv_t *buf_priv = buf->dev_private;
1074
1075 if (buf->file_priv == file_priv && buf_priv &&
1076 buf_priv->next == NULL && buf_priv->prev == NULL) {
1077 uint16_t event;
1078 DRM_DEBUG("reclaimed from client\n");
1079 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1080 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1081 savage_freelist_put(dev, buf);
1082 }
1083 }
1084
1085 drm_core_reclaim_buffers(dev, file_priv);
1086}
1087
/* Ioctl dispatch table, indexed by DRM_SAVAGE_* command number.
 * BCI_INIT maps/unmaps hardware resources and is therefore restricted
 * to the root master; the others only require an authenticated fd. */
struct drm_ioctl_desc savage_ioctls[] = {
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

/* Table size, patched into driver.num_ioctls at module init. */
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
new file mode 100644
index 000000000000..eee52aa92a7c
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -0,0 +1,88 @@
1/* savage_drv.c -- Savage driver for Linux
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include "drmP.h"
27#include "savage_drm.h"
28#include "savage_drv.h"
29
30#include "drm_pciids.h"
31
/* PCI IDs of all supported Savage variants, expanded from the shared
 * table in drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	savage_PCI_IDS
};
35
/* DRM driver descriptor: wires the Savage entry points into the DRM
 * core.  AGP/MTRR setup and the mapped-DMA-buffer path are handled by
 * the core according to driver_features. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
	.dev_priv_size = sizeof(drm_savage_buf_priv_t),	/* per-buffer private data */
	.load = savage_driver_load,
	.firstopen = savage_driver_firstopen,
	.lastclose = savage_driver_lastclose,
	.unload = savage_driver_unload,
	.reclaim_buffers = savage_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = savage_ioctls,
	/* .num_ioctls is filled in by savage_init() at module load */
	.dma_ioctl = savage_bci_buffers,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
71
/* Module load: fill in the ioctl table size (only known from the
 * savage_ioctls definition) and register with the DRM mid-layer. */
static int __init savage_init(void)
{
	driver.num_ioctls = savage_max_ioctl;
	return drm_init(&driver);
}
77
/* Module unload: unregister from the DRM mid-layer. */
static void __exit savage_exit(void)
{
	drm_exit(&driver);
}
82
/* Standard module boilerplate; the author/description/license strings
 * are defined in savage_drv.h. */
module_init(savage_init);
module_exit(savage_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
new file mode 100644
index 000000000000..df2aac6636f7
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -0,0 +1,575 @@
1/* savage_drv.h -- Private header for the savage driver */
2/*
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef __SAVAGE_DRV_H__
27#define __SAVAGE_DRV_H__
28
29#define DRIVER_AUTHOR "Felix Kuehling"
30
31#define DRIVER_NAME "savage"
32#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
33#define DRIVER_DATE "20050313"
34
35#define DRIVER_MAJOR 2
36#define DRIVER_MINOR 4
37#define DRIVER_PATCHLEVEL 1
38/* Interface history:
39 *
40 * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
41 * 2.0 The first real DRM
42 * 2.1 Scissors registers managed by the DRM, 3D operations clipped by
43 * cliprects of the cmdbuf ioctl
44 * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
45 * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
46 * wide and thus very long lived (unlikely to ever wrap). The size
47 * in the struct was 32 bits before, but only 16 bits were used
48 * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
49 * actually used
50 */
51
/* A point in the event stream: the 16-bit hardware event tag plus the
 * software wrap count that disambiguates tag reuse (see
 * UPDATE_EVENT_COUNTER / TEST_AGE below). */
typedef struct drm_savage_age {
	uint16_t event;
	unsigned int wrap;
} drm_savage_age_t;

/* Per-DMA-buffer private data: freelist links plus the age after
 * which the buffer may safely be reused. */
typedef struct drm_savage_buf_priv {
	struct drm_savage_buf_priv *next;
	struct drm_savage_buf_priv *prev;
	drm_savage_age_t age;
	struct drm_buf *buf;	/* back pointer to the owning drm_buf */
} drm_savage_buf_priv_t;

/* Bookkeeping for one page of the command DMA buffer; used/flushed
 * are in dwords (compared against SAVAGE_DMA_PAGE_SIZE in BEGIN_DMA). */
typedef struct drm_savage_dma_page {
	drm_savage_age_t age;
	unsigned int used, flushed;
} drm_savage_dma_page_t;
#define SAVAGE_DMA_PAGE_SIZE 1024	/* in dwords */
/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
 * size of 16kbytes or 4k entries. Minimum requirement would be
 * 10kbytes for 255 40-byte vertices in one drawing command. */
#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
73
74/* interesting bits of hardware state that are saved in dev_priv */
/* interesting bits of hardware state that are saved in dev_priv.
 * The pad[] members overlay the common prefix so that s3d/s4 fields
 * start after it; new_* members hold staged values not yet emitted
 * (compared against the emitted ones in savage_emit_clip_rect_*). */
typedef union {
	struct drm_savage_common_state {
		uint32_t vbaddr;
	} common;
	struct {	/* Savage3D/MX/IX register layout */
		unsigned char pad[sizeof(struct drm_savage_common_state)];
		uint32_t texctrl, texaddr;
		uint32_t scstart, new_scstart;
		uint32_t scend, new_scend;
	} s3d;
	struct {	/* Savage4-family register layout */
		unsigned char pad[sizeof(struct drm_savage_common_state)];
		uint32_t texdescr, texaddr0, texaddr1;
		uint32_t drawctrl0, new_drawctrl0;
		uint32_t drawctrl1, new_drawctrl1;
	} s4;
} drm_savage_state_t;
92
93/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
/* these chip tags should match the ones in the 2D driver in savage_regs.h.
 * Keep the values contiguous and in this order: the S3_*_SERIES()
 * range checks below depend on it. */
enum savage_family {
	S3_UNKNOWN = 0,
	S3_SAVAGE3D,
	S3_SAVAGE_MX,
	S3_SAVAGE4,
	S3_PROSAVAGE,
	S3_TWISTER,
	S3_PROSAVAGEDDR,
	S3_SUPERSAVAGE,
	S3_SAVAGE2000,
	S3_LAST
};
106
107extern struct drm_ioctl_desc savage_ioctls[];
108extern int savage_max_ioctl;
109
/* Chip-family range/identity checks on an enum savage_family value.
 * The macro argument is fully parenthesized so that any expression can
 * be passed safely (the original expanded the bare argument, which
 * mis-binds for e.g. comma or low-precedence expressions). */
#define S3_SAVAGE3D_SERIES(chip) (((chip) >= S3_SAVAGE3D) && ((chip) <= S3_SAVAGE_MX))

#define S3_SAVAGE4_SERIES(chip) (((chip) == S3_SAVAGE4)		\
				 || ((chip) == S3_PROSAVAGE)	\
				 || ((chip) == S3_TWISTER)	\
				 || ((chip) == S3_PROSAVAGEDDR))

#define S3_SAVAGE_MOBILE_SERIES(chip) (((chip) == S3_SAVAGE_MX) || ((chip) == S3_SUPERSAVAGE))

#define S3_SAVAGE_SERIES(chip) (((chip) >= S3_SAVAGE3D) && ((chip) <= S3_SAVAGE2000))

#define S3_MOBILE_TWISTER_SERIES(chip) (((chip) == S3_TWISTER)	\
					|| ((chip) == S3_PROSAVAGEDDR))
123
124/* flags */
125#define SAVAGE_IS_AGP 1
126
/* Per-device private state, reachable via dev->dev_private. */
typedef struct drm_savage_private {
	drm_savage_sarea_t *sarea_priv;

	/* anchors of the buffer freelist — presumably head.next..tail.prev
	 * chain the free buffers; managed by savage_freelist_get/put
	 * (defined elsewhere) — TODO confirm */
	drm_savage_buf_priv_t head, tail;

	/* who am I? */
	enum savage_family chipset;

	unsigned int cob_size;
	unsigned int bci_threshold_lo, bci_threshold_hi;
	unsigned int dma_type;	/* SAVAGE_DMA_AGP or the PCI alternative */

	/* frame buffer layout */
	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;

	/* bitmap descriptors for swap and clear */
	unsigned int front_bd, back_bd, depth_bd;

	/* local textures */
	unsigned int texture_offset;
	unsigned int texture_size;

	/* memory regions in physical memory */
	drm_local_map_t *sarea;
	drm_local_map_t *mmio;
	drm_local_map_t *fb;
	drm_local_map_t *aperture;
	drm_local_map_t *status;	/* shadow status page; may be NULL */
	drm_local_map_t *agp_textures;
	drm_local_map_t *cmd_dma;	/* points at fake_dma in PCI mode */
	drm_local_map_t fake_dma;

	struct {
		int handle;
		unsigned long base, size;
	} mtrr[3];

	/* BCI and status-related stuff */
	volatile uint32_t *status_ptr, *bci_ptr;	/* NULL / BCI port when no status page */
	uint32_t status_used_mask;	/* FIFO-used mask, chip-dependent */
	uint16_t event_counter;		/* last tag seen, kept by UPDATE_EVENT_COUNTER */
	unsigned int event_wrap;	/* times the 16-bit counter wrapped */

	/* Savage4 command DMA */
	drm_savage_dma_page_t *dma_pages;
	unsigned int nr_dma_pages, first_dma_page, current_dma_page;
	drm_savage_age_t last_dma_age;

	/* saved hw state for global/local check on S3D */
	uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
	/* and for scissors (global, so don't emit if not changed) */
	uint32_t hw_scissors_start, hw_scissors_end;

	drm_savage_state_t state;

	/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
	unsigned int waiting;

	/* config/hardware-dependent function pointers, selected at init
	 * based on chipset and whether a shadow status page exists */
	int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n);
	int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e);
	/* Err, there is a macro wait_event in include/linux/wait.h.
	 * Avoid unwanted macro expansion. */
	void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
				const struct drm_clip_rect * pbox);
	void (*dma_flush) (struct drm_savage_private * dev_priv);
} drm_savage_private_t;
198
199/* ioctls */
200extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
201extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
202
203/* BCI functions */
204extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
205 unsigned int flags);
206extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf);
207extern void savage_dma_reset(drm_savage_private_t * dev_priv);
208extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
209extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
210 unsigned int n);
211extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
212extern int savage_driver_firstopen(struct drm_device *dev);
213extern void savage_driver_lastclose(struct drm_device *dev);
214extern int savage_driver_unload(struct drm_device *dev);
215extern void savage_reclaim_buffers(struct drm_device *dev,
216 struct drm_file *file_priv);
217
218/* state functions */
219extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
220 const struct drm_clip_rect * pbox);
221extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
222 const struct drm_clip_rect * pbox);
223
224#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
225#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
226#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
227#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
228#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
229
230#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
231 * inside the MMIO region */
232#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
233 * BCI FIFO */
234
235/*
236 * MMIO registers
237 */
238#define SAVAGE_STATUS_WORD0 0x48C00
239#define SAVAGE_STATUS_WORD1 0x48C04
240#define SAVAGE_ALT_STATUS_WORD0 0x48C60
241
242#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
243#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
244
245/* Copied from savage_bci.h in the 2D driver with some renaming. */
246
247/* Bitmap descriptors */
248#define SAVAGE_BD_STRIDE_SHIFT 0
249#define SAVAGE_BD_BPP_SHIFT 16
250#define SAVAGE_BD_TILE_SHIFT 24
251#define SAVAGE_BD_BW_DISABLE (1<<28)
252/* common: */
253#define SAVAGE_BD_TILE_LINEAR 0
254/* savage4, MX, IX, 3D */
255#define SAVAGE_BD_TILE_16BPP 2
256#define SAVAGE_BD_TILE_32BPP 3
257/* twister, prosavage, DDR, supersavage, 2000 */
258#define SAVAGE_BD_TILE_DEST 1
259#define SAVAGE_BD_TILE_TEXTURE 2
260/* GBD - BCI enable */
261/* savage4, MX, IX, 3D */
262#define SAVAGE_GBD_BCI_ENABLE 8
263/* twister, prosavage, DDR, supersavage, 2000 */
264#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
265
266#define SAVAGE_GBD_BIG_ENDIAN 4
267#define SAVAGE_GBD_LITTLE_ENDIAN 0
268#define SAVAGE_GBD_64 1
269
270/* Global Bitmap Descriptor */
271#define SAVAGE_BCI_GLB_BD_LOW 0x8168
272#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
273
274/*
275 * BCI registers
276 */
277/* Savage4/Twister/ProSavage 3D registers */
278#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
279#define SAVAGE_TEXPALADDR_S4 0x1f
280#define SAVAGE_TEXCTRL0_S4 0x20
281#define SAVAGE_TEXCTRL1_S4 0x21
282#define SAVAGE_TEXADDR0_S4 0x22
283#define SAVAGE_TEXADDR1_S4 0x23
284#define SAVAGE_TEXBLEND0_S4 0x24
285#define SAVAGE_TEXBLEND1_S4 0x25
286#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
287#define SAVAGE_TEXDESCR_S4 0x27
288#define SAVAGE_FOGTABLE_S4 0x28
289#define SAVAGE_FOGCTRL_S4 0x30
290#define SAVAGE_STENCILCTRL_S4 0x31
291#define SAVAGE_ZBUFCTRL_S4 0x32
292#define SAVAGE_ZBUFOFF_S4 0x33
293#define SAVAGE_DESTCTRL_S4 0x34
294#define SAVAGE_DRAWCTRL0_S4 0x35
295#define SAVAGE_DRAWCTRL1_S4 0x36
296#define SAVAGE_ZWATERMARK_S4 0x37
297#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
298#define SAVAGE_TEXBLENDCOLOR_S4 0x39
299/* Savage3D/MX/IX 3D registers */
300#define SAVAGE_TEXPALADDR_S3D 0x18
301#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
302#define SAVAGE_TEXADDR_S3D 0x1A
303#define SAVAGE_TEXDESCR_S3D 0x1B
304#define SAVAGE_TEXCTRL_S3D 0x1C
305#define SAVAGE_FOGTABLE_S3D 0x20
306#define SAVAGE_FOGCTRL_S3D 0x30
307#define SAVAGE_DRAWCTRL_S3D 0x31
308#define SAVAGE_ZBUFCTRL_S3D 0x32
309#define SAVAGE_ZBUFOFF_S3D 0x33
310#define SAVAGE_DESTCTRL_S3D 0x34
311#define SAVAGE_SCSTART_S3D 0x35
312#define SAVAGE_SCEND_S3D 0x36
313#define SAVAGE_ZWATERMARK_S3D 0x37
314#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
315/* common stuff */
316#define SAVAGE_VERTBUFADDR 0x3e
317#define SAVAGE_BITPLANEWTMASK 0xd7
318#define SAVAGE_DMABUFADDR 0x51
319
320/* texture enable bits (needed for tex addr checking) */
321#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
322#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
323#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
324
325/* Global fields in Savage4/Twister/ProSavage 3D registers:
326 *
327 * All texture registers and DrawLocalCtrl are local. All other
328 * registers are global. */
329
330/* Global fields in Savage3D/MX/IX 3D registers:
331 *
332 * All texture registers are local. DrawCtrl and ZBufCtrl are
333 * partially local. All other registers are global.
334 *
335 * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
336 * ZBufCtrl global fields: zCmpFunc, zBufEn
337 */
338#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
339#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
340
341/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
342 */
343#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
344#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
345
346/*
347 * BCI commands
348 */
349#define BCI_CMD_NOP 0x40000000
350#define BCI_CMD_RECT 0x48000000
351#define BCI_CMD_RECT_XP 0x01000000
352#define BCI_CMD_RECT_YP 0x02000000
353#define BCI_CMD_SCANLINE 0x50000000
354#define BCI_CMD_LINE 0x5C000000
355#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
356#define BCI_CMD_BYTE_TEXT 0x63000000
357#define BCI_CMD_NT_BYTE_TEXT 0x67000000
358#define BCI_CMD_BIT_TEXT 0x6C000000
359#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
360#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16))
361#define BCI_CMD_SEND_COLOR 0x00008000
362
363#define BCI_CMD_CLIP_NONE 0x00000000
364#define BCI_CMD_CLIP_CURRENT 0x00002000
365#define BCI_CMD_CLIP_LR 0x00004000
366#define BCI_CMD_CLIP_NEW 0x00006000
367
368#define BCI_CMD_DEST_GBD 0x00000000
369#define BCI_CMD_DEST_PBD 0x00000800
370#define BCI_CMD_DEST_PBD_NEW 0x00000C00
371#define BCI_CMD_DEST_SBD 0x00001000
372#define BCI_CMD_DEST_SBD_NEW 0x00001400
373
374#define BCI_CMD_SRC_TRANSPARENT 0x00000200
375#define BCI_CMD_SRC_SOLID 0x00000000
376#define BCI_CMD_SRC_GBD 0x00000020
377#define BCI_CMD_SRC_COLOR 0x00000040
378#define BCI_CMD_SRC_MONO 0x00000060
379#define BCI_CMD_SRC_PBD_COLOR 0x00000080
380#define BCI_CMD_SRC_PBD_MONO 0x000000A0
381#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
382#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
383#define BCI_CMD_SRC_SBD_COLOR 0x00000100
384#define BCI_CMD_SRC_SBD_MONO 0x00000120
385#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
386#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
387
388#define BCI_CMD_PAT_TRANSPARENT 0x00000010
389#define BCI_CMD_PAT_NONE 0x00000000
390#define BCI_CMD_PAT_COLOR 0x00000002
391#define BCI_CMD_PAT_MONO 0x00000003
392#define BCI_CMD_PAT_PBD_COLOR 0x00000004
393#define BCI_CMD_PAT_PBD_MONO 0x00000005
394#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
395#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
396#define BCI_CMD_PAT_SBD_COLOR 0x00000008
397#define BCI_CMD_PAT_SBD_MONO 0x00000009
398#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
399#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
400
401#define BCI_BD_BW_DISABLE 0x10000000
402#define BCI_BD_TILE_MASK 0x03000000
403#define BCI_BD_TILE_NONE 0x00000000
404#define BCI_BD_TILE_16 0x02000000
405#define BCI_BD_TILE_32 0x03000000
406#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
407#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
408#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
409#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
410
411#define BCI_CMD_SET_REGISTER 0x96000000
412
413#define BCI_CMD_WAIT 0xC0000000
414#define BCI_CMD_WAIT_3D 0x00010000
415#define BCI_CMD_WAIT_2D 0x00020000
416
417#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
418
419#define BCI_CMD_DRAW_PRIM 0x80000000
420#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
421#define BCI_CMD_DRAW_CONT 0x01000000
422#define BCI_CMD_DRAW_TRILIST 0x00000000
423#define BCI_CMD_DRAW_TRISTRIP 0x02000000
424#define BCI_CMD_DRAW_TRIFAN 0x04000000
425#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
426#define BCI_CMD_DRAW_NO_Z 0x00000001
427#define BCI_CMD_DRAW_NO_W 0x00000002
428#define BCI_CMD_DRAW_NO_CD 0x00000004
429#define BCI_CMD_DRAW_NO_CS 0x00000008
430#define BCI_CMD_DRAW_NO_U0 0x00000010
431#define BCI_CMD_DRAW_NO_V0 0x00000020
432#define BCI_CMD_DRAW_NO_UV0 0x00000030
433#define BCI_CMD_DRAW_NO_U1 0x00000040
434#define BCI_CMD_DRAW_NO_V1 0x00000080
435#define BCI_CMD_DRAW_NO_UV1 0x000000c0
436
437#define BCI_CMD_DMA 0xa8000000
438
/* Pack coordinate pairs into single BCI dwords; each component is
 * masked to 12 significant bits. */
#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
/* Second parameter renamed y -> w: the expansion always referenced
 * 'w', so the old macro only compiled if a variable named 'w' happened
 * to be in scope at the point of use. */
#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
445
446#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
447#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
448#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
449 (((maj) & 0x1FFF) | \
450 ((ym) ? 1<<13 : 0) | \
451 ((xp) ? 1<<14 : 0) | \
452 ((yp) ? 1<<15 : 0) | \
453 ((err) << 16))
454
455/*
456 * common commands
457 */
458#define BCI_SET_REGISTERS( first, n ) \
459 BCI_WRITE(BCI_CMD_SET_REGISTER | \
460 ((uint32_t)(n) & 0xff) << 16 | \
461 ((uint32_t)(first) & 0xffff))
462#define DMA_SET_REGISTERS( first, n ) \
463 DMA_WRITE(BCI_CMD_SET_REGISTER | \
464 ((uint32_t)(n) & 0xff) << 16 | \
465 ((uint32_t)(first) & 0xffff))
466
467#define BCI_DRAW_PRIMITIVE(n, type, skip) \
468 BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
469 ((n) << 16))
470#define DMA_DRAW_PRIMITIVE(n, type, skip) \
471 DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
472 ((n) << 16))
473
474#define BCI_DRAW_INDICES_S3D(n, type, i0) \
475 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
476 ((n) << 16) | (i0))
477
478#define BCI_DRAW_INDICES_S4(n, type, skip) \
479 BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
480 (skip) | ((n) << 16))
481
482#define BCI_DMA(n) \
483 BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
484
485/*
486 * access to MMIO
487 */
488#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
489#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
490
/*
 * access to the burst command interface (BCI)
 */
#define SAVAGE_BCI_DEBUG 1

/* Declares the cursor variable used by BEGIN_BCI/BCI_WRITE; place it
 * with the locals of any function that emits BCI commands. */
#define BCI_LOCALS volatile uint32_t *bci_ptr;

/* Wait (via the chip-specific wait_fifo hook) until n FIFO entries are
 * free, then point the cursor at the BCI region.  Expects dev_priv and
 * a BCI_LOCALS declaration in scope. */
#define BEGIN_BCI( n ) do { \
	dev_priv->wait_fifo(dev_priv, (n)); \
	bci_ptr = dev_priv->bci_ptr; \
} while(0)

/* Emit one dword through the BCI cursor. */
#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
504
/*
 * command DMA support
 */
#define SAVAGE_DMA_DEBUG 1

/* Declares the cursor variable used by BEGIN_DMA/DMA_WRITE. */
#define DMA_LOCALS uint32_t *dma_ptr;

/* Reserve n dwords in the command DMA buffer.  Requests that fit in
 * the current page are carved out inline (waiting for the page to
 * drain if it was untouched); larger requests go through
 * savage_dma_alloc().  Expects dev_priv and DMA_LOCALS in scope. */
#define BEGIN_DMA( n ) do { \
	unsigned int cur = dev_priv->current_dma_page; \
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
		dev_priv->dma_pages[cur].used; \
	if ((n) > rest) { \
		dma_ptr = savage_dma_alloc(dev_priv, (n)); \
	} else { /* fast path for small allocations */ \
		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
			cur * SAVAGE_DMA_PAGE_SIZE + \
			dev_priv->dma_pages[cur].used; \
		if (dev_priv->dma_pages[cur].used == 0) \
			savage_dma_wait(dev_priv, cur); \
		dev_priv->dma_pages[cur].used += (n); \
	} \
} while(0)

/* Emit one dword at the DMA cursor. */
#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)

/* Copy n dwords from src into the reserved DMA space. */
#define DMA_COPY(src, n) do { \
	memcpy(dma_ptr, (src), (n)*4); \
	dma_ptr += n; \
} while(0)

#if SAVAGE_DMA_DEBUG
/* Debug variant: verify that exactly the reserved number of dwords was
 * written and reset the DMA state on a mismatch. */
#define DMA_COMMIT() do { \
	unsigned int cur = dev_priv->current_dma_page; \
	uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
		cur * SAVAGE_DMA_PAGE_SIZE + \
		dev_priv->dma_pages[cur].used; \
	if (dma_ptr != expected) { \
		DRM_ERROR("DMA allocation and use don't match: " \
			  "%p != %p\n", expected, dma_ptr); \
		savage_dma_reset(dev_priv); \
	} \
} while(0)
#else
#define DMA_COMMIT() do {/* nothing */} while(0)
#endif

/* Kick the accumulated commands to the hardware (chip-specific hook). */
#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
552
553/* Buffer aging via event tag
554 */
555
/* Re-read the 16-bit event counter from the shadow status page and
 * bump the wrap count when it moved backwards (i.e. wrapped).  No-op
 * when there is no shadow status page. */
#define UPDATE_EVENT_COUNTER( ) do { \
	if (dev_priv->status_ptr) { \
		uint16_t count; \
		/* coordinate with Xserver */ \
		count = dev_priv->status_ptr[1023]; \
		if (count < dev_priv->event_counter) \
			dev_priv->event_wrap++; \
		dev_priv->event_counter = count; \
	} \
} while(0)

/* Stamp an age with event tag e and wrap count w. */
#define SET_AGE( age, e, w ) do { \
	(age)->event = e; \
	(age)->wrap = w; \
} while(0)

/* True when the age has been reached or passed by (e, w). */
#define TEST_AGE( age, e, w ) \
	( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
574
575#endif /* __SAVAGE_DRV_H__ */
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
new file mode 100644
index 000000000000..5f6238fdf1fa
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -0,0 +1,1163 @@
1/* savage_state.c -- State and drawing support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25#include "drmP.h"
26#include "savage_drm.h"
27#include "savage_drv.h"
28
29void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
30 const struct drm_clip_rect * pbox)
31{
32 uint32_t scstart = dev_priv->state.s3d.new_scstart;
33 uint32_t scend = dev_priv->state.s3d.new_scend;
34 scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
35 ((uint32_t) pbox->x1 & 0x000007ff) |
36 (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
37 scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
38 (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
39 ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
40 if (scstart != dev_priv->state.s3d.scstart ||
41 scend != dev_priv->state.s3d.scend) {
42 DMA_LOCALS;
43 BEGIN_DMA(4);
44 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
45 DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
46 DMA_WRITE(scstart);
47 DMA_WRITE(scend);
48 dev_priv->state.s3d.scstart = scstart;
49 dev_priv->state.s3d.scend = scend;
50 dev_priv->waiting = 1;
51 DMA_COMMIT();
52 }
53}
54
55void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
56 const struct drm_clip_rect * pbox)
57{
58 uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
59 uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
60 drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
61 ((uint32_t) pbox->x1 & 0x000007ff) |
62 (((uint32_t) pbox->y1 << 12) & 0x00fff000);
63 drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
64 (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
65 ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);
66 if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
67 drawctrl1 != dev_priv->state.s4.drawctrl1) {
68 DMA_LOCALS;
69 BEGIN_DMA(4);
70 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
71 DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
72 DMA_WRITE(drawctrl0);
73 DMA_WRITE(drawctrl1);
74 dev_priv->state.s4.drawctrl0 = drawctrl0;
75 dev_priv->state.s4.drawctrl1 = drawctrl1;
76 dev_priv->waiting = 1;
77 DMA_COMMIT();
78 }
79}
80
81static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
82 uint32_t addr)
83{
84 if ((addr & 6) != 2) { /* reserved bits */
85 DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
86 return -EINVAL;
87 }
88 if (!(addr & 1)) { /* local */
89 addr &= ~7;
90 if (addr < dev_priv->texture_offset ||
91 addr >= dev_priv->texture_offset + dev_priv->texture_size) {
92 DRM_ERROR
93 ("bad texAddr%d %08x (local addr out of range)\n",
94 unit, addr);
95 return -EINVAL;
96 }
97 } else { /* AGP */
98 if (!dev_priv->agp_textures) {
99 DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
100 unit, addr);
101 return -EINVAL;
102 }
103 addr &= ~7;
104 if (addr < dev_priv->agp_textures->offset ||
105 addr >= (dev_priv->agp_textures->offset +
106 dev_priv->agp_textures->size)) {
107 DRM_ERROR
108 ("bad texAddr%d %08x (AGP addr out of range)\n",
109 unit, addr);
110 return -EINVAL;
111 }
112 }
113 return 0;
114}
115
116#define SAVE_STATE(reg,where) \
117 if(start <= reg && start+count > reg) \
118 dev_priv->state.where = regs[reg - start]
119#define SAVE_STATE_MASK(reg,where,mask) do { \
120 if(start <= reg && start+count > reg) { \
121 uint32_t tmp; \
122 tmp = regs[reg - start]; \
123 dev_priv->state.where = (tmp & (mask)) | \
124 (dev_priv->state.where & ~(mask)); \
125 } \
126} while (0)
127
128static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
129 unsigned int start, unsigned int count,
130 const uint32_t *regs)
131{
132 if (start < SAVAGE_TEXPALADDR_S3D ||
133 start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
134 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
135 start, start + count - 1);
136 return -EINVAL;
137 }
138
139 SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
140 ~SAVAGE_SCISSOR_MASK_S3D);
141 SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
142 ~SAVAGE_SCISSOR_MASK_S3D);
143
144 /* if any texture regs were changed ... */
145 if (start <= SAVAGE_TEXCTRL_S3D &&
146 start + count > SAVAGE_TEXPALADDR_S3D) {
147 /* ... check texture state */
148 SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
149 SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
150 if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
151 return savage_verify_texaddr(dev_priv, 0,
152 dev_priv->state.s3d.texaddr);
153 }
154
155 return 0;
156}
157
158static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
159 unsigned int start, unsigned int count,
160 const uint32_t *regs)
161{
162 int ret = 0;
163
164 if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
165 start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
166 DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
167 start, start + count - 1);
168 return -EINVAL;
169 }
170
171 SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
172 ~SAVAGE_SCISSOR_MASK_S4);
173 SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
174 ~SAVAGE_SCISSOR_MASK_S4);
175
176 /* if any texture regs were changed ... */
177 if (start <= SAVAGE_TEXDESCR_S4 &&
178 start + count > SAVAGE_TEXPALADDR_S4) {
179 /* ... check texture state */
180 SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
181 SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
182 SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
183 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
184 ret |= savage_verify_texaddr(dev_priv, 0,
185 dev_priv->state.s4.texaddr0);
186 if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
187 ret |= savage_verify_texaddr(dev_priv, 1,
188 dev_priv->state.s4.texaddr1);
189 }
190
191 return ret;
192}
193
194#undef SAVE_STATE
195#undef SAVE_STATE_MASK
196
197static int savage_dispatch_state(drm_savage_private_t * dev_priv,
198 const drm_savage_cmd_header_t * cmd_header,
199 const uint32_t *regs)
200{
201 unsigned int count = cmd_header->state.count;
202 unsigned int start = cmd_header->state.start;
203 unsigned int count2 = 0;
204 unsigned int bci_size;
205 int ret;
206 DMA_LOCALS;
207
208 if (!count)
209 return 0;
210
211 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
212 ret = savage_verify_state_s3d(dev_priv, start, count, regs);
213 if (ret != 0)
214 return ret;
215 /* scissor regs are emitted in savage_dispatch_draw */
216 if (start < SAVAGE_SCSTART_S3D) {
217 if (start + count > SAVAGE_SCEND_S3D + 1)
218 count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
219 if (start + count > SAVAGE_SCSTART_S3D)
220 count = SAVAGE_SCSTART_S3D - start;
221 } else if (start <= SAVAGE_SCEND_S3D) {
222 if (start + count > SAVAGE_SCEND_S3D + 1) {
223 count -= SAVAGE_SCEND_S3D + 1 - start;
224 start = SAVAGE_SCEND_S3D + 1;
225 } else
226 return 0;
227 }
228 } else {
229 ret = savage_verify_state_s4(dev_priv, start, count, regs);
230 if (ret != 0)
231 return ret;
232 /* scissor regs are emitted in savage_dispatch_draw */
233 if (start < SAVAGE_DRAWCTRL0_S4) {
234 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
235 count2 = count -
236 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
237 if (start + count > SAVAGE_DRAWCTRL0_S4)
238 count = SAVAGE_DRAWCTRL0_S4 - start;
239 } else if (start <= SAVAGE_DRAWCTRL1_S4) {
240 if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
241 count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
242 start = SAVAGE_DRAWCTRL1_S4 + 1;
243 } else
244 return 0;
245 }
246 }
247
248 bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
249
250 if (cmd_header->state.global) {
251 BEGIN_DMA(bci_size + 1);
252 DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
253 dev_priv->waiting = 1;
254 } else {
255 BEGIN_DMA(bci_size);
256 }
257
258 do {
259 while (count > 0) {
260 unsigned int n = count < 255 ? count : 255;
261 DMA_SET_REGISTERS(start, n);
262 DMA_COPY(regs, n);
263 count -= n;
264 start += n;
265 regs += n;
266 }
267 start += 2;
268 regs += 2;
269 count = count2;
270 count2 = 0;
271 } while (count);
272
273 DMA_COMMIT();
274
275 return 0;
276}
277
278static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
279 const drm_savage_cmd_header_t * cmd_header,
280 const struct drm_buf * dmabuf)
281{
282 unsigned char reorder = 0;
283 unsigned int prim = cmd_header->prim.prim;
284 unsigned int skip = cmd_header->prim.skip;
285 unsigned int n = cmd_header->prim.count;
286 unsigned int start = cmd_header->prim.start;
287 unsigned int i;
288 BCI_LOCALS;
289
290 if (!dmabuf) {
291 DRM_ERROR("called without dma buffers!\n");
292 return -EINVAL;
293 }
294
295 if (!n)
296 return 0;
297
298 switch (prim) {
299 case SAVAGE_PRIM_TRILIST_201:
300 reorder = 1;
301 prim = SAVAGE_PRIM_TRILIST;
302 case SAVAGE_PRIM_TRILIST:
303 if (n % 3 != 0) {
304 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
305 n);
306 return -EINVAL;
307 }
308 break;
309 case SAVAGE_PRIM_TRISTRIP:
310 case SAVAGE_PRIM_TRIFAN:
311 if (n < 3) {
312 DRM_ERROR
313 ("wrong number of vertices %u in TRIFAN/STRIP\n",
314 n);
315 return -EINVAL;
316 }
317 break;
318 default:
319 DRM_ERROR("invalid primitive type %u\n", prim);
320 return -EINVAL;
321 }
322
323 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
324 if (skip != 0) {
325 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
326 return -EINVAL;
327 }
328 } else {
329 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
330 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
331 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
332 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
333 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
334 return -EINVAL;
335 }
336 if (reorder) {
337 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
338 return -EINVAL;
339 }
340 }
341
342 if (start + n > dmabuf->total / 32) {
343 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
344 start, start + n - 1, dmabuf->total / 32);
345 return -EINVAL;
346 }
347
348 /* Vertex DMA doesn't work with command DMA at the same time,
349 * so we use BCI_... to submit commands here. Flush buffered
350 * faked DMA first. */
351 DMA_FLUSH();
352
353 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
354 BEGIN_BCI(2);
355 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
356 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
357 dev_priv->state.common.vbaddr = dmabuf->bus_address;
358 }
359 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
360 /* Workaround for what looks like a hardware bug. If a
361 * WAIT_3D_IDLE was emitted some time before the
362 * indexed drawing command then the engine will lock
363 * up. There are two known workarounds:
364 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
365 BEGIN_BCI(63);
366 for (i = 0; i < 63; ++i)
367 BCI_WRITE(BCI_CMD_WAIT);
368 dev_priv->waiting = 0;
369 }
370
371 prim <<= 25;
372 while (n != 0) {
373 /* Can emit up to 255 indices (85 triangles) at once. */
374 unsigned int count = n > 255 ? 255 : n;
375 if (reorder) {
376 /* Need to reorder indices for correct flat
377 * shading while preserving the clock sense
378 * for correct culling. Only on Savage3D. */
379 int reorder[3] = { -1, -1, -1 };
380 reorder[start % 3] = 2;
381
382 BEGIN_BCI((count + 1 + 1) / 2);
383 BCI_DRAW_INDICES_S3D(count, prim, start + 2);
384
385 for (i = start + 1; i + 1 < start + count; i += 2)
386 BCI_WRITE((i + reorder[i % 3]) |
387 ((i + 1 +
388 reorder[(i + 1) % 3]) << 16));
389 if (i < start + count)
390 BCI_WRITE(i + reorder[i % 3]);
391 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
392 BEGIN_BCI((count + 1 + 1) / 2);
393 BCI_DRAW_INDICES_S3D(count, prim, start);
394
395 for (i = start + 1; i + 1 < start + count; i += 2)
396 BCI_WRITE(i | ((i + 1) << 16));
397 if (i < start + count)
398 BCI_WRITE(i);
399 } else {
400 BEGIN_BCI((count + 2 + 1) / 2);
401 BCI_DRAW_INDICES_S4(count, prim, skip);
402
403 for (i = start; i + 1 < start + count; i += 2)
404 BCI_WRITE(i | ((i + 1) << 16));
405 if (i < start + count)
406 BCI_WRITE(i);
407 }
408
409 start += count;
410 n -= count;
411
412 prim |= BCI_CMD_DRAW_CONT;
413 }
414
415 return 0;
416}
417
418static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
419 const drm_savage_cmd_header_t * cmd_header,
420 const uint32_t *vtxbuf, unsigned int vb_size,
421 unsigned int vb_stride)
422{
423 unsigned char reorder = 0;
424 unsigned int prim = cmd_header->prim.prim;
425 unsigned int skip = cmd_header->prim.skip;
426 unsigned int n = cmd_header->prim.count;
427 unsigned int start = cmd_header->prim.start;
428 unsigned int vtx_size;
429 unsigned int i;
430 DMA_LOCALS;
431
432 if (!n)
433 return 0;
434
435 switch (prim) {
436 case SAVAGE_PRIM_TRILIST_201:
437 reorder = 1;
438 prim = SAVAGE_PRIM_TRILIST;
439 case SAVAGE_PRIM_TRILIST:
440 if (n % 3 != 0) {
441 DRM_ERROR("wrong number of vertices %u in TRILIST\n",
442 n);
443 return -EINVAL;
444 }
445 break;
446 case SAVAGE_PRIM_TRISTRIP:
447 case SAVAGE_PRIM_TRIFAN:
448 if (n < 3) {
449 DRM_ERROR
450 ("wrong number of vertices %u in TRIFAN/STRIP\n",
451 n);
452 return -EINVAL;
453 }
454 break;
455 default:
456 DRM_ERROR("invalid primitive type %u\n", prim);
457 return -EINVAL;
458 }
459
460 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
461 if (skip > SAVAGE_SKIP_ALL_S3D) {
462 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
463 return -EINVAL;
464 }
465 vtx_size = 8; /* full vertex */
466 } else {
467 if (skip > SAVAGE_SKIP_ALL_S4) {
468 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
469 return -EINVAL;
470 }
471 vtx_size = 10; /* full vertex */
472 }
473
474 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
475 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
476 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
477
478 if (vtx_size > vb_stride) {
479 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
480 vtx_size, vb_stride);
481 return -EINVAL;
482 }
483
484 if (start + n > vb_size / (vb_stride * 4)) {
485 DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
486 start, start + n - 1, vb_size / (vb_stride * 4));
487 return -EINVAL;
488 }
489
490 prim <<= 25;
491 while (n != 0) {
492 /* Can emit up to 255 vertices (85 triangles) at once. */
493 unsigned int count = n > 255 ? 255 : n;
494 if (reorder) {
495 /* Need to reorder vertices for correct flat
496 * shading while preserving the clock sense
497 * for correct culling. Only on Savage3D. */
498 int reorder[3] = { -1, -1, -1 };
499 reorder[start % 3] = 2;
500
501 BEGIN_DMA(count * vtx_size + 1);
502 DMA_DRAW_PRIMITIVE(count, prim, skip);
503
504 for (i = start; i < start + count; ++i) {
505 unsigned int j = i + reorder[i % 3];
506 DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
507 }
508
509 DMA_COMMIT();
510 } else {
511 BEGIN_DMA(count * vtx_size + 1);
512 DMA_DRAW_PRIMITIVE(count, prim, skip);
513
514 if (vb_stride == vtx_size) {
515 DMA_COPY(&vtxbuf[vb_stride * start],
516 vtx_size * count);
517 } else {
518 for (i = start; i < start + count; ++i) {
519 DMA_COPY(&vtxbuf [vb_stride * i],
520 vtx_size);
521 }
522 }
523
524 DMA_COMMIT();
525 }
526
527 start += count;
528 n -= count;
529
530 prim |= BCI_CMD_DRAW_CONT;
531 }
532
533 return 0;
534}
535
536static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
537 const drm_savage_cmd_header_t * cmd_header,
538 const uint16_t *idx,
539 const struct drm_buf * dmabuf)
540{
541 unsigned char reorder = 0;
542 unsigned int prim = cmd_header->idx.prim;
543 unsigned int skip = cmd_header->idx.skip;
544 unsigned int n = cmd_header->idx.count;
545 unsigned int i;
546 BCI_LOCALS;
547
548 if (!dmabuf) {
549 DRM_ERROR("called without dma buffers!\n");
550 return -EINVAL;
551 }
552
553 if (!n)
554 return 0;
555
556 switch (prim) {
557 case SAVAGE_PRIM_TRILIST_201:
558 reorder = 1;
559 prim = SAVAGE_PRIM_TRILIST;
560 case SAVAGE_PRIM_TRILIST:
561 if (n % 3 != 0) {
562 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
563 return -EINVAL;
564 }
565 break;
566 case SAVAGE_PRIM_TRISTRIP:
567 case SAVAGE_PRIM_TRIFAN:
568 if (n < 3) {
569 DRM_ERROR
570 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
571 return -EINVAL;
572 }
573 break;
574 default:
575 DRM_ERROR("invalid primitive type %u\n", prim);
576 return -EINVAL;
577 }
578
579 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
580 if (skip != 0) {
581 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
582 return -EINVAL;
583 }
584 } else {
585 unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
586 (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
587 (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
588 if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
589 DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
590 return -EINVAL;
591 }
592 if (reorder) {
593 DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
594 return -EINVAL;
595 }
596 }
597
598 /* Vertex DMA doesn't work with command DMA at the same time,
599 * so we use BCI_... to submit commands here. Flush buffered
600 * faked DMA first. */
601 DMA_FLUSH();
602
603 if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
604 BEGIN_BCI(2);
605 BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
606 BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
607 dev_priv->state.common.vbaddr = dmabuf->bus_address;
608 }
609 if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
610 /* Workaround for what looks like a hardware bug. If a
611 * WAIT_3D_IDLE was emitted some time before the
612 * indexed drawing command then the engine will lock
613 * up. There are two known workarounds:
614 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
615 BEGIN_BCI(63);
616 for (i = 0; i < 63; ++i)
617 BCI_WRITE(BCI_CMD_WAIT);
618 dev_priv->waiting = 0;
619 }
620
621 prim <<= 25;
622 while (n != 0) {
623 /* Can emit up to 255 indices (85 triangles) at once. */
624 unsigned int count = n > 255 ? 255 : n;
625
626 /* check indices */
627 for (i = 0; i < count; ++i) {
628 if (idx[i] > dmabuf->total / 32) {
629 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
630 i, idx[i], dmabuf->total / 32);
631 return -EINVAL;
632 }
633 }
634
635 if (reorder) {
636 /* Need to reorder indices for correct flat
637 * shading while preserving the clock sense
638 * for correct culling. Only on Savage3D. */
639 int reorder[3] = { 2, -1, -1 };
640
641 BEGIN_BCI((count + 1 + 1) / 2);
642 BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
643
644 for (i = 1; i + 1 < count; i += 2)
645 BCI_WRITE(idx[i + reorder[i % 3]] |
646 (idx[i + 1 +
647 reorder[(i + 1) % 3]] << 16));
648 if (i < count)
649 BCI_WRITE(idx[i + reorder[i % 3]]);
650 } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
651 BEGIN_BCI((count + 1 + 1) / 2);
652 BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
653
654 for (i = 1; i + 1 < count; i += 2)
655 BCI_WRITE(idx[i] | (idx[i + 1] << 16));
656 if (i < count)
657 BCI_WRITE(idx[i]);
658 } else {
659 BEGIN_BCI((count + 2 + 1) / 2);
660 BCI_DRAW_INDICES_S4(count, prim, skip);
661
662 for (i = 0; i + 1 < count; i += 2)
663 BCI_WRITE(idx[i] | (idx[i + 1] << 16));
664 if (i < count)
665 BCI_WRITE(idx[i]);
666 }
667
668 idx += count;
669 n -= count;
670
671 prim |= BCI_CMD_DRAW_CONT;
672 }
673
674 return 0;
675}
676
677static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
678 const drm_savage_cmd_header_t * cmd_header,
679 const uint16_t *idx,
680 const uint32_t *vtxbuf,
681 unsigned int vb_size, unsigned int vb_stride)
682{
683 unsigned char reorder = 0;
684 unsigned int prim = cmd_header->idx.prim;
685 unsigned int skip = cmd_header->idx.skip;
686 unsigned int n = cmd_header->idx.count;
687 unsigned int vtx_size;
688 unsigned int i;
689 DMA_LOCALS;
690
691 if (!n)
692 return 0;
693
694 switch (prim) {
695 case SAVAGE_PRIM_TRILIST_201:
696 reorder = 1;
697 prim = SAVAGE_PRIM_TRILIST;
698 case SAVAGE_PRIM_TRILIST:
699 if (n % 3 != 0) {
700 DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
701 return -EINVAL;
702 }
703 break;
704 case SAVAGE_PRIM_TRISTRIP:
705 case SAVAGE_PRIM_TRIFAN:
706 if (n < 3) {
707 DRM_ERROR
708 ("wrong number of indices %u in TRIFAN/STRIP\n", n);
709 return -EINVAL;
710 }
711 break;
712 default:
713 DRM_ERROR("invalid primitive type %u\n", prim);
714 return -EINVAL;
715 }
716
717 if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
718 if (skip > SAVAGE_SKIP_ALL_S3D) {
719 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
720 return -EINVAL;
721 }
722 vtx_size = 8; /* full vertex */
723 } else {
724 if (skip > SAVAGE_SKIP_ALL_S4) {
725 DRM_ERROR("invalid skip flags 0x%04x\n", skip);
726 return -EINVAL;
727 }
728 vtx_size = 10; /* full vertex */
729 }
730
731 vtx_size -= (skip & 1) + (skip >> 1 & 1) +
732 (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
733 (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
734
735 if (vtx_size > vb_stride) {
736 DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
737 vtx_size, vb_stride);
738 return -EINVAL;
739 }
740
741 prim <<= 25;
742 while (n != 0) {
743 /* Can emit up to 255 vertices (85 triangles) at once. */
744 unsigned int count = n > 255 ? 255 : n;
745
746 /* Check indices */
747 for (i = 0; i < count; ++i) {
748 if (idx[i] > vb_size / (vb_stride * 4)) {
749 DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
750 i, idx[i], vb_size / (vb_stride * 4));
751 return -EINVAL;
752 }
753 }
754
755 if (reorder) {
756 /* Need to reorder vertices for correct flat
757 * shading while preserving the clock sense
758 * for correct culling. Only on Savage3D. */
759 int reorder[3] = { 2, -1, -1 };
760
761 BEGIN_DMA(count * vtx_size + 1);
762 DMA_DRAW_PRIMITIVE(count, prim, skip);
763
764 for (i = 0; i < count; ++i) {
765 unsigned int j = idx[i + reorder[i % 3]];
766 DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
767 }
768
769 DMA_COMMIT();
770 } else {
771 BEGIN_DMA(count * vtx_size + 1);
772 DMA_DRAW_PRIMITIVE(count, prim, skip);
773
774 for (i = 0; i < count; ++i) {
775 unsigned int j = idx[i];
776 DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
777 }
778
779 DMA_COMMIT();
780 }
781
782 idx += count;
783 n -= count;
784
785 prim |= BCI_CMD_DRAW_CONT;
786 }
787
788 return 0;
789}
790
791static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
792 const drm_savage_cmd_header_t * cmd_header,
793 const drm_savage_cmd_header_t *data,
794 unsigned int nbox,
795 const struct drm_clip_rect *boxes)
796{
797 unsigned int flags = cmd_header->clear0.flags;
798 unsigned int clear_cmd;
799 unsigned int i, nbufs;
800 DMA_LOCALS;
801
802 if (nbox == 0)
803 return 0;
804
805 clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
806 BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
807 BCI_CMD_SET_ROP(clear_cmd, 0xCC);
808
809 nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
810 ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
811 if (nbufs == 0)
812 return 0;
813
814 if (data->clear1.mask != 0xffffffff) {
815 /* set mask */
816 BEGIN_DMA(2);
817 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
818 DMA_WRITE(data->clear1.mask);
819 DMA_COMMIT();
820 }
821 for (i = 0; i < nbox; ++i) {
822 unsigned int x, y, w, h;
823 unsigned int buf;
824 x = boxes[i].x1, y = boxes[i].y1;
825 w = boxes[i].x2 - boxes[i].x1;
826 h = boxes[i].y2 - boxes[i].y1;
827 BEGIN_DMA(nbufs * 6);
828 for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
829 if (!(flags & buf))
830 continue;
831 DMA_WRITE(clear_cmd);
832 switch (buf) {
833 case SAVAGE_FRONT:
834 DMA_WRITE(dev_priv->front_offset);
835 DMA_WRITE(dev_priv->front_bd);
836 break;
837 case SAVAGE_BACK:
838 DMA_WRITE(dev_priv->back_offset);
839 DMA_WRITE(dev_priv->back_bd);
840 break;
841 case SAVAGE_DEPTH:
842 DMA_WRITE(dev_priv->depth_offset);
843 DMA_WRITE(dev_priv->depth_bd);
844 break;
845 }
846 DMA_WRITE(data->clear1.value);
847 DMA_WRITE(BCI_X_Y(x, y));
848 DMA_WRITE(BCI_W_H(w, h));
849 }
850 DMA_COMMIT();
851 }
852 if (data->clear1.mask != 0xffffffff) {
853 /* reset mask */
854 BEGIN_DMA(2);
855 DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
856 DMA_WRITE(0xffffffff);
857 DMA_COMMIT();
858 }
859
860 return 0;
861}
862
863static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
864 unsigned int nbox, const struct drm_clip_rect *boxes)
865{
866 unsigned int swap_cmd;
867 unsigned int i;
868 DMA_LOCALS;
869
870 if (nbox == 0)
871 return 0;
872
873 swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
874 BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
875 BCI_CMD_SET_ROP(swap_cmd, 0xCC);
876
877 for (i = 0; i < nbox; ++i) {
878 BEGIN_DMA(6);
879 DMA_WRITE(swap_cmd);
880 DMA_WRITE(dev_priv->back_offset);
881 DMA_WRITE(dev_priv->back_bd);
882 DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
883 DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
884 DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
885 boxes[i].y2 - boxes[i].y1));
886 DMA_COMMIT();
887 }
888
889 return 0;
890}
891
892static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
893 const drm_savage_cmd_header_t *start,
894 const drm_savage_cmd_header_t *end,
895 const struct drm_buf * dmabuf,
896 const unsigned int *vtxbuf,
897 unsigned int vb_size, unsigned int vb_stride,
898 unsigned int nbox,
899 const struct drm_clip_rect *boxes)
900{
901 unsigned int i, j;
902 int ret;
903
904 for (i = 0; i < nbox; ++i) {
905 const drm_savage_cmd_header_t *cmdbuf;
906 dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
907
908 cmdbuf = start;
909 while (cmdbuf < end) {
910 drm_savage_cmd_header_t cmd_header;
911 cmd_header = *cmdbuf;
912 cmdbuf++;
913 switch (cmd_header.cmd.cmd) {
914 case SAVAGE_CMD_DMA_PRIM:
915 ret = savage_dispatch_dma_prim(
916 dev_priv, &cmd_header, dmabuf);
917 break;
918 case SAVAGE_CMD_VB_PRIM:
919 ret = savage_dispatch_vb_prim(
920 dev_priv, &cmd_header,
921 vtxbuf, vb_size, vb_stride);
922 break;
923 case SAVAGE_CMD_DMA_IDX:
924 j = (cmd_header.idx.count + 3) / 4;
925 /* j was check in savage_bci_cmdbuf */
926 ret = savage_dispatch_dma_idx(dev_priv,
927 &cmd_header, (const uint16_t *)cmdbuf,
928 dmabuf);
929 cmdbuf += j;
930 break;
931 case SAVAGE_CMD_VB_IDX:
932 j = (cmd_header.idx.count + 3) / 4;
933 /* j was check in savage_bci_cmdbuf */
934 ret = savage_dispatch_vb_idx(dev_priv,
935 &cmd_header, (const uint16_t *)cmdbuf,
936 (const uint32_t *)vtxbuf, vb_size,
937 vb_stride);
938 cmdbuf += j;
939 break;
940 default:
941 /* What's the best return code? EFAULT? */
942 DRM_ERROR("IMPLEMENTATION ERROR: "
943 "non-drawing-command %d\n",
944 cmd_header.cmd.cmd);
945 return -EINVAL;
946 }
947
948 if (ret != 0)
949 return ret;
950 }
951 }
952
953 return 0;
954}
955
956int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
957{
958 drm_savage_private_t *dev_priv = dev->dev_private;
959 struct drm_device_dma *dma = dev->dma;
960 struct drm_buf *dmabuf;
961 drm_savage_cmdbuf_t *cmdbuf = data;
962 drm_savage_cmd_header_t *kcmd_addr = NULL;
963 drm_savage_cmd_header_t *first_draw_cmd;
964 unsigned int *kvb_addr = NULL;
965 struct drm_clip_rect *kbox_addr = NULL;
966 unsigned int i, j;
967 int ret = 0;
968
969 DRM_DEBUG("\n");
970
971 LOCK_TEST_WITH_RETURN(dev, file_priv);
972
973 if (dma && dma->buflist) {
974 if (cmdbuf->dma_idx > dma->buf_count) {
975 DRM_ERROR
976 ("vertex buffer index %u out of range (0-%u)\n",
977 cmdbuf->dma_idx, dma->buf_count - 1);
978 return -EINVAL;
979 }
980 dmabuf = dma->buflist[cmdbuf->dma_idx];
981 } else {
982 dmabuf = NULL;
983 }
984
985 /* Copy the user buffers into kernel temporary areas. This hasn't been
986 * a performance loss compared to VERIFYAREA_READ/
987 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
988 * for locking on FreeBSD.
989 */
990 if (cmdbuf->size) {
991 kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
992 if (kcmd_addr == NULL)
993 return -ENOMEM;
994
995 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
996 cmdbuf->size * 8))
997 {
998 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
999 return -EFAULT;
1000 }
1001 cmdbuf->cmd_addr = kcmd_addr;
1002 }
1003 if (cmdbuf->vb_size) {
1004 kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
1005 if (kvb_addr == NULL) {
1006 ret = -ENOMEM;
1007 goto done;
1008 }
1009
1010 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
1011 cmdbuf->vb_size)) {
1012 ret = -EFAULT;
1013 goto done;
1014 }
1015 cmdbuf->vb_addr = kvb_addr;
1016 }
1017 if (cmdbuf->nbox) {
1018 kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
1019 DRM_MEM_DRIVER);
1020 if (kbox_addr == NULL) {
1021 ret = -ENOMEM;
1022 goto done;
1023 }
1024
1025 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
1026 cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
1027 ret = -EFAULT;
1028 goto done;
1029 }
1030 cmdbuf->box_addr = kbox_addr;
1031 }
1032
1033 /* Make sure writes to DMA buffers are finished before sending
1034 * DMA commands to the graphics hardware. */
1035 DRM_MEMORYBARRIER();
1036
1037 /* Coming from user space. Don't know if the Xserver has
1038 * emitted wait commands. Assuming the worst. */
1039 dev_priv->waiting = 1;
1040
1041 i = 0;
1042 first_draw_cmd = NULL;
1043 while (i < cmdbuf->size) {
1044 drm_savage_cmd_header_t cmd_header;
1045 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
1046 cmdbuf->cmd_addr++;
1047 i++;
1048
1049 /* Group drawing commands with same state to minimize
1050 * iterations over clip rects. */
1051 j = 0;
1052 switch (cmd_header.cmd.cmd) {
1053 case SAVAGE_CMD_DMA_IDX:
1054 case SAVAGE_CMD_VB_IDX:
1055 j = (cmd_header.idx.count + 3) / 4;
1056 if (i + j > cmdbuf->size) {
1057 DRM_ERROR("indexed drawing command extends "
1058 "beyond end of command buffer\n");
1059 DMA_FLUSH();
1060 return -EINVAL;
1061 }
1062 /* fall through */
1063 case SAVAGE_CMD_DMA_PRIM:
1064 case SAVAGE_CMD_VB_PRIM:
1065 if (!first_draw_cmd)
1066 first_draw_cmd = cmdbuf->cmd_addr - 1;
1067 cmdbuf->cmd_addr += j;
1068 i += j;
1069 break;
1070 default:
1071 if (first_draw_cmd) {
1072 ret = savage_dispatch_draw(
1073 dev_priv, first_draw_cmd,
1074 cmdbuf->cmd_addr - 1,
1075 dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
1076 cmdbuf->vb_stride,
1077 cmdbuf->nbox, cmdbuf->box_addr);
1078 if (ret != 0)
1079 return ret;
1080 first_draw_cmd = NULL;
1081 }
1082 }
1083 if (first_draw_cmd)
1084 continue;
1085
1086 switch (cmd_header.cmd.cmd) {
1087 case SAVAGE_CMD_STATE:
1088 j = (cmd_header.state.count + 1) / 2;
1089 if (i + j > cmdbuf->size) {
1090 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1091 "beyond end of command buffer\n");
1092 DMA_FLUSH();
1093 ret = -EINVAL;
1094 goto done;
1095 }
1096 ret = savage_dispatch_state(dev_priv, &cmd_header,
1097 (const uint32_t *)cmdbuf->cmd_addr);
1098 cmdbuf->cmd_addr += j;
1099 i += j;
1100 break;
1101 case SAVAGE_CMD_CLEAR:
1102 if (i + 1 > cmdbuf->size) {
1103 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1104 "beyond end of command buffer\n");
1105 DMA_FLUSH();
1106 ret = -EINVAL;
1107 goto done;
1108 }
1109 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1110 cmdbuf->cmd_addr,
1111 cmdbuf->nbox,
1112 cmdbuf->box_addr);
1113 cmdbuf->cmd_addr++;
1114 i++;
1115 break;
1116 case SAVAGE_CMD_SWAP:
1117 ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
1118 cmdbuf->box_addr);
1119 break;
1120 default:
1121 DRM_ERROR("invalid command 0x%x\n",
1122 cmd_header.cmd.cmd);
1123 DMA_FLUSH();
1124 ret = -EINVAL;
1125 goto done;
1126 }
1127
1128 if (ret != 0) {
1129 DMA_FLUSH();
1130 goto done;
1131 }
1132 }
1133
1134 if (first_draw_cmd) {
1135 ret = savage_dispatch_draw (
1136 dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
1137 cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
1138 cmdbuf->nbox, cmdbuf->box_addr);
1139 if (ret != 0) {
1140 DMA_FLUSH();
1141 goto done;
1142 }
1143 }
1144
1145 DMA_FLUSH();
1146
1147 if (dmabuf && cmdbuf->discard) {
1148 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1149 uint16_t event;
1150 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1151 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1152 savage_freelist_put(dev, dmabuf);
1153 }
1154
1155done:
1156 /* If we didn't need to allocate them, these'll be NULL */
1157 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1158 drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
1159 drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
1160 DRM_MEM_DRIVER);
1161
1162 return ret;
1163}
diff --git a/drivers/gpu/drm/sis/Makefile b/drivers/gpu/drm/sis/Makefile
new file mode 100644
index 000000000000..441c061c3ad0
--- /dev/null
+++ b/drivers/gpu/drm/sis/Makefile
@@ -0,0 +1,10 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

# Pick up the shared DRM headers from include/drm.
ccflags-y = -Iinclude/drm
sis-y := sis_drv.o sis_mm.o

obj-$(CONFIG_DRM_SIS) += sis.o
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
new file mode 100644
index 000000000000..7dacc64e9b56
--- /dev/null
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -0,0 +1,117 @@
1/* sis.c -- sis driver -*- linux-c -*-
2 *
3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "sis_drm.h"
30#include "sis_drv.h"
31
32#include "drm_pciids.h"
33
/* PCI IDs this driver binds to; the table body is supplied by drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	sisdrv_PCI_IDS
};
37
38static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
39{
40 drm_sis_private_t *dev_priv;
41 int ret;
42
43 dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
44 if (dev_priv == NULL)
45 return -ENOMEM;
46
47 dev->dev_private = (void *)dev_priv;
48 dev_priv->chipset = chipset;
49 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
50 if (ret) {
51 drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
52 }
53
54 return ret;
55}
56
57static int sis_driver_unload(struct drm_device *dev)
58{
59 drm_sis_private_t *dev_priv = dev->dev_private;
60
61 drm_sman_takedown(&dev_priv->sman);
62 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
63
64 return 0;
65}
66
/* DRM driver description for SiS.  Notable points: buffer reclaim runs
 * with the heavyweight lock held (reclaim_buffers_idlelocked) and uses
 * sis_idle() as the dma_quiescent hook to quiesce the engine first;
 * num_ioctls is patched in at module init (see sis_init()). */
static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
	.load = sis_driver_load,
	.unload = sis_driver_unload,
	.context_dtor = NULL,
	.dma_quiescent = sis_idle,
	.reclaim_buffers = NULL,
	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
	.lastclose = sis_lastclose,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = sis_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
		 },
	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
100
101static int __init sis_init(void)
102{
103 driver.num_ioctls = sis_max_ioctl;
104 return drm_init(&driver);
105}
106
107static void __exit sis_exit(void)
108{
109 drm_exit(&driver);
110}
111
/* Standard module registration and metadata. */
module_init(sis_init);
module_exit(sis_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
new file mode 100644
index 000000000000..ef940bad63f7
--- /dev/null
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -0,0 +1,73 @@
1/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
2/*
3 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
4 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
5 * All rights reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#ifndef _SIS_DRV_H_
29#define _SIS_DRV_H_
30
31/* General customization:
32 */
33
34#define DRIVER_AUTHOR "SIS, Tungsten Graphics"
35#define DRIVER_NAME "sis"
36#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8"
37#define DRIVER_DATE "20070626"
38#define DRIVER_MAJOR 1
39#define DRIVER_MINOR 3
40#define DRIVER_PATCHLEVEL 0
41
42enum sis_family {
43 SIS_OTHER = 0,
44 SIS_CHIP_315 = 1,
45};
46
47#include "drm_sman.h"
48
49
50#define SIS_BASE (dev_priv->mmio)
51#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
52#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);
53
54typedef struct drm_sis_private {
55 drm_local_map_t *mmio;
56 unsigned int idle_fault;
57 struct drm_sman sman;
58 unsigned int chipset;
59 int vram_initialized;
60 int agp_initialized;
61 unsigned long vram_offset;
62 unsigned long agp_offset;
63} drm_sis_private_t;
64
65extern int sis_idle(struct drm_device *dev);
66extern void sis_reclaim_buffers_locked(struct drm_device *dev,
67 struct drm_file *file_priv);
68extern void sis_lastclose(struct drm_device *dev);
69
70extern struct drm_ioctl_desc sis_ioctls[];
71extern int sis_max_ioctl;
72
73#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
new file mode 100644
index 000000000000..b3878770fce1
--- /dev/null
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -0,0 +1,333 @@
1/**************************************************************************
2 *
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 *
27 **************************************************************************/
28
29/*
30 * Authors:
31 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 */
33
34#include "drmP.h"
35#include "sis_drm.h"
36#include "sis_drv.h"
37
38#include <video/sisfb.h>
39
40#define VIDEO_TYPE 0
41#define AGP_TYPE 1
42
43
44#if defined(CONFIG_FB_SIS)
45/* fb management via fb device */
46
47#define SIS_MM_ALIGN_SHIFT 0
48#define SIS_MM_ALIGN_MASK 0
49
50static void *sis_sman_mm_allocate(void *private, unsigned long size,
51 unsigned alignment)
52{
53 struct sis_memreq req;
54
55 req.size = size;
56 sis_malloc(&req);
57 if (req.size == 0)
58 return NULL;
59 else
60 return (void *)~req.offset;
61}
62
/* sman free callback: decode the handle back to an offset and hand it
 * to the sisfb allocator. */
static void sis_sman_mm_free(void *private, void *ref)
{
	unsigned long offset = ~(unsigned long)ref;

	sis_free(offset);
}
67
/* sman destroy callback: nothing to tear down, the sisfb device owns
 * the underlying pool. */
static void sis_sman_mm_destroy(void *private)
{
}
72
/* sman offset callback: recover the pool offset from the opaque handle
 * produced by sis_sman_mm_allocate(). */
static unsigned long sis_sman_mm_offset(void *private, void *ref)
{
	return ~(unsigned long)ref;
}
77
78#else /* CONFIG_FB_SIS */
79
80#define SIS_MM_ALIGN_SHIFT 4
81#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
82
83#endif /* CONFIG_FB_SIS */
84
/* SIS_FB_INIT ioctl: set up the video-RAM memory manager.
 *
 * With CONFIG_FB_SIS the sisfb kernel driver owns the VRAM pool and
 * allocation is delegated to it through the sis_sman_mm_* callbacks;
 * otherwise a drm_sman range manager is created over fb->size.
 * Restricted to DRM_MASTER | DRM_ROOT_ONLY (see sis_ioctls[]).
 */
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_fb_t *fb = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS)
	{
		struct drm_sman_mm sman_mm;
		/* Dummy private token: the real pool lives in sisfb. */
		sman_mm.private = (void *)0xFFFFFFFF;
		sman_mm.allocate = sis_sman_mm_allocate;
		sman_mm.free = sis_sman_mm_free;
		sman_mm.destroy = sis_sman_mm_destroy;
		sman_mm.offset = sis_sman_mm_offset;
		ret =
		    drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
	}
#else
	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
				 fb->size >> SIS_MM_ALIGN_SHIFT);
#endif

	if (ret) {
		DRM_ERROR("VRAM memory manager initialisation error\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;

	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);

	return 0;
}
122
/* Common allocation helper behind the FB_ALLOC and AGP_ALLOC ioctls.
 *
 * pool selects the heap: VIDEO_TYPE (0) or AGP_TYPE (1).  On success
 * mem->offset is a device address (pool base + aligned block offset),
 * mem->size is rounded up to allocator granularity, and mem->free
 * holds the user hash key later passed to sis_drm_free().
 * Returns 0, -EINVAL if the pool was never initialised, or -ENOMEM.
 */
static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
			 void *data, int pool)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t *mem = data;
	int retval = 0;
	struct drm_memblock_item *item;

	mutex_lock(&dev->struct_mutex);

	/* Reject allocation before the corresponding *_INIT ioctl ran. */
	if (0 == ((pool == 0) ? dev_priv->vram_initialized :
		      dev_priv->agp_initialized)) {
		DRM_ERROR
		    ("Attempt to allocate from uninitialized memory manager.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Convert byte size to allocator units (1 << SIS_MM_ALIGN_SHIFT),
	 * rounding up. */
	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
			      (unsigned long)file_priv);

	mutex_unlock(&dev->struct_mutex);
	if (item) {
		mem->offset = ((pool == 0) ?
			      dev_priv->vram_offset : dev_priv->agp_offset) +
		    (item->mm->
		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
		mem->free = item->user_hash.key;
		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
	} else {
		mem->offset = 0;
		mem->size = 0;
		mem->free = 0;
		retval = -ENOMEM;
	}

	/* NOTE(review): %d with mem->size/mem->offset -- presumably these
	 * fields are int-sized in drm_sis_mem_t; verify against sis_drm.h. */
	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
		  mem->offset);

	return retval;
}
165
166static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
167{
168 drm_sis_private_t *dev_priv = dev->dev_private;
169 drm_sis_mem_t *mem = data;
170 int ret;
171
172 mutex_lock(&dev->struct_mutex);
173 ret = drm_sman_free_key(&dev_priv->sman, mem->free);
174 mutex_unlock(&dev->struct_mutex);
175 DRM_DEBUG("free = 0x%lx\n", mem->free);
176
177 return ret;
178}
179
180static int sis_fb_alloc(struct drm_device *dev, void *data,
181 struct drm_file *file_priv)
182{
183 return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
184}
185
186static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
187 struct drm_file *file_priv)
188{
189 drm_sis_private_t *dev_priv = dev->dev_private;
190 drm_sis_agp_t *agp = data;
191 int ret;
192 dev_priv = dev->dev_private;
193
194 mutex_lock(&dev->struct_mutex);
195 ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
196 agp->size >> SIS_MM_ALIGN_SHIFT);
197
198 if (ret) {
199 DRM_ERROR("AGP memory manager initialisation error\n");
200 mutex_unlock(&dev->struct_mutex);
201 return ret;
202 }
203
204 dev_priv->agp_initialized = 1;
205 dev_priv->agp_offset = agp->offset;
206 mutex_unlock(&dev->struct_mutex);
207
208 DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
209 return 0;
210}
211
212static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
213 struct drm_file *file_priv)
214{
215
216 return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
217}
218
219static drm_local_map_t *sis_reg_init(struct drm_device *dev)
220{
221 struct drm_map_list *entry;
222 drm_local_map_t *map;
223
224 list_for_each_entry(entry, &dev->maplist, head) {
225 map = entry->map;
226 if (!map)
227 continue;
228 if (map->type == _DRM_REGISTERS) {
229 return map;
230 }
231 }
232 return NULL;
233}
234
/* Wait for the graphics engine to go idle (the dma_quiescent hook).
 *
 * Lazily resolves the MMIO register map on first use.  Only the
 * SIS_CHIP_315 family has an idle check implemented; other chips
 * return immediately.  Polls status register 0x85cc until bit 31 is
 * set (or a 3-second timeout), in four back-to-back rounds; on
 * timeout, idle checking is permanently disabled via idle_fault so a
 * hung engine cannot stall every subsequent caller.  Always returns 0.
 */
int sis_idle(struct drm_device *dev)
{
	drm_sis_private_t *dev_priv = dev->dev_private;
	uint32_t idle_reg;
	unsigned long end;
	int i;

	/* A previous timeout disabled idle checking. */
	if (dev_priv->idle_fault)
		return 0;

	if (dev_priv->mmio == NULL) {
		dev_priv->mmio = sis_reg_init(dev);
		if (dev_priv->mmio == NULL) {
			DRM_ERROR("Could not find register map.\n");
			return 0;
		}
	}

	/*
	 * Implement a device switch here if needed
	 */

	if (dev_priv->chipset != SIS_CHIP_315)
		return 0;

	/*
	 * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
	 * because its polling frequency is too low.
	 */

	end = jiffies + (DRM_HZ * 3);

	for (i=0; i<4; ++i) {
		do {
			idle_reg = SIS_READ(0x85cc);
		} while ( !time_after_eq(jiffies, end) &&
			  ((idle_reg & 0x80000000) != 0x80000000));
	}

	if (time_after_eq(jiffies, end)) {
		DRM_ERROR("Graphics engine idle timeout. "
			  "Disabling idle check\n");
		dev_priv->idle_fault = 1;
	}

	/*
	 * The caller never sees an error code. It gets trapped
	 * in libdrm.
	 */

	return 0;
}
287
288
289void sis_lastclose(struct drm_device *dev)
290{
291 drm_sis_private_t *dev_priv = dev->dev_private;
292
293 if (!dev_priv)
294 return;
295
296 mutex_lock(&dev->struct_mutex);
297 drm_sman_cleanup(&dev_priv->sman);
298 dev_priv->vram_initialized = 0;
299 dev_priv->agp_initialized = 0;
300 dev_priv->mmio = NULL;
301 mutex_unlock(&dev->struct_mutex);
302}
303
304void sis_reclaim_buffers_locked(struct drm_device * dev,
305 struct drm_file *file_priv)
306{
307 drm_sis_private_t *dev_priv = dev->dev_private;
308
309 mutex_lock(&dev->struct_mutex);
310 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
311 mutex_unlock(&dev->struct_mutex);
312 return;
313 }
314
315 if (dev->driver->dma_quiescent) {
316 dev->driver->dma_quiescent(dev);
317 }
318
319 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
320 mutex_unlock(&dev->struct_mutex);
321 return;
322}
323
/* Ioctl table.  The *_INIT ioctls (re)configure the memory managers and
 * are restricted to an authenticated root master; alloc/free only
 * require authentication. */
struct drm_ioctl_desc sis_ioctls[] = {
	DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};

/* Number of entries above; patched into driver.num_ioctls at init. */
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/tdfx/Makefile b/drivers/gpu/drm/tdfx/Makefile
new file mode 100644
index 000000000000..0379f294b32a
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

# Pick up the shared DRM headers from include/drm.
ccflags-y := -Iinclude/drm
tdfx-y := tdfx_drv.o

obj-$(CONFIG_DRM_TDFX) += tdfx.o
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
new file mode 100644
index 000000000000..012ff2e356b2
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -0,0 +1,84 @@
1/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
2 * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Daryll Strauss <daryll@valinux.com>
30 * Gareth Hughes <gareth@valinux.com>
31 */
32
33#include "drmP.h"
34#include "tdfx_drv.h"
35
36#include "drm_pciids.h"
37
/* PCI IDs (3dfx Banshee/Voodoo) supplied by drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	tdfx_PCI_IDS
};
41
/* DRM driver description for tdfx.  The hardware is driven entirely
 * from user space, so there are no driver-private ioctls and no
 * load/unload hooks -- only the generic DRM core entry points. */
static struct drm_driver driver = {
	.driver_features = DRIVER_USE_MTRR,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
		 },
	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
68
69static int __init tdfx_init(void)
70{
71 return drm_init(&driver);
72}
73
74static void __exit tdfx_exit(void)
75{
76 drm_exit(&driver);
77}
78
/* Standard module registration and metadata. */
module_init(tdfx_init);
module_exit(tdfx_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.h b/drivers/gpu/drm/tdfx/tdfx_drv.h
new file mode 100644
index 000000000000..84204ec1b046
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.h
@@ -0,0 +1,47 @@
1/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
2 * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
3 */
4/*
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * Authors:
28 * Gareth Hughes <gareth@valinux.com>
29 */
30
#ifndef __TDFX_H__
#define __TDFX_H__

/* General customization:
 */

#define DRIVER_AUTHOR		"VA Linux Systems Inc."

#define DRIVER_NAME		"tdfx"
#define DRIVER_DESC		"3dfx Banshee/Voodoo3+"
#define DRIVER_DATE		"20010216"

/* Interface version reported through the DRM version ioctl. */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		0
#define DRIVER_PATCHLEVEL	0

#endif
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
new file mode 100644
index 000000000000..d59e258e2c13
--- /dev/null
+++ b/drivers/gpu/drm/via/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

# Pick up the shared DRM headers from include/drm.
ccflags-y := -Iinclude/drm
via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
	 via_video.o via_dmablit.o

obj-$(CONFIG_DRM_VIA) += via.o
diff --git a/drivers/gpu/drm/via/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
new file mode 100644
index 000000000000..462375d543b9
--- /dev/null
+++ b/drivers/gpu/drm/via/via_3d_reg.h
@@ -0,0 +1,1650 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef VIA_3D_REG_H
26#define VIA_3D_REG_H
27#define HC_REG_BASE 0x0400
28
29#define HC_REG_TRANS_SPACE 0x0040
30
31#define HC_ParaN_MASK 0xffffffff
32#define HC_Para_MASK 0x00ffffff
33#define HC_SubA_MASK 0xff000000
34#define HC_SubA_SHIFT 24
35/* Transmission Setting
36 */
37#define HC_REG_TRANS_SET 0x003c
38#define HC_ParaSubType_MASK 0xff000000
39#define HC_ParaType_MASK 0x00ff0000
40#define HC_ParaOS_MASK 0x0000ff00
41#define HC_ParaAdr_MASK 0x000000ff
42#define HC_ParaSubType_SHIFT 24
43#define HC_ParaType_SHIFT 16
44#define HC_ParaOS_SHIFT 8
45#define HC_ParaAdr_SHIFT 0
46
47#define HC_ParaType_CmdVdata 0x0000
48#define HC_ParaType_NotTex 0x0001
49#define HC_ParaType_Tex 0x0002
50#define HC_ParaType_Palette 0x0003
51#define HC_ParaType_PreCR 0x0010
52#define HC_ParaType_Auto 0x00fe
53
54/* Transmission Space
55 */
56#define HC_REG_Hpara0 0x0040
57#define HC_REG_HpataAF 0x02fc
58
59/* Read
60 */
61#define HC_REG_HREngSt 0x0000
62#define HC_REG_HRFIFOempty 0x0004
63#define HC_REG_HRFIFOfull 0x0008
64#define HC_REG_HRErr 0x000c
65#define HC_REG_FIFOstatus 0x0010
66/* HC_REG_HREngSt 0x0000
67 */
68#define HC_HDASZC_MASK 0x00010000
69#define HC_HSGEMI_MASK 0x0000f000
70#define HC_HLGEMISt_MASK 0x00000f00
71#define HC_HCRSt_MASK 0x00000080
72#define HC_HSE0St_MASK 0x00000040
73#define HC_HSE1St_MASK 0x00000020
74#define HC_HPESt_MASK 0x00000010
75#define HC_HXESt_MASK 0x00000008
76#define HC_HBESt_MASK 0x00000004
77#define HC_HE2St_MASK 0x00000002
78#define HC_HE3St_MASK 0x00000001
79/* HC_REG_HRFIFOempty 0x0004
80 */
81#define HC_HRZDempty_MASK 0x00000010
82#define HC_HRTXAempty_MASK 0x00000008
83#define HC_HRTXDempty_MASK 0x00000004
84#define HC_HWZDempty_MASK 0x00000002
85#define HC_HWCDempty_MASK 0x00000001
86/* HC_REG_HRFIFOfull 0x0008
87 */
88#define HC_HRZDfull_MASK 0x00000010
89#define HC_HRTXAfull_MASK 0x00000008
90#define HC_HRTXDfull_MASK 0x00000004
91#define HC_HWZDfull_MASK 0x00000002
92#define HC_HWCDfull_MASK 0x00000001
93/* HC_REG_HRErr 0x000c
94 */
95#define HC_HAGPCMErr_MASK 0x80000000
96#define HC_HAGPCMErrC_MASK 0x70000000
97/* HC_REG_FIFOstatus 0x0010
98 */
99#define HC_HRFIFOATall_MASK 0x80000000
100#define HC_HRFIFOATbusy_MASK 0x40000000
101#define HC_HRATFGMDo_MASK 0x00000100
102#define HC_HRATFGMDi_MASK 0x00000080
103#define HC_HRATFRZD_MASK 0x00000040
104#define HC_HRATFRTXA_MASK 0x00000020
105#define HC_HRATFRTXD_MASK 0x00000010
106#define HC_HRATFWZD_MASK 0x00000008
107#define HC_HRATFWCD_MASK 0x00000004
108#define HC_HRATTXTAG_MASK 0x00000002
109#define HC_HRATTXCH_MASK 0x00000001
110
111/* AGP Command Setting
112 */
113#define HC_SubA_HAGPBstL 0x0060
114#define HC_SubA_HAGPBendL 0x0061
115#define HC_SubA_HAGPCMNT 0x0062
116#define HC_SubA_HAGPBpL 0x0063
117#define HC_SubA_HAGPBpH 0x0064
118/* HC_SubA_HAGPCMNT 0x0062
119 */
120#define HC_HAGPCMNT_MASK 0x00800000
121#define HC_HCmdErrClr_MASK 0x00400000
122#define HC_HAGPBendH_MASK 0x0000ff00
123#define HC_HAGPBstH_MASK 0x000000ff
124#define HC_HAGPBendH_SHIFT 8
125#define HC_HAGPBstH_SHIFT 0
126/* HC_SubA_HAGPBpL 0x0063
127 */
128#define HC_HAGPBpL_MASK 0x00fffffc
129#define HC_HAGPBpID_MASK 0x00000003
130#define HC_HAGPBpID_PAUSE 0x00000000
131#define HC_HAGPBpID_JUMP 0x00000001
132#define HC_HAGPBpID_STOP 0x00000002
133/* HC_SubA_HAGPBpH 0x0064
134 */
135#define HC_HAGPBpH_MASK 0x00ffffff
136
137/* Miscellaneous Settings
138 */
139#define HC_SubA_HClipTB 0x0070
140#define HC_SubA_HClipLR 0x0071
141#define HC_SubA_HFPClipTL 0x0072
142#define HC_SubA_HFPClipBL 0x0073
143#define HC_SubA_HFPClipLL 0x0074
144#define HC_SubA_HFPClipRL 0x0075
145#define HC_SubA_HFPClipTBH 0x0076
146#define HC_SubA_HFPClipLRH 0x0077
147#define HC_SubA_HLP 0x0078
148#define HC_SubA_HLPRF 0x0079
149#define HC_SubA_HSolidCL 0x007a
150#define HC_SubA_HPixGC 0x007b
151#define HC_SubA_HSPXYOS 0x007c
152#define HC_SubA_HVertexCNT 0x007d
153
154#define HC_HClipT_MASK 0x00fff000
155#define HC_HClipT_SHIFT 12
156#define HC_HClipB_MASK 0x00000fff
157#define HC_HClipB_SHIFT 0
158#define HC_HClipL_MASK 0x00fff000
159#define HC_HClipL_SHIFT 12
160#define HC_HClipR_MASK 0x00000fff
161#define HC_HClipR_SHIFT 0
162#define HC_HFPClipBH_MASK 0x0000ff00
163#define HC_HFPClipBH_SHIFT 8
164#define HC_HFPClipTH_MASK 0x000000ff
165#define HC_HFPClipTH_SHIFT 0
166#define HC_HFPClipRH_MASK 0x0000ff00
167#define HC_HFPClipRH_SHIFT 8
168#define HC_HFPClipLH_MASK 0x000000ff
169#define HC_HFPClipLH_SHIFT 0
170#define HC_HSolidCH_MASK 0x000000ff
171#define HC_HPixGC_MASK 0x00800000
172#define HC_HSPXOS_MASK 0x00fff000
173#define HC_HSPXOS_SHIFT 12
174#define HC_HSPYOS_MASK 0x00000fff
175
176/* Command
177 * Command A
178 */
179#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
180#define HC_HE3Fire_MASK 0x00100000
181#define HC_HPMType_MASK 0x000f0000
182#define HC_HEFlag_MASK 0x0000e000
183#define HC_HShading_MASK 0x00001c00
184#define HC_HPMValidN_MASK 0x00000200
185#define HC_HPLEND_MASK 0x00000100
186#define HC_HVCycle_MASK 0x000000ff
187#define HC_HVCycle_Style_MASK 0x000000c0
188#define HC_HVCycle_ChgA_MASK 0x00000030
189#define HC_HVCycle_ChgB_MASK 0x0000000c
190#define HC_HVCycle_ChgC_MASK 0x00000003
191#define HC_HPMType_Point 0x00000000
192#define HC_HPMType_Line 0x00010000
193#define HC_HPMType_Tri 0x00020000
194#define HC_HPMType_TriWF 0x00040000
195#define HC_HEFlag_NoAA 0x00000000
196#define HC_HEFlag_ab 0x00008000
197#define HC_HEFlag_bc 0x00004000
198#define HC_HEFlag_ca 0x00002000
199#define HC_HShading_Solid 0x00000000
200#define HC_HShading_FlatA 0x00000400
201#define HC_HShading_FlatB 0x00000800
202#define HC_HShading_FlatC 0x00000c00
203#define HC_HShading_Gouraud 0x00001000
204#define HC_HVCycle_Full 0x00000000
205#define HC_HVCycle_AFP 0x00000040
206#define HC_HVCycle_One 0x000000c0
207#define HC_HVCycle_NewA 0x00000000
208#define HC_HVCycle_AA 0x00000010
209#define HC_HVCycle_AB 0x00000020
210#define HC_HVCycle_AC 0x00000030
211#define HC_HVCycle_NewB 0x00000000
212#define HC_HVCycle_BA 0x00000004
213#define HC_HVCycle_BB 0x00000008
214#define HC_HVCycle_BC 0x0000000c
215#define HC_HVCycle_NewC 0x00000000
216#define HC_HVCycle_CA 0x00000001
217#define HC_HVCycle_CB 0x00000002
218#define HC_HVCycle_CC 0x00000003
219
220/* Command B
221 */
222#define HC_HLPrst_MASK 0x00010000
223#define HC_HLLastP_MASK 0x00008000
224#define HC_HVPMSK_MASK 0x00007f80
225#define HC_HBFace_MASK 0x00000040
226#define HC_H2nd1VT_MASK 0x0000003f
227#define HC_HVPMSK_X 0x00004000
228#define HC_HVPMSK_Y 0x00002000
229#define HC_HVPMSK_Z 0x00001000
230#define HC_HVPMSK_W 0x00000800
231#define HC_HVPMSK_Cd 0x00000400
232#define HC_HVPMSK_Cs 0x00000200
233#define HC_HVPMSK_S 0x00000100
234#define HC_HVPMSK_T 0x00000080
235
236/* Enable Setting
237 */
238#define HC_SubA_HEnable 0x0000
239#define HC_HenTXEnvMap_MASK 0x00200000
240#define HC_HenVertexCNT_MASK 0x00100000
241#define HC_HenCPUDAZ_MASK 0x00080000
242#define HC_HenDASZWC_MASK 0x00040000
243#define HC_HenFBCull_MASK 0x00020000
244#define HC_HenCW_MASK 0x00010000
245#define HC_HenAA_MASK 0x00008000
246#define HC_HenST_MASK 0x00004000
247#define HC_HenZT_MASK 0x00002000
248#define HC_HenZW_MASK 0x00001000
249#define HC_HenAT_MASK 0x00000800
250#define HC_HenAW_MASK 0x00000400
251#define HC_HenSP_MASK 0x00000200
252#define HC_HenLP_MASK 0x00000100
253#define HC_HenTXCH_MASK 0x00000080
254#define HC_HenTXMP_MASK 0x00000040
255#define HC_HenTXPP_MASK 0x00000020
256#define HC_HenTXTR_MASK 0x00000010
257#define HC_HenCS_MASK 0x00000008
258#define HC_HenFOG_MASK 0x00000004
259#define HC_HenABL_MASK 0x00000002
260#define HC_HenDT_MASK 0x00000001
261
262/* Z Setting
263 */
264#define HC_SubA_HZWBBasL 0x0010
265#define HC_SubA_HZWBBasH 0x0011
266#define HC_SubA_HZWBType 0x0012
267#define HC_SubA_HZBiasL 0x0013
268#define HC_SubA_HZWBend 0x0014
269#define HC_SubA_HZWTMD 0x0015
270#define HC_SubA_HZWCDL 0x0016
271#define HC_SubA_HZWCTAGnum 0x0017
272#define HC_SubA_HZCYNum 0x0018
273#define HC_SubA_HZWCFire 0x0019
274/* HC_SubA_HZWBType
275 */
276#define HC_HZWBType_MASK 0x00800000
277#define HC_HZBiasedWB_MASK 0x00400000
278#define HC_HZONEasFF_MASK 0x00200000
279#define HC_HZOONEasFF_MASK 0x00100000
280#define HC_HZWBFM_MASK 0x00030000
281#define HC_HZWBLoc_MASK 0x0000c000
282#define HC_HZWBPit_MASK 0x00003fff
283#define HC_HZWBFM_16 0x00000000
284#define HC_HZWBFM_32 0x00020000
285#define HC_HZWBFM_24 0x00030000
286#define HC_HZWBLoc_Local 0x00000000
287#define HC_HZWBLoc_SyS 0x00004000
288/* HC_SubA_HZWBend
289 */
290#define HC_HZWBend_MASK 0x00ffe000
291#define HC_HZBiasH_MASK 0x000000ff
292#define HC_HZWBend_SHIFT 10
293/* HC_SubA_HZWTMD
294 */
295#define HC_HZWTMD_MASK 0x00070000
296#define HC_HEBEBias_MASK 0x00007f00
297#define HC_HZNF_MASK 0x000000ff
298#define HC_HZWTMD_NeverPass 0x00000000
299#define HC_HZWTMD_LT 0x00010000
300#define HC_HZWTMD_EQ 0x00020000
301#define HC_HZWTMD_LE 0x00030000
302#define HC_HZWTMD_GT 0x00040000
303#define HC_HZWTMD_NE 0x00050000
304#define HC_HZWTMD_GE 0x00060000
305#define HC_HZWTMD_AllPass 0x00070000
306#define HC_HEBEBias_SHIFT 8
307/* HC_SubA_HZWCDL 0x0016
308 */
309#define HC_HZWCDL_MASK 0x00ffffff
310/* HC_SubA_HZWCTAGnum 0x0017
311 */
312#define HC_HZWCTAGnum_MASK 0x00ff0000
313#define HC_HZWCTAGnum_SHIFT 16
314#define HC_HZWCDH_MASK 0x000000ff
315#define HC_HZWCDH_SHIFT 0
316/* HC_SubA_HZCYNum 0x0018
317 */
318#define HC_HZCYNum_MASK 0x00030000
319#define HC_HZCYNum_SHIFT 16
320#define HC_HZWCQWnum_MASK 0x00003fff
321#define HC_HZWCQWnum_SHIFT 0
322/* HC_SubA_HZWCFire 0x0019
323 */
324#define HC_ZWCFire_MASK 0x00010000
325#define HC_HZWCQWnumLast_MASK 0x00003fff
326#define HC_HZWCQWnumLast_SHIFT 0
327
328/* Stencil Setting
329 */
330#define HC_SubA_HSTREF 0x0023
331#define HC_SubA_HSTMD 0x0024
332/* HC_SubA_HSBFM
333 */
334#define HC_HSBFM_MASK 0x00030000
335#define HC_HSBLoc_MASK 0x0000c000
336#define HC_HSBPit_MASK 0x00003fff
337/* HC_SubA_HSTREF
338 */
339#define HC_HSTREF_MASK 0x00ff0000
340#define HC_HSTOPMSK_MASK 0x0000ff00
341#define HC_HSTBMSK_MASK 0x000000ff
342#define HC_HSTREF_SHIFT 16
343#define HC_HSTOPMSK_SHIFT 8
344/* HC_SubA_HSTMD
345 */
346#define HC_HSTMD_MASK 0x00070000
347#define HC_HSTOPSF_MASK 0x000001c0
348#define HC_HSTOPSPZF_MASK 0x00000038
349#define HC_HSTOPSPZP_MASK 0x00000007
350#define HC_HSTMD_NeverPass 0x00000000
351#define HC_HSTMD_LT 0x00010000
352#define HC_HSTMD_EQ 0x00020000
353#define HC_HSTMD_LE 0x00030000
354#define HC_HSTMD_GT 0x00040000
355#define HC_HSTMD_NE 0x00050000
356#define HC_HSTMD_GE 0x00060000
357#define HC_HSTMD_AllPass 0x00070000
358#define HC_HSTOPSF_KEEP 0x00000000
359#define HC_HSTOPSF_ZERO 0x00000040
360#define HC_HSTOPSF_REPLACE 0x00000080
361#define HC_HSTOPSF_INCRSAT 0x000000c0
362#define HC_HSTOPSF_DECRSAT 0x00000100
363#define HC_HSTOPSF_INVERT 0x00000140
364#define HC_HSTOPSF_INCR 0x00000180
365#define HC_HSTOPSF_DECR 0x000001c0
366#define HC_HSTOPSPZF_KEEP 0x00000000
367#define HC_HSTOPSPZF_ZERO 0x00000008
368#define HC_HSTOPSPZF_REPLACE 0x00000010
369#define HC_HSTOPSPZF_INCRSAT 0x00000018
370#define HC_HSTOPSPZF_DECRSAT 0x00000020
371#define HC_HSTOPSPZF_INVERT 0x00000028
372#define HC_HSTOPSPZF_INCR 0x00000030
373#define HC_HSTOPSPZF_DECR 0x00000038
374#define HC_HSTOPSPZP_KEEP 0x00000000
375#define HC_HSTOPSPZP_ZERO 0x00000001
376#define HC_HSTOPSPZP_REPLACE 0x00000002
377#define HC_HSTOPSPZP_INCRSAT 0x00000003
378#define HC_HSTOPSPZP_DECRSAT 0x00000004
379#define HC_HSTOPSPZP_INVERT 0x00000005
380#define HC_HSTOPSPZP_INCR 0x00000006
381#define HC_HSTOPSPZP_DECR 0x00000007
382
383/* Alpha Setting
384 */
385#define HC_SubA_HABBasL 0x0030
386#define HC_SubA_HABBasH 0x0031
387#define HC_SubA_HABFM 0x0032
388#define HC_SubA_HATMD 0x0033
389#define HC_SubA_HABLCsat 0x0034
390#define HC_SubA_HABLCop 0x0035
391#define HC_SubA_HABLAsat 0x0036
392#define HC_SubA_HABLAop 0x0037
393#define HC_SubA_HABLRCa 0x0038
394#define HC_SubA_HABLRFCa 0x0039
395#define HC_SubA_HABLRCbias 0x003a
396#define HC_SubA_HABLRCb 0x003b
397#define HC_SubA_HABLRFCb 0x003c
398#define HC_SubA_HABLRAa 0x003d
399#define HC_SubA_HABLRAb 0x003e
400/* HC_SubA_HABFM
401 */
402#define HC_HABFM_MASK 0x00030000
403#define HC_HABLoc_MASK 0x0000c000
404#define HC_HABPit_MASK 0x000007ff
405/* HC_SubA_HATMD
406 */
407#define HC_HATMD_MASK 0x00000700
408#define HC_HATREF_MASK 0x000000ff
409#define HC_HATMD_NeverPass 0x00000000
410#define HC_HATMD_LT 0x00000100
411#define HC_HATMD_EQ 0x00000200
412#define HC_HATMD_LE 0x00000300
413#define HC_HATMD_GT 0x00000400
414#define HC_HATMD_NE 0x00000500
415#define HC_HATMD_GE 0x00000600
416#define HC_HATMD_AllPass 0x00000700
417/* HC_SubA_HABLCsat
418 */
419#define HC_HABLCsat_MASK 0x00010000
420#define HC_HABLCa_MASK 0x0000fc00
421#define HC_HABLCa_C_MASK 0x0000c000
422#define HC_HABLCa_OPC_MASK 0x00003c00
423#define HC_HABLFCa_MASK 0x000003f0
424#define HC_HABLFCa_C_MASK 0x00000300
425#define HC_HABLFCa_OPC_MASK 0x000000f0
426#define HC_HABLCbias_MASK 0x0000000f
427#define HC_HABLCbias_C_MASK 0x00000008
428#define HC_HABLCbias_OPC_MASK 0x00000007
429/*-- Define the input color.
430 */
431#define HC_XC_Csrc 0x00000000
432#define HC_XC_Cdst 0x00000001
433#define HC_XC_Asrc 0x00000002
434#define HC_XC_Adst 0x00000003
435#define HC_XC_Fog 0x00000004
436#define HC_XC_HABLRC 0x00000005
437#define HC_XC_minSrcDst 0x00000006
438#define HC_XC_maxSrcDst 0x00000007
439#define HC_XC_mimAsrcInvAdst 0x00000008
440#define HC_XC_OPC 0x00000000
441#define HC_XC_InvOPC 0x00000010
442#define HC_XC_OPCp5 0x00000020
443/*-- Define the input Alpha
444 */
445#define HC_XA_OPA 0x00000000
446#define HC_XA_InvOPA 0x00000010
447#define HC_XA_OPAp5 0x00000020
448#define HC_XA_0 0x00000000
449#define HC_XA_Asrc 0x00000001
450#define HC_XA_Adst 0x00000002
451#define HC_XA_Fog 0x00000003
452#define HC_XA_minAsrcFog 0x00000004
453#define HC_XA_minAsrcAdst 0x00000005
454#define HC_XA_maxAsrcFog 0x00000006
455#define HC_XA_maxAsrcAdst 0x00000007
456#define HC_XA_HABLRA 0x00000008
457#define HC_XA_minAsrcInvAdst 0x00000008
458#define HC_XA_HABLFRA 0x00000009
459/*--
460 */
461#define HC_HABLCa_OPC (HC_XC_OPC << 10)
462#define HC_HABLCa_InvOPC (HC_XC_InvOPC << 10)
463#define HC_HABLCa_OPCp5 (HC_XC_OPCp5 << 10)
464#define HC_HABLCa_Csrc (HC_XC_Csrc << 10)
465#define HC_HABLCa_Cdst (HC_XC_Cdst << 10)
466#define HC_HABLCa_Asrc (HC_XC_Asrc << 10)
467#define HC_HABLCa_Adst (HC_XC_Adst << 10)
468#define HC_HABLCa_Fog (HC_XC_Fog << 10)
469#define HC_HABLCa_HABLRCa (HC_XC_HABLRC << 10)
470#define HC_HABLCa_minSrcDst (HC_XC_minSrcDst << 10)
471#define HC_HABLCa_maxSrcDst (HC_XC_maxSrcDst << 10)
472#define HC_HABLFCa_OPC (HC_XC_OPC << 4)
473#define HC_HABLFCa_InvOPC (HC_XC_InvOPC << 4)
474#define HC_HABLFCa_OPCp5 (HC_XC_OPCp5 << 4)
475#define HC_HABLFCa_Csrc (HC_XC_Csrc << 4)
476#define HC_HABLFCa_Cdst (HC_XC_Cdst << 4)
477#define HC_HABLFCa_Asrc (HC_XC_Asrc << 4)
478#define HC_HABLFCa_Adst (HC_XC_Adst << 4)
479#define HC_HABLFCa_Fog (HC_XC_Fog << 4)
480#define HC_HABLFCa_HABLRCa (HC_XC_HABLRC << 4)
481#define HC_HABLFCa_minSrcDst (HC_XC_minSrcDst << 4)
482#define HC_HABLFCa_maxSrcDst (HC_XC_maxSrcDst << 4)
483#define HC_HABLFCa_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 4)
484#define HC_HABLCbias_HABLRCbias 0x00000000
485#define HC_HABLCbias_Asrc 0x00000001
486#define HC_HABLCbias_Adst 0x00000002
487#define HC_HABLCbias_Fog 0x00000003
488#define HC_HABLCbias_Cin 0x00000004
489/* HC_SubA_HABLCop 0x0035
490 */
491#define HC_HABLdot_MASK 0x00010000
492#define HC_HABLCop_MASK 0x00004000
493#define HC_HABLCb_MASK 0x00003f00
494#define HC_HABLCb_C_MASK 0x00003000
495#define HC_HABLCb_OPC_MASK 0x00000f00
496#define HC_HABLFCb_MASK 0x000000fc
497#define HC_HABLFCb_C_MASK 0x000000c0
498#define HC_HABLFCb_OPC_MASK 0x0000003c
499#define HC_HABLCshift_MASK 0x00000003
500#define HC_HABLCb_OPC (HC_XC_OPC << 8)
501#define HC_HABLCb_InvOPC (HC_XC_InvOPC << 8)
502#define HC_HABLCb_OPCp5 (HC_XC_OPCp5 << 8)
503#define HC_HABLCb_Csrc (HC_XC_Csrc << 8)
504#define HC_HABLCb_Cdst (HC_XC_Cdst << 8)
505#define HC_HABLCb_Asrc (HC_XC_Asrc << 8)
506#define HC_HABLCb_Adst (HC_XC_Adst << 8)
507#define HC_HABLCb_Fog (HC_XC_Fog << 8)
508#define HC_HABLCb_HABLRCa (HC_XC_HABLRC << 8)
509#define HC_HABLCb_minSrcDst (HC_XC_minSrcDst << 8)
510#define HC_HABLCb_maxSrcDst (HC_XC_maxSrcDst << 8)
511#define HC_HABLFCb_OPC (HC_XC_OPC << 2)
512#define HC_HABLFCb_InvOPC (HC_XC_InvOPC << 2)
513#define HC_HABLFCb_OPCp5 (HC_XC_OPCp5 << 2)
514#define HC_HABLFCb_Csrc (HC_XC_Csrc << 2)
515#define HC_HABLFCb_Cdst (HC_XC_Cdst << 2)
516#define HC_HABLFCb_Asrc (HC_XC_Asrc << 2)
517#define HC_HABLFCb_Adst (HC_XC_Adst << 2)
518#define HC_HABLFCb_Fog (HC_XC_Fog << 2)
519#define HC_HABLFCb_HABLRCb (HC_XC_HABLRC << 2)
520#define HC_HABLFCb_minSrcDst (HC_XC_minSrcDst << 2)
521#define HC_HABLFCb_maxSrcDst (HC_XC_maxSrcDst << 2)
522#define HC_HABLFCb_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 2)
523/* HC_SubA_HABLAsat 0x0036
524 */
525#define HC_HABLAsat_MASK 0x00010000
526#define HC_HABLAa_MASK 0x0000fc00
527#define HC_HABLAa_A_MASK 0x0000c000
528#define HC_HABLAa_OPA_MASK 0x00003c00
529#define HC_HABLFAa_MASK 0x000003f0
530#define HC_HABLFAa_A_MASK 0x00000300
531#define HC_HABLFAa_OPA_MASK 0x000000f0
532#define HC_HABLAbias_MASK 0x0000000f
533#define HC_HABLAbias_A_MASK 0x00000008
534#define HC_HABLAbias_OPA_MASK 0x00000007
535#define HC_HABLAa_OPA (HC_XA_OPA << 10)
536#define HC_HABLAa_InvOPA (HC_XA_InvOPA << 10)
537#define HC_HABLAa_OPAp5 (HC_XA_OPAp5 << 10)
538#define HC_HABLAa_0 (HC_XA_0 << 10)
539#define HC_HABLAa_Asrc (HC_XA_Asrc << 10)
540#define HC_HABLAa_Adst (HC_XA_Adst << 10)
541#define HC_HABLAa_Fog (HC_XA_Fog << 10)
542#define HC_HABLAa_minAsrcFog (HC_XA_minAsrcFog << 10)
543#define HC_HABLAa_minAsrcAdst (HC_XA_minAsrcAdst << 10)
544#define HC_HABLAa_maxAsrcFog (HC_XA_maxAsrcFog << 10)
545#define HC_HABLAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 10)
546#define HC_HABLAa_HABLRA (HC_XA_HABLRA << 10)
547#define HC_HABLFAa_OPA (HC_XA_OPA << 4)
548#define HC_HABLFAa_InvOPA (HC_XA_InvOPA << 4)
549#define HC_HABLFAa_OPAp5 (HC_XA_OPAp5 << 4)
550#define HC_HABLFAa_0 (HC_XA_0 << 4)
551#define HC_HABLFAa_Asrc (HC_XA_Asrc << 4)
552#define HC_HABLFAa_Adst (HC_XA_Adst << 4)
553#define HC_HABLFAa_Fog (HC_XA_Fog << 4)
554#define HC_HABLFAa_minAsrcFog (HC_XA_minAsrcFog << 4)
555#define HC_HABLFAa_minAsrcAdst (HC_XA_minAsrcAdst << 4)
556#define HC_HABLFAa_maxAsrcFog (HC_XA_maxAsrcFog << 4)
557#define HC_HABLFAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 4)
558#define HC_HABLFAa_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 4)
559#define HC_HABLFAa_HABLFRA (HC_XA_HABLFRA << 4)
560#define HC_HABLAbias_HABLRAbias 0x00000000
561#define HC_HABLAbias_Asrc 0x00000001
562#define HC_HABLAbias_Adst 0x00000002
563#define HC_HABLAbias_Fog 0x00000003
564#define HC_HABLAbias_Aaa 0x00000004
565/* HC_SubA_HABLAop 0x0037
566 */
567#define HC_HABLAop_MASK 0x00004000
568#define HC_HABLAb_MASK 0x00003f00
569#define HC_HABLAb_OPA_MASK 0x00000f00
570#define HC_HABLFAb_MASK 0x000000fc
571#define HC_HABLFAb_OPA_MASK 0x0000003c
572#define HC_HABLAshift_MASK 0x00000003
573#define HC_HABLAb_OPA (HC_XA_OPA << 8)
574#define HC_HABLAb_InvOPA (HC_XA_InvOPA << 8)
575#define HC_HABLAb_OPAp5 (HC_XA_OPAp5 << 8)
576#define HC_HABLAb_0 (HC_XA_0 << 8)
577#define HC_HABLAb_Asrc (HC_XA_Asrc << 8)
578#define HC_HABLAb_Adst (HC_XA_Adst << 8)
579#define HC_HABLAb_Fog (HC_XA_Fog << 8)
580#define HC_HABLAb_minAsrcFog (HC_XA_minAsrcFog << 8)
581#define HC_HABLAb_minAsrcAdst (HC_XA_minAsrcAdst << 8)
582#define HC_HABLAb_maxAsrcFog (HC_XA_maxAsrcFog << 8)
583#define HC_HABLAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 8)
584#define HC_HABLAb_HABLRA (HC_XA_HABLRA << 8)
585#define HC_HABLFAb_OPA (HC_XA_OPA << 2)
586#define HC_HABLFAb_InvOPA (HC_XA_InvOPA << 2)
587#define HC_HABLFAb_OPAp5 (HC_XA_OPAp5 << 2)
588#define HC_HABLFAb_0 (HC_XA_0 << 2)
589#define HC_HABLFAb_Asrc (HC_XA_Asrc << 2)
590#define HC_HABLFAb_Adst (HC_XA_Adst << 2)
591#define HC_HABLFAb_Fog (HC_XA_Fog << 2)
592#define HC_HABLFAb_minAsrcFog (HC_XA_minAsrcFog << 2)
593#define HC_HABLFAb_minAsrcAdst (HC_XA_minAsrcAdst << 2)
594#define HC_HABLFAb_maxAsrcFog (HC_XA_maxAsrcFog << 2)
595#define HC_HABLFAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 2)
596#define HC_HABLFAb_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 2)
597#define HC_HABLFAb_HABLFRA (HC_XA_HABLFRA << 2)
598/* HC_SubA_HABLRAa 0x003d
599 */
600#define HC_HABLRAa_MASK 0x00ff0000
601#define HC_HABLRFAa_MASK 0x0000ff00
602#define HC_HABLRAbias_MASK 0x000000ff
603#define HC_HABLRAa_SHIFT 16
604#define HC_HABLRFAa_SHIFT 8
605/* HC_SubA_HABLRAb 0x003e
606 */
607#define HC_HABLRAb_MASK 0x0000ff00
608#define HC_HABLRFAb_MASK 0x000000ff
609#define HC_HABLRAb_SHIFT 8
610
611/* Destination Setting
612 */
613#define HC_SubA_HDBBasL 0x0040
614#define HC_SubA_HDBBasH 0x0041
615#define HC_SubA_HDBFM 0x0042
616#define HC_SubA_HFBBMSKL 0x0043
617#define HC_SubA_HROP 0x0044
618/* HC_SubA_HDBFM 0x0042
619 */
620#define HC_HDBFM_MASK 0x001f0000
621#define HC_HDBLoc_MASK 0x0000c000
622#define HC_HDBPit_MASK 0x00003fff
623#define HC_HDBFM_RGB555 0x00000000
624#define HC_HDBFM_RGB565 0x00010000
625#define HC_HDBFM_ARGB4444 0x00020000
626#define HC_HDBFM_ARGB1555 0x00030000
627#define HC_HDBFM_BGR555 0x00040000
628#define HC_HDBFM_BGR565 0x00050000
629#define HC_HDBFM_ABGR4444 0x00060000
630#define HC_HDBFM_ABGR1555 0x00070000
631#define HC_HDBFM_ARGB0888 0x00080000
632#define HC_HDBFM_ARGB8888 0x00090000
633#define HC_HDBFM_ABGR0888 0x000a0000
634#define HC_HDBFM_ABGR8888 0x000b0000
635#define HC_HDBLoc_Local 0x00000000
636#define HC_HDBLoc_Sys 0x00004000
637/* HC_SubA_HROP 0x0044
638 */
639#define HC_HROP_MASK 0x00000f00
640#define HC_HFBBMSKH_MASK 0x000000ff
641#define HC_HROP_BLACK 0x00000000
642#define HC_HROP_DPon 0x00000100
643#define HC_HROP_DPna 0x00000200
644#define HC_HROP_Pn 0x00000300
645#define HC_HROP_PDna 0x00000400
646#define HC_HROP_Dn 0x00000500
647#define HC_HROP_DPx 0x00000600
648#define HC_HROP_DPan 0x00000700
649#define HC_HROP_DPa 0x00000800
650#define HC_HROP_DPxn 0x00000900
651#define HC_HROP_D 0x00000a00
652#define HC_HROP_DPno 0x00000b00
653#define HC_HROP_P 0x00000c00
654#define HC_HROP_PDno 0x00000d00
655#define HC_HROP_DPo 0x00000e00
656#define HC_HROP_WHITE 0x00000f00
657
658/* Fog Setting
659 */
660#define HC_SubA_HFogLF 0x0050
661#define HC_SubA_HFogCL 0x0051
662#define HC_SubA_HFogCH 0x0052
663#define HC_SubA_HFogStL 0x0053
664#define HC_SubA_HFogStH 0x0054
665#define HC_SubA_HFogOOdMF 0x0055
666#define HC_SubA_HFogOOdEF 0x0056
667#define HC_SubA_HFogEndL 0x0057
668#define HC_SubA_HFogDenst 0x0058
669/* HC_SubA_HFogLF 0x0050
670 */
671#define HC_FogLF_MASK 0x00000010
672#define HC_FogEq_MASK 0x00000008
673#define HC_FogMD_MASK 0x00000007
674#define HC_FogMD_LocalFog 0x00000000
675#define HC_FogMD_LinearFog 0x00000002
676#define HC_FogMD_ExponentialFog 0x00000004
677#define HC_FogMD_Exponential2Fog 0x00000005
678/* #define HC_FogMD_FogTable 0x00000003 */
679
680/* HC_SubA_HFogDenst 0x0058
681 */
682#define HC_FogDenst_MASK 0x001fff00
683#define HC_FogEndL_MASK 0x000000ff
684
685/* Texture subtype definitions
686 */
687#define HC_SubType_Tex0 0x00000000
688#define HC_SubType_Tex1 0x00000001
689#define HC_SubType_TexGeneral 0x000000fe
690
691/* Attribute of texture n
692 */
693#define HC_SubA_HTXnL0BasL 0x0000
694#define HC_SubA_HTXnL1BasL 0x0001
695#define HC_SubA_HTXnL2BasL 0x0002
696#define HC_SubA_HTXnL3BasL 0x0003
697#define HC_SubA_HTXnL4BasL 0x0004
698#define HC_SubA_HTXnL5BasL 0x0005
699#define HC_SubA_HTXnL6BasL 0x0006
700#define HC_SubA_HTXnL7BasL 0x0007
701#define HC_SubA_HTXnL8BasL 0x0008
702#define HC_SubA_HTXnL9BasL 0x0009
703#define HC_SubA_HTXnLaBasL 0x000a
704#define HC_SubA_HTXnLbBasL 0x000b
705#define HC_SubA_HTXnLcBasL 0x000c
706#define HC_SubA_HTXnLdBasL 0x000d
707#define HC_SubA_HTXnLeBasL 0x000e
708#define HC_SubA_HTXnLfBasL 0x000f
709#define HC_SubA_HTXnL10BasL 0x0010
710#define HC_SubA_HTXnL11BasL 0x0011
711#define HC_SubA_HTXnL012BasH 0x0020
712#define HC_SubA_HTXnL345BasH 0x0021
713#define HC_SubA_HTXnL678BasH 0x0022
714#define HC_SubA_HTXnL9abBasH 0x0023
715#define HC_SubA_HTXnLcdeBasH 0x0024
716#define HC_SubA_HTXnLf1011BasH 0x0025
717#define HC_SubA_HTXnL0Pit 0x002b
718#define HC_SubA_HTXnL1Pit 0x002c
719#define HC_SubA_HTXnL2Pit 0x002d
720#define HC_SubA_HTXnL3Pit 0x002e
721#define HC_SubA_HTXnL4Pit 0x002f
722#define HC_SubA_HTXnL5Pit 0x0030
723#define HC_SubA_HTXnL6Pit 0x0031
724#define HC_SubA_HTXnL7Pit 0x0032
725#define HC_SubA_HTXnL8Pit 0x0033
726#define HC_SubA_HTXnL9Pit 0x0034
727#define HC_SubA_HTXnLaPit 0x0035
728#define HC_SubA_HTXnLbPit 0x0036
729#define HC_SubA_HTXnLcPit 0x0037
730#define HC_SubA_HTXnLdPit 0x0038
731#define HC_SubA_HTXnLePit 0x0039
732#define HC_SubA_HTXnLfPit 0x003a
733#define HC_SubA_HTXnL10Pit 0x003b
734#define HC_SubA_HTXnL11Pit 0x003c
735#define HC_SubA_HTXnL0_5WE 0x004b
736#define HC_SubA_HTXnL6_bWE 0x004c
737#define HC_SubA_HTXnLc_11WE 0x004d
738#define HC_SubA_HTXnL0_5HE 0x0051
739#define HC_SubA_HTXnL6_bHE 0x0052
740#define HC_SubA_HTXnLc_11HE 0x0053
741#define HC_SubA_HTXnL0OS 0x0077
742#define HC_SubA_HTXnTB 0x0078
743#define HC_SubA_HTXnMPMD 0x0079
744#define HC_SubA_HTXnCLODu 0x007a
745#define HC_SubA_HTXnFM 0x007b
746#define HC_SubA_HTXnTRCH 0x007c
747#define HC_SubA_HTXnTRCL 0x007d
748#define HC_SubA_HTXnTBC 0x007e
749#define HC_SubA_HTXnTRAH 0x007f
750#define HC_SubA_HTXnTBLCsat 0x0080
751#define HC_SubA_HTXnTBLCop 0x0081
752#define HC_SubA_HTXnTBLMPfog 0x0082
753#define HC_SubA_HTXnTBLAsat 0x0083
754#define HC_SubA_HTXnTBLRCa 0x0085
755#define HC_SubA_HTXnTBLRCb 0x0086
756#define HC_SubA_HTXnTBLRCc 0x0087
757#define HC_SubA_HTXnTBLRCbias 0x0088
758#define HC_SubA_HTXnTBLRAa 0x0089
759#define HC_SubA_HTXnTBLRFog 0x008a
760#define HC_SubA_HTXnBumpM00 0x0090
761#define HC_SubA_HTXnBumpM01 0x0091
762#define HC_SubA_HTXnBumpM10 0x0092
763#define HC_SubA_HTXnBumpM11 0x0093
764#define HC_SubA_HTXnLScale 0x0094
765#define HC_SubA_HTXSMD 0x0000
766/* HC_SubA_HTXnL012BasH 0x0020
767 */
768#define HC_HTXnL0BasH_MASK 0x000000ff
769#define HC_HTXnL1BasH_MASK 0x0000ff00
770#define HC_HTXnL2BasH_MASK 0x00ff0000
771#define HC_HTXnL1BasH_SHIFT 8
772#define HC_HTXnL2BasH_SHIFT 16
773/* HC_SubA_HTXnL345BasH 0x0021
774 */
775#define HC_HTXnL3BasH_MASK 0x000000ff
776#define HC_HTXnL4BasH_MASK 0x0000ff00
777#define HC_HTXnL5BasH_MASK 0x00ff0000
778#define HC_HTXnL4BasH_SHIFT 8
779#define HC_HTXnL5BasH_SHIFT 16
780/* HC_SubA_HTXnL678BasH 0x0022
781 */
782#define HC_HTXnL6BasH_MASK 0x000000ff
783#define HC_HTXnL7BasH_MASK 0x0000ff00
784#define HC_HTXnL8BasH_MASK 0x00ff0000
785#define HC_HTXnL7BasH_SHIFT 8
786#define HC_HTXnL8BasH_SHIFT 16
787/* HC_SubA_HTXnL9abBasH 0x0023
788 */
789#define HC_HTXnL9BasH_MASK 0x000000ff
790#define HC_HTXnLaBasH_MASK 0x0000ff00
791#define HC_HTXnLbBasH_MASK 0x00ff0000
792#define HC_HTXnLaBasH_SHIFT 8
793#define HC_HTXnLbBasH_SHIFT 16
794/* HC_SubA_HTXnLcdeBasH 0x0024
795 */
796#define HC_HTXnLcBasH_MASK 0x000000ff
797#define HC_HTXnLdBasH_MASK 0x0000ff00
798#define HC_HTXnLeBasH_MASK 0x00ff0000
799#define HC_HTXnLdBasH_SHIFT 8
800#define HC_HTXnLeBasH_SHIFT 16
801/* HC_SubA_HTXnLf1011BasH 0x0025
802 */
803#define HC_HTXnLfBasH_MASK 0x000000ff
804#define HC_HTXnL10BasH_MASK 0x0000ff00
805#define HC_HTXnL11BasH_MASK 0x00ff0000
806#define HC_HTXnL10BasH_SHIFT 8
807#define HC_HTXnL11BasH_SHIFT 16
808/* HC_SubA_HTXnL0Pit 0x002b
809 */
810#define HC_HTXnLnPit_MASK 0x00003fff
811#define HC_HTXnEnPit_MASK 0x00080000
812#define HC_HTXnLnPitE_MASK 0x00f00000
813#define HC_HTXnLnPitE_SHIFT 20
814/* HC_SubA_HTXnL0_5WE 0x004b
815 */
816#define HC_HTXnL0WE_MASK 0x0000000f
817#define HC_HTXnL1WE_MASK 0x000000f0
818#define HC_HTXnL2WE_MASK 0x00000f00
819#define HC_HTXnL3WE_MASK 0x0000f000
820#define HC_HTXnL4WE_MASK 0x000f0000
821#define HC_HTXnL5WE_MASK 0x00f00000
822#define HC_HTXnL1WE_SHIFT 4
823#define HC_HTXnL2WE_SHIFT 8
824#define HC_HTXnL3WE_SHIFT 12
825#define HC_HTXnL4WE_SHIFT 16
826#define HC_HTXnL5WE_SHIFT 20
827/* HC_SubA_HTXnL6_bWE 0x004c
828 */
829#define HC_HTXnL6WE_MASK 0x0000000f
830#define HC_HTXnL7WE_MASK 0x000000f0
831#define HC_HTXnL8WE_MASK 0x00000f00
832#define HC_HTXnL9WE_MASK 0x0000f000
833#define HC_HTXnLaWE_MASK 0x000f0000
834#define HC_HTXnLbWE_MASK 0x00f00000
835#define HC_HTXnL7WE_SHIFT 4
836#define HC_HTXnL8WE_SHIFT 8
837#define HC_HTXnL9WE_SHIFT 12
838#define HC_HTXnLaWE_SHIFT 16
839#define HC_HTXnLbWE_SHIFT 20
840/* HC_SubA_HTXnLc_11WE 0x004d
841 */
842#define HC_HTXnLcWE_MASK 0x0000000f
843#define HC_HTXnLdWE_MASK 0x000000f0
844#define HC_HTXnLeWE_MASK 0x00000f00
845#define HC_HTXnLfWE_MASK 0x0000f000
846#define HC_HTXnL10WE_MASK 0x000f0000
847#define HC_HTXnL11WE_MASK 0x00f00000
848#define HC_HTXnLdWE_SHIFT 4
849#define HC_HTXnLeWE_SHIFT 8
850#define HC_HTXnLfWE_SHIFT 12
851#define HC_HTXnL10WE_SHIFT 16
852#define HC_HTXnL11WE_SHIFT 20
853/* HC_SubA_HTXnL0_5HE 0x0051
854 */
855#define HC_HTXnL0HE_MASK 0x0000000f
856#define HC_HTXnL1HE_MASK 0x000000f0
857#define HC_HTXnL2HE_MASK 0x00000f00
858#define HC_HTXnL3HE_MASK 0x0000f000
859#define HC_HTXnL4HE_MASK 0x000f0000
860#define HC_HTXnL5HE_MASK 0x00f00000
861#define HC_HTXnL1HE_SHIFT 4
862#define HC_HTXnL2HE_SHIFT 8
863#define HC_HTXnL3HE_SHIFT 12
864#define HC_HTXnL4HE_SHIFT 16
865#define HC_HTXnL5HE_SHIFT 20
866/* HC_SubA_HTXnL6_bHE 0x0052
867 */
868#define HC_HTXnL6HE_MASK 0x0000000f
869#define HC_HTXnL7HE_MASK 0x000000f0
870#define HC_HTXnL8HE_MASK 0x00000f00
871#define HC_HTXnL9HE_MASK 0x0000f000
872#define HC_HTXnLaHE_MASK 0x000f0000
873#define HC_HTXnLbHE_MASK 0x00f00000
874#define HC_HTXnL7HE_SHIFT 4
875#define HC_HTXnL8HE_SHIFT 8
876#define HC_HTXnL9HE_SHIFT 12
877#define HC_HTXnLaHE_SHIFT 16
878#define HC_HTXnLbHE_SHIFT 20
879/* HC_SubA_HTXnLc_11HE 0x0053
880 */
881#define HC_HTXnLcHE_MASK 0x0000000f
882#define HC_HTXnLdHE_MASK 0x000000f0
883#define HC_HTXnLeHE_MASK 0x00000f00
884#define HC_HTXnLfHE_MASK 0x0000f000
885#define HC_HTXnL10HE_MASK 0x000f0000
886#define HC_HTXnL11HE_MASK 0x00f00000
887#define HC_HTXnLdHE_SHIFT 4
888#define HC_HTXnLeHE_SHIFT 8
889#define HC_HTXnLfHE_SHIFT 12
890#define HC_HTXnL10HE_SHIFT 16
891#define HC_HTXnL11HE_SHIFT 20
892/* HC_SubA_HTXnL0OS 0x0077
893 */
894#define HC_HTXnL0OS_MASK 0x003ff000
895#define HC_HTXnLVmax_MASK 0x00000fc0
896#define HC_HTXnLVmin_MASK 0x0000003f
897#define HC_HTXnL0OS_SHIFT 12
898#define HC_HTXnLVmax_SHIFT 6
899/* HC_SubA_HTXnTB 0x0078
900 */
901#define HC_HTXnTB_MASK 0x00f00000
902#define HC_HTXnFLSe_MASK 0x0000e000
903#define HC_HTXnFLSs_MASK 0x00001c00
904#define HC_HTXnFLTe_MASK 0x00000380
905#define HC_HTXnFLTs_MASK 0x00000070
906#define HC_HTXnFLDs_MASK 0x0000000f
907#define HC_HTXnTB_NoTB 0x00000000
908#define HC_HTXnTB_TBC_S 0x00100000
909#define HC_HTXnTB_TBC_T 0x00200000
910#define HC_HTXnTB_TB_S 0x00400000
911#define HC_HTXnTB_TB_T 0x00800000
912#define HC_HTXnFLSe_Nearest 0x00000000
913#define HC_HTXnFLSe_Linear 0x00002000
914#define HC_HTXnFLSe_NonLinear 0x00004000
915#define HC_HTXnFLSe_Sharp 0x00008000
916#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
917#define HC_HTXnFLSs_Nearest 0x00000000
918#define HC_HTXnFLSs_Linear 0x00000400
919#define HC_HTXnFLSs_NonLinear 0x00000800
920#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
921#define HC_HTXnFLTe_Nearest 0x00000000
922#define HC_HTXnFLTe_Linear 0x00000080
923#define HC_HTXnFLTe_NonLinear 0x00000100
924#define HC_HTXnFLTe_Sharp 0x00000180
925#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
926#define HC_HTXnFLTs_Nearest 0x00000000
927#define HC_HTXnFLTs_Linear 0x00000010
928#define HC_HTXnFLTs_NonLinear 0x00000020
929#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
930#define HC_HTXnFLDs_Tex0 0x00000000
931#define HC_HTXnFLDs_Nearest 0x00000001
932#define HC_HTXnFLDs_Linear 0x00000002
933#define HC_HTXnFLDs_NonLinear 0x00000003
934#define HC_HTXnFLDs_Dither 0x00000004
935#define HC_HTXnFLDs_ConstLOD 0x00000005
936#define HC_HTXnFLDs_Ani 0x00000006
937#define HC_HTXnFLDs_AniDither 0x00000007
938/* HC_SubA_HTXnMPMD 0x0079
939 */
940#define HC_HTXnMPMD_SMASK 0x00070000
941#define HC_HTXnMPMD_TMASK 0x00380000
942#define HC_HTXnLODDTf_MASK 0x00000007
943#define HC_HTXnXY2ST_MASK 0x00000008
944#define HC_HTXnMPMD_Tsingle 0x00000000
945#define HC_HTXnMPMD_Tclamp 0x00080000
946#define HC_HTXnMPMD_Trepeat 0x00100000
947#define HC_HTXnMPMD_Tmirror 0x00180000
948#define HC_HTXnMPMD_Twrap 0x00200000
949#define HC_HTXnMPMD_Ssingle 0x00000000
950#define HC_HTXnMPMD_Sclamp 0x00010000
951#define HC_HTXnMPMD_Srepeat 0x00020000
952#define HC_HTXnMPMD_Smirror 0x00030000
953#define HC_HTXnMPMD_Swrap 0x00040000
954/* HC_SubA_HTXnCLODu 0x007a
955 */
956#define HC_HTXnCLODu_MASK 0x000ffc00
957#define HC_HTXnCLODd_MASK 0x000003ff
958#define HC_HTXnCLODu_SHIFT 10
959/* HC_SubA_HTXnFM 0x007b
960 */
961#define HC_HTXnFM_MASK 0x00ff0000
962#define HC_HTXnLoc_MASK 0x00000003
963#define HC_HTXnFM_INDEX 0x00000000
964#define HC_HTXnFM_Intensity 0x00080000
965#define HC_HTXnFM_Lum 0x00100000
966#define HC_HTXnFM_Alpha 0x00180000
967#define HC_HTXnFM_DX 0x00280000
968#define HC_HTXnFM_ARGB16 0x00880000
969#define HC_HTXnFM_ARGB32 0x00980000
970#define HC_HTXnFM_ABGR16 0x00a80000
971#define HC_HTXnFM_ABGR32 0x00b80000
972#define HC_HTXnFM_RGBA16 0x00c80000
973#define HC_HTXnFM_RGBA32 0x00d80000
974#define HC_HTXnFM_BGRA16 0x00e80000
975#define HC_HTXnFM_BGRA32 0x00f80000
976#define HC_HTXnFM_BUMPMAP 0x00380000
977#define HC_HTXnFM_Index1 (HC_HTXnFM_INDEX | 0x00000000)
978#define HC_HTXnFM_Index2 (HC_HTXnFM_INDEX | 0x00010000)
979#define HC_HTXnFM_Index4 (HC_HTXnFM_INDEX | 0x00020000)
980#define HC_HTXnFM_Index8 (HC_HTXnFM_INDEX | 0x00030000)
981#define HC_HTXnFM_T1 (HC_HTXnFM_Intensity | 0x00000000)
982#define HC_HTXnFM_T2 (HC_HTXnFM_Intensity | 0x00010000)
983#define HC_HTXnFM_T4 (HC_HTXnFM_Intensity | 0x00020000)
984#define HC_HTXnFM_T8 (HC_HTXnFM_Intensity | 0x00030000)
985#define HC_HTXnFM_L1 (HC_HTXnFM_Lum | 0x00000000)
986#define HC_HTXnFM_L2 (HC_HTXnFM_Lum | 0x00010000)
987#define HC_HTXnFM_L4 (HC_HTXnFM_Lum | 0x00020000)
988#define HC_HTXnFM_L8 (HC_HTXnFM_Lum | 0x00030000)
989#define HC_HTXnFM_AL44 (HC_HTXnFM_Lum | 0x00040000)
990#define HC_HTXnFM_AL88 (HC_HTXnFM_Lum | 0x00050000)
991#define HC_HTXnFM_A1 (HC_HTXnFM_Alpha | 0x00000000)
992#define HC_HTXnFM_A2 (HC_HTXnFM_Alpha | 0x00010000)
993#define HC_HTXnFM_A4 (HC_HTXnFM_Alpha | 0x00020000)
994#define HC_HTXnFM_A8 (HC_HTXnFM_Alpha | 0x00030000)
995#define HC_HTXnFM_DX1 (HC_HTXnFM_DX | 0x00010000)
996#define HC_HTXnFM_DX23 (HC_HTXnFM_DX | 0x00020000)
997#define HC_HTXnFM_DX45 (HC_HTXnFM_DX | 0x00030000)
998#define HC_HTXnFM_RGB555 (HC_HTXnFM_ARGB16 | 0x00000000)
999#define HC_HTXnFM_RGB565 (HC_HTXnFM_ARGB16 | 0x00010000)
1000#define HC_HTXnFM_ARGB1555 (HC_HTXnFM_ARGB16 | 0x00020000)
1001#define HC_HTXnFM_ARGB4444 (HC_HTXnFM_ARGB16 | 0x00030000)
1002#define HC_HTXnFM_ARGB0888 (HC_HTXnFM_ARGB32 | 0x00000000)
1003#define HC_HTXnFM_ARGB8888 (HC_HTXnFM_ARGB32 | 0x00010000)
1004#define HC_HTXnFM_BGR555 (HC_HTXnFM_ABGR16 | 0x00000000)
1005#define HC_HTXnFM_BGR565 (HC_HTXnFM_ABGR16 | 0x00010000)
1006#define HC_HTXnFM_ABGR1555 (HC_HTXnFM_ABGR16 | 0x00020000)
1007#define HC_HTXnFM_ABGR4444 (HC_HTXnFM_ABGR16 | 0x00030000)
1008#define HC_HTXnFM_ABGR0888 (HC_HTXnFM_ABGR32 | 0x00000000)
1009#define HC_HTXnFM_ABGR8888 (HC_HTXnFM_ABGR32 | 0x00010000)
1010#define HC_HTXnFM_RGBA5550 (HC_HTXnFM_RGBA16 | 0x00000000)
1011#define HC_HTXnFM_RGBA5551 (HC_HTXnFM_RGBA16 | 0x00020000)
1012#define HC_HTXnFM_RGBA4444 (HC_HTXnFM_RGBA16 | 0x00030000)
1013#define HC_HTXnFM_RGBA8880 (HC_HTXnFM_RGBA32 | 0x00000000)
1014#define HC_HTXnFM_RGBA8888 (HC_HTXnFM_RGBA32 | 0x00010000)
1015#define HC_HTXnFM_BGRA5550 (HC_HTXnFM_BGRA16 | 0x00000000)
1016#define HC_HTXnFM_BGRA5551 (HC_HTXnFM_BGRA16 | 0x00020000)
1017#define HC_HTXnFM_BGRA4444 (HC_HTXnFM_BGRA16 | 0x00030000)
1018#define HC_HTXnFM_BGRA8880 (HC_HTXnFM_BGRA32 | 0x00000000)
1019#define HC_HTXnFM_BGRA8888 (HC_HTXnFM_BGRA32 | 0x00010000)
1020#define HC_HTXnFM_VU88 (HC_HTXnFM_BUMPMAP | 0x00000000)
1021#define HC_HTXnFM_LVU655 (HC_HTXnFM_BUMPMAP | 0x00010000)
1022#define HC_HTXnFM_LVU888 (HC_HTXnFM_BUMPMAP | 0x00020000)
1023#define HC_HTXnLoc_Local 0x00000000
1024#define HC_HTXnLoc_Sys 0x00000002
1025#define HC_HTXnLoc_AGP 0x00000003
1026/* HC_SubA_HTXnTRAH 0x007f
1027 */
1028#define HC_HTXnTRAH_MASK 0x00ff0000
1029#define HC_HTXnTRAL_MASK 0x0000ff00
1030#define HC_HTXnTBA_MASK 0x000000ff
1031#define HC_HTXnTRAH_SHIFT 16
1032#define HC_HTXnTRAL_SHIFT 8
1033/* HC_SubA_HTXnTBLCsat 0x0080
1034 *-- Define the input texture.
1035 */
1036#define HC_XTC_TOPC 0x00000000
1037#define HC_XTC_InvTOPC 0x00000010
1038#define HC_XTC_TOPCp5 0x00000020
1039#define HC_XTC_Cbias 0x00000000
1040#define HC_XTC_InvCbias 0x00000010
1041#define HC_XTC_0 0x00000000
1042#define HC_XTC_Dif 0x00000001
1043#define HC_XTC_Spec 0x00000002
1044#define HC_XTC_Tex 0x00000003
1045#define HC_XTC_Cur 0x00000004
1046#define HC_XTC_Adif 0x00000005
1047#define HC_XTC_Fog 0x00000006
1048#define HC_XTC_Atex 0x00000007
1049#define HC_XTC_Acur 0x00000008
1050#define HC_XTC_HTXnTBLRC 0x00000009
1051#define HC_XTC_Ctexnext 0x0000000a
1052/*--
1053 */
1054#define HC_HTXnTBLCsat_MASK 0x00800000
1055#define HC_HTXnTBLCa_MASK 0x000fc000
1056#define HC_HTXnTBLCb_MASK 0x00001f80
1057#define HC_HTXnTBLCc_MASK 0x0000003f
1058#define HC_HTXnTBLCa_TOPC (HC_XTC_TOPC << 14)
1059#define HC_HTXnTBLCa_InvTOPC (HC_XTC_InvTOPC << 14)
1060#define HC_HTXnTBLCa_TOPCp5 (HC_XTC_TOPCp5 << 14)
1061#define HC_HTXnTBLCa_0 (HC_XTC_0 << 14)
1062#define HC_HTXnTBLCa_Dif (HC_XTC_Dif << 14)
1063#define HC_HTXnTBLCa_Spec (HC_XTC_Spec << 14)
1064#define HC_HTXnTBLCa_Tex (HC_XTC_Tex << 14)
1065#define HC_HTXnTBLCa_Cur (HC_XTC_Cur << 14)
1066#define HC_HTXnTBLCa_Adif (HC_XTC_Adif << 14)
1067#define HC_HTXnTBLCa_Fog (HC_XTC_Fog << 14)
1068#define HC_HTXnTBLCa_Atex (HC_XTC_Atex << 14)
1069#define HC_HTXnTBLCa_Acur (HC_XTC_Acur << 14)
1070#define HC_HTXnTBLCa_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14)
1071#define HC_HTXnTBLCa_Ctexnext (HC_XTC_Ctexnext << 14)
1072#define HC_HTXnTBLCb_TOPC (HC_XTC_TOPC << 7)
1073#define HC_HTXnTBLCb_InvTOPC (HC_XTC_InvTOPC << 7)
1074#define HC_HTXnTBLCb_TOPCp5 (HC_XTC_TOPCp5 << 7)
1075#define HC_HTXnTBLCb_0 (HC_XTC_0 << 7)
1076#define HC_HTXnTBLCb_Dif (HC_XTC_Dif << 7)
1077#define HC_HTXnTBLCb_Spec (HC_XTC_Spec << 7)
1078#define HC_HTXnTBLCb_Tex (HC_XTC_Tex << 7)
1079#define HC_HTXnTBLCb_Cur (HC_XTC_Cur << 7)
1080#define HC_HTXnTBLCb_Adif (HC_XTC_Adif << 7)
1081#define HC_HTXnTBLCb_Fog (HC_XTC_Fog << 7)
1082#define HC_HTXnTBLCb_Atex (HC_XTC_Atex << 7)
1083#define HC_HTXnTBLCb_Acur (HC_XTC_Acur << 7)
1084#define HC_HTXnTBLCb_HTXnTBLRC (HC_XTC_HTXnTBLRC << 7)
1085#define HC_HTXnTBLCb_Ctexnext (HC_XTC_Ctexnext << 7)
1086#define HC_HTXnTBLCc_TOPC (HC_XTC_TOPC << 0)
1087#define HC_HTXnTBLCc_InvTOPC (HC_XTC_InvTOPC << 0)
1088#define HC_HTXnTBLCc_TOPCp5 (HC_XTC_TOPCp5 << 0)
1089#define HC_HTXnTBLCc_0 (HC_XTC_0 << 0)
1090#define HC_HTXnTBLCc_Dif (HC_XTC_Dif << 0)
1091#define HC_HTXnTBLCc_Spec (HC_XTC_Spec << 0)
1092#define HC_HTXnTBLCc_Tex (HC_XTC_Tex << 0)
1093#define HC_HTXnTBLCc_Cur (HC_XTC_Cur << 0)
1094#define HC_HTXnTBLCc_Adif (HC_XTC_Adif << 0)
1095#define HC_HTXnTBLCc_Fog (HC_XTC_Fog << 0)
1096#define HC_HTXnTBLCc_Atex (HC_XTC_Atex << 0)
1097#define HC_HTXnTBLCc_Acur (HC_XTC_Acur << 0)
1098#define HC_HTXnTBLCc_HTXnTBLRC (HC_XTC_HTXnTBLRC << 0)
1099#define HC_HTXnTBLCc_Ctexnext (HC_XTC_Ctexnext << 0)
1100/* HC_SubA_HTXnTBLCop 0x0081
1101 */
1102#define HC_HTXnTBLdot_MASK 0x00c00000
1103#define HC_HTXnTBLCop_MASK 0x00380000
1104#define HC_HTXnTBLCbias_MASK 0x0007c000
1105#define HC_HTXnTBLCshift_MASK 0x00001800
1106#define HC_HTXnTBLAop_MASK 0x00000380
1107#define HC_HTXnTBLAbias_MASK 0x00000078
1108#define HC_HTXnTBLAshift_MASK 0x00000003
1109#define HC_HTXnTBLCop_Add 0x00000000
1110#define HC_HTXnTBLCop_Sub 0x00080000
1111#define HC_HTXnTBLCop_Min 0x00100000
1112#define HC_HTXnTBLCop_Max 0x00180000
1113#define HC_HTXnTBLCop_Mask 0x00200000
1114#define HC_HTXnTBLCbias_Cbias (HC_XTC_Cbias << 14)
1115#define HC_HTXnTBLCbias_InvCbias (HC_XTC_InvCbias << 14)
1116#define HC_HTXnTBLCbias_0 (HC_XTC_0 << 14)
1117#define HC_HTXnTBLCbias_Dif (HC_XTC_Dif << 14)
1118#define HC_HTXnTBLCbias_Spec (HC_XTC_Spec << 14)
1119#define HC_HTXnTBLCbias_Tex (HC_XTC_Tex << 14)
1120#define HC_HTXnTBLCbias_Cur (HC_XTC_Cur << 14)
1121#define HC_HTXnTBLCbias_Adif (HC_XTC_Adif << 14)
1122#define HC_HTXnTBLCbias_Fog (HC_XTC_Fog << 14)
1123#define HC_HTXnTBLCbias_Atex (HC_XTC_Atex << 14)
1124#define HC_HTXnTBLCbias_Acur (HC_XTC_Acur << 14)
1125#define HC_HTXnTBLCbias_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14)
1126#define HC_HTXnTBLCshift_1 0x00000000
1127#define HC_HTXnTBLCshift_2 0x00000800
1128#define HC_HTXnTBLCshift_No 0x00001000
1129#define HC_HTXnTBLCshift_DotP 0x00001800
1130/*=* John Sheng [2003.7.18] texture combine *=*/
1131#define HC_HTXnTBLDOT3 0x00080000
1132#define HC_HTXnTBLDOT4 0x000C0000
1133
1134#define HC_HTXnTBLAop_Add 0x00000000
1135#define HC_HTXnTBLAop_Sub 0x00000080
1136#define HC_HTXnTBLAop_Min 0x00000100
1137#define HC_HTXnTBLAop_Max 0x00000180
1138#define HC_HTXnTBLAop_Mask 0x00000200
1139#define HC_HTXnTBLAbias_Inv 0x00000040
1140#define HC_HTXnTBLAbias_Adif 0x00000000
1141#define HC_HTXnTBLAbias_Fog 0x00000008
1142#define HC_HTXnTBLAbias_Acur 0x00000010
1143#define HC_HTXnTBLAbias_HTXnTBLRAbias 0x00000018
1144#define HC_HTXnTBLAbias_Atex 0x00000020
1145#define HC_HTXnTBLAshift_1 0x00000000
1146#define HC_HTXnTBLAshift_2 0x00000001
1147#define HC_HTXnTBLAshift_No 0x00000002
1148/* #define HC_HTXnTBLAshift_DotP 0x00000003 */
1149/* HC_SubA_HTXnTBLMPFog 0x0082
1150 */
1151#define HC_HTXnTBLMPfog_MASK 0x00e00000
1152#define HC_HTXnTBLMPfog_0 0x00000000
1153#define HC_HTXnTBLMPfog_Adif 0x00200000
1154#define HC_HTXnTBLMPfog_Fog 0x00400000
1155#define HC_HTXnTBLMPfog_Atex 0x00600000
1156#define HC_HTXnTBLMPfog_Acur 0x00800000
1157#define HC_HTXnTBLMPfog_GHTXnTBLRFog 0x00a00000
1158/* HC_SubA_HTXnTBLAsat 0x0083
1159 *-- Define the texture alpha input.
1160 */
1161#define HC_XTA_TOPA 0x00000000
1162#define HC_XTA_InvTOPA 0x00000008
1163#define HC_XTA_TOPAp5 0x00000010
1164#define HC_XTA_Adif 0x00000000
1165#define HC_XTA_Fog 0x00000001
1166#define HC_XTA_Acur 0x00000002
1167#define HC_XTA_HTXnTBLRA 0x00000003
1168#define HC_XTA_Atex 0x00000004
1169#define HC_XTA_Atexnext 0x00000005
1170/*--
1171 */
1172#define HC_HTXnTBLAsat_MASK 0x00800000
1173#define HC_HTXnTBLAMB_MASK 0x00700000
1174#define HC_HTXnTBLAa_MASK 0x0007c000
1175#define HC_HTXnTBLAb_MASK 0x00000f80
1176#define HC_HTXnTBLAc_MASK 0x0000001f
1177#define HC_HTXnTBLAMB_SHIFT 20
1178#define HC_HTXnTBLAa_TOPA (HC_XTA_TOPA << 14)
1179#define HC_HTXnTBLAa_InvTOPA (HC_XTA_InvTOPA << 14)
1180#define HC_HTXnTBLAa_TOPAp5 (HC_XTA_TOPAp5 << 14)
1181#define HC_HTXnTBLAa_Adif (HC_XTA_Adif << 14)
1182#define HC_HTXnTBLAa_Fog (HC_XTA_Fog << 14)
1183#define HC_HTXnTBLAa_Acur (HC_XTA_Acur << 14)
1184#define HC_HTXnTBLAa_HTXnTBLRA (HC_XTA_HTXnTBLRA << 14)
1185#define HC_HTXnTBLAa_Atex (HC_XTA_Atex << 14)
1186#define HC_HTXnTBLAa_Atexnext (HC_XTA_Atexnext << 14)
1187#define HC_HTXnTBLAb_TOPA (HC_XTA_TOPA << 7)
1188#define HC_HTXnTBLAb_InvTOPA (HC_XTA_InvTOPA << 7)
1189#define HC_HTXnTBLAb_TOPAp5 (HC_XTA_TOPAp5 << 7)
1190#define HC_HTXnTBLAb_Adif (HC_XTA_Adif << 7)
1191#define HC_HTXnTBLAb_Fog (HC_XTA_Fog << 7)
1192#define HC_HTXnTBLAb_Acur (HC_XTA_Acur << 7)
1193#define HC_HTXnTBLAb_HTXnTBLRA (HC_XTA_HTXnTBLRA << 7)
1194#define HC_HTXnTBLAb_Atex (HC_XTA_Atex << 7)
1195#define HC_HTXnTBLAb_Atexnext (HC_XTA_Atexnext << 7)
1196#define HC_HTXnTBLAc_TOPA (HC_XTA_TOPA << 0)
1197#define HC_HTXnTBLAc_InvTOPA (HC_XTA_InvTOPA << 0)
1198#define HC_HTXnTBLAc_TOPAp5 (HC_XTA_TOPAp5 << 0)
1199#define HC_HTXnTBLAc_Adif (HC_XTA_Adif << 0)
1200#define HC_HTXnTBLAc_Fog (HC_XTA_Fog << 0)
1201#define HC_HTXnTBLAc_Acur (HC_XTA_Acur << 0)
1202#define HC_HTXnTBLAc_HTXnTBLRA (HC_XTA_HTXnTBLRA << 0)
1203#define HC_HTXnTBLAc_Atex (HC_XTA_Atex << 0)
1204#define HC_HTXnTBLAc_Atexnext (HC_XTA_Atexnext << 0)
1205/* HC_SubA_HTXnTBLRAa 0x0089
1206 */
1207#define HC_HTXnTBLRAa_MASK 0x00ff0000
1208#define HC_HTXnTBLRAb_MASK 0x0000ff00
1209#define HC_HTXnTBLRAc_MASK 0x000000ff
1210#define HC_HTXnTBLRAa_SHIFT 16
1211#define HC_HTXnTBLRAb_SHIFT 8
1212#define HC_HTXnTBLRAc_SHIFT 0
1213/* HC_SubA_HTXnTBLRFog 0x008a
1214 */
1215#define HC_HTXnTBLRFog_MASK 0x0000ff00
1216#define HC_HTXnTBLRAbias_MASK 0x000000ff
1217#define HC_HTXnTBLRFog_SHIFT 8
1218#define HC_HTXnTBLRAbias_SHIFT 0
1219/* HC_SubA_HTXnLScale 0x0094
1220 */
1221#define HC_HTXnLScale_MASK 0x0007fc00
1222#define HC_HTXnLOff_MASK 0x000001ff
1223#define HC_HTXnLScale_SHIFT 10
1224/* HC_SubA_HTXSMD 0x0000
1225 */
1226#define HC_HTXSMD_MASK 0x00000080
1227#define HC_HTXTMD_MASK 0x00000040
1228#define HC_HTXNum_MASK 0x00000038
1229#define HC_HTXTRMD_MASK 0x00000006
1230#define HC_HTXCHCLR_MASK 0x00000001
1231#define HC_HTXNum_SHIFT 3
1232
1233/* Texture Palette n
1234 */
1235#define HC_SubType_TexPalette0 0x00000000
1236#define HC_SubType_TexPalette1 0x00000001
1237#define HC_SubType_FogTable 0x00000010
1238#define HC_SubType_Stipple 0x00000014
1239/* HC_SubA_TexPalette0 0x0000
1240 */
1241#define HC_HTPnA_MASK 0xff000000
1242#define HC_HTPnR_MASK 0x00ff0000
1243#define HC_HTPnG_MASK 0x0000ff00
1244#define HC_HTPnB_MASK 0x000000ff
1245/* HC_SubA_FogTable 0x0010
1246 */
1247#define HC_HFPn3_MASK 0xff000000
1248#define HC_HFPn2_MASK 0x00ff0000
1249#define HC_HFPn1_MASK 0x0000ff00
1250#define HC_HFPn_MASK 0x000000ff
1251#define HC_HFPn3_SHIFT 24
1252#define HC_HFPn2_SHIFT 16
1253#define HC_HFPn1_SHIFT 8
1254
1255/* Auto Testing & Security
1256 */
1257#define HC_SubA_HenFIFOAT 0x0000
1258#define HC_SubA_HFBDrawFirst 0x0004
1259#define HC_SubA_HFBBasL 0x0005
1260#define HC_SubA_HFBDst 0x0006
1261/* HC_SubA_HenFIFOAT 0x0000
1262 */
1263#define HC_HenFIFOAT_MASK 0x00000020
1264#define HC_HenGEMILock_MASK 0x00000010
1265#define HC_HenFBASwap_MASK 0x00000008
1266#define HC_HenOT_MASK 0x00000004
1267#define HC_HenCMDQ_MASK 0x00000002
1268#define HC_HenTXCTSU_MASK 0x00000001
1269/* HC_SubA_HFBDrawFirst 0x0004
1270 */
1271#define HC_HFBDrawFirst_MASK 0x00000800
1272#define HC_HFBQueue_MASK 0x00000400
1273#define HC_HFBLock_MASK 0x00000200
1274#define HC_HEOF_MASK 0x00000100
1275#define HC_HFBBasH_MASK 0x000000ff
1276
1277/* GEMI Setting
1278 */
1279#define HC_SubA_HTArbRCM 0x0008
1280#define HC_SubA_HTArbRZ 0x000a
1281#define HC_SubA_HTArbWZ 0x000b
1282#define HC_SubA_HTArbRTX 0x000c
1283#define HC_SubA_HTArbRCW 0x000d
1284#define HC_SubA_HTArbE2 0x000e
1285#define HC_SubA_HArbRQCM 0x0010
1286#define HC_SubA_HArbWQCM 0x0011
1287#define HC_SubA_HGEMITout 0x0020
1288#define HC_SubA_HFthRTXD 0x0040
1289#define HC_SubA_HFthRTXA 0x0044
1290#define HC_SubA_HCMDQstL 0x0050
1291#define HC_SubA_HCMDQendL 0x0051
1292#define HC_SubA_HCMDQLen 0x0052
1293/* HC_SubA_HTArbRCM 0x0008
1294 */
1295#define HC_HTArbRCM_MASK 0x0000ffff
1296/* HC_SubA_HTArbRZ 0x000a
1297 */
1298#define HC_HTArbRZ_MASK 0x0000ffff
1299/* HC_SubA_HTArbWZ 0x000b
1300 */
1301#define HC_HTArbWZ_MASK 0x0000ffff
1302/* HC_SubA_HTArbRTX 0x000c
1303 */
1304#define HC_HTArbRTX_MASK 0x0000ffff
1305/* HC_SubA_HTArbRCW 0x000d
1306 */
1307#define HC_HTArbRCW_MASK 0x0000ffff
1308/* HC_SubA_HTArbE2 0x000e
1309 */
1310#define HC_HTArbE2_MASK 0x0000ffff
1311/* HC_SubA_HArbRQCM 0x0010
1312 */
1313#define HC_HTArbRQCM_MASK 0x0000ffff
1314/* HC_SubA_HArbWQCM 0x0011
1315 */
1316#define HC_HArbWQCM_MASK 0x0000ffff
1317/* HC_SubA_HGEMITout 0x0020
1318 */
1319#define HC_HGEMITout_MASK 0x000f0000
1320#define HC_HNPArbZC_MASK 0x0000ffff
1321#define HC_HGEMITout_SHIFT 16
1322/* HC_SubA_HFthRTXD 0x0040
1323 */
1324#define HC_HFthRTXD_MASK 0x00ff0000
1325#define HC_HFthRZD_MASK 0x0000ff00
1326#define HC_HFthWZD_MASK 0x000000ff
1327#define HC_HFthRTXD_SHIFT 16
1328#define HC_HFthRZD_SHIFT 8
1329/* HC_SubA_HFthRTXA 0x0044
1330 */
1331#define HC_HFthRTXA_MASK 0x000000ff
1332
1333/******************************************************************************
1334** Define the Halcyon Internal register access constants. For simulator only.
1335******************************************************************************/
1336#define HC_SIMA_HAGPBstL 0x0000
1337#define HC_SIMA_HAGPBendL 0x0001
1338#define HC_SIMA_HAGPCMNT 0x0002
1339#define HC_SIMA_HAGPBpL 0x0003
1340#define HC_SIMA_HAGPBpH 0x0004
1341#define HC_SIMA_HClipTB 0x0005
1342#define HC_SIMA_HClipLR 0x0006
1343#define HC_SIMA_HFPClipTL 0x0007
1344#define HC_SIMA_HFPClipBL 0x0008
1345#define HC_SIMA_HFPClipLL 0x0009
1346#define HC_SIMA_HFPClipRL 0x000a
1347#define HC_SIMA_HFPClipTBH 0x000b
1348#define HC_SIMA_HFPClipLRH 0x000c
1349#define HC_SIMA_HLP 0x000d
1350#define HC_SIMA_HLPRF 0x000e
1351#define HC_SIMA_HSolidCL 0x000f
1352#define HC_SIMA_HPixGC 0x0010
1353#define HC_SIMA_HSPXYOS 0x0011
1354#define HC_SIMA_HCmdA 0x0012
1355#define HC_SIMA_HCmdB 0x0013
1356#define HC_SIMA_HEnable 0x0014
1357#define HC_SIMA_HZWBBasL 0x0015
1358#define HC_SIMA_HZWBBasH 0x0016
1359#define HC_SIMA_HZWBType 0x0017
1360#define HC_SIMA_HZBiasL 0x0018
1361#define HC_SIMA_HZWBend 0x0019
1362#define HC_SIMA_HZWTMD 0x001a
1363#define HC_SIMA_HZWCDL 0x001b
1364#define HC_SIMA_HZWCTAGnum 0x001c
1365#define HC_SIMA_HZCYNum 0x001d
1366#define HC_SIMA_HZWCFire 0x001e
1367/* #define HC_SIMA_HSBBasL 0x001d */
1368/* #define HC_SIMA_HSBBasH 0x001e */
1369/* #define HC_SIMA_HSBFM 0x001f */
1370#define HC_SIMA_HSTREF 0x0020
1371#define HC_SIMA_HSTMD 0x0021
1372#define HC_SIMA_HABBasL 0x0022
1373#define HC_SIMA_HABBasH 0x0023
1374#define HC_SIMA_HABFM 0x0024
1375#define HC_SIMA_HATMD 0x0025
1376#define HC_SIMA_HABLCsat 0x0026
1377#define HC_SIMA_HABLCop 0x0027
1378#define HC_SIMA_HABLAsat 0x0028
1379#define HC_SIMA_HABLAop 0x0029
1380#define HC_SIMA_HABLRCa 0x002a
1381#define HC_SIMA_HABLRFCa 0x002b
1382#define HC_SIMA_HABLRCbias 0x002c
1383#define HC_SIMA_HABLRCb 0x002d
1384#define HC_SIMA_HABLRFCb 0x002e
1385#define HC_SIMA_HABLRAa 0x002f
1386#define HC_SIMA_HABLRAb 0x0030
1387#define HC_SIMA_HDBBasL 0x0031
1388#define HC_SIMA_HDBBasH 0x0032
1389#define HC_SIMA_HDBFM 0x0033
1390#define HC_SIMA_HFBBMSKL 0x0034
1391#define HC_SIMA_HROP 0x0035
1392#define HC_SIMA_HFogLF 0x0036
1393#define HC_SIMA_HFogCL 0x0037
1394#define HC_SIMA_HFogCH 0x0038
1395#define HC_SIMA_HFogStL 0x0039
1396#define HC_SIMA_HFogStH 0x003a
1397#define HC_SIMA_HFogOOdMF 0x003b
1398#define HC_SIMA_HFogOOdEF 0x003c
1399#define HC_SIMA_HFogEndL 0x003d
1400#define HC_SIMA_HFogDenst 0x003e
1401/*---- start of texture 0 setting ----
1402 */
1403#define HC_SIMA_HTX0L0BasL 0x0040
1404#define HC_SIMA_HTX0L1BasL 0x0041
1405#define HC_SIMA_HTX0L2BasL 0x0042
1406#define HC_SIMA_HTX0L3BasL 0x0043
1407#define HC_SIMA_HTX0L4BasL 0x0044
1408#define HC_SIMA_HTX0L5BasL 0x0045
1409#define HC_SIMA_HTX0L6BasL 0x0046
1410#define HC_SIMA_HTX0L7BasL 0x0047
1411#define HC_SIMA_HTX0L8BasL 0x0048
1412#define HC_SIMA_HTX0L9BasL 0x0049
1413#define HC_SIMA_HTX0LaBasL 0x004a
1414#define HC_SIMA_HTX0LbBasL 0x004b
1415#define HC_SIMA_HTX0LcBasL 0x004c
1416#define HC_SIMA_HTX0LdBasL 0x004d
1417#define HC_SIMA_HTX0LeBasL 0x004e
1418#define HC_SIMA_HTX0LfBasL 0x004f
1419#define HC_SIMA_HTX0L10BasL 0x0050
1420#define HC_SIMA_HTX0L11BasL 0x0051
1421#define HC_SIMA_HTX0L012BasH 0x0052
1422#define HC_SIMA_HTX0L345BasH 0x0053
1423#define HC_SIMA_HTX0L678BasH 0x0054
1424#define HC_SIMA_HTX0L9abBasH 0x0055
1425#define HC_SIMA_HTX0LcdeBasH 0x0056
1426#define HC_SIMA_HTX0Lf1011BasH 0x0057
1427#define HC_SIMA_HTX0L0Pit 0x0058
1428#define HC_SIMA_HTX0L1Pit 0x0059
1429#define HC_SIMA_HTX0L2Pit 0x005a
1430#define HC_SIMA_HTX0L3Pit 0x005b
1431#define HC_SIMA_HTX0L4Pit 0x005c
1432#define HC_SIMA_HTX0L5Pit 0x005d
1433#define HC_SIMA_HTX0L6Pit 0x005e
1434#define HC_SIMA_HTX0L7Pit 0x005f
1435#define HC_SIMA_HTX0L8Pit 0x0060
1436#define HC_SIMA_HTX0L9Pit 0x0061
1437#define HC_SIMA_HTX0LaPit 0x0062
1438#define HC_SIMA_HTX0LbPit 0x0063
1439#define HC_SIMA_HTX0LcPit 0x0064
1440#define HC_SIMA_HTX0LdPit 0x0065
1441#define HC_SIMA_HTX0LePit 0x0066
1442#define HC_SIMA_HTX0LfPit 0x0067
1443#define HC_SIMA_HTX0L10Pit 0x0068
1444#define HC_SIMA_HTX0L11Pit 0x0069
1445#define HC_SIMA_HTX0L0_5WE 0x006a
1446#define HC_SIMA_HTX0L6_bWE 0x006b
1447#define HC_SIMA_HTX0Lc_11WE 0x006c
1448#define HC_SIMA_HTX0L0_5HE 0x006d
1449#define HC_SIMA_HTX0L6_bHE 0x006e
1450#define HC_SIMA_HTX0Lc_11HE 0x006f
1451#define HC_SIMA_HTX0L0OS 0x0070
1452#define HC_SIMA_HTX0TB 0x0071
1453#define HC_SIMA_HTX0MPMD 0x0072
1454#define HC_SIMA_HTX0CLODu 0x0073
1455#define HC_SIMA_HTX0FM 0x0074
1456#define HC_SIMA_HTX0TRCH 0x0075
1457#define HC_SIMA_HTX0TRCL 0x0076
1458#define HC_SIMA_HTX0TBC 0x0077
1459#define HC_SIMA_HTX0TRAH 0x0078
1460#define HC_SIMA_HTX0TBLCsat 0x0079
1461#define HC_SIMA_HTX0TBLCop 0x007a
1462#define HC_SIMA_HTX0TBLMPfog 0x007b
1463#define HC_SIMA_HTX0TBLAsat 0x007c
1464#define HC_SIMA_HTX0TBLRCa 0x007d
1465#define HC_SIMA_HTX0TBLRCb 0x007e
1466#define HC_SIMA_HTX0TBLRCc 0x007f
1467#define HC_SIMA_HTX0TBLRCbias 0x0080
1468#define HC_SIMA_HTX0TBLRAa 0x0081
1469#define HC_SIMA_HTX0TBLRFog 0x0082
1470#define HC_SIMA_HTX0BumpM00 0x0083
1471#define HC_SIMA_HTX0BumpM01 0x0084
1472#define HC_SIMA_HTX0BumpM10 0x0085
1473#define HC_SIMA_HTX0BumpM11 0x0086
1474#define HC_SIMA_HTX0LScale 0x0087
1475/*---- end of texture 0 setting ---- 0x008f
1476 */
1477#define HC_SIMA_TX0TX1_OFF 0x0050
1478/*---- start of texture 1 setting ----
1479 */
1480#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
1481#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
1482#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
1483#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
1484#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
1485#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
1486#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
1487#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
1488#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
1489#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
1490#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
1491#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
1492#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
1493#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
1494#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
1495#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
1496#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
1497#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
1498#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
1499#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
1500#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
1501#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
1502#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
1503#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
1504#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
1505#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
1506#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
1507#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
1508#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
1509#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
1510#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
1511#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
1512#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
1513#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
1514#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
1515#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
1516#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
1517#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
1518#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
1519#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
1520#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
1521#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
1522#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
1523#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
1524#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
1525#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
1526#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
1527#define HC_SIMA_HTX1Lc_11HE (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
1528#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
1529#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
1530#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
1531#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
1532#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
1533#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
1534#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
1535#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
1536#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
1537#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
1538#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
1539#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
1540#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
1541#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
1542#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
1543#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
1544#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
1545#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
1546#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
1547#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
1548#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
1549#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
1550#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
1551#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
1552#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
1553#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
1554/*---- end of texture 1 setting ---- 0xaf
1555 */
1556#define HC_SIMA_HTXSMD 0x00b0
1557#define HC_SIMA_HenFIFOAT 0x00b1
1558#define HC_SIMA_HFBDrawFirst 0x00b2
1559#define HC_SIMA_HFBBasL 0x00b3
1560#define HC_SIMA_HTArbRCM 0x00b4
1561#define HC_SIMA_HTArbRZ 0x00b5
1562#define HC_SIMA_HTArbWZ 0x00b6
1563#define HC_SIMA_HTArbRTX 0x00b7
1564#define HC_SIMA_HTArbRCW 0x00b8
1565#define HC_SIMA_HTArbE2 0x00b9
1566#define HC_SIMA_HGEMITout 0x00ba
1567#define HC_SIMA_HFthRTXD 0x00bb
1568#define HC_SIMA_HFthRTXA 0x00bc
1569/* Define the texture palette 0
1570 */
1571#define HC_SIMA_HTP0 0x0100
1572#define HC_SIMA_HTP1 0x0200
1573#define HC_SIMA_FOGTABLE 0x0300
1574#define HC_SIMA_STIPPLE 0x0400
1575#define HC_SIMA_HE3Fire 0x0440
1576#define HC_SIMA_TRANS_SET 0x0441
1577#define HC_SIMA_HREngSt 0x0442
1578#define HC_SIMA_HRFIFOempty 0x0443
1579#define HC_SIMA_HRFIFOfull 0x0444
1580#define HC_SIMA_HRErr 0x0445
1581#define HC_SIMA_FIFOstatus 0x0446
1582
1583/******************************************************************************
1584** Define the AGP command header.
1585******************************************************************************/
1586#define HC_ACMD_MASK 0xfe000000
1587#define HC_ACMD_SUB_MASK 0x0c000000
1588#define HC_ACMD_HCmdA 0xee000000
1589#define HC_ACMD_HCmdB 0xec000000
1590#define HC_ACMD_HCmdC 0xea000000
1591#define HC_ACMD_H1 0xf0000000
1592#define HC_ACMD_H2 0xf2000000
1593#define HC_ACMD_H3 0xf4000000
1594#define HC_ACMD_H4 0xf6000000
1595
1596#define HC_ACMD_H1IO_MASK 0x000001ff
1597#define HC_ACMD_H2IO1_MASK 0x001ff000
1598#define HC_ACMD_H2IO2_MASK 0x000001ff
1599#define HC_ACMD_H2IO1_SHIFT 12
1600#define HC_ACMD_H2IO2_SHIFT 0
1601#define HC_ACMD_H3IO_MASK 0x000001ff
1602#define HC_ACMD_H3COUNT_MASK 0x01fff000
1603#define HC_ACMD_H3COUNT_SHIFT 12
1604#define HC_ACMD_H4ID_MASK 0x000001ff
1605#define HC_ACMD_H4COUNT_MASK 0x01fffe00
1606#define HC_ACMD_H4COUNT_SHIFT 9
1607
1608/********************************************************************************
1609** Define Header
1610********************************************************************************/
1611#define HC_HEADER2 0xF210F110
1612
1613/********************************************************************************
1614** Define Dummy Value
1615********************************************************************************/
1616#define HC_DUMMY 0xCCCCCCCC
1617/********************************************************************************
1618** Define for DMA use
1619********************************************************************************/
1620#define HALCYON_HEADER2 0XF210F110
1621#define HALCYON_FIRECMD 0XEE100000
1622#define HALCYON_FIREMASK 0XFFF00000
1623#define HALCYON_CMDB 0XEC000000
1624#define HALCYON_CMDBMASK 0XFFFE0000
1625#define HALCYON_SUB_ADDR0 0X00000000
1626#define HALCYON_HEADER1MASK 0XFFFFFC00
1627#define HALCYON_HEADER1 0XF0000000
1628#define HC_SubA_HAGPBstL 0x0060
1629#define HC_SubA_HAGPBendL 0x0061
1630#define HC_SubA_HAGPCMNT 0x0062
1631#define HC_SubA_HAGPBpL 0x0063
1632#define HC_SubA_HAGPBpH 0x0064
1633#define HC_HAGPCMNT_MASK 0x00800000
1634#define HC_HCmdErrClr_MASK 0x00400000
1635#define HC_HAGPBendH_MASK 0x0000ff00
1636#define HC_HAGPBstH_MASK 0x000000ff
1637#define HC_HAGPBendH_SHIFT 8
1638#define HC_HAGPBstH_SHIFT 0
1639#define HC_HAGPBpL_MASK 0x00fffffc
1640#define HC_HAGPBpID_MASK 0x00000003
1641#define HC_HAGPBpID_PAUSE 0x00000000
1642#define HC_HAGPBpID_JUMP 0x00000001
1643#define HC_HAGPBpID_STOP 0x00000002
1644#define HC_HAGPBpH_MASK 0x00ffffff
1645
1646#define VIA_VIDEO_HEADER5 0xFE040000
1647#define VIA_VIDEO_HEADER6 0xFE050000
1648#define VIA_VIDEO_HEADER7 0xFE060000
1649#define VIA_VIDEOMASK 0xFFFF0000
1650#endif
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
new file mode 100644
index 000000000000..7a339dba6a69
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -0,0 +1,755 @@
1/* via_dma.c -- DMA support for the VIA Unichrome/Pro
2 *
3 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
7 * All Rights Reserved.
8 *
9 * Copyright 2004 The Unichrome project.
10 * All Rights Reserved.
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a
13 * copy of this software and associated documentation files (the "Software"),
14 * to deal in the Software without restriction, including without limitation
15 * the rights to use, copy, modify, merge, publish, distribute, sub license,
16 * and/or sell copies of the Software, and to permit persons to whom the
17 * Software is furnished to do so, subject to the following conditions:
18 *
19 * The above copyright notice and this permission notice (including the
20 * next paragraph) shall be included in all copies or substantial portions
21 * of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
26 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
27 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
28 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
29 * USE OR OTHER DEALINGS IN THE SOFTWARE.
30 *
31 * Authors:
32 * Tungsten Graphics,
33 * Erdi Chen,
34 * Thomas Hellstrom.
35 */
36
37#include "drmP.h"
38#include "drm.h"
39#include "via_drm.h"
40#include "via_drv.h"
41#include "via_3d_reg.h"
42
43#define CMDBUF_ALIGNMENT_SIZE (0x100)
44#define CMDBUF_ALIGNMENT_MASK (0x0ff)
45
46/* defines for VIA 3D registers */
47#define VIA_REG_STATUS 0x400
48#define VIA_REG_TRANSET 0x43C
49#define VIA_REG_TRANSPACE 0x440
50
51/* VIA_REG_STATUS(0x400): Engine Status */
52#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
53#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
54#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
55#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
56
/*
 * Emit "set register" commands into the ring buffer.
 *
 * Both macros expand to multiple statements, so they are wrapped in
 * do { } while (0) to behave as a single statement — without this,
 * using them under an unbraced if/else or loop silently executes only
 * the first statement conditionally (CERT PRE10-C).
 *
 * They rely on a local uint32_t *vb write cursor and on dev_priv being
 * in scope, and advance dev_priv->dma_low by the 8 bytes (one
 * quadword) they emit.
 */
#define SetReg2DAGP(nReg, nData) do {				\
	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
	*((uint32_t *)(vb) + 1) = (nData);			\
	vb = ((uint32_t *)vb) + 2;				\
	dev_priv->dma_low += 8;					\
} while (0)

/* Flush CPU write-combining buffers before the chip reads the ring. */
#define via_flush_write_combine() DRM_MEMORYBARRIER()

/* Write one quadword (two 32-bit words) at the vb cursor. */
#define VIA_OUT_RING_QW(w1, w2) do {	\
	*vb++ = (w1);			\
	*vb++ = (w2);			\
	dev_priv->dma_low += 8;		\
} while (0)
70
71static void via_cmdbuf_start(drm_via_private_t * dev_priv);
72static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
73static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
74static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
75static int via_wait_idle(drm_via_private_t * dev_priv);
76static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
77
78/*
79 * Free space in command buffer.
80 */
81
82static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
83{
84 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
85 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
86
87 return ((hw_addr <= dev_priv->dma_low) ?
88 (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
89 (hw_addr - dev_priv->dma_low));
90}
91
92/*
93 * How much does the command regulator lag behind?
94 */
95
96static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
97{
98 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
99 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
100
101 return ((hw_addr <= dev_priv->dma_low) ?
102 (dev_priv->dma_low - hw_addr) :
103 (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
104}
105
106/*
107 * Check that the given size fits in the buffer, otherwise wait.
108 */
109
110static inline int
111via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
112{
113 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
114 uint32_t cur_addr, hw_addr, next_addr;
115 volatile uint32_t *hw_addr_ptr;
116 uint32_t count;
117 hw_addr_ptr = dev_priv->hw_addr_ptr;
118 cur_addr = dev_priv->dma_low;
119 next_addr = cur_addr + size + 512 * 1024;
120 count = 1000000;
121 do {
122 hw_addr = *hw_addr_ptr - agp_base;
123 if (count-- == 0) {
124 DRM_ERROR
125 ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
126 hw_addr, cur_addr, next_addr);
127 return -1;
128 }
129 if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
130 msleep(1);
131 } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
132 return 0;
133}
134
135/*
136 * Checks whether buffer head has reach the end. Rewind the ring buffer
137 * when necessary.
138 *
139 * Returns virtual pointer to ring buffer.
140 */
141
142static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
143 unsigned int size)
144{
145 if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
146 dev_priv->dma_high) {
147 via_cmdbuf_rewind(dev_priv);
148 }
149 if (via_cmdbuf_wait(dev_priv, size) != 0) {
150 return NULL;
151 }
152
153 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
154}
155
156int via_dma_cleanup(struct drm_device * dev)
157{
158 if (dev->dev_private) {
159 drm_via_private_t *dev_priv =
160 (drm_via_private_t *) dev->dev_private;
161
162 if (dev_priv->ring.virtual_start) {
163 via_cmdbuf_reset(dev_priv);
164
165 drm_core_ioremapfree(&dev_priv->ring.map, dev);
166 dev_priv->ring.virtual_start = NULL;
167 }
168
169 }
170
171 return 0;
172}
173
174static int via_initialize(struct drm_device * dev,
175 drm_via_private_t * dev_priv,
176 drm_via_dma_init_t * init)
177{
178 if (!dev_priv || !dev_priv->mmio) {
179 DRM_ERROR("via_dma_init called before via_map_init\n");
180 return -EFAULT;
181 }
182
183 if (dev_priv->ring.virtual_start != NULL) {
184 DRM_ERROR("called again without calling cleanup\n");
185 return -EFAULT;
186 }
187
188 if (!dev->agp || !dev->agp->base) {
189 DRM_ERROR("called with no agp memory available\n");
190 return -EFAULT;
191 }
192
193 if (dev_priv->chipset == VIA_DX9_0) {
194 DRM_ERROR("AGP DMA is not supported on this chip\n");
195 return -EINVAL;
196 }
197
198 dev_priv->ring.map.offset = dev->agp->base + init->offset;
199 dev_priv->ring.map.size = init->size;
200 dev_priv->ring.map.type = 0;
201 dev_priv->ring.map.flags = 0;
202 dev_priv->ring.map.mtrr = 0;
203
204 drm_core_ioremap(&dev_priv->ring.map, dev);
205
206 if (dev_priv->ring.map.handle == NULL) {
207 via_dma_cleanup(dev);
208 DRM_ERROR("can not ioremap virtual address for"
209 " ring buffer\n");
210 return -ENOMEM;
211 }
212
213 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
214
215 dev_priv->dma_ptr = dev_priv->ring.virtual_start;
216 dev_priv->dma_low = 0;
217 dev_priv->dma_high = init->size;
218 dev_priv->dma_wrap = init->size;
219 dev_priv->dma_offset = init->offset;
220 dev_priv->last_pause_ptr = NULL;
221 dev_priv->hw_addr_ptr =
222 (volatile uint32_t *)((char *)dev_priv->mmio->handle +
223 init->reg_pause_addr);
224
225 via_cmdbuf_start(dev_priv);
226
227 return 0;
228}
229
230static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
231{
232 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
233 drm_via_dma_init_t *init = data;
234 int retcode = 0;
235
236 switch (init->func) {
237 case VIA_INIT_DMA:
238 if (!DRM_SUSER(DRM_CURPROC))
239 retcode = -EPERM;
240 else
241 retcode = via_initialize(dev, dev_priv, init);
242 break;
243 case VIA_CLEANUP_DMA:
244 if (!DRM_SUSER(DRM_CURPROC))
245 retcode = -EPERM;
246 else
247 retcode = via_dma_cleanup(dev);
248 break;
249 case VIA_DMA_INITIALIZED:
250 retcode = (dev_priv->ring.virtual_start != NULL) ?
251 0 : -EFAULT;
252 break;
253 default:
254 retcode = -EINVAL;
255 break;
256 }
257
258 return retcode;
259}
260
261static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
262{
263 drm_via_private_t *dev_priv;
264 uint32_t *vb;
265 int ret;
266
267 dev_priv = (drm_via_private_t *) dev->dev_private;
268
269 if (dev_priv->ring.virtual_start == NULL) {
270 DRM_ERROR("called without initializing AGP ring buffer.\n");
271 return -EFAULT;
272 }
273
274 if (cmd->size > VIA_PCI_BUF_SIZE) {
275 return -ENOMEM;
276 }
277
278 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
279 return -EFAULT;
280
281 /*
282 * Running this function on AGP memory is dead slow. Therefore
283 * we run it on a temporary cacheable system memory buffer and
284 * copy it to AGP memory when ready.
285 */
286
287 if ((ret =
288 via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
289 cmd->size, dev, 1))) {
290 return ret;
291 }
292
293 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
294 if (vb == NULL) {
295 return -EAGAIN;
296 }
297
298 memcpy(vb, dev_priv->pci_buf, cmd->size);
299
300 dev_priv->dma_low += cmd->size;
301
302 /*
303 * Small submissions somehow stalls the CPU. (AGP cache effects?)
304 * pad to greater size.
305 */
306
307 if (cmd->size < 0x100)
308 via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
309 via_cmdbuf_pause(dev_priv);
310
311 return 0;
312}
313
314int via_driver_dma_quiescent(struct drm_device * dev)
315{
316 drm_via_private_t *dev_priv = dev->dev_private;
317
318 if (!via_wait_idle(dev_priv)) {
319 return -EBUSY;
320 }
321 return 0;
322}
323
/* DRM_IOCTL_VIA_FLUSH handler: wait for engine idle under the lock. */
static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return via_driver_dma_quiescent(dev);
}
331
332static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
333{
334 drm_via_cmdbuffer_t *cmdbuf = data;
335 int ret;
336
337 LOCK_TEST_WITH_RETURN(dev, file_priv);
338
339 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
340
341 ret = via_dispatch_cmdbuffer(dev, cmdbuf);
342 if (ret) {
343 return ret;
344 }
345
346 return 0;
347}
348
349static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
350 drm_via_cmdbuffer_t * cmd)
351{
352 drm_via_private_t *dev_priv = dev->dev_private;
353 int ret;
354
355 if (cmd->size > VIA_PCI_BUF_SIZE) {
356 return -ENOMEM;
357 }
358 if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
359 return -EFAULT;
360
361 if ((ret =
362 via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
363 cmd->size, dev, 0))) {
364 return ret;
365 }
366
367 ret =
368 via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
369 cmd->size);
370 return ret;
371}
372
373static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
374{
375 drm_via_cmdbuffer_t *cmdbuf = data;
376 int ret;
377
378 LOCK_TEST_WITH_RETURN(dev, file_priv);
379
380 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
381
382 ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
383 if (ret) {
384 return ret;
385 }
386
387 return 0;
388}
389
/*
 * Pad the ring buffer with @qw_count dummy quadwords.
 *
 * NOTE(review): VIA_OUT_RING_QW appears to expand in terms of the local
 * variables 'vb' and 'dev_priv' (vb is returned advanced) -- confirm in
 * via_drv.h; these local names must not be changed.
 */
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
					 uint32_t * vb, int qw_count)
{
	for (; qw_count > 0; --qw_count) {
		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
	}
	return vb;
}
398
399/*
400 * This function is used internally by ring buffer management code.
401 *
402 * Returns virtual pointer to ring buffer.
403 */
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
	/* dma_low is the byte offset of the current write position. */
	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
408
409/*
410 * Hooks a segment of data into the tail of the ring-buffer by
411 * modifying the pause address stored in the buffer itself. If
412 * the regulator has already paused, restart it.
413 */
/*
 * Hook the newly written segment into the ring by overwriting the
 * previously stored pause address with @pause_addr_lo, then verify the
 * command regulator did not race past it. Returns nonzero if the
 * regulator is currently paused.
 *
 * The volatile reads after each via_flush_write_combine() force the
 * write-combined stores out to memory before the hardware can observe
 * them.
 */
static int via_hook_segment(drm_via_private_t * dev_priv,
			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
			    int no_pci_fire)
{
	int paused, count;
	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
	uint32_t reader,ptr;
	uint32_t diff;

	paused = 0;
	via_flush_write_combine();
	/* Read back the last quadword written to flush it out. */
	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);

	/* Patch the old pause slot so the regulator runs on into the
	 * new segment. */
	*paused_at = pause_addr_lo;
	via_flush_write_combine();
	(void) *paused_at;

	reader = *(dev_priv->hw_addr_ptr);
	/* Bus address just past the patched pause word. */
	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/* The word before the current write position holds the next
	 * pause address to patch on the following call. */
	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

	/*
	 * If there is a possibility that the command reader will
	 * miss the new pause address and pause on the old one,
	 * In that case we need to program the new start address
	 * using PCI.
	 */

	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	count = 10000000;
	/* While the reader sits exactly at the patched pause word, poll
	 * until it either moves on or reports paused (0x41c bit 31). */
	while(diff == 0 && count--) {
		paused = (VIA_READ(0x41c) & 0x80000000);
		if (paused)
			break;
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	}

	paused = VIA_READ(0x41c) & 0x80000000;

	if (paused && !no_pci_fire) {
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
		diff &= (dev_priv->dma_high - 1);
		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
			DRM_ERROR("Paused at incorrect address. "
				  "0x%08x, 0x%08x 0x%08x\n",
				  ptr, reader, dev_priv->dma_diff);
		} else if (diff == 0) {
			/*
			 * There is a concern that these writes may stall the PCI bus
			 * if the GPU is not idle. However, idling the GPU first
			 * doesn't make a difference.
			 */

			/* Re-fire the new pause address over PCI so the
			 * regulator restarts from the new segment. */
			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
			VIA_READ(VIA_REG_TRANSPACE);
		}
	}
	return paused;
}
479
480static int via_wait_idle(drm_via_private_t * dev_priv)
481{
482 int count = 10000000;
483
484 while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
485
486 while (count-- && (VIA_READ(VIA_REG_STATUS) &
487 (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
488 VIA_3D_ENG_BUSY))) ;
489 return count;
490}
491
/*
 * Emit a regulator command (pause / jump / stop, selected by @cmd_type)
 * padded so that the ring write position ends up CMDBUF_ALIGNMENT_SIZE
 * aligned. The encoded high/low address words are returned through
 * @cmd_addr_hi / @cmd_addr_lo, and the command itself targets @addr,
 * or - when @addr is 0 - the ring position just after the padding.
 *
 * Returns the ring pointer past the emitted command. The returned
 * pointer minus one is where callers store / patch the pause address.
 */
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
			       uint32_t addr, uint32_t * cmd_addr_hi,
			       uint32_t * cmd_addr_lo, int skip_wait)
{
	uint32_t agp_base;
	uint32_t cmd_addr, addr_lo, addr_hi;
	uint32_t *vb;
	uint32_t qw_pad_count;

	if (!skip_wait)
		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	/* Quadwords needed to reach the next alignment boundary. */
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
		((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

	cmd_addr = (addr) ? addr :
		agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
		   (cmd_addr & HC_HAGPBpL_MASK));
	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

	/* Pad, then emit the command as the final quadword. */
	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
	return vb;
}
521
/*
 * Program the AGP command regulator with the ring's start / end / pause
 * addresses and kick it off, then measure dma_diff: the offset between
 * where we told the regulator to pause and where it actually reports
 * pausing (this differs between hardware revisions).
 */
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t start_addr, start_addr_lo;
	uint32_t end_addr, end_addr_lo;
	uint32_t command;
	uint32_t agp_base;
	uint32_t ptr;
	uint32_t reader;
	int count;

	dev_priv->dma_low = 0;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	start_addr = agp_base;
	end_addr = agp_base + dev_priv->dma_high;

	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
		   ((end_addr & 0xff000000) >> 16));

	/* Emit an initial pause command; the word before it is where
	 * via_hook_segment() will patch in the next pause address. */
	dev_priv->last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
			  &pause_addr_hi, &pause_addr_lo, 1) - 1;

	via_flush_write_combine();
	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
	VIA_WRITE(VIA_REG_TRANSPACE, command);
	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
	/* Ensure all setup writes land before starting the regulator. */
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
	VIA_READ(VIA_REG_TRANSPACE);

	dev_priv->dma_diff = 0;

	/* Wait for the regulator to reach the pause (0x41c bit 31). */
	count = 10000000;
	while (!(VIA_READ(0x41c) & 0x80000000) && count--);

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/*
	 * This is the difference between where we tell the
	 * command reader to pause and where it actually pauses.
	 * This differs between hw implementation so we need to
	 * detect it.
	 */

	dev_priv->dma_diff = ptr - reader;
}
580
/*
 * Pad the ring with @qwords dummy quadwords preceded by a NotTex
 * parameter-type header (used to flush small AGP submissions; see
 * via_dispatch_cmdbuffer).
 */
static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
{
	uint32_t *vb;

	via_cmdbuf_wait(dev_priv, qwords + 2);
	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
	via_align_buffer(dev_priv, vb, qwords);
}
590
/*
 * Queue a no-op 2D bitblt via AGP (zero source/destination positions).
 * Used by via_cmdbuf_jump() before re-hooking the ring after a wrap.
 * NOTE(review): SetReg2DAGP presumably expands using the local 'vb'
 * (and 'dev_priv') -- confirm in via_drv.h; do not rename them.
 */
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
	uint32_t *vb = via_get_dma(dev_priv);
	SetReg2DAGP(0x0C, (0 | (0 << 16)));
	SetReg2DAGP(0x10, 0 | (0 << 16));
	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
598
/*
 * Wrap the ring buffer: emit a jump command at the current position,
 * reset the write pointer to the start, and hook two pause traps so the
 * regulator cannot rerun stale data (see the long comment below).
 */
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
	uint32_t agp_base;
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t jump_addr_lo, jump_addr_hi;
	volatile uint32_t *last_pause_ptr;
	uint32_t dma_low_save1, dma_low_save2;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
		      &jump_addr_lo, 0);

	/* Remember where the ring wrapped (for via_cmdbuf_lag etc.). */
	dev_priv->dma_wrap = dev_priv->dma_low;

	/*
	 * Wrap command buffer to the beginning.
	 */

	dev_priv->dma_low = 0;
	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
		DRM_ERROR("via_cmdbuf_jump failed\n");
	}

	via_dummy_bitblt(dev_priv);
	via_dummy_bitblt(dev_priv);

	last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			  &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);

	*last_pause_ptr = pause_addr_lo;
	dma_low_save1 = dev_priv->dma_low;

	/*
	 * Now, set a trap that will pause the regulator if it tries to rerun
	 * the old command buffer. (This may happen if via_hook_segment detects
	 * a command regulator pause and reissues the jump command over PCI,
	 * while the regulator has already taken the jump and actually paused
	 * at the current buffer end.)
	 * There appears to be no other way to detect this condition, since the
	 * hw_addr_pointer does not seem to get updated immediately when a jump
	 * occurs.
	 */

	last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			  &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);
	*last_pause_ptr = pause_addr_lo;

	/* Hook the jump at the first pause position, then restore the
	 * write pointer and hook the trailing pause. */
	dma_low_save2 = dev_priv->dma_low;
	dev_priv->dma_low = dma_low_save1;
	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
	dev_priv->dma_low = dma_low_save2;
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
656
657
/* Wrap the ring back to its start (alias for via_cmdbuf_jump). */
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
	via_cmdbuf_jump(dev_priv);
}
662
/*
 * Emit a regulator command of @cmd_type (pause or stop) and hook it
 * into the ring so the hardware consumes everything queued so far.
 */
static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
	uint32_t pause_addr_lo, pause_addr_hi;

	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
670
/* Flush the ring and leave the regulator paused at its tail. */
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
675
/* Stop the regulator and wait for the engines to drain. */
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
	via_wait_idle(dev_priv);
}
681
682/*
683 * User interface to the space and lag functions.
684 */
685
/*
 * DRM_VIA_CMDBUF_SIZE ioctl: report (and optionally busy-wait for) the
 * free space in the ring (VIA_CMDBUF_SPACE) or how far the hardware
 * lags behind the write pointer (VIA_CMDBUF_LAG). The measured value is
 * returned to userspace through d_siz->size; waiting is bounded by a
 * fixed poll budget and fails with -EAGAIN on timeout.
 */
static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuf_size_t *d_siz = data;
	int ret = 0;
	uint32_t tmp_size, count;
	drm_via_private_t *dev_priv;

	DRM_DEBUG("\n");
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

	count = 1000000;
	tmp_size = d_siz->size;
	switch (d_siz->func) {
	case VIA_CMDBUF_SPACE:
		/* Poll until at least d_siz->size bytes are free, or
		 * bail immediately when the caller did not ask to wait. */
		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
		       && count--) {
			if (!d_siz->wait) {
				break;
			}
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
			ret = -EAGAIN;
		}
		break;
	case VIA_CMDBUF_LAG:
		/* Poll until the hardware lag drops below d_siz->size. */
		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
		       && count--) {
			if (!d_siz->wait) {
				break;
			}
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
			ret = -EAGAIN;
		}
		break;
	default:
		ret = -EFAULT;
	}
	/* Always report the last measured value back to userspace. */
	d_siz->size = tmp_size;

	return ret;
}
737
/*
 * Ioctl dispatch table for the VIA driver. All entries require DRM
 * authentication; the init-type ioctls additionally require master.
 */
struct drm_ioctl_desc via_ioctls[] = {
	DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};

int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
new file mode 100644
index 000000000000..409e00afdd07
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -0,0 +1,816 @@
1/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Thomas Hellstrom.
26 * Partially based on code obtained from Digeo Inc.
27 */
28
29
30/*
31 * Unmaps the DMA mappings.
32 * FIXME: Is this a NoOp on x86? Also
33 * FIXME: What happens if this one is called and a pending blit has previously done
34 * the same DMA mappings?
35 */
36
37#include "drmP.h"
38#include "via_drm.h"
39#include "via_drv.h"
40#include "via_dmablit.h"
41
42#include <linux/pagemap.h>
43
44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
46#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
47
/*
 * Hardware descriptor for one chunk of a PCI DMA blit. Descriptors are
 * chained through 'next' (bus address of the following descriptor; the
 * chain head is vsg->chain_start).
 */
typedef struct _drm_via_descriptor {
	uint32_t mem_addr;	/* bus address of the system-memory chunk */
	uint32_t dev_addr;	/* frame-buffer address of the chunk */
	uint32_t size;		/* chunk length in bytes */
	uint32_t next;		/* bus address of the next descriptor */
} drm_via_descriptor_t;
54
55
56/*
57 * Unmap a DMA mapping.
58 */
59
60
61
/*
 * Undo the DMA mappings set up by via_map_blit_for_device(mode == 1):
 * walk the descriptor chain backwards (it was built in reverse) and
 * unmap each descriptor and the page chunk it points at.
 */
static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while(num_desc--) {
		/* Step back to the previous descriptor page when the
		 * current one is exhausted. */
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}
85
86/*
87 * If mode = 0, count how many descriptors are needed.
88 * If mode = 1, Map the DMA pages for the device, put together and map also the descriptors.
89 * Descriptors are run in reverse order by the hardware because we are not allowed to update the
90 * 'next' field without syncing calls when the descriptor is already mapped.
91 */
92
/*
 * Walk the blit rectangle line by line, splitting each line at page
 * boundaries into descriptor-sized chunks. With mode == 0 only the
 * number of descriptors needed is counted (stored in vsg->num_desc);
 * with mode == 1 the page chunks and the descriptors themselves are
 * DMA-mapped and chained (head left in vsg->chain_start). See the
 * comment above for why the chain is built in reverse.
 */
static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg,
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	/* The first descriptor written terminates the chain (EC bit). */
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			/* Chunk ends at the next page boundary or end
			 * of line, whichever comes first. */
			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;

				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				/* Map the descriptor itself; its bus
				 * address becomes the next link. */
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
162
163/*
164 * Function that frees up all resources for a blit. It is usable even if the
165 * blit info has only been partially built as long as the status enum is consistent
166 * with the actual status of the used resources.
167 */
168
169
/*
 * Release all resources for a blit, driven by how far setup got
 * (vsg->state). Each case deliberately falls through to also release
 * the resources acquired in earlier states.
 */
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch(vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i=0; i<vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
			  free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i=0; i<vsg->num_pages; ++i) {
			if ( NULL != (page = vsg->pages[i])) {
				/* Pages DMA'd from the device may have
				 * been written to; mark them dirty. */
				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}
204
205/*
206 * Fire a blit engine.
207 */
208
/*
 * Program one of the PCI DMA blit engines with the descriptor chain in
 * @vsg and start the transfer. Caller must hold blitq->blit_lock; the
 * barrier ensures setup registers land before the start bit is set.
 */
static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	/* Clear stale done/abort status before enabling. */
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
225
226/*
227 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
228 * occur here if the calling user does not have access to the submitted address.
229 */
230
231static int
232via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
233{
234 int ret;
235 unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
236 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
237 first_pfn + 1;
238
239 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
240 return -ENOMEM;
241 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
242 down_read(&current->mm->mmap_sem);
243 ret = get_user_pages(current, current->mm,
244 (unsigned long)xfer->mem_addr,
245 vsg->num_pages,
246 (vsg->direction == DMA_FROM_DEVICE),
247 0, vsg->pages, NULL);
248
249 up_read(&current->mm->mmap_sem);
250 if (ret != vsg->num_pages) {
251 if (ret < 0)
252 return ret;
253 vsg->state = dr_via_pages_locked;
254 return -EINVAL;
255 }
256 vsg->state = dr_via_pages_locked;
257 DRM_DEBUG("DMA pages locked\n");
258 return 0;
259}
260
261/*
262 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
263 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
264 * quite large for some blits, and pages don't need to be contingous.
265 */
266
/*
 * Allocate one free page at a time for the descriptor chain (see the
 * comment above: kmalloc is avoided since the chain may be large and
 * the pages need not be contiguous). vsg->state is advanced before the
 * loop so a partial allocation is cleaned up by via_free_sg_info().
 */
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL ==  (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
		return -ENOMEM;

	vsg->state = dr_via_desc_pages_alloc;
	for (i=0; i<vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return -ENOMEM;
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}
289
/* Request abort of the transfer running on @engine (TA bit). */
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}
297
/* Disable @engine and clear its done status (TD | DD). */
static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
305
306
307
308/*
309 * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
310 * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
311 * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
312 * the workqueue task takes care of processing associated with the old blit.
313 */
314
/*
 * Advance the blit queue state machine for @engine: retire a finished
 * (or aborted) transfer, abort one that exceeded its one-second budget,
 * and fire the next queued blit. Callable from the IRQ handler
 * (@from_irq nonzero, IRQs already off -> plain spin_lock) or from
 * process/timer context (spin_lock_irqsave).
 */
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave=0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	/* A transfer is done when the hardware reports TD, or when an
	 * abort completed (engine-enable bit dropped). */
	done_transfer = blitq->is_active &&
	  (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		/* Retire the slot and wake its sync waiters. */
		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		/* Unmapping / freeing happens in the workqueue task. */
		schedule_work(&blitq->wq);

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			/* Fire the next queued blit and arm the
			 * polling timer as an IRQ fallback. */
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer))
				mod_timer(&blitq->poll_timer, jiffies + 1);
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}
394
395
396
397/*
398 * Check whether this blit is still active, performing necessary locking.
399 */
400
/*
 * Check whether the blit identified by @handle is still pending,
 * tolerant of 32-bit handle wraparound (the 1 << 23 window). If
 * @queue is non-NULL and the blit is active, also return the wait
 * queue a caller should sleep on.
 */
static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		/* Map the handle back onto its ring slot. */
		slot = handle - blitq->done_blit_handle + blitq->cur -1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
429
430/*
431 * Sync. Wait for at least three seconds for the blit to be performed.
432 */
433
/*
 * Wait up to three seconds for blit @handle on @engine to complete.
 * Returns 0 on completion, or the DRM_WAIT_ON error (e.g. timeout or
 * interrupted sleep).
 */
static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}
452
453
454/*
455 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
456 * a) Broken hardware (typically those that don't have any video capture facility).
457 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
458 * The timer and hardware IRQ's can and do work in parallel. If the hardware has
459 * irqs, it will shorten the latency somewhat.
460 */
461
462
463
/*
 * Polling-timer callback (see the comment block above): drive the blit
 * state machine when interrupts are unavailable or after an abort.
 * @data is the blitq pointer; the engine index is recovered from its
 * position in the per-device blit_queues array.
 */
static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	struct drm_device *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		mod_timer(&blitq->poll_timer, jiffies + 1);

	       /*
		* Rerun handler to delete timer if engines are off, and
		* to shorten abort latency. This is a little nasty.
		*/

	       via_dmablit_handler(dev, engine, 0);

	}
}
489
490
491
492
493/*
494 * Workqueue task that frees data and mappings associated with a blit.
495 * Also wakes up waiting processes. Each of these tasks handles one
496 * blit engine only and may not be called on each interrupt.
497 */
498
499
/*
 * Workqueue task (see comment above): release every blit slot retired
 * by the IRQ/timer path since the last run -- free its DMA mappings,
 * return the slot, and wake submitters blocked on a free slot. The
 * lock is dropped around the (sleeping) free/unmap work.
 */
static void
via_dmablit_workqueue(struct work_struct *work)
{
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
	struct drm_device *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;


	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while(blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		/* via_free_sg_info() may sleep; drop the lock. */
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
539
540
541/*
542 * Init all blit engines. Currently we use two, but some hardware have 4.
543 */
544
545
/*
 * Initialize every blit queue: reset slot bookkeeping, init the
 * per-slot and busy wait queues, the retire workqueue task, and the
 * polling timer. Also enables PCI bus mastering for the device.
 */
void
via_init_dmablit(struct drm_device *dev)
{
	int i,j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		/* One slot is kept in reserve. */
		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
		setup_timer(&blitq->poll_timer, via_dmablit_timer,
				(unsigned long)blitq);
	}
}
577
578/*
579 * Build all info and do all mappings required for a blit.
580 */
581
582
/*
 * Validate the user-supplied blit description, pin the user pages, and
 * build + map the descriptor chain (two-pass: count, then map). On any
 * failure all partially acquired resources are released through
 * via_free_sg_info(). Returns 0 on success or a negative errno.
 */
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return -EINVAL;
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate
	 * the extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return -EINVAL;
	}

	/* Collapse a fully contiguous transfer into one long line. */
	if ((xfer->mem_stride == xfer->line_length) &&
	   (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrary large number of pages, since that causes a
	 * DOS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return -EINVAL;
	}

	/*
	 * we allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return -EINVAL;
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) ||
	      ((unsigned long)xfer->fb_addr & 3)) ||
	   ((xfer->num_lines > 1) &&
	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return -EINVAL;
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	/* Pass 0: count descriptors; then allocate and map them. */
	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}
679
680
681/*
682 * Reserve one free slot in the blit queue. Will wait for one second for one
683 * to become available. Otherwise -EBUSY is returned.
684 */
685
686static int
687via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
688{
689 int ret=0;
690 unsigned long irqsave;
691
692 DRM_DEBUG("Num free is %d\n", blitq->num_free);
693 spin_lock_irqsave(&blitq->blit_lock, irqsave);
694 while(blitq->num_free == 0) {
695 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
696
697 DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
698 if (ret) {
699 return (-EINTR == ret) ? -EAGAIN : ret;
700 }
701
702 spin_lock_irqsave(&blitq->blit_lock, irqsave);
703 }
704
705 blitq->num_free--;
706 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
707
708 return 0;
709}
710
711/*
712 * Hand back a free slot if we changed our mind.
713 */
714
715static void
716via_dmablit_release_slot(drm_via_blitq_t *blitq)
717{
718 unsigned long irqsave;
719
720 spin_lock_irqsave(&blitq->blit_lock, irqsave);
721 blitq->num_free++;
722 spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
723 DRM_WAKEUP( &blitq->busy_queue );
724}
725
/*
 * Queue a blit: reserve a slot on the target engine's queue, build the
 * scatter-gather info and DMA descriptors for the transfer, publish the
 * blit on the queue under the queue lock, and kick the handler so the
 * engine starts if it is idle. On failure the reserved slot is handed
 * back and any partially built state is freed.
 */


static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
        drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
        drm_via_sg_info_t *vsg;
        drm_via_blitq_t *blitq;
        int ret;
        int engine;
        unsigned long irqsave;

        if (dev_priv == NULL) {
                DRM_ERROR("Called without initialization.\n");
                return -EINVAL;
        }

        /* Engine 0 handles transfers to the frame buffer, engine 1 from it. */
        engine = (xfer->to_fb) ? 0 : 1;
        blitq = dev_priv->blit_queues + engine;
        if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
                return ret;
        }
        if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
                via_dmablit_release_slot(blitq);
                return -ENOMEM;
        }
        if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
                via_dmablit_release_slot(blitq);
                kfree(vsg);
                return ret;
        }
        spin_lock_irqsave(&blitq->blit_lock, irqsave);

        /* Publish the blit on the ring under the lock; the handle returned
         * to userspace is the monotonically increasing cur_blit_handle. */
        blitq->blits[blitq->head++] = vsg;
        if (blitq->head >= VIA_NUM_BLIT_SLOTS)
                blitq->head = 0;
        blitq->num_outstanding++;
        xfer->sync.sync_handle = ++blitq->cur_blit_handle;

        spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
        xfer->sync.engine = engine;

        /* Kick the engine; from_irq == 0 since we are in process context. */
        via_dmablit_handler(dev, engine, 0);

        return 0;
}
775
776/*
777 * Sync on a previously submitted blit. Note that the X server use signals extensively, and
778 * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
779 * case it returns with -EAGAIN for the signal to be delivered.
780 * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
781 */
782
783int
784via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
785{
786 drm_via_blitsync_t *sync = data;
787 int err;
788
789 if (sync->engine >= VIA_NUM_BLIT_ENGINES)
790 return -EINVAL;
791
792 err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
793
794 if (-EINTR == err)
795 err = -EAGAIN;
796
797 return err;
798}
799
800
/*
 * IOCTL: queue a blit and hand back a handle (in xfer->sync) for the sync
 * IOCTL above. May return -EAGAIN if interrupted by a signal while waiting
 * for a free queue slot, in which case it should be reissued.
 */

int
via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        return via_dmablit(dev, data);
}
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
new file mode 100644
index 000000000000..7408a547a036
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dmablit.h
@@ -0,0 +1,140 @@
1/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
2 *
3 * Copyright 2005 Thomas Hellstrom.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Thomas Hellstrom.
27 * Register info from Digeo Inc.
28 */
29
30#ifndef _VIA_DMABLIT_H
31#define _VIA_DMABLIT_H
32
33#include <linux/dma-mapping.h>
34
35#define VIA_NUM_BLIT_ENGINES 2
36#define VIA_NUM_BLIT_SLOTS 8
37
38struct _drm_via_descriptor;
39
40typedef struct _drm_via_sg_info {
41 struct page **pages;
42 unsigned long num_pages;
43 struct _drm_via_descriptor **desc_pages;
44 int num_desc_pages;
45 int num_desc;
46 enum dma_data_direction direction;
47 unsigned char *bounce_buffer;
48 dma_addr_t chain_start;
49 uint32_t free_on_sequence;
50 unsigned int descriptors_per_page;
51 int aborted;
52 enum {
53 dr_via_device_mapped,
54 dr_via_desc_pages_alloc,
55 dr_via_pages_locked,
56 dr_via_pages_alloc,
57 dr_via_sg_init
58 } state;
59} drm_via_sg_info_t;
60
61typedef struct _drm_via_blitq {
62 struct drm_device *dev;
63 uint32_t cur_blit_handle;
64 uint32_t done_blit_handle;
65 unsigned serviced;
66 unsigned head;
67 unsigned cur;
68 unsigned num_free;
69 unsigned num_outstanding;
70 unsigned long end;
71 int aborting;
72 int is_active;
73 drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
74 spinlock_t blit_lock;
75 wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
76 wait_queue_head_t busy_queue;
77 struct work_struct wq;
78 struct timer_list poll_timer;
79} drm_via_blitq_t;
80
81
82/*
83 * PCI DMA Registers
84 * Channels 2 & 3 don't seem to be implemented in hardware.
85 */
86
87#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
88#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
89#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
90#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
91
92#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
93#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
94#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
95#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
96
97#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
98#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
99#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
100#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
101
102#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
103#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
104#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
105#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
106
107#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
108#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
109#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
110#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
111
112#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
113#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
114#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
115#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
116
117#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
118
119/* Define for DMA engine */
120/* DPR */
121#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
122#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
123#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
124
125/* MR */
126#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
127#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
128#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
129
130/* CSR */
131#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
132#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
133#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
134#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
135#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
136#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
137
138
139
140#endif
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
new file mode 100644
index 000000000000..80c01cdfa37d
--- /dev/null
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "via_drm.h"
27#include "via_drv.h"
28
29#include "drm_pciids.h"
30
/* Report the userspace DRI library name ("unichrome") to the DRM core. */
static int dri_library_name(struct drm_device *dev, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "unichrome");
}
35
/* PCI IDs this driver binds to; viadrv_PCI_IDS is expanded from drm_pciids.h. */
static struct pci_device_id pciidlist[] = {
	viadrv_PCI_IDS
};
39
/*
 * DRM driver description: feature flags, core callbacks, file operations,
 * and PCI binding for the VIA Unichrome / Pro.
 */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
	    DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
	.load = via_driver_load,
	.unload = via_driver_unload,
	.context_dtor = via_final_context,
	.vblank_wait = via_driver_vblank_wait,
	.irq_preinstall = via_driver_irq_preinstall,
	.irq_postinstall = via_driver_irq_postinstall,
	.irq_uninstall = via_driver_irq_uninstall,
	.irq_handler = via_driver_irq_handler,
	.dma_quiescent = via_driver_dma_quiescent,
	.dri_library_name = dri_library_name,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.reclaim_buffers_locked = NULL,
	.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
	.lastclose = via_lastclose,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	/* .num_ioctls is filled in at module load time (see via_init()). */
	.ioctls = via_ioctls,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = drm_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
	},
	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
82
/* Module entry point: finish driver setup and register with the DRM core. */
static int __init via_init(void)
{
	driver.num_ioctls = via_max_ioctl;
	via_init_command_verifier();
	return drm_init(&driver);
}

/* Module exit point: unregister from the DRM core. */
static void __exit via_exit(void)
{
	drm_exit(&driver);
}

module_init(via_init);
module_exit(via_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
new file mode 100644
index 000000000000..2daae81874cd
--- /dev/null
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_

#include "drm_sman.h"
#define DRIVER_AUTHOR "Various"

#define DRIVER_NAME "via"
#define DRIVER_DESC "VIA Unichrome / Pro"
#define DRIVER_DATE "20070202"

#define DRIVER_MAJOR 2
#define DRIVER_MINOR 11
#define DRIVER_PATCHLEVEL 1

#include "via_verifier.h"

#include "via_dmablit.h"

#define VIA_PCI_BUF_SIZE 60000
#define VIA_FIRE_BUF_SIZE 1024
#define VIA_NUM_IRQS 4

/* Mapping of the command ring buffer and its kernel virtual address. */
typedef struct drm_via_ring_buffer {
	drm_local_map_t map;
	char *virtual_start;
} drm_via_ring_buffer_t;

/*
 * One row per IRQ source: { enable bit, pending bit, status register,
 * status mask, expected value } -- consumed by via_irq.c.
 */
typedef uint32_t maskarray_t[5];

/* Per-source IRQ state: received-interrupt counter and its wait queue. */
typedef struct drm_via_irq {
	atomic_t irq_received;
	uint32_t pending_mask;
	uint32_t enable_mask;
	wait_queue_head_t irq_queue;
} drm_via_irq_t;

/* Driver-private device state, stored in dev->dev_private. */
typedef struct drm_via_private {
	drm_via_sarea_t *sarea_priv;
	drm_local_map_t *sarea;
	drm_local_map_t *fb;
	drm_local_map_t *mmio;
	unsigned long agpAddr;
	wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
	char *dma_ptr;
	unsigned int dma_low;
	unsigned int dma_high;
	unsigned int dma_offset;
	uint32_t dma_wrap;
	volatile uint32_t *last_pause_ptr;
	volatile uint32_t *hw_addr_ptr;
	drm_via_ring_buffer_t ring;
	struct timeval last_vblank;	/* sampled every 16 vblanks by the IRQ handler */
	int last_vblank_valid;
	unsigned usec_per_vblank;	/* estimated refresh period, microseconds */
	drm_via_state_t hc_state;
	char pci_buf[VIA_PCI_BUF_SIZE];
	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
	uint32_t num_fire_offsets;
	int chipset;			/* an enum via_family value */
	drm_via_irq_t via_irqs[VIA_NUM_IRQS];
	unsigned num_irqs;
	maskarray_t *irq_masks;		/* per-chipset table, chosen in irq_preinstall */
	uint32_t irq_enable_mask;
	uint32_t irq_pending_mask;
	int *irq_map;			/* logical IRQ number -> via_irqs index, -1 if absent */
	unsigned int idle_fault;
	struct drm_sman sman;
	int vram_initialized;
	int agp_initialized;
	unsigned long vram_offset;
	unsigned long agp_offset;
	drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
	uint32_t dma_diff;
} drm_via_private_t;

enum via_family {
	VIA_OTHER = 0,		/* Baseline */
	VIA_PRO_GROUP_A,	/* Another video engine and DMA commands */
	VIA_DX9_0		/* Same video as pro_group_a, but 3D is unsupported */
};

/* VIA MMIO register access */
#define VIA_BASE ((dev_priv->mmio))

#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)

extern struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;

/* IOCTL handlers, implemented across the driver's source files. */
extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );

extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
extern int via_driver_unload(struct drm_device *dev);

extern int via_init_context(struct drm_device * dev, int context);
extern int via_final_context(struct drm_device * dev, int context);

extern int via_do_cleanup_map(struct drm_device * dev);
extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);

/* IRQ plumbing (via_irq.c). */
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(struct drm_device * dev);
extern void via_driver_irq_postinstall(struct drm_device * dev);
extern void via_driver_irq_uninstall(struct drm_device * dev);

extern int via_dma_cleanup(struct drm_device * dev);
extern void via_init_command_verifier(void);
extern int via_driver_dma_quiescent(struct drm_device * dev);
extern void via_init_futex(drm_via_private_t * dev_priv);
extern void via_cleanup_futex(drm_via_private_t * dev_priv);
extern void via_release_futex(drm_via_private_t * dev_priv, int context);

extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
extern void via_lastclose(struct drm_device *dev);

/* PCI DMA blit engine (via_dmablit.c). */
extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
extern void via_init_dmablit(struct drm_device *dev);

#endif
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
new file mode 100644
index 000000000000..c6bb978a1106
--- /dev/null
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -0,0 +1,377 @@
1/* via_irq.c
2 *
3 * Copyright 2004 BEAM Ltd.
4 * Copyright 2002 Tungsten Graphics, Inc.
5 * Copyright 2005 Thomas Hellstrom.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Terry Barnaby <terry1@beam.ltd.uk>
30 * Keith Whitwell <keith@tungstengraphics.com>
31 * Thomas Hellstrom <unichrome@shipmail.org>
32 *
33 * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
34 * interrupt, as well as an infrastructure to handle other interrupts of the chip.
35 * The refresh rate is also calculated for video playback sync purposes.
36 */
37
38#include "drmP.h"
39#include "drm.h"
40#include "via_drm.h"
41#include "via_drv.h"
42
#define VIA_REG_INTERRUPT 0x200	/* MMIO offset of the interrupt control/status register */

/* VIA_REG_INTERRUPT bits: *_ENABLE bits arm a source, *_PENDING bits report it. */
#define VIA_IRQ_GLOBAL (1 << 31)	/* master interrupt enable */
#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
#define VIA_IRQ_VBLANK_PENDING (1 << 3)
#define VIA_IRQ_HQV0_ENABLE (1 << 11)
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
#define VIA_IRQ_HQV0_PENDING (1 << 9)
#define VIA_IRQ_HQV1_PENDING (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)	/* DD = descriptor done */
#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)	/* TD = transfer done */
#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
61
62
/*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
 * Currently we activate the HQV interrupts of Unichrome Pro group A.
 *
 * maskarray_t layout: { enable bit, pending bit, status register offset,
 * status mask, expected value } -- see via_driver_irq_wait().
 */

static maskarray_t via_pro_group_a_irqs[] = {
	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
	 0x00000000},
	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
	 0x00000000},
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a =
    sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
/* Logical IRQ number -> row index in the masks table above; -1 = not present. */
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};

static maskarray_t via_unichrome_irqs[] = {
	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
91
/*
 * Microsecond difference between two timestamps, assuming 'then' precedes
 * 'now' by at most one second (a tv_usec wrap is folded back in).
 */
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
	long delta = now->tv_usec - then->tv_usec;

	return (delta >= 0) ? (unsigned) delta : (unsigned) (1000000 + delta);
}
98
/*
 * Top-half interrupt handler. Counts vblanks (sampling the clock every 16
 * vblanks to estimate the refresh period), wakes vblank waiters, and walks
 * the chipset-specific IRQ sources; DMA transfer-done sources additionally
 * kick the blit handler. All status bits read are written back at the end
 * to acknowledge the serviced interrupts.
 */
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	struct timeval cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = VIA_READ(VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev->vbl_received);
		/* Every 16th vblank, refresh the usec-per-vblank estimate
		 * (the >> 4 averages over the 16 elapsed vblanks). */
		if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
			do_gettimeofday(&cur_vblank);
			if (dev_priv->last_vblank_valid) {
				dev_priv->usec_per_vblank =
				    time_diff(&cur_vblank,
					      &dev_priv->last_vblank) >> 4;
			}
			dev_priv->last_vblank = cur_vblank;
			dev_priv->last_vblank_valid = 1;
		}
		if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
			DRM_DEBUG("US per vblank is: %u\n",
				  dev_priv->usec_per_vblank);
		}
		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);
		handled = 1;
	}

	for (i = 0; i < dev_priv->num_irqs; ++i) {
		if (status & cur_irq->pending_mask) {
			atomic_inc(&cur_irq->irq_received);
			DRM_WAKEUP(&cur_irq->irq_queue);
			handled = 1;
			/* DMA transfer-done sources feed the blit engine. */
			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
				via_dmablit_handler(dev, 0, 1);
			} else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
				via_dmablit_handler(dev, 1, 1);
			}
		}
		cur_irq++;
	}

	/* Acknowledge interrupts by writing the status bits back. */
	VIA_WRITE(VIA_REG_INTERRUPT, status);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
153
154static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
155{
156 u32 status;
157
158 if (dev_priv) {
159 /* Acknowlege interrupts */
160 status = VIA_READ(VIA_REG_INTERRUPT);
161 VIA_WRITE(VIA_REG_INTERRUPT, status |
162 dev_priv->irq_pending_mask);
163 }
164}
165
/*
 * DRM vblank_wait hook: sleep (up to 3 s) until the vblank counter reaches
 * *sequence, treating differences larger than 2^23 as already passed so
 * counter wraparound does not stall the caller. On return *sequence holds
 * the current counter value; 'ret' propagates DRM_WAIT_ON's result.
 */
int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	DRM_DEBUG("\n");
	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	viadrv_acknowledge_irqs(dev_priv);

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than she wants to wait for years
	 * using vertical blanks...
	 */

	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(&dev->vbl_received)) -
		      *sequence) <= (1 << 23)));

	*sequence = cur_vblank;
	return ret;
}
192
193static int
194via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
195 unsigned int *sequence)
196{
197 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
198 unsigned int cur_irq_sequence;
199 drm_via_irq_t *cur_irq;
200 int ret = 0;
201 maskarray_t *masks;
202 int real_irq;
203
204 DRM_DEBUG("\n");
205
206 if (!dev_priv) {
207 DRM_ERROR("called with no initialization\n");
208 return -EINVAL;
209 }
210
211 if (irq >= drm_via_irq_num) {
212 DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
213 return -EINVAL;
214 }
215
216 real_irq = dev_priv->irq_map[irq];
217
218 if (real_irq < 0) {
219 DRM_ERROR("Video IRQ %d not available on this hardware.\n",
220 irq);
221 return -EINVAL;
222 }
223
224 masks = dev_priv->irq_masks;
225 cur_irq = dev_priv->via_irqs + real_irq;
226
227 if (masks[real_irq][2] && !force_sequence) {
228 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
229 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
230 masks[irq][4]));
231 cur_irq_sequence = atomic_read(&cur_irq->irq_received);
232 } else {
233 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
234 (((cur_irq_sequence =
235 atomic_read(&cur_irq->irq_received)) -
236 *sequence) <= (1 << 23)));
237 }
238 *sequence = cur_irq_sequence;
239 return ret;
240}
241
242/*
243 * drm_dma.h hooks
244 */
245
/*
 * DRM irq_preinstall hook: select the per-chipset IRQ tables, initialize
 * each source's counter and wait queue, accumulate the combined enable and
 * pending masks, then mask and acknowledge everything so no interrupt
 * fires before postinstall enables them.
 */
void via_driver_irq_preinstall(struct drm_device * dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	drm_via_irq_t *cur_irq;
	int i;

	DRM_DEBUG("dev_priv: %p\n", dev_priv);
	if (dev_priv) {
		cur_irq = dev_priv->via_irqs;

		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;

		/* Pro group A and DX9 chips additionally expose HQV IRQs. */
		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
		    dev_priv->chipset == VIA_DX9_0) {
			dev_priv->irq_masks = via_pro_group_a_irqs;
			dev_priv->num_irqs = via_num_pro_group_a;
			dev_priv->irq_map = via_irqmap_pro_group_a;
		} else {
			dev_priv->irq_masks = via_unichrome_irqs;
			dev_priv->num_irqs = via_num_unichrome;
			dev_priv->irq_map = via_irqmap_unichrome;
		}

		for (i = 0; i < dev_priv->num_irqs; ++i) {
			atomic_set(&cur_irq->irq_received, 0);
			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
			cur_irq++;

			DRM_DEBUG("Initializing IRQ %d\n", i);
		}

		dev_priv->last_vblank_valid = 0;

		/* Clear VSync interrupt regs */
		status = VIA_READ(VIA_REG_INTERRUPT);
		VIA_WRITE(VIA_REG_INTERRUPT, status &
			  ~(dev_priv->irq_enable_mask));

		/* Clear bits if they're already high */
		viadrv_acknowledge_irqs(dev_priv);
	}
}
294
295void via_driver_irq_postinstall(struct drm_device * dev)
296{
297 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
298 u32 status;
299
300 DRM_DEBUG("\n");
301 if (dev_priv) {
302 status = VIA_READ(VIA_REG_INTERRUPT);
303 VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
304 | dev_priv->irq_enable_mask);
305
306 /* Some magic, oh for some data sheets ! */
307
308 VIA_WRITE8(0x83d4, 0x11);
309 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
310
311 }
312}
313
314void via_driver_irq_uninstall(struct drm_device * dev)
315{
316 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
317 u32 status;
318
319 DRM_DEBUG("\n");
320 if (dev_priv) {
321
322 /* Some more magic, oh for some data sheets ! */
323
324 VIA_WRITE8(0x83d4, 0x11);
325 VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
326
327 status = VIA_READ(VIA_REG_INTERRUPT);
328 VIA_WRITE(VIA_REG_INTERRUPT, status &
329 ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
330 }
331}
332
/*
 * IOCTL: wait on a device-specific IRQ. A RELATIVE request is converted to
 * an absolute one by adding the current received count, then both cases
 * fall into the same wait path. Fills the reply with the reached sequence
 * number and a timestamp.
 */
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_irqwait_t *irqwait = data;
	struct timeval now;
	int ret = 0;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int force_sequence;

	if (!dev->irq)
		return -EINVAL;

	if (irqwait->request.irq >= dev_priv->num_irqs) {
		DRM_ERROR("Trying to wait on unknown irq %d\n",
			  irqwait->request.irq);
		return -EINVAL;
	}

	cur_irq += irqwait->request.irq;

	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
	case VIA_IRQ_RELATIVE:
		irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fall through - a converted relative wait is an absolute one */
	case VIA_IRQ_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
		return -EINVAL;
	}

	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);

	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
				  &irqwait->request.sequence);
	do_gettimeofday(&now);
	irqwait->reply.tval_sec = now.tv_sec;
	irqwait->reply.tval_usec = now.tv_usec;

	return ret;
}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
new file mode 100644
index 000000000000..a967556be014
--- /dev/null
+++ b/drivers/gpu/drm/via/via_map.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#include "drmP.h"
25#include "via_drm.h"
26#include "via_drv.h"
27
/*
 * Resolve the userspace-provided map offsets (SAREA, framebuffer, MMIO)
 * into kernel map pointers and initialise futexes and the DMA blit
 * engine. Called from the VIA_INIT_MAP ioctl path. Returns 0 or -EINVAL
 * if any required map is missing (cleanup is performed before return).
 */
28static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
29{
30 drm_via_private_t *dev_priv = dev->dev_private;
31
32 DRM_DEBUG("\n");
33
34 dev_priv->sarea = drm_getsarea(dev);
35 if (!dev_priv->sarea) {
36 DRM_ERROR("could not find sarea!\n");
/* NOTE(review): dev->dev_private was already set to dev_priv by the
 * caller (via_driver_load); this re-store looks redundant on each of
 * the three error paths — confirm before removing. */
37 dev->dev_private = (void *)dev_priv;
38 via_do_cleanup_map(dev);
39 return -EINVAL;
40 }
41
42 dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
43 if (!dev_priv->fb) {
44 DRM_ERROR("could not find framebuffer!\n");
45 dev->dev_private = (void *)dev_priv;
46 via_do_cleanup_map(dev);
47 return -EINVAL;
48 }
49 dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
50 if (!dev_priv->mmio) {
51 DRM_ERROR("could not find mmio region!\n");
52 dev->dev_private = (void *)dev_priv;
53 via_do_cleanup_map(dev);
54 return -EINVAL;
55 }
56
/* Private sarea lives at a user-chosen offset inside the shared area.
 * NOTE(review): sarea_priv_offset comes from userspace and is not
 * range-checked here — presumably validated elsewhere; verify. */
57 dev_priv->sarea_priv =
58 (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
59 init->sarea_priv_offset);
60
61 dev_priv->agpAddr = init->agpAddr;
62
63 via_init_futex(dev_priv);
64
65 via_init_dmablit(dev);
66
67 dev->dev_private = (void *)dev_priv;
68 return 0;
69}
70
/*
 * Counterpart of via_do_init_map: tear down the DMA state. Currently
 * only delegates to via_dma_cleanup(); always returns 0.
 */
71int via_do_cleanup_map(struct drm_device * dev)
72{
73 via_dma_cleanup(dev);
74
75 return 0;
76}
77
/*
 * DRM_IOCTL_VIA_MAP_INIT dispatcher: routes the ioctl to map init or
 * cleanup based on init->func. Unknown func values yield -EINVAL.
 */
78int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
79{
80 drm_via_init_t *init = data;
81
82 DRM_DEBUG("\n");
83
84 switch (init->func) {
85 case VIA_INIT_MAP:
86 return via_do_init_map(dev, init);
87 case VIA_CLEANUP_MAP:
88 return via_do_cleanup_map(dev);
89 }
90
91 return -EINVAL;
92}
93
/*
 * Driver load hook: allocate the zeroed per-device private structure,
 * record the chipset id and initialise the simple memory manager.
 * Returns 0, -ENOMEM on allocation failure, or drm_sman_init's error
 * (dev_priv is freed in that case, but note dev->dev_private is left
 * pointing at the freed memory — the DRM core does not touch it after
 * a failed load).
 */
94int via_driver_load(struct drm_device *dev, unsigned long chipset)
95{
96 drm_via_private_t *dev_priv;
97 int ret = 0;
98
99 dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
100 if (dev_priv == NULL)
101 return -ENOMEM;
102
103 dev->dev_private = (void *)dev_priv;
104
105 dev_priv->chipset = chipset;
106
/* 2 memory types (VIA_MEM_VIDEO / VIA_MEM_AGP); the 12/8 arguments are
 * drm_sman hash-table sizing parameters — see drm_sman_init(). */
107 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
108 if (ret) {
109 drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
110 }
111 return ret;
112}
113
/*
 * Driver unload hook: tear down the simple memory manager and free the
 * private structure allocated in via_driver_load(). Always returns 0.
 */
114int via_driver_unload(struct drm_device *dev)
115{
116 drm_via_private_t *dev_priv = dev->dev_private;
117
118 drm_sman_takedown(&dev_priv->sman);
119
120 drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
121
122 return 0;
123}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
new file mode 100644
index 000000000000..e64094916e4f
--- /dev/null
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
3 * All rights reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
26 */
27
28#include "drmP.h"
29#include "via_drm.h"
30#include "via_drv.h"
31#include "drm_sman.h"
32
33#define VIA_MM_ALIGN_SHIFT 4
34#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
35
/*
 * DRM_IOCTL_VIA_AGP_INIT: hand an AGP region of agp->size bytes at
 * agp->offset to the simple memory manager as type VIA_MEM_AGP.
 * Sizes are managed in 16-byte (1 << VIA_MM_ALIGN_SHIFT) units.
 * Serialised by dev->struct_mutex. Returns 0 or drm_sman error.
 */
36int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
37{
38 drm_via_agp_t *agp = data;
39 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
40 int ret;
41
42 mutex_lock(&dev->struct_mutex);
43 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
44 agp->size >> VIA_MM_ALIGN_SHIFT);
45
46 if (ret) {
47 DRM_ERROR("AGP memory manager initialisation error\n");
48 mutex_unlock(&dev->struct_mutex);
49 return ret;
50 }
51
52 dev_priv->agp_initialized = 1;
53 dev_priv->agp_offset = agp->offset;
54 mutex_unlock(&dev->struct_mutex);
55
56 DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
57 return 0;
58}
59
/*
 * DRM_IOCTL_VIA_FB_INIT: hand a VRAM region of fb->size bytes at
 * fb->offset to the simple memory manager as type VIA_MEM_VIDEO.
 * Mirrors via_agp_init(); sizes in 16-byte units, serialised by
 * dev->struct_mutex. Returns 0 or drm_sman error.
 */
60int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
61{
62 drm_via_fb_t *fb = data;
63 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
64 int ret;
65
66 mutex_lock(&dev->struct_mutex);
67 ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
68 fb->size >> VIA_MM_ALIGN_SHIFT);
69
70 if (ret) {
71 DRM_ERROR("VRAM memory manager initialisation error\n");
72 mutex_unlock(&dev->struct_mutex);
73 return ret;
74 }
75
76 dev_priv->vram_initialized = 1;
77 dev_priv->vram_offset = fb->offset;
78
79 mutex_unlock(&dev->struct_mutex);
80 DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
81
82 return 0;
83
84}
85
/*
 * Context-destroy hook. Releases the context's futexes; when the last
 * DRM context goes away (ctx_count == 1) it also uninstalls the IRQ
 * handler and tears down the futex/map state. Always returns 1
 * (context may be destroyed).
 */
86int via_final_context(struct drm_device *dev, int context)
87{
88 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
89
90 via_release_futex(dev_priv, context);
91
92 /* Linux specific until context tracking code gets ported to BSD */
93 /* Last context, perform cleanup */
94 if (dev->ctx_count == 1 && dev->dev_private) {
95 DRM_DEBUG("Last Context\n");
96 if (dev->irq)
97 drm_irq_uninstall(dev);
98 via_cleanup_futex(dev_priv);
99 via_do_cleanup_map(dev);
100 }
101 return 1;
102}
103
/*
 * Lastclose hook: when the final file handle closes, release every
 * memory block the simple memory manager still tracks and mark both
 * memory pools uninitialised. No-op if the driver never loaded.
 */
104void via_lastclose(struct drm_device *dev)
105{
106 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
107
108 if (!dev_priv)
109 return;
110
111 mutex_lock(&dev->struct_mutex);
112 drm_sman_cleanup(&dev_priv->sman);
113 dev_priv->vram_initialized = 0;
114 dev_priv->agp_initialized = 0;
115 mutex_unlock(&dev->struct_mutex);
116}
117
/*
 * DRM_IOCTL_VIA_ALLOCMEM: allocate a block of VRAM or AGP memory via
 * the simple memory manager, owned by the calling file handle.
 * On success fills mem->offset (absolute card/AGP offset) and
 * mem->index (sman hash key for later free); on failure zeroes the
 * reply and returns -ENOMEM / -EINVAL.
 */
118int via_mem_alloc(struct drm_device *dev, void *data,
119 struct drm_file *file_priv)
120{
121 drm_via_mem_t *mem = data;
122 int retval = 0;
123 struct drm_memblock_item *item;
124 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
125 unsigned long tmpSize;
126
/* Relies on VIA_MEM_AGP being the highest valid type value — anything
 * above it is rejected; VIA_MEM_VIDEO is handled by the ternaries. */
127 if (mem->type > VIA_MEM_AGP) {
128 DRM_ERROR("Unknown memory type allocation\n");
129 return -EINVAL;
130 }
131 mutex_lock(&dev->struct_mutex);
132 if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
133 dev_priv->agp_initialized)) {
134 DRM_ERROR
135 ("Attempt to allocate from uninitialized memory manager.\n");
136 mutex_unlock(&dev->struct_mutex);
137 return -EINVAL;
138 }
139
/* Round the byte size up to whole 16-byte sman units. */
140 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
/* file_priv doubles as the sman owner token so the block can be
 * reclaimed when this client exits (via_reclaim_buffers_locked). */
141 item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
142 (unsigned long)file_priv);
143 mutex_unlock(&dev->struct_mutex);
144 if (item) {
/* Translate the sman-relative unit offset back into an absolute byte
 * offset inside the pool the block came from. */
145 mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
146 dev_priv->vram_offset : dev_priv->agp_offset) +
147 (item->mm->
148 offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
149 mem->index = item->user_hash.key;
150 } else {
151 mem->offset = 0;
152 mem->size = 0;
153 mem->index = 0;
154 DRM_DEBUG("Video memory allocation failed\n");
155 retval = -ENOMEM;
156 }
157
158 return retval;
159}
160
/*
 * DRM_IOCTL_VIA_FREEMEM: release a block previously returned by
 * via_mem_alloc, looked up by its sman hash key (mem->index).
 * Returns drm_sman_free_key's result (e.g. an error for a bogus key).
 */
161int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
162{
163 drm_via_private_t *dev_priv = dev->dev_private;
164 drm_via_mem_t *mem = data;
165 int ret;
166
167 mutex_lock(&dev->struct_mutex);
168 ret = drm_sman_free_key(&dev_priv->sman, mem->index);
169 mutex_unlock(&dev->struct_mutex);
170 DRM_DEBUG("free = 0x%lx\n", mem->index);
171
172 return ret;
173}
174
175
/*
 * File-close hook: free every sman block owned by the departing client
 * (file_priv was used as the owner token at allocation time). Waits
 * for DMA quiescence first so the hardware is no longer reading the
 * memory being released. Fast-exits if the client owns nothing.
 */
176void via_reclaim_buffers_locked(struct drm_device * dev,
177 struct drm_file *file_priv)
178{
179 drm_via_private_t *dev_priv = dev->dev_private;
180
181 mutex_lock(&dev->struct_mutex);
182 if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
183 mutex_unlock(&dev->struct_mutex);
184 return;
185 }
186
/* NOTE(review): dma_quiescent is called with struct_mutex held —
 * presumably safe for this driver's implementation; verify if the
 * callback ever sleeps on a lock that nests with struct_mutex. */
187 if (dev->driver->dma_quiescent) {
188 dev->driver->dma_quiescent(dev);
189 }
190
191 drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
192 mutex_unlock(&dev->struct_mutex);
193 return;
194}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
new file mode 100644
index 000000000000..46a579198747
--- /dev/null
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -0,0 +1,1116 @@
1/*
2 * Copyright 2004 The Unichrome Project. All Rights Reserved.
3 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
14 * of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Author: Thomas Hellstrom 2004, 2005.
25 * This code was written using docs obtained under NDA from VIA Inc.
26 *
27 * Don't run this code directly on an AGP buffer. Due to cache problems it will
28 * be very slow.
29 */
30
31#include "via_3d_reg.h"
32#include "drmP.h"
33#include "drm.h"
34#include "via_drm.h"
35#include "via_verifier.h"
36#include "via_drv.h"
37
/*
 * Command-verifier state machine types and lookup tables.
 *
 * The verifier walks a userspace-submitted DMA command buffer and
 * classifies each command byte through table1/table2/table3 (indexed
 * by the top 8 bits of the command dword) into a hazard_t, which
 * investigate_hazard() then checks — primarily to prevent userspace
 * from pointing the 3D engine at arbitrary system memory.
 */
/* Top-level parser states: which header type we are currently inside. */
38typedef enum {
39 state_command,
40 state_header2,
41 state_header1,
42 state_vheader5,
43 state_vheader6,
44 state_error
45} verifier_state_t;
46
/* Per-command checks. The check_*_addr* entries accumulate addresses
 * that are split across multiple command dwords. */
47typedef enum {
48 no_check = 0,
49 check_for_header2,
50 check_for_header1,
51 check_for_header2_err,
52 check_for_header1_err,
53 check_for_fire,
54 check_z_buffer_addr0,
55 check_z_buffer_addr1,
56 check_z_buffer_addr_mode,
57 check_destination_addr0,
58 check_destination_addr1,
59 check_destination_addr_mode,
60 check_for_dummy,
61 check_for_dd,
62 check_texture_addr0,
63 check_texture_addr1,
64 check_texture_addr2,
65 check_texture_addr3,
66 check_texture_addr4,
67 check_texture_addr5,
68 check_texture_addr6,
69 check_texture_addr7,
70 check_texture_addr8,
71 check_texture_addr_mode,
72 check_for_vertex_count,
73 check_number_texunits,
74 forbidden_command
75} hazard_t;
76
77/*
78 * Associates each hazard above with a possible multi-command
79 * sequence. For example an address that is split over multiple
80 * commands and that needs to be checked at the first command
81 * that does not include any part of the address.
82 */
83
/* Indexed by hazard_t — must stay in the same order as the enum. */
84static drm_via_sequence_t seqs[] = {
85 no_sequence,
86 no_sequence,
87 no_sequence,
88 no_sequence,
89 no_sequence,
90 no_sequence,
91 z_address,
92 z_address,
93 z_address,
94 dest_address,
95 dest_address,
96 dest_address,
97 no_sequence,
98 no_sequence,
99 tex_address,
100 tex_address,
101 tex_address,
102 tex_address,
103 tex_address,
104 tex_address,
105 tex_address,
106 tex_address,
107 tex_address,
108 tex_address,
109 no_sequence
110};
111
/* Sparse (command byte -> hazard) pair used to build the dense tables. */
112typedef struct {
113 unsigned int code;
114 hazard_t hz;
115} hz_init_t;
116
/* HC_ParaType_NotTex commands. */
117static hz_init_t init_table1[] = {
118 {0xf2, check_for_header2_err},
119 {0xf0, check_for_header1_err},
120 {0xee, check_for_fire},
121 {0xcc, check_for_dummy},
122 {0xdd, check_for_dd},
123 {0x00, no_check},
124 {0x10, check_z_buffer_addr0},
125 {0x11, check_z_buffer_addr1},
126 {0x12, check_z_buffer_addr_mode},
127 {0x13, no_check},
128 {0x14, no_check},
129 {0x15, no_check},
130 {0x23, no_check},
131 {0x24, no_check},
132 {0x33, no_check},
133 {0x34, no_check},
134 {0x35, no_check},
135 {0x36, no_check},
136 {0x37, no_check},
137 {0x38, no_check},
138 {0x39, no_check},
139 {0x3A, no_check},
140 {0x3B, no_check},
141 {0x3C, no_check},
142 {0x3D, no_check},
143 {0x3E, no_check},
144 {0x40, check_destination_addr0},
145 {0x41, check_destination_addr1},
146 {0x42, check_destination_addr_mode},
147 {0x43, no_check},
148 {0x44, no_check},
149 {0x50, no_check},
150 {0x51, no_check},
151 {0x52, no_check},
152 {0x53, no_check},
153 {0x54, no_check},
154 {0x55, no_check},
155 {0x56, no_check},
156 {0x57, no_check},
157 {0x58, no_check},
158 {0x70, no_check},
159 {0x71, no_check},
160 {0x78, no_check},
161 {0x79, no_check},
162 {0x7A, no_check},
163 {0x7B, no_check},
164 {0x7C, no_check},
165 {0x7D, check_for_vertex_count}
166};
167
/* HC_ParaType_Tex commands (texture setup). */
168static hz_init_t init_table2[] = {
169 {0xf2, check_for_header2_err},
170 {0xf0, check_for_header1_err},
171 {0xee, check_for_fire},
172 {0xcc, check_for_dummy},
173 {0x00, check_texture_addr0},
174 {0x01, check_texture_addr0},
175 {0x02, check_texture_addr0},
176 {0x03, check_texture_addr0},
177 {0x04, check_texture_addr0},
178 {0x05, check_texture_addr0},
179 {0x06, check_texture_addr0},
180 {0x07, check_texture_addr0},
181 {0x08, check_texture_addr0},
182 {0x09, check_texture_addr0},
183 {0x20, check_texture_addr1},
184 {0x21, check_texture_addr1},
185 {0x22, check_texture_addr1},
186 {0x23, check_texture_addr4},
187 {0x2B, check_texture_addr3},
188 {0x2C, check_texture_addr3},
189 {0x2D, check_texture_addr3},
190 {0x2E, check_texture_addr3},
191 {0x2F, check_texture_addr3},
192 {0x30, check_texture_addr3},
193 {0x31, check_texture_addr3},
194 {0x32, check_texture_addr3},
195 {0x33, check_texture_addr3},
196 {0x34, check_texture_addr3},
197 {0x4B, check_texture_addr5},
198 {0x4C, check_texture_addr6},
199 {0x51, check_texture_addr7},
200 {0x52, check_texture_addr8},
201 {0x77, check_texture_addr2},
202 {0x78, no_check},
203 {0x79, no_check},
204 {0x7A, no_check},
205 {0x7B, check_texture_addr_mode},
206 {0x7C, no_check},
207 {0x7D, no_check},
208 {0x7E, no_check},
209 {0x7F, no_check},
210 {0x80, no_check},
211 {0x81, no_check},
212 {0x82, no_check},
213 {0x83, no_check},
214 {0x85, no_check},
215 {0x86, no_check},
216 {0x87, no_check},
217 {0x88, no_check},
218 {0x89, no_check},
219 {0x8A, no_check},
220 {0x90, no_check},
221 {0x91, no_check},
222 {0x92, no_check},
223 {0x93, no_check}
224};
225
/* HC_ParaType_Tex | HC_SubType_TexGeneral commands. */
226static hz_init_t init_table3[] = {
227 {0xf2, check_for_header2_err},
228 {0xf0, check_for_header1_err},
229 {0xcc, check_for_dummy},
230 {0x00, check_number_texunits}
231};
232
/* Dense 256-entry lookup tables, presumably populated from the
 * init_table* arrays by a setup routine outside this view — entries
 * not listed above default to 0 (== no_check per the hazard_t enum). */
233static hazard_t table1[256];
234static hazard_t table2[256];
235static hazard_t table3[256];
236
/*
 * Advance *buf by num_words dwords if they fit before buf_end.
 * Returns 0 on success, 1 (with an error logged) if the buffer
 * terminates early — callers treat non-zero as a verifier failure.
 */
237static __inline__ int
238eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
239{
240 if ((buf_end - *buf) >= num_words) {
241 *buf += num_words;
242 return 0;
243 }
244 DRM_ERROR("Illegal termination of DMA command buffer\n");
245 return 1;
246}
247
248/*
249 * Partially stolen from drm_memory.h
250 */
251
/*
 * Find the AGP map containing [offset, offset+size). A one-entry cache
 * in seq->map_cache short-circuits the common case; otherwise the
 * device maplist is scanned for a non-restricted _DRM_AGP map that
 * fully covers the range. Returns NULL if no such map exists (the
 * caller rejects the texture as outside client-mappable memory).
 */
252static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
253 unsigned long offset,
254 unsigned long size,
255 struct drm_device * dev)
256{
257 struct drm_map_list *r_list;
258 drm_local_map_t *map = seq->map_cache;
259
/* Fast path: previous lookup still covers the requested range. */
260 if (map && map->offset <= offset
261 && (offset + size) <= (map->offset + map->size)) {
262 return map;
263 }
264
265 list_for_each_entry(r_list, &dev->maplist, head) {
266 map = r_list->map;
267 if (!map)
268 continue;
269 if (map->offset <= offset
270 && (offset + size) <= (map->offset + map->size)
271 && !(map->flags & _DRM_RESTRICTED)
272 && (map->type == _DRM_AGP)) {
273 seq->map_cache = map;
274 return map;
275 }
276 }
277 return NULL;
278}
279
280/*
281 * Require that all AGP texture levels reside in the same AGP map which should
282 * be mappable by the client. This is not a big restriction.
283 * FIXME: To actually enforce this security policy strictly, drm_rmmap
284 * would have to wait for dma quiescent before removing an AGP map.
285 * The via_drm_lookup_agp_map call in reality seems to take
286 * very little CPU time.
287 */
288
/*
 * Called when a multi-dword address sequence (z buffer, destination,
 * or texture) is complete. For AGP textures, computes the [lo, hi)
 * byte span covered by the enabled mip levels and verifies it lies
 * entirely inside one client-mappable AGP map.
 * Returns 0 if acceptable, 2 (verifier error) otherwise.
 */
289static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
290{
291 switch (cur_seq->unfinished) {
292 case z_address:
293 DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
294 break;
295 case dest_address:
296 DRM_DEBUG("Destination start address is 0x%x\n",
297 cur_seq->d_addr);
298 break;
299 case tex_address:
300 if (cur_seq->agp_texture) {
301 unsigned start =
302 cur_seq->tex_level_lo[cur_seq->texture];
303 unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
304 unsigned long lo = ~0, hi = 0, tmp;
305 uint32_t *addr, *pitch, *height, tex;
306 unsigned i;
307 int npot;
308
/* Clamp mip level range to the 10 levels (0..9) tracked per texture. */
309 if (end > 9)
310 end = 9;
311 if (start > 9)
312 start = 9;
313
314 addr =
315 &(cur_seq->t_addr[tex = cur_seq->texture][start]);
316 pitch = &(cur_seq->pitch[tex][start]);
317 height = &(cur_seq->height[tex][start]);
318 npot = cur_seq->tex_npot[tex];
/* Walk each enabled level: lo = min start address, hi = max end
 * address. For npot textures pitch is a byte count (multiply);
 * otherwise pitch is stored as a log2 shift. */
319 for (i = start; i <= end; ++i) {
320 tmp = *addr++;
321 if (tmp < lo)
322 lo = tmp;
323 if (i == 0 && npot)
324 tmp += (*height++ * *pitch++);
325 else
326 tmp += (*height++ << *pitch++);
327 if (tmp > hi)
328 hi = tmp;
329 }
330
331 if (!via_drm_lookup_agp_map
332 (cur_seq, lo, hi - lo, cur_seq->dev)) {
333 DRM_ERROR
334 ("AGP texture is not in allowed map\n");
335 return 2;
336 }
337 }
338 break;
339 default:
340 break;
341 }
342 cur_seq->unfinished = no_sequence;
343 return 0;
344}
345
/*
 * Check one command dword against its hazard classification.
 *
 * Return values: 0 = command accepted, 1 = command belongs to an outer
 * parser state (caller re-dispatches it), 2 = security/format violation
 * (verification fails). A pending address sequence of a different kind
 * is finished first, so split addresses are validated exactly once.
 */
346static __inline__ int
347investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
348{
349 register uint32_t tmp, *tmp_addr;
350
351 if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
352 int ret;
353 if ((ret = finish_current_sequence(cur_seq)))
354 return ret;
355 }
356
357 switch (hz) {
358 case check_for_header2:
359 if (cmd == HALCYON_HEADER2)
360 return 1;
361 return 0;
362 case check_for_header1:
363 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
364 return 1;
365 return 0;
/* The *_err variants fall out of the switch to the final "return 2"
 * when the expected header is not found. */
366 case check_for_header2_err:
367 if (cmd == HALCYON_HEADER2)
368 return 1;
369 DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
370 break;
371 case check_for_header1_err:
372 if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
373 return 1;
374 DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
375 break;
376 case check_for_fire:
377 if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
378 return 1;
379 DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
380 break;
381 case check_for_dummy:
382 if (HC_DUMMY == cmd)
383 return 0;
384 DRM_ERROR("Illegal DMA HC_DUMMY command\n")
385 break;
386 case check_for_dd:
387 if (0xdddddddd == cmd)
388 return 0;
389 DRM_ERROR("Illegal DMA 0xdddddddd command\n");
390 break;
/* Z buffer address arrives split: low 24 bits then high 8 bits. */
391 case check_z_buffer_addr0:
392 cur_seq->unfinished = z_address;
393 cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
394 (cmd & 0x00FFFFFF);
395 return 0;
396 case check_z_buffer_addr1:
397 cur_seq->unfinished = z_address;
398 cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
399 ((cmd & 0xFF) << 24);
400 return 0;
401 case check_z_buffer_addr_mode:
402 cur_seq->unfinished = z_address;
/* Bits 14-15 non-zero would select system memory — forbidden. */
403 if ((cmd & 0x0000C000) == 0)
404 return 0;
405 DRM_ERROR("Attempt to place Z buffer in system memory\n");
406 return 2;
/* Destination (draw) buffer address, same split layout as Z. */
407 case check_destination_addr0:
408 cur_seq->unfinished = dest_address;
409 cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
410 (cmd & 0x00FFFFFF);
411 return 0;
412 case check_destination_addr1:
413 cur_seq->unfinished = dest_address;
414 cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
415 ((cmd & 0xFF) << 24);
416 return 0;
417 case check_destination_addr_mode:
418 cur_seq->unfinished = dest_address;
419 if ((cmd & 0x0000C000) == 0)
420 return 0;
421 DRM_ERROR
422 ("Attempt to place 3D drawing buffer in system memory\n");
423 return 2;
/* Texture level base address, low 24 bits; level index in cmd[31:24]. */
424 case check_texture_addr0:
425 cur_seq->unfinished = tex_address;
426 tmp = (cmd >> 24);
427 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
428 *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
429 return 0;
/* High bytes for three consecutive levels packed into one dword;
 * command bytes 0x20..0x22 map to level groups 0,3,6 (tmp *= 3). */
430 case check_texture_addr1:
431 cur_seq->unfinished = tex_address;
432 tmp = ((cmd >> 24) - 0x20);
433 tmp += tmp << 1;
434 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
435 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
436 tmp_addr++;
437 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
438 tmp_addr++;
439 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
440 return 0;
/* Lowest/highest enabled mip level for the current texture unit. */
441 case check_texture_addr2:
442 cur_seq->unfinished = tex_address;
443 cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
444 cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
445 return 0;
/* Level-0 pitch: either an explicit byte pitch (npot mode) or a
 * power-of-two exponent; extra low bits in exponent mode are
 * unimplemented and rejected. */
446 case check_texture_addr3:
447 cur_seq->unfinished = tex_address;
448 tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
449 if (tmp == 0 &&
450 (cmd & HC_HTXnEnPit_MASK)) {
451 cur_seq->pitch[cur_seq->texture][tmp] =
452 (cmd & HC_HTXnLnPit_MASK);
453 cur_seq->tex_npot[cur_seq->texture] = 1;
454 } else {
455 cur_seq->pitch[cur_seq->texture][tmp] =
456 (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
457 cur_seq->tex_npot[cur_seq->texture] = 0;
458 if (cmd & 0x000FFFFF) {
459 DRM_ERROR
460 ("Unimplemented texture level 0 pitch mode.\n");
461 return 2;
462 }
463 }
464 return 0;
/* High byte of the level-9 address. */
465 case check_texture_addr4:
466 cur_seq->unfinished = tex_address;
467 tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
468 *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
469 return 0;
470 case check_texture_addr5:
471 case check_texture_addr6:
472 cur_seq->unfinished = tex_address;
473 /*
474 * Texture width. We don't care since we have the pitch.
475 */
476 return 0;
/* Per-level heights, 4-bit log2 each: levels 0-5 here... */
477 case check_texture_addr7:
478 cur_seq->unfinished = tex_address;
479 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
480 tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
481 tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
482 tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
483 tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
484 tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
485 tmp_addr[0] = 1 << (cmd & 0x0000000F);
486 return 0;
/* ...and levels 6-9 here. */
487 case check_texture_addr8:
488 cur_seq->unfinished = tex_address;
489 tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
490 tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
491 tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
492 tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
493 tmp_addr[6] = 1 << (cmd & 0x0000000F);
494 return 0;
/* Texture location mode: 2 = system memory (forbidden), 3 = AGP
 * (flagged for the range check in finish_current_sequence). */
495 case check_texture_addr_mode:
496 cur_seq->unfinished = tex_address;
497 if (2 == (tmp = cmd & 0x00000003)) {
498 DRM_ERROR
499 ("Attempt to fetch texture from system memory.\n");
500 return 2;
501 }
502 cur_seq->agp_texture = (tmp == 3);
503 cur_seq->tex_palette_size[cur_seq->texture] =
504 (cmd >> 16) & 0x000000007;
505 return 0;
506 case check_for_vertex_count:
507 cur_seq->vertex_count = cmd & 0x0000FFFF;
508 return 0;
509 case check_number_texunits:
510 cur_seq->multitex = (cmd >> 3) & 1;
511 return 0;
512 default:
513 DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
514 return 2;
515 }
/* Reached only via the "break" error paths above. */
516 return 2;
517}
518
/*
 * Validate a HC_ParaType_CmdVdata primitive list: pairs of B/A vertex
 * commands followed by vertex dwords and a "fire" terminator. Records
 * each fire-command position in dev_priv->fire_offsets so the parse
 * phase can replay the buffer. Enforces per-vertex dword counts and
 * AGP alignment. Advances *buffer; returns 0 on success, 1 on error.
 */
519static __inline__ int
520via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
521 drm_via_state_t * cur_seq)
522{
523 drm_via_private_t *dev_priv =
524 (drm_via_private_t *) cur_seq->dev->dev_private;
525 uint32_t a_fire, bcmd, dw_count;
526 int ret = 0;
527 int have_fire;
528 const uint32_t *buf = *buffer;
529
530 while (buf < buf_end) {
531 have_fire = 0;
/* Need at least the B and A command dwords. */
532 if ((buf_end - buf) < 2) {
533 DRM_ERROR
534 ("Unexpected termination of primitive list.\n");
535 ret = 1;
536 break;
537 }
/* A non-HCmdB dword ends the primitive list (handled by caller). */
538 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
539 break;
540 bcmd = *buf++;
541 if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
542 DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
543 *buf);
544 ret = 1;
545 break;
546 }
/* Construct the exact fire dword we expect to terminate this list. */
547 a_fire =
548 *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
549 HC_HE3Fire_MASK;
550
551 /*
552 * How many dwords per vertex ?
553 */
554
555 if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
556 DRM_ERROR("Illegal B command vertex data for AGP.\n");
557 ret = 1;
558 break;
559 }
560
/* Each bit in bcmd enables a vertex attribute; texture coords take
 * two dwords when multitexturing is on. */
561 dw_count = 0;
562 if (bcmd & (1 << 7))
563 dw_count += (cur_seq->multitex) ? 2 : 1;
564 if (bcmd & (1 << 8))
565 dw_count += (cur_seq->multitex) ? 2 : 1;
566 if (bcmd & (1 << 9))
567 dw_count++;
568 if (bcmd & (1 << 10))
569 dw_count++;
570 if (bcmd & (1 << 11))
571 dw_count++;
572 if (bcmd & (1 << 12))
573 dw_count++;
574 if (bcmd & (1 << 13))
575 dw_count++;
576 if (bcmd & (1 << 14))
577 dw_count++;
578
/* Consume vertices until the fire command is found. */
579 while (buf < buf_end) {
580 if (*buf == a_fire) {
581 if (dev_priv->num_fire_offsets >=
582 VIA_FIRE_BUF_SIZE) {
583 DRM_ERROR("Fire offset buffer full.\n");
584 ret = 1;
585 break;
586 }
587 dev_priv->fire_offsets[dev_priv->
588 num_fire_offsets++] =
589 buf;
590 have_fire = 1;
591 buf++;
/* A doubled fire command is tolerated and skipped. */
592 if (buf < buf_end && *buf == a_fire)
593 buf++;
594 break;
595 }
596 if ((*buf == HALCYON_HEADER2) ||
597 ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
598 DRM_ERROR("Missing Vertex Fire command, "
599 "Stray Vertex Fire command or verifier "
600 "lost sync.\n");
601 ret = 1;
602 break;
603 }
604 if ((ret = eat_words(&buf, buf_end, dw_count)))
605 break;
606 }
607 if (buf >= buf_end && !have_fire) {
608 DRM_ERROR("Missing Vertex Fire command or verifier "
609 "lost sync.\n");
610 ret = 1;
611 break;
612 }
/* AGP DMA requires qword (2-dword) alignment at list end. */
613 if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
614 DRM_ERROR("AGP Primitive list end misaligned.\n");
615 ret = 1;
616 break;
617 }
618 }
619 *buffer = buf;
620 return ret;
621}
622
/*
 * Verify a HALCYON_HEADER2 command stream. The dword after the header
 * selects a parameter type which chooses the hazard table (or a
 * special-case handler for vertex data / auto / stipple palettes);
 * subsequent dwords are checked one by one via investigate_hazard().
 * Advances *buffer and returns the next parser state.
 */
623static __inline__ verifier_state_t
624via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
625 drm_via_state_t * hc_state)
626{
627 uint32_t cmd;
628 int hz_mode;
629 hazard_t hz;
630 const uint32_t *buf = *buffer;
631 const hazard_t *hz_table;
632
633 if ((buf_end - buf) < 2) {
634 DRM_ERROR
635 ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
636 return state_error;
637 }
/* Skip the header dword; the sub-command type is in bits 16-31 of
 * the next dword. */
638 buf++;
639 cmd = (*buf++ & 0xFFFF0000) >> 16;
640
641 switch (cmd) {
642 case HC_ParaType_CmdVdata:
643 if (via_check_prim_list(&buf, buf_end, hc_state))
644 return state_error;
645 *buffer = buf;
646 return state_command;
647 case HC_ParaType_NotTex:
648 hz_table = table1;
649 break;
650 case HC_ParaType_Tex:
651 hc_state->texture = 0;
652 hz_table = table2;
653 break;
654 case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
655 hc_state->texture = 1;
656 hz_table = table2;
657 break;
658 case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
659 hz_table = table3;
660 break;
661 case HC_ParaType_Auto:
662 if (eat_words(&buf, buf_end, 2))
663 return state_error;
664 *buffer = buf;
665 return state_command;
666 case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
/* Stipple palettes have a fixed 32-dword payload. */
667 if (eat_words(&buf, buf_end, 32))
668 return state_error;
669 *buffer = buf;
670 return state_command;
671 case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
672 case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
673 DRM_ERROR("Texture palettes are rejected because of "
674 "lack of info how to determine their size.\n");
675 return state_error;
676 case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
677 DRM_ERROR("Fog factor palettes are rejected because of "
678 "lack of info how to determine their size.\n");
679 return state_error;
680 default:
681
682 /*
683 * There are some unimplemented HC_ParaTypes here, that
684 * need to be implemented if the Mesa driver is extended.
685 */
686
687 DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
688 "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
689 cmd, *(buf - 2));
690 *buffer = buf;
691 return state_error;
692 }
693
/* Table-driven payload scan: hazard 0 (no_check) dwords pass through;
 * hz_mode 1 means the dword starts a new header — back up one and
 * return to the command state. */
694 while (buf < buf_end) {
695 cmd = *buf++;
696 if ((hz = hz_table[cmd >> 24])) {
697 if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
698 if (hz_mode == 1) {
699 buf--;
700 break;
701 }
702 return state_error;
703 }
704 } else if (hc_state->unfinished &&
705 finish_current_sequence(hc_state)) {
706 return state_error;
707 }
708 }
709 if (hc_state->unfinished && finish_current_sequence(hc_state)) {
710 return state_error;
711 }
712 *buffer = buf;
713 return state_command;
714}
715
/*
 * Parse phase for a HALCYON_HEADER2 stream: replay the (already
 * verified) commands into the hardware via PCI MMIO writes. For vertex
 * data the previously recorded fire_offsets delimit each burst; other
 * parameter types stream until the next header. Advances *buffer.
 */
716static __inline__ verifier_state_t
717via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
718 const uint32_t * buf_end, int *fire_count)
719{
720 uint32_t cmd;
721 const uint32_t *buf = *buffer;
722 const uint32_t *next_fire;
723 int burst = 0;
724
725 next_fire = dev_priv->fire_offsets[*fire_count];
726 buf++;
727 cmd = (*buf & 0xFFFF0000) >> 16;
/* Program the transfer-set register with the sub-command dword. */
728 VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
729 switch (cmd) {
730 case HC_ParaType_CmdVdata:
/* Write vertex dwords up to and including each recorded fire
 * offset; (burst & 63) cycles through the 64-byte transfer window. */
731 while ((buf < buf_end) &&
732 (*fire_count < dev_priv->num_fire_offsets) &&
733 (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
734 while (buf <= next_fire) {
735 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
736 (burst & 63), *buf++);
737 burst += 4;
738 }
/* Skip the doubled fire command tolerated by the check phase. */
739 if ((buf < buf_end)
740 && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
741 buf++;
742
743 if (++(*fire_count) < dev_priv->num_fire_offsets)
744 next_fire = dev_priv->fire_offsets[*fire_count];
745 }
746 break;
747 default:
/* Stream until any recognised header dword terminates this block. */
748 while (buf < buf_end) {
749
750 if (*buf == HC_HEADER2 ||
751 (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
752 (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
753 (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
754 break;
755
756 VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
757 (burst & 63), *buf++);
758 burst += 4;
759 }
760 }
761 *buffer = buf;
762 return state_command;
763}
764
/*
 * Security check for VIDEO DMA register addresses: reject writes into
 * the 3D/command-burst window (0x400-0xBFF), the PCI DMA window
 * (0xD00-0x12FF) and anything above 0x13FF (VGA registers). Returns
 * 0 if the address is acceptable, 1 otherwise.
 */
765static __inline__ int verify_mmio_address(uint32_t address)
766{
767 if ((address > 0x3FF) && (address < 0xC00)) {
768 DRM_ERROR("Invalid VIDEO DMA command. "
769 "Attempt to access 3D- or command burst area.\n");
770 return 1;
771 } else if ((address > 0xCFF) && (address < 0x1300)) {
772 DRM_ERROR("Invalid VIDEO DMA command. "
773 "Attempt to access PCI DMA area.\n");
774 return 1;
775 } else if (address > 0x13FF) {
776 DRM_ERROR("Invalid VIDEO DMA command. "
777 "Attempt to access VGA registers.\n");
778 return 1;
779 }
780 return 0;
781}
782
/*
 * Consume the zero-padding tail of a video command: exactly `dwords`
 * dwords, all of which must be zero. Advances *buffer on success.
 * Returns 0 if valid, 1 on early termination or a non-zero dword.
 */
783static __inline__ int
784verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
785 uint32_t dwords)
786{
787 const uint32_t *buf = *buffer;
788
789 if (buf_end - buf < dwords) {
790 DRM_ERROR("Illegal termination of video command.\n");
791 return 1;
792 }
793 while (dwords--) {
794 if (*buf++) {
795 DRM_ERROR("Illegal video command tail.\n");
796 return 1;
797 }
798 }
799 *buffer = buf;
800 return 0;
801}
802
803static __inline__ verifier_state_t
804via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
805{
806 uint32_t cmd;
807 const uint32_t *buf = *buffer;
808 verifier_state_t ret = state_command;
809
810 while (buf < buf_end) {
811 cmd = *buf;
812 if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
813 (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
814 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
815 break;
816 DRM_ERROR("Invalid HALCYON_HEADER1 command. "
817 "Attempt to access 3D- or command burst area.\n");
818 ret = state_error;
819 break;
820 } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
821 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
822 break;
823 DRM_ERROR("Invalid HALCYON_HEADER1 command. "
824 "Attempt to access VGA registers.\n");
825 ret = state_error;
826 break;
827 } else {
828 buf += 2;
829 }
830 }
831 *buffer = buf;
832 return ret;
833}
834
835static __inline__ verifier_state_t
836via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
837 const uint32_t * buf_end)
838{
839 register uint32_t cmd;
840 const uint32_t *buf = *buffer;
841
842 while (buf < buf_end) {
843 cmd = *buf;
844 if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
845 break;
846 VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
847 buf++;
848 }
849 *buffer = buf;
850 return state_command;
851}
852
853static __inline__ verifier_state_t
854via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
855{
856 uint32_t data;
857 const uint32_t *buf = *buffer;
858
859 if (buf_end - buf < 4) {
860 DRM_ERROR("Illegal termination of video header5 command\n");
861 return state_error;
862 }
863
864 data = *buf++ & ~VIA_VIDEOMASK;
865 if (verify_mmio_address(data))
866 return state_error;
867
868 data = *buf++;
869 if (*buf++ != 0x00F50000) {
870 DRM_ERROR("Illegal header5 header data\n");
871 return state_error;
872 }
873 if (*buf++ != 0x00000000) {
874 DRM_ERROR("Illegal header5 header data\n");
875 return state_error;
876 }
877 if (eat_words(&buf, buf_end, data))
878 return state_error;
879 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
880 return state_error;
881 *buffer = buf;
882 return state_command;
883
884}
885
886static __inline__ verifier_state_t
887via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
888 const uint32_t * buf_end)
889{
890 uint32_t addr, count, i;
891 const uint32_t *buf = *buffer;
892
893 addr = *buf++ & ~VIA_VIDEOMASK;
894 i = count = *buf;
895 buf += 3;
896 while (i--) {
897 VIA_WRITE(addr, *buf++);
898 }
899 if (count & 3)
900 buf += 4 - (count & 3);
901 *buffer = buf;
902 return state_command;
903}
904
905static __inline__ verifier_state_t
906via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
907{
908 uint32_t data;
909 const uint32_t *buf = *buffer;
910 uint32_t i;
911
912 if (buf_end - buf < 4) {
913 DRM_ERROR("Illegal termination of video header6 command\n");
914 return state_error;
915 }
916 buf++;
917 data = *buf++;
918 if (*buf++ != 0x00F60000) {
919 DRM_ERROR("Illegal header6 header data\n");
920 return state_error;
921 }
922 if (*buf++ != 0x00000000) {
923 DRM_ERROR("Illegal header6 header data\n");
924 return state_error;
925 }
926 if ((buf_end - buf) < (data << 1)) {
927 DRM_ERROR("Illegal termination of video header6 command\n");
928 return state_error;
929 }
930 for (i = 0; i < data; ++i) {
931 if (verify_mmio_address(*buf++))
932 return state_error;
933 buf++;
934 }
935 data <<= 1;
936 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
937 return state_error;
938 *buffer = buf;
939 return state_command;
940}
941
942static __inline__ verifier_state_t
943via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
944 const uint32_t * buf_end)
945{
946
947 uint32_t addr, count, i;
948 const uint32_t *buf = *buffer;
949
950 i = count = *++buf;
951 buf += 3;
952 while (i--) {
953 addr = *buf++;
954 VIA_WRITE(addr, *buf++);
955 }
956 count <<= 1;
957 if (count & 3)
958 buf += 4 - (count & 3);
959 *buffer = buf;
960 return state_command;
961}
962
/*
 * Verify a user-supplied DMA command stream before it is handed to the
 * hardware.  Walks the buffer with a small state machine: the dword at
 * the current position selects a header type, and the matching
 * via_check_* helper validates that command and advances the pointer.
 *
 * Verification updates dev_priv->hc_state incrementally; on any error
 * the previously saved state is restored so a rejected stream leaves
 * the verifier where it was before the call.
 *
 * @buf:  command stream (dword-aligned)
 * @size: stream size in bytes (only whole dwords are examined)
 * @dev:  DRM device
 * @agp:  nonzero if the stream will execute from AGP memory
 *
 * Returns 0 if the stream is acceptable, -EINVAL otherwise.
 */
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
			  struct drm_device * dev, int agp)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_state_t *hc_state = &dev_priv->hc_state;
	/* Snapshot for rollback if verification fails part-way through. */
	drm_via_state_t saved_state = *hc_state;
	uint32_t cmd;
	const uint32_t *buf_end = buf + (size >> 2);
	verifier_state_t state = state_command;
	int cme_video;
	int supported_3d;

	/* Video (HEADER5/6) commands are only accepted on these chipsets. */
	cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
		     dev_priv->chipset == VIA_DX9_0);

	/* 3D (HEADER2) acceleration is not implemented for DX9_0 yet. */
	supported_3d = dev_priv->chipset != VIA_DX9_0;

	hc_state->dev = dev;
	hc_state->unfinished = no_sequence;
	hc_state->map_cache = NULL;
	hc_state->agp = agp;
	hc_state->buf_start = buf;
	dev_priv->num_fire_offsets = 0;

	while (buf < buf_end) {

		switch (state) {
		case state_header2:
			state = via_check_header2(&buf, buf_end, hc_state);
			break;
		case state_header1:
			state = via_check_header1(&buf, buf_end);
			break;
		case state_vheader5:
			state = via_check_vheader5(&buf, buf_end);
			break;
		case state_vheader6:
			state = via_check_vheader6(&buf, buf_end);
			break;
		case state_command:
			/* Classify the next command by its header dword. */
			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
			    supported_3d)
				state = state_header2;
			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
				state = state_header1;
			else if (cme_video
				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
				state = state_vheader5;
			else if (cme_video
				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				state = state_vheader6;
			else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
				DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
				state = state_error;
			} else {
				DRM_ERROR
				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
				     cmd);
				state = state_error;
			}
			break;
		case state_error:
		default:
			/* Roll back so a failed stream has no lasting effect. */
			*hc_state = saved_state;
			return -EINVAL;
		}
	}
	if (state == state_error) {
		*hc_state = saved_state;
		return -EINVAL;
	}
	return 0;
}
1038
1039int
1040via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
1041 unsigned int size)
1042{
1043
1044 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
1045 uint32_t cmd;
1046 const uint32_t *buf_end = buf + (size >> 2);
1047 verifier_state_t state = state_command;
1048 int fire_count = 0;
1049
1050 while (buf < buf_end) {
1051
1052 switch (state) {
1053 case state_header2:
1054 state =
1055 via_parse_header2(dev_priv, &buf, buf_end,
1056 &fire_count);
1057 break;
1058 case state_header1:
1059 state = via_parse_header1(dev_priv, &buf, buf_end);
1060 break;
1061 case state_vheader5:
1062 state = via_parse_vheader5(dev_priv, &buf, buf_end);
1063 break;
1064 case state_vheader6:
1065 state = via_parse_vheader6(dev_priv, &buf, buf_end);
1066 break;
1067 case state_command:
1068 if (HALCYON_HEADER2 == (cmd = *buf))
1069 state = state_header2;
1070 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
1071 state = state_header1;
1072 else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
1073 state = state_vheader5;
1074 else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
1075 state = state_vheader6;
1076 else {
1077 DRM_ERROR
1078 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
1079 cmd);
1080 state = state_error;
1081 }
1082 break;
1083 case state_error:
1084 default:
1085 return -EINVAL;
1086 }
1087 }
1088 if (state == state_error) {
1089 return -EINVAL;
1090 }
1091 return 0;
1092}
1093
1094static void
1095setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
1096{
1097 int i;
1098
1099 for (i = 0; i < 256; ++i) {
1100 table[i] = forbidden_command;
1101 }
1102
1103 for (i = 0; i < size; ++i) {
1104 table[init_table[i].code] = init_table[i].hz;
1105 }
1106}
1107
1108void via_init_command_verifier(void)
1109{
1110 setup_hazard_table(init_table1, table1,
1111 sizeof(init_table1) / sizeof(hz_init_t));
1112 setup_hazard_table(init_table2, table2,
1113 sizeof(init_table2) / sizeof(hz_init_t));
1114 setup_hazard_table(init_table3, table3,
1115 sizeof(init_table3) / sizeof(hz_init_t));
1116}
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
new file mode 100644
index 000000000000..d6f8214b69f5
--- /dev/null
+++ b/drivers/gpu/drm/via/via_verifier.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2004 The Unichrome Project. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Thomas Hellström 2004.
24 */
25
26#ifndef _VIA_VERIFIER_H_
27#define _VIA_VERIFIER_H_
28
/*
 * Kind of multi-dword address sequence the verifier was in the middle
 * of when a command buffer ended (stored in drm_via_state_t.unfinished;
 * no_sequence means nothing is pending).
 * NOTE(review): the specific z/dest/tex meanings are inferred from the
 * identifier names — confirm against the HEADER2 checking code.
 */
typedef enum {
	no_sequence = 0,
	z_address,
	dest_address,
	tex_address
} drm_via_sequence_t;
35
/*
 * Persistent verifier state for the 3D (HC) command stream.  A copy is
 * saved on entry to via_verify_command_stream() and restored if
 * verification fails, so a rejected stream has no lasting effect.
 */
typedef struct {
	unsigned texture;	/* NOTE(review): current texture unit index — confirm */
	uint32_t z_addr;	/* NOTE(review): pending Z-buffer address — confirm */
	uint32_t d_addr;	/* NOTE(review): pending destination address — confirm */
	uint32_t t_addr[2][10];	/* NOTE(review): per-unit/per-level texture addresses — confirm */
	uint32_t pitch[2][10];
	uint32_t height[2][10];
	uint32_t tex_level_lo[2];
	uint32_t tex_level_hi[2];
	uint32_t tex_palette_size[2];
	uint32_t tex_npot[2];
	drm_via_sequence_t unfinished;	/* address sequence cut off at a buffer boundary */
	int agp_texture;
	int multitex;
	struct drm_device *dev;
	drm_local_map_t *map_cache;	/* cached map lookup; reset each verify run */
	uint32_t vertex_count;
	int agp;		/* nonzero when the stream executes from AGP memory */
	const uint32_t *buf_start;	/* start of the buffer being verified */
} drm_via_state_t;
56
57extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
58 struct drm_device * dev, int agp);
59extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
60 unsigned int size);
61
62#endif
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
new file mode 100644
index 000000000000..6ec04ac12459
--- /dev/null
+++ b/drivers/gpu/drm/via/via_video.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Thomas Hellstrom 2005.
24 *
25 * Video and XvMC related functions.
26 */
27
28#include "drmP.h"
29#include "via_drm.h"
30#include "via_drv.h"
31
32void via_init_futex(drm_via_private_t * dev_priv)
33{
34 unsigned int i;
35
36 DRM_DEBUG("\n");
37
38 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
39 DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
40 XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
41 }
42}
43
/* Counterpart to via_init_futex(); nothing needs tearing down at present. */
void via_cleanup_futex(drm_via_private_t * dev_priv)
{
}
47
48void via_release_futex(drm_via_private_t * dev_priv, int context)
49{
50 unsigned int i;
51 volatile int *lock;
52
53 if (!dev_priv->sarea_priv)
54 return;
55
56 for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
57 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
58 if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
59 if (_DRM_LOCK_IS_HELD(*lock)
60 && (*lock & _DRM_LOCK_CONT)) {
61 DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
62 }
63 *lock = 0;
64 }
65 }
66}
67
68int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
69{
70 drm_via_futex_t *fx = data;
71 volatile int *lock;
72 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
73 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
74 int ret = 0;
75
76 DRM_DEBUG("\n");
77
78 if (fx->lock > VIA_NR_XVMC_LOCKS)
79 return -EFAULT;
80
81 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
82
83 switch (fx->func) {
84 case VIA_FUTEX_WAIT:
85 DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
86 (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
87 return ret;
88 case VIA_FUTEX_WAKE:
89 DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
90 return 0;
91 }
92 return 0;
93}