aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/char/agp
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/char/agp
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/char/agp')
-rw-r--r--drivers/char/agp/Kconfig171
-rw-r--r--drivers/char/agp/Makefile18
-rw-r--r--drivers/char/agp/agp.h331
-rw-r--r--drivers/char/agp/ali-agp.c414
-rw-r--r--drivers/char/agp/alpha-agp.c216
-rw-r--r--drivers/char/agp/amd-k7-agp.c542
-rw-r--r--drivers/char/agp/amd64-agp.c761
-rw-r--r--drivers/char/agp/ati-agp.c548
-rw-r--r--drivers/char/agp/backend.c348
-rw-r--r--drivers/char/agp/efficeon-agp.c463
-rw-r--r--drivers/char/agp/frontend.c1103
-rw-r--r--drivers/char/agp/generic.c1222
-rw-r--r--drivers/char/agp/hp-agp.c552
-rw-r--r--drivers/char/agp/i460-agp.c642
-rw-r--r--drivers/char/agp/intel-agp.c1833
-rw-r--r--drivers/char/agp/isoch.c470
-rw-r--r--drivers/char/agp/nvidia-agp.c424
-rw-r--r--drivers/char/agp/sgi-agp.c331
-rw-r--r--drivers/char/agp/sis-agp.c360
-rw-r--r--drivers/char/agp/sworks-agp.c556
-rw-r--r--drivers/char/agp/uninorth-agp.c647
-rw-r--r--drivers/char/agp/via-agp.c548
22 files changed, 12500 insertions, 0 deletions
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
new file mode 100644
index 000000000000..7f8c1b53b754
--- /dev/null
+++ b/drivers/char/agp/Kconfig
@@ -0,0 +1,171 @@
1config AGP
2 tristate "/dev/agpgart (AGP Support)" if !GART_IOMMU
3 depends on ALPHA || IA64 || PPC || X86
4 default y if GART_IOMMU
5 ---help---
6 AGP (Accelerated Graphics Port) is a bus system mainly used to
7 connect graphics cards to the rest of the system.
8
9 If you have an AGP system and you say Y here, it will be possible to
10 use the AGP features of your 3D rendering video card. This code acts
11 as a sort of "AGP driver" for the motherboard's chipset.
12
13 If you need more texture memory than you can get with the AGP GART
14 (theoretically up to 256 MB, but in practice usually 64 or 128 MB
15 due to kernel allocation issues), you could use PCI accesses
16 and have up to a couple gigs of texture space.
17
18 Note that this is the only means to have XFree4/GLX use
19 write-combining with MTRR support on the AGP bus. Without it, OpenGL
20 direct rendering will be a lot slower but still faster than PIO.
21
22 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
23 use GLX or DRI. If unsure, say N.
24
25 To compile this driver as a module, choose M here: the
26 module will be called agpgart.
27
28config AGP_ALI
29 tristate "ALI chipset support"
30 depends on AGP && X86 && !X86_64
31 ---help---
32 This option gives you AGP support for the GLX component of
33 XFree86 4.x on the following ALi chipsets. The supported chipsets
34	   include M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
35 For the ALi-chipset question, ALi suggests you refer to
36 <http://www.ali.com.tw/eng/support/index.shtml>.
37
38 The M1541 chipset can do AGP 1x and 2x, but note that there is an
39 acknowledged incompatibility with Matrox G200 cards. Due to
40 timing issues, this chipset cannot do AGP 2x with the G200.
41 This is a hardware limitation. AGP 1x seems to be fine, though.
42
43 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
44 use GLX or DRI. If unsure, say N.
45
46config AGP_ATI
47 tristate "ATI chipset support"
48 depends on AGP && X86 && !X86_64
49 ---help---
50 This option gives you AGP support for the GLX component of
51 XFree86 4.x on the ATI RadeonIGP family of chipsets.
52
53 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
54 use GLX or DRI. If unsure, say N.
55
56config AGP_AMD
57 tristate "AMD Irongate, 761, and 762 chipset support"
58 depends on AGP && X86 && !X86_64
59 help
60 This option gives you AGP support for the GLX component of
61 XFree86 4.x on AMD Irongate, 761, and 762 chipsets.
62
63 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
64 use GLX or DRI. If unsure, say N.
65
66config AGP_AMD64
67 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
68 depends on AGP && X86
69 default y if GART_IOMMU
70 help
71 This option gives you AGP support for the GLX component of
72 XFree86 4.x using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
73 You still need an external AGP bridge like the AMD 8151, VIA
74 K8T400M, SiS755. It may also support other AGP bridges when loaded
75 with agp_try_unsupported=1.
76 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
77	  use GLX or DRI. If unsure, say Y.
78
79config AGP_INTEL
80 tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
81 depends on AGP && X86
82 help
83 This option gives you AGP support for the GLX component of XFree86 4.x
84 on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
85 E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
86 852GM, 855GM, 865G and I915 integrated graphics chipsets.
87
88 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
89 use GLX or DRI, or if you have any Intel integrated graphics
90 chipsets. If unsure, say Y.
91
92config AGP_NVIDIA
93 tristate "NVIDIA nForce/nForce2 chipset support"
94 depends on AGP && X86 && !X86_64
95 help
96 This option gives you AGP support for the GLX component of
97 XFree86 4.x on the following NVIDIA chipsets. The supported chipsets
98	  include nForce and nForce2.
99
100config AGP_SIS
101 tristate "SiS chipset support"
102 depends on AGP && X86 && !X86_64
103 help
104 This option gives you AGP support for the GLX component of
105 XFree86 4.x on Silicon Integrated Systems [SiS] chipsets.
106
107 Note that 5591/5592 AGP chipsets are NOT supported.
108
109 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
110 use GLX or DRI. If unsure, say N.
111
112config AGP_SWORKS
113 tristate "Serverworks LE/HE chipset support"
114 depends on AGP && X86 && !X86_64
115 help
116 Say Y here to support the Serverworks AGP card. See
117 <http://www.serverworks.com/> for product descriptions and images.
118
119config AGP_VIA
120 tristate "VIA chipset support"
121 depends on AGP && X86 && !X86_64
122 help
123 This option gives you AGP support for the GLX component of
124 XFree86 4.x on VIA MVP3/Apollo Pro chipsets.
125
126 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
127 use GLX or DRI. If unsure, say N.
128
129config AGP_I460
130 tristate "Intel 460GX chipset support"
131 depends on AGP && (IA64_DIG || IA64_GENERIC)
132 help
133 This option gives you AGP GART support for the Intel 460GX chipset
134 for IA64 processors.
135
136config AGP_HP_ZX1
137 tristate "HP ZX1 chipset AGP support"
138 depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
139 help
140 This option gives you AGP GART support for the HP ZX1 chipset
141 for IA64 processors.
142
143config AGP_ALPHA_CORE
144 tristate "Alpha AGP support"
145 depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
146 default AGP
147
148config AGP_UNINORTH
149 tristate "Apple UniNorth & U3 AGP support"
150 depends on AGP && PPC_PMAC
151 help
152 This option gives you AGP support for Apple machines with a
153 UniNorth or U3 (Apple G5) bridge.
154
155config AGP_EFFICEON
156 tristate "Transmeta Efficeon support"
157 depends on AGP && X86 && !X86_64
158 help
159 This option gives you AGP support for the Transmeta Efficeon
160 series processors with integrated northbridges.
161
162 You should say Y here if you use XFree86 3.3.6 or 4.x and want to
163 use GLX or DRI. If unsure, say Y.
164
165config AGP_SGI_TIOCA
166 tristate "SGI TIO chipset AGP support"
167 depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
168 help
169 This option gives you AGP GART support for the SGI TIO chipset
170 for IA64 processors.
171
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
new file mode 100644
index 000000000000..d33a22f2fa0b
--- /dev/null
+++ b/drivers/char/agp/Makefile
@@ -0,0 +1,18 @@
1agpgart-y := backend.o frontend.o generic.o isoch.o
2
3obj-$(CONFIG_AGP) += agpgart.o
4obj-$(CONFIG_AGP_ALI) += ali-agp.o
5obj-$(CONFIG_AGP_ATI) += ati-agp.o
6obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
7obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
8obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
9obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
10obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
11obj-$(CONFIG_AGP_I460) += i460-agp.o
12obj-$(CONFIG_AGP_INTEL) += intel-agp.o
13obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
14obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
15obj-$(CONFIG_AGP_SIS) += sis-agp.o
16obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
17obj-$(CONFIG_AGP_UNINORTH) += uninorth-agp.o
18obj-$(CONFIG_AGP_VIA) += via-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
new file mode 100644
index 000000000000..ad9c11391d81
--- /dev/null
+++ b/drivers/char/agp/agp.h
@@ -0,0 +1,331 @@
1/*
2 * AGPGART
3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2004 Dave Jones
5 * Copyright (C) 1999 Jeff Hartmann
6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#ifndef _AGP_BACKEND_PRIV_H
30#define _AGP_BACKEND_PRIV_H 1
31
32#include <asm/agp.h> /* for flush_agp_cache() */
33
34#define PFX "agpgart: "
35
36//#define AGP_DEBUG 1
37#ifdef AGP_DEBUG
38#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __FUNCTION__ , ## y)
39#else
40#define DBG(x,y...) do { } while (0)
41#endif
42
43extern struct agp_bridge_data *agp_bridge;
44
45enum aper_size_type {
46 U8_APER_SIZE,
47 U16_APER_SIZE,
48 U32_APER_SIZE,
49 LVL2_APER_SIZE,
50 FIXED_APER_SIZE
51};
52
53struct gatt_mask {
54 unsigned long mask;
55 u32 type;
56 /* totally device specific, for integrated chipsets that
57 * might have different types of memory masks. For other
58 * devices this will probably be ignored */
59};
60
61struct aper_size_info_8 {
62 int size;
63 int num_entries;
64 int page_order;
65 u8 size_value;
66};
67
68struct aper_size_info_16 {
69 int size;
70 int num_entries;
71 int page_order;
72 u16 size_value;
73};
74
75struct aper_size_info_32 {
76 int size;
77 int num_entries;
78 int page_order;
79 u32 size_value;
80};
81
82struct aper_size_info_lvl2 {
83 int size;
84 int num_entries;
85 u32 size_value;
86};
87
88struct aper_size_info_fixed {
89 int size;
90 int num_entries;
91 int page_order;
92};
93
94struct agp_bridge_driver {
95 struct module *owner;
96 void *aperture_sizes;
97 int num_aperture_sizes;
98 enum aper_size_type size_type;
99 int cant_use_aperture;
100 int needs_scratch_page;
101 struct gatt_mask *masks;
102 int (*fetch_size)(void);
103 int (*configure)(void);
104 void (*agp_enable)(struct agp_bridge_data *, u32);
105 void (*cleanup)(void);
106 void (*tlb_flush)(struct agp_memory *);
107 unsigned long (*mask_memory)(struct agp_bridge_data *,
108 unsigned long, int);
109 void (*cache_flush)(void);
110 int (*create_gatt_table)(struct agp_bridge_data *);
111 int (*free_gatt_table)(struct agp_bridge_data *);
112 int (*insert_memory)(struct agp_memory *, off_t, int);
113 int (*remove_memory)(struct agp_memory *, off_t, int);
114 struct agp_memory *(*alloc_by_type) (size_t, int);
115 void (*free_by_type)(struct agp_memory *);
116 void *(*agp_alloc_page)(struct agp_bridge_data *);
117 void (*agp_destroy_page)(void *);
118};
119
120struct agp_bridge_data {
121 struct agp_version *version;
122 struct agp_bridge_driver *driver;
123 struct vm_operations_struct *vm_ops;
124 void *previous_size;
125 void *current_size;
126 void *dev_private_data;
127 struct pci_dev *dev;
128 u32 __iomem *gatt_table;
129 u32 *gatt_table_real;
130 unsigned long scratch_page;
131 unsigned long scratch_page_real;
132 unsigned long gart_bus_addr;
133 unsigned long gatt_bus_addr;
134 u32 mode;
135 enum chipset_type type;
136 unsigned long *key_list;
137 atomic_t current_memory_agp;
138 atomic_t agp_in_use;
139 int max_memory_agp; /* in number of pages */
140 int aperture_size_idx;
141 int capndx;
142 int flags;
143 char major_version;
144 char minor_version;
145 struct list_head list;
146};
147
148#define KB(x) ((x) * 1024)
149#define MB(x) (KB (KB (x)))
150#define GB(x) (MB (KB (x)))
151
152#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
153#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
154#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
155#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
156#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
157#define A_IDX8(bridge) (A_SIZE_8((bridge)->driver->aperture_sizes) + i)
158#define A_IDX16(bridge) (A_SIZE_16((bridge)->driver->aperture_sizes) + i)
159#define A_IDX32(bridge) (A_SIZE_32((bridge)->driver->aperture_sizes) + i)
160#define MAXKEY (4096 * 32)
161
162#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
163
164
165/* Intel registers */
166#define INTEL_APSIZE 0xb4
167#define INTEL_ATTBASE 0xb8
168#define INTEL_AGPCTRL 0xb0
169#define INTEL_NBXCFG 0x50
170#define INTEL_ERRSTS 0x91
171
172/* Intel i830 registers */
173#define I830_GMCH_CTRL 0x52
174#define I830_GMCH_ENABLED 0x4
175#define I830_GMCH_MEM_MASK 0x1
176#define I830_GMCH_MEM_64M 0x1
177#define I830_GMCH_MEM_128M 0
178#define I830_GMCH_GMS_MASK 0x70
179#define I830_GMCH_GMS_DISABLED 0x00
180#define I830_GMCH_GMS_LOCAL 0x10
181#define I830_GMCH_GMS_STOLEN_512 0x20
182#define I830_GMCH_GMS_STOLEN_1024 0x30
183#define I830_GMCH_GMS_STOLEN_8192 0x40
184#define I830_RDRAM_CHANNEL_TYPE 0x03010
185#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
186#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
187
188/* This one is for I830MP w. an external graphic card */
189#define INTEL_I830_ERRSTS 0x92
190
191/* Intel 855GM/852GM registers */
192#define I855_GMCH_GMS_STOLEN_0M 0x0
193#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
194#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
195#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
196#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
197#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
198#define I85X_CAPID 0x44
199#define I85X_VARIANT_MASK 0x7
200#define I85X_VARIANT_SHIFT 5
201#define I855_GME 0x0
202#define I855_GM 0x4
203#define I852_GME 0x2
204#define I852_GM 0x5
205
206/* Intel i845 registers */
207#define INTEL_I845_AGPM 0x51
208#define INTEL_I845_ERRSTS 0xc8
209
210/* Intel i860 registers */
211#define INTEL_I860_MCHCFG 0x50
212#define INTEL_I860_ERRSTS 0xc8
213
214/* Intel i810 registers */
215#define I810_GMADDR 0x10
216#define I810_MMADDR 0x14
217#define I810_PTE_BASE 0x10000
218#define I810_PTE_MAIN_UNCACHED 0x00000000
219#define I810_PTE_LOCAL 0x00000002
220#define I810_PTE_VALID 0x00000001
221#define I810_SMRAM_MISCC 0x70
222#define I810_GFX_MEM_WIN_SIZE 0x00010000
223#define I810_GFX_MEM_WIN_32M 0x00010000
224#define I810_GMS 0x000000c0
225#define I810_GMS_DISABLE 0x00000000
226#define I810_PGETBL_CTL 0x2020
227#define I810_PGETBL_ENABLED 0x00000001
228#define I810_DRAM_CTL 0x3000
229#define I810_DRAM_ROW_0 0x00000001
230#define I810_DRAM_ROW_0_SDRAM 0x00000001
231
232struct agp_device_ids {
233 unsigned short device_id; /* first, to make table easier to read */
234 enum chipset_type chipset;
235 const char *chipset_name;
236 int (*chipset_setup) (struct pci_dev *pdev); /* used to override generic */
237};
238
239/* Driver registration */
240struct agp_bridge_data *agp_alloc_bridge(void);
241void agp_put_bridge(struct agp_bridge_data *bridge);
242int agp_add_bridge(struct agp_bridge_data *bridge);
243void agp_remove_bridge(struct agp_bridge_data *bridge);
244
245/* Frontend routines. */
246int agp_frontend_initialize(void);
247void agp_frontend_cleanup(void);
248
249/* Generic routines. */
250void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
251int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
252int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
253struct agp_memory *agp_create_memory(int scratch_pages);
254int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
255int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
256struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
257void agp_generic_free_by_type(struct agp_memory *curr);
258void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
259void agp_generic_destroy_page(void *addr);
260void agp_free_key(int key);
261int agp_num_entries(void);
262u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
263void agp_device_command(u32 command, int agp_v3);
264int agp_3_5_enable(struct agp_bridge_data *bridge);
265void global_cache_flush(void);
266void get_agp_version(struct agp_bridge_data *bridge);
267unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
268 unsigned long addr, int type);
269struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
270
271/* generic routines for agp>=3 */
272int agp3_generic_fetch_size(void);
273void agp3_generic_tlbflush(struct agp_memory *mem);
274int agp3_generic_configure(void);
275void agp3_generic_cleanup(void);
276
277/* aperture sizes have been standardised since v3 */
278#define AGP_GENERIC_SIZES_ENTRIES 11
279extern struct aper_size_info_16 agp3_generic_sizes[];
280
281
282extern int agp_off;
283extern int agp_try_unsupported_boot;
284
285/* Chipset independent registers (from AGP Spec) */
286#define AGP_APBASE 0x10
287
288#define AGPSTAT 0x4
289#define AGPCMD 0x8
290#define AGPNISTAT 0xc
291#define AGPCTRL 0x10
292#define AGPAPSIZE 0x14
293#define AGPNEPG 0x16
294#define AGPGARTLO 0x18
295#define AGPGARTHI 0x1c
296#define AGPNICMD 0x20
297
298#define AGP_MAJOR_VERSION_SHIFT (20)
299#define AGP_MINOR_VERSION_SHIFT (16)
300
301#define AGPSTAT_RQ_DEPTH (0xff000000)
302#define AGPSTAT_RQ_DEPTH_SHIFT 24
303
304#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
305#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
306#define AGPSTAT_ARQSZ_SHIFT 13
307
308#define AGPSTAT_SBA (1<<9)
309#define AGPSTAT_AGP_ENABLE (1<<8)
310#define AGPSTAT_FW (1<<4)
311#define AGPSTAT_MODE_3_0 (1<<3)
312
313#define AGPSTAT2_1X (1<<0)
314#define AGPSTAT2_2X (1<<1)
315#define AGPSTAT2_4X (1<<2)
316
317#define AGPSTAT3_RSVD (1<<2)
318#define AGPSTAT3_8X (1<<1)
319#define AGPSTAT3_4X (1)
320
321#define AGPCTRL_APERENB (1<<8)
322#define AGPCTRL_GTLBEN (1<<7)
323
324#define AGP2_RESERVED_MASK 0x00fffcc8
325#define AGP3_RESERVED_MASK 0x00ff00c4
326
327#define AGP_ERRATA_FASTWRITES 1<<0
328#define AGP_ERRATA_SBA 1<<1
329#define AGP_ERRATA_1X 1<<2
330
331#endif /* _AGP_BACKEND_PRIV_H */
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
new file mode 100644
index 000000000000..c86a22c5499b
--- /dev/null
+++ b/drivers/char/agp/ali-agp.c
@@ -0,0 +1,414 @@
1/*
2 * ALi AGPGART routines.
3 */
4
5#include <linux/types.h>
6#include <linux/module.h>
7#include <linux/pci.h>
8#include <linux/init.h>
9#include <linux/agp_backend.h>
10#include "agp.h"
11
12#define ALI_AGPCTRL 0xb8
13#define ALI_ATTBASE 0xbc
14#define ALI_TLBCTRL 0xc0
15#define ALI_TAGCTRL 0xc4
16#define ALI_CACHE_FLUSH_CTRL 0xD0
17#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
18#define ALI_CACHE_FLUSH_EN 0x100
19
20static int ali_fetch_size(void)
21{
22 int i;
23 u32 temp;
24 struct aper_size_info_32 *values;
25
26 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
27 temp &= ~(0xfffffff0);
28 values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
29
30 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
31 if (temp == values[i].size_value) {
32 agp_bridge->previous_size =
33 agp_bridge->current_size = (void *) (values + i);
34 agp_bridge->aperture_size_idx = i;
35 return values[i].size;
36 }
37 }
38
39 return 0;
40}
41
42static void ali_tlbflush(struct agp_memory *mem)
43{
44 u32 temp;
45
46 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
47 temp &= 0xfffffff0;
48 temp |= (1<<0 | 1<<1);
49 pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
50}
51
52static void ali_cleanup(void)
53{
54 struct aper_size_info_32 *previous_size;
55 u32 temp;
56
57 previous_size = A_SIZE_32(agp_bridge->previous_size);
58
59 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
60// clear tag
61 pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
62 ((temp & 0xffffff00) | 0x00000001|0x00000002));
63
64 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
65 pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
66 ((temp & 0x00000ff0) | previous_size->size_value));
67}
68
69static int ali_configure(void)
70{
71 u32 temp;
72 struct aper_size_info_32 *current_size;
73
74 current_size = A_SIZE_32(agp_bridge->current_size);
75
76 /* aperture size and gatt addr */
77 pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
78 temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
79 | (current_size->size_value & 0xf));
80 pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
81
82 /* tlb control */
83 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
84 pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
85
86 /* address to map to */
87 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
88 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
89
90#if 0
91 if (agp_bridge->type == ALI_M1541) {
92 u32 nlvm_addr = 0;
93
94 switch (current_size->size_value) {
95 case 0: break;
96 case 1: nlvm_addr = 0x100000;break;
97 case 2: nlvm_addr = 0x200000;break;
98 case 3: nlvm_addr = 0x400000;break;
99 case 4: nlvm_addr = 0x800000;break;
100 case 6: nlvm_addr = 0x1000000;break;
101 case 7: nlvm_addr = 0x2000000;break;
102 case 8: nlvm_addr = 0x4000000;break;
103 case 9: nlvm_addr = 0x8000000;break;
104 case 10: nlvm_addr = 0x10000000;break;
105 default: break;
106 }
107 nlvm_addr--;
108 nlvm_addr&=0xfff00000;
109
110 nlvm_addr+= agp_bridge->gart_bus_addr;
111 nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
112 printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr);
113 }
114#endif
115
116 pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
117 temp &= 0xffffff7f; //enable TLB
118 pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
119
120 return 0;
121}
122
123
124static void m1541_cache_flush(void)
125{
126 int i, page_count;
127 u32 temp;
128
129 global_cache_flush();
130
131 page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
132 for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
133 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
134 &temp);
135 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
136 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
137 (agp_bridge->gatt_bus_addr + i)) |
138 ALI_CACHE_FLUSH_EN));
139 }
140}
141
142static void *m1541_alloc_page(struct agp_bridge_data *bridge)
143{
144 void *addr = agp_generic_alloc_page(agp_bridge);
145 u32 temp;
146
147 if (!addr)
148 return NULL;
149
150 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
151 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
152 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
153 virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
154 return addr;
155}
156
157static void ali_destroy_page(void * addr)
158{
159 if (addr) {
160 global_cache_flush(); /* is this really needed? --hch */
161 agp_generic_destroy_page(addr);
162 }
163}
164
165static void m1541_destroy_page(void * addr)
166{
167 u32 temp;
168
169 if (addr == NULL)
170 return;
171
172 global_cache_flush();
173
174 pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
175 pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
176 (((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
177 virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
178 agp_generic_destroy_page(addr);
179}
180
181
182/* Setup function */
183
184static struct aper_size_info_32 ali_generic_sizes[7] =
185{
186 {256, 65536, 6, 10},
187 {128, 32768, 5, 9},
188 {64, 16384, 4, 8},
189 {32, 8192, 3, 7},
190 {16, 4096, 2, 6},
191 {8, 2048, 1, 4},
192 {4, 1024, 0, 3}
193};
194
195struct agp_bridge_driver ali_generic_bridge = {
196 .owner = THIS_MODULE,
197 .aperture_sizes = ali_generic_sizes,
198 .size_type = U32_APER_SIZE,
199 .num_aperture_sizes = 7,
200 .configure = ali_configure,
201 .fetch_size = ali_fetch_size,
202 .cleanup = ali_cleanup,
203 .tlb_flush = ali_tlbflush,
204 .mask_memory = agp_generic_mask_memory,
205 .masks = NULL,
206 .agp_enable = agp_generic_enable,
207 .cache_flush = global_cache_flush,
208 .create_gatt_table = agp_generic_create_gatt_table,
209 .free_gatt_table = agp_generic_free_gatt_table,
210 .insert_memory = agp_generic_insert_memory,
211 .remove_memory = agp_generic_remove_memory,
212 .alloc_by_type = agp_generic_alloc_by_type,
213 .free_by_type = agp_generic_free_by_type,
214 .agp_alloc_page = agp_generic_alloc_page,
215 .agp_destroy_page = ali_destroy_page,
216};
217
218struct agp_bridge_driver ali_m1541_bridge = {
219 .owner = THIS_MODULE,
220 .aperture_sizes = ali_generic_sizes,
221 .size_type = U32_APER_SIZE,
222 .num_aperture_sizes = 7,
223 .configure = ali_configure,
224 .fetch_size = ali_fetch_size,
225 .cleanup = ali_cleanup,
226 .tlb_flush = ali_tlbflush,
227 .mask_memory = agp_generic_mask_memory,
228 .masks = NULL,
229 .agp_enable = agp_generic_enable,
230 .cache_flush = m1541_cache_flush,
231 .create_gatt_table = agp_generic_create_gatt_table,
232 .free_gatt_table = agp_generic_free_gatt_table,
233 .insert_memory = agp_generic_insert_memory,
234 .remove_memory = agp_generic_remove_memory,
235 .alloc_by_type = agp_generic_alloc_by_type,
236 .free_by_type = agp_generic_free_by_type,
237 .agp_alloc_page = m1541_alloc_page,
238 .agp_destroy_page = m1541_destroy_page,
239};
240
241
242static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
243{
244 {
245 .device_id = PCI_DEVICE_ID_AL_M1541,
246 .chipset_name = "M1541",
247 },
248 {
249 .device_id = PCI_DEVICE_ID_AL_M1621,
250 .chipset_name = "M1621",
251 },
252 {
253 .device_id = PCI_DEVICE_ID_AL_M1631,
254 .chipset_name = "M1631",
255 },
256 {
257 .device_id = PCI_DEVICE_ID_AL_M1632,
258 .chipset_name = "M1632",
259 },
260 {
261 .device_id = PCI_DEVICE_ID_AL_M1641,
262 .chipset_name = "M1641",
263 },
264 {
265 .device_id = PCI_DEVICE_ID_AL_M1644,
266 .chipset_name = "M1644",
267 },
268 {
269 .device_id = PCI_DEVICE_ID_AL_M1647,
270 .chipset_name = "M1647",
271 },
272 {
273 .device_id = PCI_DEVICE_ID_AL_M1651,
274 .chipset_name = "M1651",
275 },
276 {
277 .device_id = PCI_DEVICE_ID_AL_M1671,
278 .chipset_name = "M1671",
279 },
280 {
281 .device_id = PCI_DEVICE_ID_AL_M1681,
282 .chipset_name = "M1681",
283 },
284 {
285 .device_id = PCI_DEVICE_ID_AL_M1683,
286 .chipset_name = "M1683",
287 },
288
289 { }, /* dummy final entry, always present */
290};
291
292static int __devinit agp_ali_probe(struct pci_dev *pdev,
293 const struct pci_device_id *ent)
294{
295 struct agp_device_ids *devs = ali_agp_device_ids;
296 struct agp_bridge_data *bridge;
297 u8 hidden_1621_id, cap_ptr;
298 int j;
299
300 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
301 if (!cap_ptr)
302 return -ENODEV;
303
304 /* probe for known chipsets */
305 for (j = 0; devs[j].chipset_name; j++) {
306 if (pdev->device == devs[j].device_id)
307 goto found;
308 }
309
310 printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n",
311 pdev->device);
312 return -ENODEV;
313
314
315found:
316 bridge = agp_alloc_bridge();
317 if (!bridge)
318 return -ENOMEM;
319
320 bridge->dev = pdev;
321 bridge->capndx = cap_ptr;
322
323 switch (pdev->device) {
324 case PCI_DEVICE_ID_AL_M1541:
325 bridge->driver = &ali_m1541_bridge;
326 break;
327 case PCI_DEVICE_ID_AL_M1621:
328 pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
329 switch (hidden_1621_id) {
330 case 0x31:
331 devs[j].chipset_name = "M1631";
332 break;
333 case 0x32:
334 devs[j].chipset_name = "M1632";
335 break;
336 case 0x41:
337 devs[j].chipset_name = "M1641";
338 break;
339 case 0x43:
340 devs[j].chipset_name = "M????";
341 break;
342 case 0x47:
343 devs[j].chipset_name = "M1647";
344 break;
345 case 0x51:
346 devs[j].chipset_name = "M1651";
347 break;
348 default:
349 break;
350 }
351 /*FALLTHROUGH*/
352 default:
353 bridge->driver = &ali_generic_bridge;
354 }
355
356 printk(KERN_INFO PFX "Detected ALi %s chipset\n",
357 devs[j].chipset_name);
358
359 /* Fill in the mode register */
360 pci_read_config_dword(pdev,
361 bridge->capndx+PCI_AGP_STATUS,
362 &bridge->mode);
363
364 pci_set_drvdata(pdev, bridge);
365 return agp_add_bridge(bridge);
366}
367
368static void __devexit agp_ali_remove(struct pci_dev *pdev)
369{
370 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
371
372 agp_remove_bridge(bridge);
373 agp_put_bridge(bridge);
374}
375
376static struct pci_device_id agp_ali_pci_table[] = {
377 {
378 .class = (PCI_CLASS_BRIDGE_HOST << 8),
379 .class_mask = ~0,
380 .vendor = PCI_VENDOR_ID_AL,
381 .device = PCI_ANY_ID,
382 .subvendor = PCI_ANY_ID,
383 .subdevice = PCI_ANY_ID,
384 },
385 { }
386};
387
388MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
389
390static struct pci_driver agp_ali_pci_driver = {
391 .name = "agpgart-ali",
392 .id_table = agp_ali_pci_table,
393 .probe = agp_ali_probe,
394 .remove = agp_ali_remove,
395};
396
397static int __init agp_ali_init(void)
398{
399 if (agp_off)
400 return -EINVAL;
401 return pci_register_driver(&agp_ali_pci_driver);
402}
403
404static void __exit agp_ali_cleanup(void)
405{
406 pci_unregister_driver(&agp_ali_pci_driver);
407}
408
409module_init(agp_ali_init);
410module_exit(agp_ali_cleanup);
411
412MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
413MODULE_LICENSE("GPL and additional rights");
414
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
new file mode 100644
index 000000000000..a072d32005a4
--- /dev/null
+++ b/drivers/char/agp/alpha-agp.c
@@ -0,0 +1,216 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/machvec.h>
#include <asm/agp_backend.h>
#include "../../../arch/alpha/kernel/pci_impl.h"

#include "agp.h"
13
/*
 * ->nopage handler for user mappings of the AGP aperture.  Converts the
 * faulting address into an aperture bus address, asks the chipset code
 * for the backing physical address, and returns the page with its
 * refcount raised.  Returning NULL signals "no translation" (SIGBUS).
 */
static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
					     unsigned long address,
					     int *type)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	dma_addr_t dma_addr;
	unsigned long pa;
	struct page *page;

	/* offset into the VMA == offset into the aperture */
	dma_addr = address - vma->vm_start + agp->aperture.bus_base;
	pa = agp->ops->translate(agp, dma_addr);

	/* translate() reports an unmapped slot as -EINVAL cast to unsigned long */
	if (pa == (unsigned long)-EINVAL)
		return NULL;	/* no translation */

	/*
	 * Get the page, inc the use count, and return it
	 */
	page = virt_to_page(__va(pa));
	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
37
/* Single fixed aperture-size slot; the real values are computed at boot
 * by alpha_core_agp_setup() from the machine-vector supplied info. */
static struct aper_size_info_fixed alpha_core_agp_sizes[] =
{
	{ 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
};

/* The CPU cannot access the aperture directly (cant_use_aperture is set
 * in alpha_core_agp_driver), so mmap()s fault through the nopage hook. */
struct vm_operations_struct alpha_core_agp_vm_ops = {
	.nopage = alpha_core_agp_vm_nopage,
};
46
47
/* GATT create/free hook: the table is owned by the platform chipset
 * code (agp->ops), so the generic layer's hooks are success no-ops. */
static int alpha_core_agp_nop(void)
{
	/* just return success */
	return 0;
}

/* Aperture size in MB, as recorded by alpha_core_agp_setup(). */
static int alpha_core_agp_fetch_size(void)
{
	return alpha_core_agp_sizes[0].size;
}

/* Publish the aperture bus base to the generic layer; the chipset
 * itself was already programmed by agp->ops->setup(). */
static int alpha_core_agp_configure(void)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	agp_bridge->gart_bus_addr = agp->aperture.bus_base;
	return 0;
}

/* Delegate teardown to the chipset-specific ops. */
static void alpha_core_agp_cleanup(void)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;

	agp->ops->cleanup(agp);
}

/* Invalidate the hose's PCI translation TLB via the machine vector.
 * 'mem' is unused: the (0, -1) arguments request a full flush. */
static void alpha_core_agp_tlbflush(struct agp_memory *mem)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
}
78
/*
 * agp_enable hook: merge the requested mode with the capabilities
 * collected from all AGP devices, set the enable bit, program the
 * chipset, then broadcast the final mode with agp_device_command().
 */
static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	alpha_agp_info *agp = bridge->dev_private_data;

	agp->mode.lw = agp_collect_device_status(bridge, mode,
					agp->capability.lw);

	agp->mode.bits.enable = 1;
	agp->ops->configure(agp);

	agp_device_command(agp->mode.lw, 0);
}
91
/*
 * Bind mem's pages at aperture page offset pg_start, then flush the TLB.
 * NOTE(review): 'type' is accepted but never validated here — presumably
 * agp->ops->bind() checks it; confirm against the platform implementations.
 */
static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
					int type)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	int num_entries, status;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;
	/* reject ranges that would run past the end of the table */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	status = agp->ops->bind(agp, pg_start, mem);
	mb();	/* make the table writes visible before invalidating the TLB */
	alpha_core_agp_tlbflush(mem);

	return status;
}
109
110static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
111 int type)
112{
113 alpha_agp_info *agp = agp_bridge->dev_private_data;
114 int status;
115
116 status = agp->ops->unbind(agp, pg_start, mem);
117 alpha_core_agp_tlbflush(mem);
118 return status;
119}
120
/*
 * Hook table for the Alpha core-logic GART.  cant_use_aperture is set
 * because the CPU cannot address the aperture directly; the GATT belongs
 * to the platform code, hence the nop create/free hooks and NULL masks.
 */
struct agp_bridge_driver alpha_core_agp_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= alpha_core_agp_sizes,
	.num_aperture_sizes	= 1,
	.size_type		= FIXED_APER_SIZE,
	.cant_use_aperture	= 1,
	.masks			= NULL,

	.fetch_size		= alpha_core_agp_fetch_size,
	.configure		= alpha_core_agp_configure,
	.agp_enable		= alpha_core_agp_enable,
	.cleanup		= alpha_core_agp_cleanup,
	.tlb_flush		= alpha_core_agp_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= alpha_core_agp_nop,
	.free_gatt_table	= alpha_core_agp_nop,
	.insert_memory		= alpha_core_agp_insert_memory,
	.remove_memory		= alpha_core_agp_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};

/* The single bridge instance registered by alpha_core_agp_setup(). */
struct agp_bridge_data *alpha_bridge;
147
148int __init
149alpha_core_agp_setup(void)
150{
151 alpha_agp_info *agp = alpha_mv.agp_info();
152 struct pci_dev *pdev; /* faked */
153 struct aper_size_info_fixed *aper_size;
154
155 if (!agp)
156 return -ENODEV;
157 if (agp->ops->setup(agp))
158 return -ENODEV;
159
160 /*
161 * Build the aperture size descriptor
162 */
163 aper_size = alpha_core_agp_sizes;
164 aper_size->size = agp->aperture.size / (1024 * 1024);
165 aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
166 aper_size->page_order = __ffs(aper_size->num_entries / 1024);
167
168 /*
169 * Build a fake pci_dev struct
170 */
171 pdev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
172 if (!pdev)
173 return -ENOMEM;
174 pdev->vendor = 0xffff;
175 pdev->device = 0xffff;
176 pdev->sysdata = agp->hose;
177
178 alpha_bridge = agp_alloc_bridge();
179 if (!alpha_bridge)
180 goto fail;
181
182 alpha_bridge->driver = &alpha_core_agp_driver;
183 alpha_bridge->vm_ops = &alpha_core_agp_vm_ops;
184 alpha_bridge->current_size = aper_size; /* only 1 size */
185 alpha_bridge->dev_private_data = agp;
186 alpha_bridge->dev = pdev;
187 alpha_bridge->mode = agp->capability.lw;
188
189 printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index);
190 return agp_add_bridge(alpha_bridge);
191
192 fail:
193 kfree(pdev);
194 return -ENOMEM;
195}
196
/* Module init: attach only when the platform machine vector exposes AGP. */
static int __init agp_alpha_core_init(void)
{
	if (agp_off)
		return -EINVAL;
	if (alpha_mv.agp_info)
		return alpha_core_agp_setup();
	return -ENODEV;
}

/* Module exit: unregister and release the single bridge instance. */
static void __exit agp_alpha_core_cleanup(void)
{
	agp_remove_bridge(alpha_bridge);
	agp_put_bridge(alpha_bridge);
}

module_init(agp_alpha_core_init);
module_exit(agp_alpha_core_cleanup);

MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
new file mode 100644
index 000000000000..f1ea87ea6b65
--- /dev/null
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -0,0 +1,542 @@
1/*
2 * AMD K7 AGPGART routines.
3 */
4
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/init.h>
8#include <linux/agp_backend.h>
9#include <linux/gfp.h>
10#include <linux/page-flags.h>
11#include <linux/mm.h>
12#include "agp.h"
13
/* PCI config-space registers */
#define AMD_MMBASE	0x14	/* BAR holding the memory-mapped GART regs */
#define AMD_APSIZE	0xac	/* aperture size / GART enable */
#define AMD_MODECNTL	0xb0	/* sync control (written 0x80 at configure) */
#define AMD_MODECNTL2	0xb2	/* indexing mode */
/* Offsets within the MMBASE mmio region */
#define AMD_GARTENABLE	0x02	/* In mmio region (16-bit register) */
#define AMD_ATTBASE	0x04	/* In mmio region (32-bit register) */
#define AMD_TLBFLUSH	0x0c	/* In mmio region (32-bit register) */
#define AMD_CACHEENTRY	0x10	/* In mmio region (32-bit register) */

/* forward declaration: probe() derives its name-table index from 'ent' */
static struct pci_device_id agp_amdk7_pci_table[];

/* One GATT page: its kernel-virtual address plus an uncached
 * (ioremap_nocache) alias through which all PTE accesses are done. */
struct amd_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

/* Driver-global chipset state (one Irongate per system). */
static struct _amd_irongate_private {
	volatile u8 __iomem *registers;		/* mmio regs at AMD_MMBASE */
	struct amd_page_map **gatt_pages;	/* second-level GATT pages */
	int num_tables;				/* entries in gatt_pages */
} amd_irongate_private;
35
/*
 * Allocate one GATT page: grab a page, mark it reserved, create an
 * uncached mmio alias to it, and fill every entry with the scratch
 * page so unbound slots never point at random memory.
 * Returns 0 or -ENOMEM.
 */
static int amd_create_page_map(struct amd_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL)
		return -ENOMEM;

	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	/* uncached alias: the GART reads this table while the CPU writes it */
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	/* point every slot at the scratch page */
	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		writel(agp_bridge->scratch_page, page_map->remapped+i);
		readl(page_map->remapped+i);	/* PCI Posting. */
	}

	return 0;
}
63
64static void amd_free_page_map(struct amd_page_map *page_map)
65{
66 iounmap(page_map->remapped);
67 ClearPageReserved(virt_to_page(page_map->real));
68 free_page((unsigned long) page_map->real);
69}
70
71static void amd_free_gatt_pages(void)
72{
73 int i;
74 struct amd_page_map **tables;
75 struct amd_page_map *entry;
76
77 tables = amd_irongate_private.gatt_pages;
78 for (i = 0; i < amd_irongate_private.num_tables; i++) {
79 entry = tables[i];
80 if (entry != NULL) {
81 if (entry->real != NULL)
82 amd_free_page_map(entry);
83 kfree(entry);
84 }
85 }
86 kfree(tables);
87 amd_irongate_private.gatt_pages = NULL;
88}
89
/*
 * Allocate nr_tables second-level GATT pages (plus a NULL terminator
 * slot).  On any failure the partial allocation is torn down and
 * -ENOMEM is returned.
 */
static int amd_create_gatt_pages(int nr_tables)
{
	struct amd_page_map **tables;
	struct amd_page_map *entry;
	int retval = 0;
	int i;

	tables = kmalloc((nr_tables + 1) * sizeof(struct amd_page_map *),
			 GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	memset (tables, 0, sizeof(struct amd_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(struct amd_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset (entry, 0, sizeof(struct amd_page_map));
		tables[i] = entry;
		retval = amd_create_page_map(entry);
		if (retval != 0)
			break;
	}
	/* publish before the error check: amd_free_gatt_pages() walks
	 * these globals to clean up a partial allocation */
	amd_irongate_private.num_tables = nr_tables;
	amd_irongate_private.gatt_pages = tables;

	if (retval != 0)
		amd_free_gatt_pages();

	return retval;
}

/* Since we don't need contigious memory we just try
 * to get the gatt table once
 */

/* Two-level lookup: each page-directory entry covers 4MB (1024 PTEs). */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
/* PTE index within one GATT page */
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
/* mmio alias of the GATT page backing 'addr' */
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
134
/*
 * Build the two-level GATT: one page directory plus one GATT page per
 * 1024 entries, and link each directory slot to its page (low bit set,
 * matching the valid bit in amd_irongate_masks).
 */
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct amd_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = amd_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	retval = amd_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		amd_free_page_map(&page_dir);
		return retval;
	}

	/* the directory page doubles as the bridge's "gatt table" */
	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset: one directory entry per 4MB of aperture */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}
177
178static int amd_free_gatt_table(struct agp_bridge_data *bridge)
179{
180 struct amd_page_map page_dir;
181
182 page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
183 page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
184
185 amd_free_gatt_pages();
186 amd_free_page_map(&page_dir);
187 return 0;
188}
189
190static int amd_irongate_fetch_size(void)
191{
192 int i;
193 u32 temp;
194 struct aper_size_info_lvl2 *values;
195
196 pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
197 temp = (temp & 0x0000000e);
198 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
199 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
200 if (temp == values[i].size_value) {
201 agp_bridge->previous_size =
202 agp_bridge->current_size = (void *) (values + i);
203
204 agp_bridge->aperture_size_idx = i;
205 return values[i].size;
206 }
207 }
208
209 return 0;
210}
211
/*
 * Program the Irongate: map its mmio registers, install the GATT base,
 * set sync/indexing modes, enable the GART and write the aperture size.
 * NOTE(review): the ioremap() result is not checked — a failure here
 * would fault on the first register write; confirm this is acceptable.
 */
static int amd_irongate_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u16 enable_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, AMD_MMBASE, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);

	/* Write out the address of the gatt table */
	writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
	readl(amd_irongate_private.registers+AMD_ATTBASE);	/* PCI Posting. */

	/* Write the Sync register */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);

	/* Set indexing mode */
	pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);

	/* Write the enable register */
	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg | 0x0004);
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write out the size register */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);

	/* Flush the tlb */
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting.*/
	return 0;
}

/*
 * Undo amd_irongate_configure(): clear the GART enable bit, restore the
 * previous aperture size, and unmap the mmio registers.
 */
static void amd_irongate_cleanup(void)
{
	struct aper_size_info_lvl2 *previous_size;
	u32 temp;
	u16 enable_reg;

	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);

	enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
	enable_reg = (enable_reg & ~(0x0004));
	writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
	readw(amd_irongate_private.registers+AMD_GARTENABLE);	/* PCI Posting. */

	/* Write back the previous size and disable gart translation */
	pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
	temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
	pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
	iounmap((void __iomem *) amd_irongate_private.registers);
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually. However
 * currently it just flushes the whole table. Which is probably
 * more efficent, since agp_memory blocks can be a large number of
 * entries.
 */

static void amd_irongate_tlbflush(struct agp_memory *temp)
{
	writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
	readl(amd_irongate_private.registers+AMD_TLBFLUSH);	/* PCI Posting. */
}
285
/*
 * Bind mem's pages at aperture page pg_start.  Only type 0 is supported.
 * First pass verifies every target slot is empty (scratch page), second
 * pass writes the masked PTEs through the uncached alias; each write is
 * read back to force PCI posting, then the whole TLB is flushed.
 */
static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	/* refuse to overwrite slots that are already bound */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	/* flush CPU caches once per agp_memory block, not per page */
	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		/* agp_generic_mask_memory is also this driver's mask_memory hook */
		writel(agp_generic_mask_memory(agp_bridge,
			mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
	}
	amd_irongate_tlbflush(mem);
	return 0;
}
324
325static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
326{
327 int i;
328 unsigned long __iomem *cur_gatt;
329 unsigned long addr;
330
331 if (type != 0 || mem->type != 0)
332 return -EINVAL;
333
334 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
335 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
336 cur_gatt = GET_GATT(addr);
337 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
338 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
339 }
340
341 amd_irongate_tlbflush(mem);
342 return 0;
343}
344
/* Aperture sizes: {MB, num 4K entries, AMD_APSIZE size-field encoding} */
static struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

/* Bit 0 marks a GATT entry valid (same bit used in the page directory). */
static struct gatt_mask amd_irongate_masks[] =
{
	{.mask = 1, .type = 0}
};

struct agp_bridge_driver amd_irongate_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_irongate_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= amd_irongate_configure,
	.fetch_size		= amd_irongate_fetch_size,
	.cleanup		= amd_irongate_cleanup,
	.tlb_flush		= amd_irongate_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= amd_irongate_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= amd_create_gatt_table,
	.free_gatt_table	= amd_free_gatt_table,
	.insert_memory		= amd_insert_memory,
	.remove_memory		= amd_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};

/* Chipset names; indexed in probe() by position in agp_amdk7_pci_table,
 * so the two tables must stay in the same order. */
static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_7006,
		.chipset_name	= "Irongate",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700E,
		.chipset_name	= "761",
	},
	{
		.device_id	= PCI_DEVICE_ID_AMD_FE_GATE_700C,
		.chipset_name	= "760MP",
	},
	{ }, /* dummy final entry, always present */
};
400
401static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
402 const struct pci_device_id *ent)
403{
404 struct agp_bridge_data *bridge;
405 u8 cap_ptr;
406 int j;
407
408 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
409 if (!cap_ptr)
410 return -ENODEV;
411
412 j = ent - agp_amdk7_pci_table;
413 printk(KERN_INFO PFX "Detected AMD %s chipset\n",
414 amd_agp_device_ids[j].chipset_name);
415
416 bridge = agp_alloc_bridge();
417 if (!bridge)
418 return -ENOMEM;
419
420 bridge->driver = &amd_irongate_driver;
421 bridge->dev_private_data = &amd_irongate_private,
422 bridge->dev = pdev;
423 bridge->capndx = cap_ptr;
424
425 /* 751 Errata (22564_B-1.PDF)
426 erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
427 system controller may experience noise due to strong drive strengths
428 */
429 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
430 u8 cap_ptr=0;
431 struct pci_dev *gfxcard=NULL;
432 while (!cap_ptr) {
433 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
434 if (!gfxcard) {
435 printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
436 return -ENODEV;
437 }
438 cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
439 if (!cap_ptr) {
440 pci_dev_put(gfxcard);
441 continue;
442 }
443 }
444
445 /* With so many variants of NVidia cards, it's simpler just
446 to blacklist them all, and then whitelist them as needed
447 (if necessary at all). */
448 if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
449 agp_bridge->flags |= AGP_ERRATA_1X;
450 printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
451 }
452 pci_dev_put(gfxcard);
453 }
454
455 /* 761 Errata (23613_F.pdf)
456 * Revisions B0/B1 were a disaster.
457 * erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
458 * erratum 45: Timing problem prevents fast writes -- Disable fast write.
459 * erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
460 * With this lot disabled, we should prevent lockups. */
461 if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
462 u8 revision=0;
463 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
464 if (revision == 0x10 || revision == 0x11) {
465 agp_bridge->flags = AGP_ERRATA_FASTWRITES;
466 agp_bridge->flags |= AGP_ERRATA_SBA;
467 agp_bridge->flags |= AGP_ERRATA_1X;
468 printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
469 }
470 }
471
472 /* Fill in the mode register */
473 pci_read_config_dword(pdev,
474 bridge->capndx+PCI_AGP_STATUS,
475 &bridge->mode);
476
477 pci_set_drvdata(pdev, bridge);
478 return agp_add_bridge(bridge);
479}
480
481static void __devexit agp_amdk7_remove(struct pci_dev *pdev)
482{
483 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
484
485 agp_remove_bridge(bridge);
486 agp_put_bridge(bridge);
487}
488
/* must be the same order as name table above (amd_agp_device_ids:
 * probe() indexes that table by the matched entry's position here) */
static struct pci_device_id agp_amdk7_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_7006,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700E,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_FE_GATE_700C,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);

static struct pci_driver agp_amdk7_pci_driver = {
	.name		= "agpgart-amdk7",
	.id_table	= agp_amdk7_pci_table,
	.probe		= agp_amdk7_probe,
	.remove		= agp_amdk7_remove,
};

/* Module init: honour the global "agp=off" switch, then register with PCI. */
static int __init agp_amdk7_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_amdk7_pci_driver);
}

/* Module exit: unregister the PCI driver; ->remove handles the bridge. */
static void __exit agp_amdk7_cleanup(void)
{
	pci_unregister_driver(&agp_amdk7_pci_driver);
}

module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);

MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
new file mode 100644
index 000000000000..905f0629c44f
--- /dev/null
+++ b/drivers/char/agp/amd64-agp.c
@@ -0,0 +1,761 @@
1/*
2 * Copyright 2001-2003 SuSE Labs.
3 * Distributed under the GNU public license, v2.
4 *
5 * This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
6 * It also includes support for the AMD 8151 AGP bridge,
7 * although it doesn't actually do much, as all the real
8 * work is done in the northbridge(s).
9 */
10
11#include <linux/config.h>
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/init.h>
15#include <linux/agp_backend.h>
16#include "agp.h"
17
/* Will need to be increased if AMD64 ever goes >8-way. */
#define MAX_HAMMER_GARTS   8

/* PTE bits. */
#define GPTE_VALID	1
#define GPTE_COHERENT	2

/* Aperture control register bits. */
#define GARTEN		(1<<0)
#define DISGARTCPU	(1<<4)
#define DISGARTIO	(1<<5)

/* GART cache control register bits. */
#define INVGART		(1<<0)
#define GARTPTEERR	(1<<1)

/* K8 On-cpu GART registers */
#define AMD64_GARTAPERTURECTL	0x90
#define AMD64_GARTAPERTUREBASE	0x94
#define AMD64_GARTTABLEBASE	0x98
#define AMD64_GARTCACHECTL	0x9c
#define AMD64_GARTEN		(1<<0)

/* NVIDIA K8 registers */
#define NVIDIA_X86_64_0_APBASE		0x10
#define NVIDIA_X86_64_1_APBASE1		0x50
#define NVIDIA_X86_64_1_APLIMIT1	0x54
#define NVIDIA_X86_64_1_APSIZE		0xa8
#define NVIDIA_X86_64_1_APBASE2		0xd8
#define NVIDIA_X86_64_1_APLIMIT2	0xdc

/* ULi K8 registers */
#define ULI_X86_64_BASE_ADDR	0x10
#define ULI_X86_64_HTT_FEA_REG	0x50
#define ULI_X86_64_ENU_SCR_REG	0x54

/* Northbridges cached by cache_nbs(); every one carries a GART copy. */
static int nr_garts;
static struct pci_dev * hammers[MAX_HAMMER_GARTS];

static struct resource *aperture_resource;
static int __initdata agp_try_unsupported;

/* NOTE(review): the iterator is a file-scope global, so for_each_nb()
 * loops must never nest or run concurrently. */
static int gart_iterator;
#define for_each_nb()	for(gart_iterator=0;gart_iterator<nr_garts;gart_iterator++)

/* Invalidate one northbridge's GART TLB by setting INVGART. */
static void flush_amd64_tlb(struct pci_dev *dev)
{
	u32 tmp;

	pci_read_config_dword (dev, AMD64_GARTCACHECTL, &tmp);
	tmp |= INVGART;
	pci_write_config_dword (dev, AMD64_GARTCACHECTL, tmp);
}

/* tlb_flush hook: flush every cached northbridge; 'temp' is unused. */
static void amd64_tlbflush(struct agp_memory *temp)
{
	for_each_nb()
		flush_amd64_tlb(hammers[gart_iterator]);
}
77
/*
 * Bind mem's pages into the (single-level) GATT.  Only type 0 is
 * supported.  The K8 PTE packs physical address bits 39:32 into PTE
 * bits 11:4, so each entry is built by hand before being written
 * through the table's mmio alias.
 */
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	int i, j, num_entries;
	long long tmp;
	u32 pte;

	num_entries = agp_num_entries();

	if (type != 0 || mem->type != 0)
		return -EINVAL;

	/* Make sure we can fit the range in the gatt table. */
	/* FIXME: could wrap */
	if (((unsigned long)pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* gatt table should be empty. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	/* flush CPU caches once per agp_memory block, not per page */
	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		tmp = agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mem->type);

		/* reject addresses beyond 40 bits or with low bits set */
		BUG_ON(tmp & 0xffffff0000000ffcULL);
		pte = (tmp & 0x000000ff00000000ULL) >> 28;	/* bits 39:32 -> 11:4 */
		pte |=(tmp & 0x00000000fffff000ULL);
		pte |= GPTE_VALID | GPTE_COHERENT;

		writel(pte, agp_bridge->gatt_table+j);
		readl(agp_bridge->gatt_table+j);	/* PCI Posting. */
	}
	amd64_tlbflush(mem);
	return 0;
}
123
/*
 * This hack alters the order element according
 * to the size of a long. It sucks. I totally disown this, even
 * though it does appear to work for the most part.
 * (sizeof(long)/8 is 1 on 64-bit and 0 on 32-bit, so the page_order
 * field comes out one larger on 64-bit kernels.)
 */
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
	/* {MB, num entries, page_order, GARTAPERTURECTL size bits} */
	{32, 8192, 3+(sizeof(long)/8), 0 },
	{64, 16384, 4+(sizeof(long)/8), 1<<1 },
	{128, 32768, 5+(sizeof(long)/8), 1<<2 },
	{256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 },
	{512, 131072, 7+(sizeof(long)/8), 1<<3 },
	{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
	{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
139
140
141/*
142 * Get the current Aperture size from the x86-64.
143 * Note, that there may be multiple x86-64's, but we just return
144 * the value from the first one we find. The set_size functions
145 * keep the rest coherent anyway. Or at least should do.
146 */
147static int amd64_fetch_size(void)
148{
149 struct pci_dev *dev;
150 int i;
151 u32 temp;
152 struct aper_size_info_32 *values;
153
154 dev = hammers[0];
155 if (dev==NULL)
156 return 0;
157
158 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
159 temp = (temp & 0xe);
160 values = A_SIZE_32(amd64_aperture_sizes);
161
162 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
163 if (temp == values[i].size_value) {
164 agp_bridge->previous_size =
165 agp_bridge->current_size = (void *) (values + i);
166
167 agp_bridge->aperture_size_idx = i;
168 return values[i].size;
169 }
170 }
171 return 0;
172}
173
174/*
175 * In a multiprocessor x86-64 system, this function gets
176 * called once for each CPU.
177 */
178static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table)
179{
180 u64 aperturebase;
181 u32 tmp;
182 u64 addr, aper_base;
183
184 /* Address to map to */
185 pci_read_config_dword (hammer, AMD64_GARTAPERTUREBASE, &tmp);
186 aperturebase = tmp << 25;
187 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
188
189 /* address of the mappings table */
190 addr = (u64) gatt_table;
191 addr >>= 12;
192 tmp = (u32) addr<<4;
193 tmp &= ~0xf;
194 pci_write_config_dword (hammer, AMD64_GARTTABLEBASE, tmp);
195
196 /* Enable GART translation for this hammer. */
197 pci_read_config_dword(hammer, AMD64_GARTAPERTURECTL, &tmp);
198 tmp |= GARTEN;
199 tmp &= ~(DISGARTCPU | DISGARTIO);
200 pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
201
202 /* keep CPU's coherent. */
203 flush_amd64_tlb (hammer);
204
205 return aper_base;
206}
207
208
/* Aperture sizes for the 8151 bridge:
 * {MB, num entries, page_order, register encoding} */
static struct aper_size_info_32 amd_8151_sizes[7] =
{
	{2048, 524288, 9, 0x00000000 },	/* 0 0 0 0 0 0 */
	{1024, 262144, 8, 0x00000400 },	/* 1 0 0 0 0 0 */
	{512, 131072, 7, 0x00000600 },	/* 1 1 0 0 0 0 */
	{256, 65536, 6, 0x00000700 },	/* 1 1 1 0 0 0 */
	{128, 32768, 5, 0x00000720 },	/* 1 1 1 1 0 0 */
	{64, 16384, 4, 0x00000730 },	/* 1 1 1 1 1 0 */
	{32, 8192, 3, 0x00000738 }	/* 1 1 1 1 1 1 */
};

/* configure hook: program every cached northbridge with the GATT base;
 * the bus address returned by the last one becomes gart_bus_addr. */
static int amd_8151_configure(void)
{
	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);

	/* Configure AGP regs in each x86-64 host bridge. */
	for_each_nb() {
		agp_bridge->gart_bus_addr =
			amd64_configure(hammers[gart_iterator],gatt_bus);
	}
	return 0;
}
231
232
233static void amd64_cleanup(void)
234{
235 u32 tmp;
236
237 for_each_nb() {
238 /* disable gart translation */
239 pci_read_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, &tmp);
240 tmp &= ~AMD64_GARTEN;
241 pci_write_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, tmp);
242 }
243}
244
245
/* Hook table for the on-CPU K8 GART (with or without an 8151 bridge).
 * The GATT is single-level and managed by the generic helpers; only
 * insert_memory is chipset-specific (K8 PTE layout). */
struct agp_bridge_driver amd_8151_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= amd_8151_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= amd_8151_configure,
	.fetch_size		= amd64_fetch_size,
	.cleanup		= amd64_cleanup,
	.tlb_flush		= amd64_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= amd64_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
268
/* Some basic sanity checks for the aperture: non-zero, at least 32MB,
 * entirely below 4GB, and not pointing at usable RAM.  Also reserves
 * the region (once — aperture_resource is global) so a conflicting PCI
 * mapping is detected.  Returns 1 if usable, 0 otherwise. */
static int __devinit aperture_valid(u64 aper, u32 size)
{
	u32 pfn, c;
	if (aper == 0) {
		printk(KERN_ERR PFX "No aperture\n");
		return 0;
	}
	if (size < 32*1024*1024) {
		printk(KERN_ERR PFX "Aperture too small (%d MB)\n", size>>20);
		return 0;
	}
	if (aper + size > 0xffffffff) {
		printk(KERN_ERR PFX "Aperture out of bounds\n");
		return 0;
	}
	/* every page with a valid pfn must be reserved, i.e. not normal RAM */
	pfn = aper >> PAGE_SHIFT;
	for (c = 0; c < size/PAGE_SIZE; c++) {
		if (!pfn_valid(pfn + c))
			break;
		if (!PageReserved(pfn_to_page(pfn + c))) {
			printk(KERN_ERR PFX "Aperture pointing to RAM\n");
			return 0;
		}
	}

	/* Request the Aperture. This catches cases when someone else
	   already put a mapping in there - happens with some very broken BIOS

	   Maybe better to use pci_assign_resource/pci_enable_device instead
	   trusting the bridges? */
	if (!aperture_resource &&
	    !(aperture_resource = request_mem_region(aper, size, "aperture"))) {
		printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
		return 0;
	}
	return 1;
}
307
308/*
309 * W*s centric BIOS sometimes only set up the aperture in the AGP
310 * bridge, not the northbridge. On AMD64 this is handled early
311 * in aperture.c, but when GART_IOMMU is not enabled or we run
312 * on a 32bit kernel this needs to be redone.
313 * Unfortunately it is impossible to fix the aperture here because it's too late
314 * to allocate that much memory. But at least error out cleanly instead of
315 * crashing.
316 */
317static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
318 u16 cap)
319{
320 u32 aper_low, aper_hi;
321 u64 aper, nb_aper;
322 int order = 0;
323 u32 nb_order, nb_base;
324 u16 apsize;
325
326 pci_read_config_dword(nb, 0x90, &nb_order);
327 nb_order = (nb_order >> 1) & 7;
328 pci_read_config_dword(nb, 0x94, &nb_base);
329 nb_aper = nb_base << 25;
330 if (aperture_valid(nb_aper, (32*1024*1024)<<nb_order)) {
331 return 0;
332 }
333
334 /* Northbridge seems to contain crap. Try the AGP bridge. */
335
336 pci_read_config_word(agp, cap+0x14, &apsize);
337 if (apsize == 0xffff)
338 return -1;
339
340 apsize &= 0xfff;
341 /* Some BIOS use weird encodings not in the AGPv3 table. */
342 if (apsize & 0xff)
343 apsize |= 0xf00;
344 order = 7 - hweight16(apsize);
345
346 pci_read_config_dword(agp, 0x10, &aper_low);
347 pci_read_config_dword(agp, 0x14, &aper_hi);
348 aper = (aper_low & ~((1<<22)-1)) | ((u64)aper_hi << 32);
349 printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order);
350 if (order < 0 || !aperture_valid(aper, (32*1024*1024)<<order))
351 return -1;
352
353 pci_write_config_dword(nb, 0x90, order << 1);
354 pci_write_config_dword(nb, 0x94, aper >> 25);
355
356 return 0;
357}
358
359static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
360{
361 struct pci_dev *loop_dev = NULL;
362 int i = 0;
363
364 /* cache pci_devs of northbridges. */
365 while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev))
366 != NULL) {
367 if (i == MAX_HAMMER_GARTS) {
368 printk(KERN_ERR PFX "Too many northbridges for AGP\n");
369 return -1;
370 }
371 if (fix_northbridge(loop_dev, pdev, cap_ptr) < 0) {
372 printk(KERN_ERR PFX "No usable aperture found.\n");
373#ifdef __x86_64__
374 /* should port this to i386 */
375 printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n");
376#endif
377 return -1;
378 }
379 hammers[i++] = loop_dev;
380 }
381 nr_garts = i;
382 return i == 0 ? -1 : 0;
383}
384
385/* Handle AMD 8151 quirks */
386static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
387{
388 char *revstring;
389 u8 rev_id;
390
391 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
392 switch (rev_id) {
393 case 0x01: revstring="A0"; break;
394 case 0x02: revstring="A1"; break;
395 case 0x11: revstring="B0"; break;
396 case 0x12: revstring="B1"; break;
397 case 0x13: revstring="B2"; break;
398 case 0x14: revstring="B3"; break;
399 default: revstring="??"; break;
400 }
401
402 printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring);
403
404 /*
405 * Work around errata.
406 * Chips before B2 stepping incorrectly reporting v3.5
407 */
408 if (rev_id < 0x13) {
409 printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n");
410 bridge->major_version = 3;
411 bridge->minor_version = 0;
412 }
413}
414
415
/* ULi aperture sizes, largest first: {size (MB), num entries, page order,
   register size-value} — field meaning per struct aper_size_info_32. */
static struct aper_size_info_32 uli_sizes[7] =
{
	{256, 65536, 6, 10},
	{128, 32768, 5, 9},
	{64, 16384, 4, 8},
	{32, 8192, 3, 7},
	{16, 4096, 2, 6},
	{8, 2048, 1, 4},
	{4, 1024, 0, 3}
};
426static int __devinit uli_agp_init(struct pci_dev *pdev)
427{
428 u32 httfea,baseaddr,enuscr;
429 struct pci_dev *dev1;
430 int i;
431 unsigned size = amd64_fetch_size();
432 printk(KERN_INFO "Setting up ULi AGP. \n");
433 dev1 = pci_find_slot ((unsigned int)pdev->bus->number,PCI_DEVFN(0,0));
434 if (dev1 == NULL) {
435 printk(KERN_INFO PFX "Detected a ULi chipset, "
436 "but could not fine the secondary device.\n");
437 return -ENODEV;
438 }
439
440 for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
441 if (uli_sizes[i].size == size)
442 break;
443
444 if (i == ARRAY_SIZE(uli_sizes)) {
445 printk(KERN_INFO PFX "No ULi size found for %d\n", size);
446 return -ENODEV;
447 }
448
449 /* shadow x86-64 registers into ULi registers */
450 pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea);
451
452 /* if x86-64 aperture base is beyond 4G, exit here */
453 if ((httfea & 0x7fff) >> (32 - 25))
454 return -ENODEV;
455
456 httfea = (httfea& 0x7fff) << 25;
457
458 pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
459 baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK;
460 baseaddr|= httfea;
461 pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
462
463 enuscr= httfea+ (size * 1024 * 1024) - 1;
464 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
465 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
466 return 0;
467}
468
469
/* nForce3 aperture sizes, largest first: {size (MB), num entries,
   page order, APSIZE register bits} — per struct aper_size_info_32. */
static struct aper_size_info_32 nforce3_sizes[5] =
{
	{512, 131072, 7, 0x00000000 },
	{256, 65536, 6, 0x00000008 },
	{128, 32768, 5, 0x0000000C },
	{64, 16384, 4, 0x0000000E },
	{32, 8192, 3, 0x0000000F }
};
478
479/* Handle shadow device of the Nvidia NForce3 */
480/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
481static int __devinit nforce3_agp_init(struct pci_dev *pdev)
482{
483 u32 tmp, apbase, apbar, aplimit;
484 struct pci_dev *dev1;
485 int i;
486 unsigned size = amd64_fetch_size();
487
488 printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n");
489
490 dev1 = pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(11, 0));
491 if (dev1 == NULL) {
492 printk(KERN_INFO PFX "agpgart: Detected an NVIDIA "
493 "nForce3 chipset, but could not find "
494 "the secondary device.\n");
495 return -ENODEV;
496 }
497
498 for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
499 if (nforce3_sizes[i].size == size)
500 break;
501
502 if (i == ARRAY_SIZE(nforce3_sizes)) {
503 printk(KERN_INFO PFX "No NForce3 size found for %d\n", size);
504 return -ENODEV;
505 }
506
507 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
508 tmp &= ~(0xf);
509 tmp |= nforce3_sizes[i].size_value;
510 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
511
512 /* shadow x86-64 registers into NVIDIA registers */
513 pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase);
514
515 /* if x86-64 aperture base is beyond 4G, exit here */
516 if ( (apbase & 0x7fff) >> (32 - 25) )
517 return -ENODEV;
518
519 apbase = (apbase & 0x7fff) << 25;
520
521 pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
522 apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
523 apbar |= apbase;
524 pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
525
526 aplimit = apbase + (size * 1024 * 1024) - 1;
527 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
528 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
529 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
530 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
531
532 return 0;
533}
534
535static int __devinit agp_amd64_probe(struct pci_dev *pdev,
536 const struct pci_device_id *ent)
537{
538 struct agp_bridge_data *bridge;
539 u8 cap_ptr;
540
541 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
542 if (!cap_ptr)
543 return -ENODEV;
544
545 /* Could check for AGPv3 here */
546
547 bridge = agp_alloc_bridge();
548 if (!bridge)
549 return -ENOMEM;
550
551 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
552 pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
553 amd8151_init(pdev, bridge);
554 } else {
555 printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn);
556 }
557
558 bridge->driver = &amd_8151_driver;
559 bridge->dev = pdev;
560 bridge->capndx = cap_ptr;
561
562 /* Fill in the mode register */
563 pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
564
565 if (cache_nbs(pdev, cap_ptr) == -1) {
566 agp_put_bridge(bridge);
567 return -ENODEV;
568 }
569
570 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
571 int ret = nforce3_agp_init(pdev);
572 if (ret) {
573 agp_put_bridge(bridge);
574 return ret;
575 }
576 }
577
578 if (pdev->vendor == PCI_VENDOR_ID_AL) {
579 int ret = uli_agp_init(pdev);
580 if (ret) {
581 agp_put_bridge(bridge);
582 return ret;
583 }
584 }
585
586 pci_set_drvdata(pdev, bridge);
587 return agp_add_bridge(bridge);
588}
589
/* Driver unbind: release the GATT table's memory region and take the
   bridge out of the AGP core. */
static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	/* NOTE(review): presumably pairs with a request_mem_region() in the
	   amd_8151_driver GATT setup path (not visible here) — confirm. */
	release_mem_region(virt_to_phys(bridge->gatt_table_real),
			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
599
/* Host bridges known to need the AMD64 GART driver.  All entries match
   on PCI class BRIDGE_HOST plus exact vendor/device id. */
static struct pci_device_id agp_amd64_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AMD,
	.device		= PCI_DEVICE_ID_AMD_8151_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* ULi M1689 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_AL,
	.device		= PCI_DEVICE_ID_AL_M1689,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800Pro */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_K8T800PRO_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8385_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8M800 / K8N800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_8380_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T890 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_3238_0,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* VIA K8T800/K8M800/K8N800 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_VIA,
	.device		= PCI_DEVICE_ID_VIA_838X_1,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* NForce3 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_NVIDIA,
	.device		= PCI_DEVICE_ID_NVIDIA_NFORCE3S,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	/* SIS 755 */
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_DEVICE_ID_SI_755,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
693
/* PCI driver glue for the AMD64 GART AGP bridges. */
static struct pci_driver agp_amd64_pci_driver = {
	.name		= "agpgart-amd64",
	.id_table	= agp_amd64_pci_table,
	.probe		= agp_amd64_probe,
	.remove		= agp_amd64_remove,
};
700
701
/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
	int err = 0;
	/* Minimal id table used only to verify an AMD64 northbridge exists. */
	static struct pci_device_id amd64nb[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
		{ },
	};

	if (agp_off)
		return -EINVAL;
	/* NOTE(review): this relies on pci_register_driver() returning a
	   positive value when no supported bridge was bound (historical
	   semantics) — confirm against this tree's PCI core.  The branch
	   below is the "unsupported bridge" fallback path. */
	if (pci_register_driver(&agp_amd64_pci_driver) > 0) {
		struct pci_dev *dev;
		if (!agp_try_unsupported && !agp_try_unsupported_boot) {
			printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
			printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
#else
			printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
			/* NOTE(review): returns without unregistering the
			   driver registered above — verify intent. */
			return -ENODEV;
		}

		/* First check that we have at least one AMD64 NB */
		if (!pci_dev_present(amd64nb))
			return -ENODEV;

		/* Look for any AGP bridge */
		dev = NULL;
		err = -ENODEV;
		for_each_pci_dev(dev) {
			if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
				continue;
			/* Only one bridge supported right now */
			if (agp_amd64_probe(dev, NULL) == 0) {
				err = 0;
				break;
			}
		}
	}
	return err;
}
744
/* Module unload: drop the claimed aperture region (if aperture_valid()
   took it) and unregister the PCI driver. */
static void __exit agp_amd64_cleanup(void)
{
	if (aperture_resource)
		release_resource(aperture_resource);
	pci_unregister_driver(&agp_amd64_pci_driver);
}
751
/* On AMD64 the PCI driver needs to initialize this driver early
   for the IOMMU, so it has to be called via a backdoor. */
#ifndef CONFIG_GART_IOMMU
module_init(agp_amd64_init);
module_exit(agp_amd64_cleanup);
#endif

MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>, Andi Kleen");
/* agp_try_unsupported: allow binding bridges not in the id table. */
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
new file mode 100644
index 000000000000..757dde006fc9
--- /dev/null
+++ b/drivers/char/agp/ati-agp.c
@@ -0,0 +1,548 @@
1/*
2 * ATi AGPGART routines.
3 */
4
5#include <linux/types.h>
6#include <linux/module.h>
7#include <linux/pci.h>
8#include <linux/init.h>
9#include <linux/agp_backend.h>
10#include <asm/agp.h>
11#include "agp.h"
12
/* PCI config-space offsets (MMBASE/APSIZE/AGPMODE) and GART MMIO
   register offsets (ATI_GART_*) on the ATI IGP host bridge. */
#define ATI_GART_MMBASE_ADDR	0x14
#define ATI_RS100_APSIZE	0xac
#define ATI_RS100_IG_AGPMODE	0xb0
#define ATI_RS300_APSIZE	0xf8
#define ATI_RS300_IG_AGPMODE	0xfc
#define ATI_GART_FEATURE_ID		0x00
#define ATI_GART_BASE			0x04
#define ATI_GART_CACHE_SZBASE		0x08
#define ATI_GART_CACHE_CNTRL		0x0c
#define ATI_GART_CACHE_ENTRY_CNTRL	0x10
23
24
/* Aperture sizes, largest first: {size (MB), num entries, APSIZE
   register bits} — per struct aper_size_info_lvl2. */
static struct aper_size_info_lvl2 ati_generic_sizes[7] =
{
	{2048, 524288, 0x0000000c},
	{1024, 262144, 0x0000000a},
	{512, 131072, 0x00000008},
	{256, 65536, 0x00000006},
	{128, 32768, 0x00000004},
	{64, 16384, 0x00000002},
	{32, 8192, 0x00000000}
};

/* GATT entry mask OR'd into entries; bit 0 presumably marks the entry
   present (directory entries are written with "| 1" below) — confirm. */
static struct gatt_mask ati_generic_masks[] =
{
	{ .mask = 1, .type = 0}
};



/* One GATT page: its kernel virtual address and uncached ioremap. */
typedef struct _ati_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
} ati_page_map;

/* Driver-private state: mapped GART MMIO registers plus the array of
   leaf GATT pages (num_tables entries, NULL-terminated). */
static struct _ati_generic_private {
	volatile u8 __iomem *registers;
	ati_page_map **gatt_pages;
	int num_tables;
} ati_generic_private;
53
54static int ati_create_page_map(ati_page_map *page_map)
55{
56 int i, err = 0;
57
58 page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
59 if (page_map->real == NULL)
60 return -ENOMEM;
61
62 SetPageReserved(virt_to_page(page_map->real));
63 err = map_page_into_agp(virt_to_page(page_map->real));
64 page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
65 PAGE_SIZE);
66 if (page_map->remapped == NULL || err) {
67 ClearPageReserved(virt_to_page(page_map->real));
68 free_page((unsigned long) page_map->real);
69 page_map->real = NULL;
70 return -ENOMEM;
71 }
72 /*CACHE_FLUSH();*/
73 global_cache_flush();
74
75 for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
76 writel(agp_bridge->scratch_page, page_map->remapped+i);
77 readl(page_map->remapped+i); /* PCI Posting. */
78 }
79
80 return 0;
81}
82
83
/* Tear down one GATT page created by ati_create_page_map(): undo the
   AGP mapping, the ioremap, the reserved bit and the page itself.
   Caller must ensure page_map->real is non-NULL. */
static void ati_free_page_map(ati_page_map *page_map)
{
	unmap_page_from_agp(virt_to_page(page_map->real));
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
91
92
93static void ati_free_gatt_pages(void)
94{
95 int i;
96 ati_page_map **tables;
97 ati_page_map *entry;
98
99 tables = ati_generic_private.gatt_pages;
100 for(i = 0; i < ati_generic_private.num_tables; i++) {
101 entry = tables[i];
102 if (entry != NULL) {
103 if (entry->real != NULL)
104 ati_free_page_map(entry);
105 kfree(entry);
106 }
107 }
108 kfree(tables);
109}
110
111
112static int ati_create_gatt_pages(int nr_tables)
113{
114 ati_page_map **tables;
115 ati_page_map *entry;
116 int retval = 0;
117 int i;
118
119 tables = kmalloc((nr_tables + 1) * sizeof(ati_page_map *),
120 GFP_KERNEL);
121 if (tables == NULL)
122 return -ENOMEM;
123
124 memset(tables, 0, sizeof(ati_page_map *) * (nr_tables + 1));
125 for (i = 0; i < nr_tables; i++) {
126 entry = kmalloc(sizeof(ati_page_map), GFP_KERNEL);
127 if (entry == NULL) {
128 while (i>0) {
129 kfree (tables[i-1]);
130 i--;
131 }
132 kfree (tables);
133 tables = NULL;
134 retval = -ENOMEM;
135 break;
136 }
137 memset(entry, 0, sizeof(ati_page_map));
138 tables[i] = entry;
139 retval = ati_create_page_map(entry);
140 if (retval != 0) break;
141 }
142 ati_generic_private.num_tables = nr_tables;
143 ati_generic_private.gatt_pages = tables;
144
145 if (retval != 0) ati_free_gatt_pages();
146
147 return retval;
148}
149
150static int is_r200(void)
151{
152 if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
153 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
154 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
155 (agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
156 return 1;
157 return 0;
158}
159
160static int ati_fetch_size(void)
161{
162 int i;
163 u32 temp;
164 struct aper_size_info_lvl2 *values;
165
166 if (is_r200())
167 pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
168 else
169 pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
170
171 temp = (temp & 0x0000000e);
172 values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
173 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
174 if (temp == values[i].size_value) {
175 agp_bridge->previous_size =
176 agp_bridge->current_size = (void *) (values + i);
177
178 agp_bridge->aperture_size_idx = i;
179 return values[i].size;
180 }
181 }
182
183 return 0;
184}
185
/* Flush the GART cache by writing the cache control register; the
   read-back forces the posted write out to the device. */
static void ati_tlbflush(struct agp_memory * mem)
{
	writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
	readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL);	/* PCI Posting. */
}
191
192static void ati_cleanup(void)
193{
194 struct aper_size_info_lvl2 *previous_size;
195 u32 temp;
196
197 previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
198
199 /* Write back the previous size and disable gart translation */
200 if (is_r200()) {
201 pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
202 temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
203 pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
204 } else {
205 pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
206 temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
207 pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
208 }
209 iounmap((volatile u8 __iomem *)ati_generic_private.registers);
210}
211
212
213static int ati_configure(void)
214{
215 u32 temp;
216
217 /* Get the memory mapped registers */
218 pci_read_config_dword(agp_bridge->dev, ATI_GART_MMBASE_ADDR, &temp);
219 temp = (temp & 0xfffff000);
220 ati_generic_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
221
222 if (is_r200())
223 pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
224 else
225 pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
226
227 /* address to map too */
228 /*
229 pci_read_config_dword(agp_bridge.dev, AGP_APBASE, &temp);
230 agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
231 printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
232 */
233 writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
234 readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
235
236 /* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
237 pci_read_config_dword(agp_bridge->dev, 4, &temp);
238 pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14));
239
240 /* Write out the address of the gatt table */
241 writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
242 readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
243
244 return 0;
245}
246
247
/*
 *Since we don't need contigious memory we just try
 * to get the gatt table once
 */

/* Two-level lookup helpers: the top bits (>> 22, 4MB granularity) pick
   the page-directory slot relative to the aperture start, the next 10
   bits (mask 0x003ff000 >> 12) index into the leaf GATT page. */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
260
261static int ati_insert_memory(struct agp_memory * mem,
262 off_t pg_start, int type)
263{
264 int i, j, num_entries;
265 unsigned long __iomem *cur_gatt;
266 unsigned long addr;
267
268 num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
269
270 if (type != 0 || mem->type != 0)
271 return -EINVAL;
272
273 if ((pg_start + mem->page_count) > num_entries)
274 return -EINVAL;
275
276 j = pg_start;
277 while (j < (pg_start + mem->page_count)) {
278 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
279 cur_gatt = GET_GATT(addr);
280 if (!PGE_EMPTY(agp_bridge,readl(cur_gatt+GET_GATT_OFF(addr))))
281 return -EBUSY;
282 j++;
283 }
284
285 if (mem->is_flushed == FALSE) {
286 /*CACHE_FLUSH(); */
287 global_cache_flush();
288 mem->is_flushed = TRUE;
289 }
290
291 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
292 addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
293 cur_gatt = GET_GATT(addr);
294 writel(agp_bridge->driver->mask_memory(agp_bridge,
295 mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
296 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
297 }
298 agp_bridge->driver->tlb_flush(mem);
299 return 0;
300}
301
302static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
303 int type)
304{
305 int i;
306 unsigned long __iomem *cur_gatt;
307 unsigned long addr;
308
309 if (type != 0 || mem->type != 0) {
310 return -EINVAL;
311 }
312 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
313 addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
314 cur_gatt = GET_GATT(addr);
315 writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
316 readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
317 }
318
319 agp_bridge->driver->tlb_flush(mem);
320 return 0;
321}
322
/*
 * Build the two-level GATT: one page-directory page plus one leaf GATT
 * page per 1024 entries, program the aperture size register (the low
 * bit written below appears to enable translation — ati_cleanup()
 * clears it), and link the leaf pages into the directory.
 */
static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	ati_page_map page_dir;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;
	struct aper_size_info_lvl2 *current_size;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = ati_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	/* One leaf page covers 1024 entries (4MB of aperture). */
	retval = ati_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		ati_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);

	/* Write out the size register */
	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	if (is_r200()) {
		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
			| 0x00000001);
		pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
		/* Read-back after the write (presumably to flush it). */
		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
	} else {
		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
			| 0x00000001);
		pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
	}

	/*
	 * Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset */
	/* Link each leaf page into the directory, one slot per 4MB
	   (0x00400000) of aperture; bit 0 marks the entry in use. */
	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	return 0;
}
383
/* Free the two-level GATT: the leaf pages first, then the directory
   page recovered from the bridge fields set by ati_create_gatt_table(). */
static int ati_free_gatt_table(struct agp_bridge_data *bridge)
{
	ati_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	ati_free_gatt_pages();
	ati_free_page_map(&page_dir);
	return 0;
}
395
/* AGP core driver ops for all supported ATI IGP bridges; generic
   helpers are used wherever no chipset-specific behavior is needed. */
struct agp_bridge_driver ati_generic_bridge = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= ati_generic_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= ati_configure,
	.fetch_size		= ati_fetch_size,
	.cleanup		= ati_cleanup,
	.tlb_flush		= ati_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= ati_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= ati_create_gatt_table,
	.free_gatt_table	= ati_free_gatt_table,
	.insert_memory		= ati_insert_memory,
	.remove_memory		= ati_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
418
419
420static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
421{
422 {
423 .device_id = PCI_DEVICE_ID_ATI_RS100,
424 .chipset_name = "IGP320/M",
425 },
426 {
427 .device_id = PCI_DEVICE_ID_ATI_RS200,
428 .chipset_name = "IGP330/340/345/350/M",
429 },
430 {
431 .device_id = PCI_DEVICE_ID_ATI_RS200_B,
432 .chipset_name = "IGP345M",
433 },
434 {
435 .device_id = PCI_DEVICE_ID_ATI_RS250,
436 .chipset_name = "IGP7000/M",
437 },
438 {
439 .device_id = PCI_DEVICE_ID_ATI_RS300_100,
440 .chipset_name = "IGP9100/M",
441 },
442 {
443 .device_id = PCI_DEVICE_ID_ATI_RS300_133,
444 .chipset_name = "IGP9100/M",
445 },
446 {
447 .device_id = PCI_DEVICE_ID_ATI_RS300_166,
448 .chipset_name = "IGP9100/M",
449 },
450 {
451 .device_id = PCI_DEVICE_ID_ATI_RS300_200,
452 .chipset_name = "IGP9100/M",
453 },
454 { }, /* dummy final entry, always present */
455};
456
457static int __devinit agp_ati_probe(struct pci_dev *pdev,
458 const struct pci_device_id *ent)
459{
460 struct agp_device_ids *devs = ati_agp_device_ids;
461 struct agp_bridge_data *bridge;
462 u8 cap_ptr;
463 int j;
464
465 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
466 if (!cap_ptr)
467 return -ENODEV;
468
469 /* probe for known chipsets */
470 for (j = 0; devs[j].chipset_name; j++) {
471 if (pdev->device == devs[j].device_id)
472 goto found;
473 }
474
475 printk(KERN_ERR PFX
476 "Unsupported Ati chipset (device id: %04x)\n", pdev->device);
477 return -ENODEV;
478
479found:
480 bridge = agp_alloc_bridge();
481 if (!bridge)
482 return -ENOMEM;
483
484 bridge->dev = pdev;
485 bridge->capndx = cap_ptr;
486
487 bridge->driver = &ati_generic_bridge;
488
489
490 printk(KERN_INFO PFX "Detected Ati %s chipset\n",
491 devs[j].chipset_name);
492
493 /* Fill in the mode register */
494 pci_read_config_dword(pdev,
495 bridge->capndx+PCI_AGP_STATUS,
496 &bridge->mode);
497
498 pci_set_drvdata(pdev, bridge);
499 return agp_add_bridge(bridge);
500}
501
/* Driver unbind: take the bridge out of the AGP core and drop our
   reference to it. */
static void __devexit agp_ati_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
509
/* Match any ATI host bridge by class/vendor; agp_ati_probe() then
   filters the exact device id against ati_agp_device_ids. */
static struct pci_device_id agp_ati_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_ATI,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
523
/* PCI driver glue for the ATI IGP AGP bridges. */
static struct pci_driver agp_ati_pci_driver = {
	.name		= "agpgart-ati",
	.id_table	= agp_ati_pci_table,
	.probe		= agp_ati_probe,
	.remove		= agp_ati_remove,
};
530
/* Module load: honour the global agp_off switch, then register the
   PCI driver. */
static int __init agp_ati_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_ati_pci_driver);
}
537
/* Module unload: unregister the PCI driver (unbinds all bridges). */
static void __exit agp_ati_cleanup(void)
{
	pci_unregister_driver(&agp_ati_pci_driver);
}
542
/* Standard module entry points; unlike amd64-agp there is no
   early-init backdoor here. */
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);

MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
MODULE_LICENSE("GPL and additional rights");
548
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
new file mode 100644
index 000000000000..c3442f3c6480
--- /dev/null
+++ b/drivers/char/agp/backend.c
@@ -0,0 +1,348 @@
1/*
2 * AGPGART driver backend routines.
3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2003 Dave Jones.
5 * Copyright (C) 1999 Jeff Hartmann.
6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * TODO:
28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
29 */
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <linux/init.h>
33#include <linux/pagemap.h>
34#include <linux/miscdevice.h>
35#include <linux/pm.h>
36#include <linux/agp_backend.h>
37#include <linux/agpgart.h>
38#include <linux/vmalloc.h>
39#include <asm/io.h>
40#include "agp.h"
41
/* Due to XFree86 brain-damage, we can't go to 1.0 until they
 * fix some real stupidity. It's only by chance we can bump
 * past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 101
/* Interface version reported to userspace by the frontend. */
static struct agp_version agp_current_version =
{
	.major = AGPGART_VERSION_MAJOR,
	.minor = AGPGART_VERSION_MINOR,
};
52
/* Indirection so chipset drivers can override the bridge lookup;
 * defaults to the generic implementation. */
struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
	&agp_generic_find_bridge;

/* Legacy single-bridge pointer (set to the first allocated bridge) and
 * the list of all registered bridges. */
struct agp_bridge_data *agp_bridge;
LIST_HEAD(agp_bridges);
EXPORT_SYMBOL(agp_bridge);
EXPORT_SYMBOL(agp_bridges);
EXPORT_SYMBOL(agp_find_bridge);
61
62/**
63 * agp_backend_acquire - attempt to acquire an agp backend.
64 *
65 */
66struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
67{
68 struct agp_bridge_data *bridge;
69
70 bridge = agp_find_bridge(pdev);
71
72 if (!bridge)
73 return NULL;
74
75 if (atomic_read(&bridge->agp_in_use))
76 return NULL;
77 atomic_inc(&bridge->agp_in_use);
78 return bridge;
79}
80EXPORT_SYMBOL(agp_backend_acquire);
81
82
/**
 * agp_backend_release  -  release the lock on the agp backend.
 *
 * The caller must ensure that the graphics aperture translation table
 * is ready for use by another entity.
 *
 * (Ensure that all memory it bound is unbound.)
 */
void agp_backend_release(struct agp_bridge_data *bridge)
{

	if (bridge)
		atomic_dec(&bridge->agp_in_use);
}
EXPORT_SYMBOL(agp_backend_release);
98
99
/*
 * Heuristic limits: for a machine with .mem megabytes of RAM, allow at
 * most .agp megabytes of AGP memory.  agp_find_max() interpolates
 * linearly between adjacent rows.  Only used in this file, so make it
 * static (keeps the symbol out of the global namespace) and const.
 */
static const struct { int mem, agp; } maxes_table[] = {
	{0, 0},
	{32, 4},
	{64, 28},
	{128, 96},
	{256, 204},
	{512, 440},
	{1024, 942},
	{2048, 1920},
	{4096, 3932}
};
111
112static int agp_find_max(void)
113{
114 long memory, index, result;
115
116#if PAGE_SHIFT < 20
117 memory = num_physpages >> (20 - PAGE_SHIFT);
118#else
119 memory = num_physpages << (PAGE_SHIFT - 20);
120#endif
121 index = 1;
122
123 while ((memory > maxes_table[index].mem) && (index < 8))
124 index++;
125
126 result = maxes_table[index - 1].agp +
127 ( (memory - maxes_table[index - 1].mem) *
128 (maxes_table[index].agp - maxes_table[index - 1].agp)) /
129 (maxes_table[index].mem - maxes_table[index - 1].mem);
130
131 result = result << (20 - PAGE_SHIFT);
132 return result;
133}
134
135
/*
 * Bring a bridge up: allocate the scratch page (if the chipset needs
 * one), size the aperture, build the GATT and the frontend key bitmap,
 * then let the chipset driver program the hardware.  On any failure,
 * everything acquired so far is unwound before returning.
 */
static int agp_backend_initialize(struct agp_bridge_data *bridge)
{
	int size_value, rc, got_gatt=0, got_keylist=0;

	bridge->max_memory_agp = agp_find_max();
	bridge->version = &agp_current_version;

	if (bridge->driver->needs_scratch_page) {
		void *addr = bridge->driver->agp_alloc_page(bridge);

		if (!addr) {
			printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
			return -ENOMEM;
		}

		bridge->scratch_page_real = virt_to_phys(addr);
		/* Apply the chipset's entry mask so the scratch entry is
		 * in the form the GART hardware expects. */
		bridge->scratch_page =
		    bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
	}

	size_value = bridge->driver->fetch_size();
	if (size_value == 0) {
		printk(KERN_ERR PFX "unable to determine aperture size.\n");
		rc = -EINVAL;
		goto err_out;
	}
	if (bridge->driver->create_gatt_table(bridge)) {
		printk(KERN_ERR PFX
		    "unable to get memory for graphics translation table.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_gatt = 1;

	/* Bitmap of allocation keys handed out to the frontend. */
	bridge->key_list = vmalloc(PAGE_SIZE * 4);
	if (bridge->key_list == NULL) {
		printk(KERN_ERR PFX "error allocating memory for key lists.\n");
		rc = -ENOMEM;
		goto err_out;
	}
	got_keylist = 1;

	/* FIXME vmalloc'd memory not guaranteed contiguous */
	memset(bridge->key_list, 0, PAGE_SIZE * 4);

	if (bridge->driver->configure()) {
		printk(KERN_ERR PFX "error configuring host chipset.\n");
		rc = -EINVAL;
		goto err_out;
	}

	return 0;

err_out:
	/* Unwind in reverse order of acquisition. */
	if (bridge->driver->needs_scratch_page)
		bridge->driver->agp_destroy_page(
				phys_to_virt(bridge->scratch_page_real));
	if (got_gatt)
		bridge->driver->free_gatt_table(bridge);
	if (got_keylist) {
		vfree(bridge->key_list);
		bridge->key_list = NULL;
	}
	return rc;
}
201
/* cannot be __exit b/c as it could be called from __init code */
/* Inverse of agp_backend_initialize(): chipset cleanup hook, GATT,
 * key bitmap, then the scratch page. */
static void agp_backend_cleanup(struct agp_bridge_data *bridge)
{
	if (bridge->driver->cleanup)
		bridge->driver->cleanup();
	if (bridge->driver->free_gatt_table)
		bridge->driver->free_gatt_table(bridge);
	if (bridge->key_list) {
		vfree(bridge->key_list);
		bridge->key_list = NULL;
	}

	if (bridge->driver->agp_destroy_page &&
	    bridge->driver->needs_scratch_page)
		bridge->driver->agp_destroy_page(
				phys_to_virt(bridge->scratch_page_real));
}
219
/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

/* Allocate a zeroed bridge descriptor.  The first bridge allocated while
 * the bridge list is empty also becomes the legacy global agp_bridge. */
struct agp_bridge_data *agp_alloc_bridge(void)
{
	struct agp_bridge_data *bridge = kmalloc(sizeof(*bridge), GFP_KERNEL);

	if (!bridge)
		return NULL;

	memset(bridge, 0, sizeof(*bridge));
	atomic_set(&bridge->agp_in_use, 0);
	atomic_set(&bridge->current_memory_agp, 0);

	if (list_empty(&agp_bridges))
		agp_bridge = bridge;

	return bridge;
}
EXPORT_SYMBOL(agp_alloc_bridge);
241
242
/* Free a bridge descriptor; clear the legacy global once no bridges
 * remain registered. */
void agp_put_bridge(struct agp_bridge_data *bridge)
{
	kfree(bridge);

	if (list_empty(&agp_bridges))
		agp_bridge = NULL;
}
EXPORT_SYMBOL(agp_put_bridge);
251
252
/*
 * Register a chipset bridge with the core: pin the chipset driver
 * module, initialize the backend, and (for the first bridge only)
 * bring up the shared /dev/agpgart frontend.
 */
int agp_add_bridge(struct agp_bridge_data *bridge)
{
	int error;

	if (agp_off)
		return -ENODEV;

	if (!bridge->dev) {
		printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
		return -EINVAL;
	}

	/* Grab reference on the chipset driver. */
	if (!try_module_get(bridge->driver->owner)) {
		printk (KERN_INFO PFX "Couldn't lock chipset driver.\n");
		return -EINVAL;
	}

	error = agp_backend_initialize(bridge);
	if (error) {
		printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
		goto err_out;
	}

	/* The frontend is shared by all bridges; initialize it only once,
	 * together with the first bridge registered. */
	if (list_empty(&agp_bridges)) {
		error = agp_frontend_initialize();
		if (error) {
			printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
			goto frontend_err;
		}

		printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
			bridge->driver->fetch_size(), bridge->gart_bus_addr);

	}

	list_add(&bridge->list, &agp_bridges);
	return 0;

frontend_err:
	agp_backend_cleanup(bridge);
err_out:
	module_put(bridge->driver->owner);
	agp_put_bridge(bridge);
	return error;
}
EXPORT_SYMBOL_GPL(agp_add_bridge);
300
301
/* Unregister @bridge: tear down the backend, drop it from the list,
 * shut the frontend when the last bridge goes away, and release the
 * chipset driver module reference. */
void agp_remove_bridge(struct agp_bridge_data *bridge)
{
	agp_backend_cleanup(bridge);
	list_del(&bridge->list);
	if (list_empty(&agp_bridges))
		agp_frontend_cleanup();
	module_put(bridge->driver->owner);
}
EXPORT_SYMBOL_GPL(agp_remove_bridge);
311
/* Module-wide knobs set from the "agp=" boot option (see agp_setup). */
int agp_off;
int agp_try_unsupported_boot;
EXPORT_SYMBOL(agp_off);
EXPORT_SYMBOL(agp_try_unsupported_boot);
316
/* Core module init: only announces the interface version; bridges
 * register themselves later from the individual chipset drivers. */
static int __init agp_init(void)
{
	if (!agp_off)
		printk(KERN_INFO "Linux agpgart interface v%d.%d (c) Dave Jones\n",
			AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
	return 0;
}

/* Nothing to tear down here: bridges unregister via their own drivers. */
void __exit agp_exit(void)
{
}
328
329#ifndef MODULE
330static __init int agp_setup(char *s)
331{
332 if (!strcmp(s,"off"))
333 agp_off = 1;
334 if (!strcmp(s,"try_unsupported"))
335 agp_try_unsupported_boot = 1;
336 return 1;
337}
338__setup("agp=", agp_setup);
339#endif
340
/* Module metadata and entry points for the agpgart core. */
MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);

module_init(agp_init);
module_exit(agp_exit);
348
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
new file mode 100644
index 000000000000..52c0a097118c
--- /dev/null
+++ b/drivers/char/agp/efficeon-agp.c
@@ -0,0 +1,463 @@
1/*
2 * Transmeta's Efficeon AGPGART driver.
3 *
4 * Based upon a diff by Linus around November '02.
5 *
6 * Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
7 * and H. Peter Anvin <hpa@transmeta.com>.
8 */
9
10/*
11 * NOTE-cpg-040217:
12 *
13 * - when compiled as a module, after loading the module,
14 * it will refuse to unload, indicating it is in use,
15 * when it is not.
16 * - no s3 (suspend to ram) testing.
17 * - tested on the efficeon integrated nothbridge for tens
18 * of iterations of starting x and glxgears.
19 * - tested with radeon 9000 and radeon mobility m9 cards
20 * - tested with c3/c4 enabled (with the mobility m9 card)
21 */
22
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/agp_backend.h>
27#include <linux/gfp.h>
28#include <linux/page-flags.h>
29#include <linux/mm.h>
30#include "agp.h"
31
32/*
33 * The real differences to the generic AGP code is
34 * in the GART mappings - a two-level setup with the
35 * first level being an on-chip 64-entry table.
36 *
37 * The page array is filled through the ATTPAGE register
38 * (Aperture Translation Table Page Register) at 0xB8. Bits:
39 * 31:20: physical page address
40 * 11:9: Page Attribute Table Index (PATI)
41 * must match the PAT index for the
42 * mapped pages (the 2nd level page table pages
43 * themselves should be just regular WB-cacheable,
44 * so this is normally zero.)
45 * 8: Present
46 * 7:6: reserved, write as zero
47 * 5:0: GATT directory index: which 1st-level entry
48 *
49 * The Efficeon AGP spec requires pages to be WB-cacheable
50 * but to be explicitly CLFLUSH'd after any changes.
51 */
/* ATTPAGE config register (layout described in the comment above) and
 * the number of first-level (PDE) slots in the on-chip table. */
#define EFFICEON_ATTPAGE      0xb8
#define EFFICEON_L1_SIZE      64	/* Number of PDE pages */

#define EFFICEON_PATI         (0 << 9)
#define EFFICEON_PRESENT      (1 << 8)

/* Shadow of the on-chip L1 table: kernel virtual addresses of the
 * second-level PTE pages, zero for unused slots. */
static struct _efficeon_private {
	unsigned long l1_table[EFFICEON_L1_SIZE];
} efficeon_private;

static struct gatt_mask efficeon_generic_masks[] =
{
	{.mask = 0x00000001, .type = 0}
};

/* Supported apertures: {size in MB, num_entries, APSIZE register value}
 * (field meaning per struct aper_size_info_lvl2, cf. efficeon_fetch_size). */
static struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
{
	{256, 65536, 0},
	{128, 32768, 32},
	{64, 16384, 48},
	{32, 8192, 56}
};
74
/*
 * Control interfaces are largely identical to
 * the legacy Intel 440BX..
 */

/* Read APSIZE and match it against the aperture size table, caching the
 * result in the bridge; returns the size in MB, or 0 if the register
 * holds an unrecognised value. */
static int efficeon_fetch_size(void)
{
	int i;
	u16 temp;
	struct aper_size_info_lvl2 *values;

	pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
100
/* Flush the GART TLB by writing AGPCTRL twice (0x2200 then 0x2280),
 * the 440BX-style sequence also used by efficeon_configure(). */
static void efficeon_tlbflush(struct agp_memory * mem)
{
	printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
107
/* Undo efficeon_configure(): clear NBXCFG bit 9 (which configure set)
 * and restore the previously fetched APSIZE value. */
static void efficeon_cleanup(void)
{
	u16 temp;
	struct aper_size_info_lvl2 *previous_size;

	printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
	previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
			      previous_size->size_value);
}
120
/* Program the chipset: aperture size, GART bus address, AGP control,
 * and NBXCFG bits, mirroring the legacy 440BX sequence. */
static int efficeon_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_lvl2 *current_size;

	printk(KERN_DEBUG PFX "efficeon_configure()\n");

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
			      current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);

	/* paccfg/nbxcfg: clear bit 10, set bits 9 and 11. */
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
			      (temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
	/* clear any possible error conditions */
	pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
	return 0;
}
150
/* Free every PTE page recorded in the shadow L1 table and clear the
 * corresponding on-chip slot.  All 64 ATTPAGE slots are written, present
 * or not: a value without the PRESENT bit (bit 8) disables the slot. */
static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
{
	int index, freed = 0;

	for (index = 0; index < EFFICEON_L1_SIZE; index++) {
		unsigned long page = efficeon_private.l1_table[index];
		if (page) {
			efficeon_private.l1_table[index] = 0;
			ClearPageReserved(virt_to_page((char *)page));
			free_page(page);
			freed++;
		}
		printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
			agp_bridge->dev, EFFICEON_ATTPAGE, index);
		pci_write_config_dword(agp_bridge->dev,
			EFFICEON_ATTPAGE, index);
	}
	printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
	return 0;
}
171
172
/*
 * Since we don't need contigious memory we just try
 * to get the gatt table once
 */

/* Split an aperture bus address into GATT directory / table indices. */
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
/* NOTE(review): GET_GATT references efficeon_private.gatt_pages, a field
 * _efficeon_private does not declare, and the macro is not used in the
 * visible code -- looks like dead generic boilerplate; verify before use. */
#define GET_GATT(addr) (efficeon_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)
185
/* Allocate one zeroed PTE page per 1024 GATT entries, flush each from
 * the CPU caches (the hardware requires WB-cacheable pages that are
 * explicitly CLFLUSH'd -- see file header), then install it in both the
 * shadow table and the on-chip L1 table via ATTPAGE. */
static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
{
	int index;
	const int pati    = EFFICEON_PATI;
	const int present = EFFICEON_PRESENT;
	/* CLFLUSH line size in bytes: CPUID.1:EBX[15:8] * 8. */
	const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
	int num_entries, l1_pages;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);

	/* There are 2^10 PTE pages per PDE page */
	BUG_ON(num_entries & 0x3ff);
	l1_pages = num_entries >> 10;

	for (index = 0 ; index < l1_pages ; index++) {
		int offset;
		unsigned long page;
		unsigned long value;

		page = efficeon_private.l1_table[index];
		BUG_ON(page);

		page = get_zeroed_page(GFP_KERNEL);
		if (!page) {
			/* Roll back the pages installed so far. */
			efficeon_free_gatt_table(agp_bridge);
			return -ENOMEM;
		}
		SetPageReserved(virt_to_page((char *)page));

		for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
			asm volatile("clflush %0" : : "m" (*(char *)(page+offset)));

		efficeon_private.l1_table[index] = page;

		/* Physical page address | PAT index | present | slot. */
		value = __pa(page) | pati | present | index;

		pci_write_config_dword(agp_bridge->dev,
			EFFICEON_ATTPAGE, value);
	}

	return 0;
}
230
/* Write each page's entry into the shadow PTE pages, flushing CPU cache
 * lines lazily (only once a write crosses into a new CLFLUSH line), then
 * flush the chipset TLB. */
static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int i, count = mem->page_count, num_entries;
	unsigned int *page, *last_page;
	/* CLFLUSH line size in bytes: CPUID.1:EBX[15:8] * 8. */
	const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
	const unsigned long clflush_mask = ~(clflush_chunk-1);

	printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
	/* NOTE(review): pg_start + page_count is not checked for wraparound
	 * before the range comparison; later kernels hardened such checks --
	 * verify before reusing this pattern. */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;
	if (type != 0 || mem->type != 0)
		return -EINVAL;

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	last_page = NULL;
	for (i = 0; i < count; i++) {
		int index = pg_start + i;
		unsigned long insert = mem->memory[i];

		page = (unsigned int *) efficeon_private.l1_table[index >> 10];

		if (!page)
			continue;

		page += (index & 0x3ff);
		*page = insert;

		/* clflush is slow, so don't clflush until we have to */
		if ( last_page &&
		     ((unsigned long)page^(unsigned long)last_page) & clflush_mask )
			asm volatile("clflush %0" : : "m" (*last_page));

		last_page = page;
	}

	if ( last_page )
		asm volatile("clflush %0" : : "m" (*last_page));

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
278
/* Zero the shadow PTE entries for the given range and flush the chipset
 * TLB.  Note: unlike insert, the cleared entries are not CLFLUSH'd here. */
static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int i, count = mem->page_count, num_entries;

	printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;
	if (type != 0 || mem->type != 0)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		int index = pg_start + i;
		unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];

		if (!page)
			continue;
		page += (index & 0x3ff);
		*page = 0;
	}
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
304

/* Driver vtable handed to the AGP core via bridge->driver. */
struct agp_bridge_driver efficeon_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= efficeon_generic_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 4,
	.configure		= efficeon_configure,
	.fetch_size		= efficeon_fetch_size,
	.cleanup		= efficeon_cleanup,
	.tlb_flush		= efficeon_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= efficeon_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,

	// Efficeon-specific GATT table setup / populate / teardown
	.create_gatt_table	= efficeon_create_gatt_table,
	.free_gatt_table	= efficeon_free_gatt_table,
	.insert_memory		= efficeon_insert_memory,
	.remove_memory		= efficeon_remove_memory,
	.cant_use_aperture	= 0,	// 1 might be faster?

	// Generic
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
333
334
/* Resume from suspend: reprogram the chipset from the cached state. */
static int agp_efficeon_resume(struct pci_dev *pdev)
{
	printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
	return efficeon_configure();
}
340
341static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
342 const struct pci_device_id *ent)
343{
344 struct agp_bridge_data *bridge;
345 u8 cap_ptr;
346 struct resource *r;
347
348 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
349 if (!cap_ptr)
350 return -ENODEV;
351
352 /* Probe for Efficeon controller */
353 if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
354 printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
355 pdev->device);
356 return -ENODEV;
357 }
358
359 printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
360
361 bridge = agp_alloc_bridge();
362 if (!bridge)
363 return -ENOMEM;
364
365 bridge->driver = &efficeon_driver;
366 bridge->dev = pdev;
367 bridge->capndx = cap_ptr;
368
369 /*
370 * The following fixes the case where the BIOS has "forgotten" to
371 * provide an address range for the GART.
372 * 20030610 - hamish@zot.org
373 */
374 r = &pdev->resource[0];
375 if (!r->start && r->end) {
376 if(pci_assign_resource(pdev, 0)) {
377 printk(KERN_ERR PFX "could not assign resource 0\n");
378 return -ENODEV;
379 }
380 }
381
382 /*
383 * If the device has not been properly setup, the following will catch
384 * the problem and should stop the system from crashing.
385 * 20030610 - hamish@zot.org
386 */
387 if (pci_enable_device(pdev)) {
388 printk(KERN_ERR PFX "Unable to Enable PCI device\n");
389 return -ENODEV;
390 }
391
392 /* Fill in the mode register */
393 if (cap_ptr) {
394 pci_read_config_dword(pdev,
395 bridge->capndx+PCI_AGP_STATUS,
396 &bridge->mode);
397 }
398
399 pci_set_drvdata(pdev, bridge);
400 return agp_add_bridge(bridge);
401}
402
/* PCI removal callback: unregister and free the bridge created in probe. */
static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
410
/* Suspend is a no-op: all chipset state is reprogrammed on resume via
 * agp_efficeon_resume() -> efficeon_configure(). */
static int agp_efficeon_suspend(struct pci_dev *dev, u32 state)
{
	return 0;
}
415

/* Match any Transmeta host bridge; probe rejects non-Efficeon devices. */
static struct pci_device_id agp_efficeon_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_TRANSMETA,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
430
/* PCI driver glue, including power-management hooks. */
static struct pci_driver agp_efficeon_pci_driver = {
	.name		= "agpgart-efficeon",
	.id_table	= agp_efficeon_pci_table,
	.probe		= agp_efficeon_probe,
	.remove		= agp_efficeon_remove,
	.suspend	= agp_efficeon_suspend,
	.resume		= agp_efficeon_resume,
};
439
440static int __init agp_efficeon_init(void)
441{
442 static int agp_initialised=0;
443
444 if (agp_off)
445 return -EINVAL;
446
447 if (agp_initialised == 1)
448 return 0;
449 agp_initialised=1;
450
451 return pci_register_driver(&agp_efficeon_pci_driver);
452}
453
/* Module exit: drop the PCI driver registration. */
static void __exit agp_efficeon_cleanup(void)
{
	pci_unregister_driver(&agp_efficeon_pci_driver);
}

module_init(agp_efficeon_init);
module_exit(agp_efficeon_cleanup);

MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
new file mode 100644
index 000000000000..f633623ac802
--- /dev/null
+++ b/drivers/char/agp/frontend.c
@@ -0,0 +1,1103 @@
1/*
2 * AGPGART driver frontend
3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2003 Dave Jones
5 * Copyright (C) 1999 Jeff Hartmann
6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include <linux/types.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/mman.h>
33#include <linux/pci.h>
34#include <linux/init.h>
35#include <linux/miscdevice.h>
36#include <linux/agp_backend.h>
37#include <linux/agpgart.h>
38#include <linux/slab.h>
39#include <linux/mm.h>
40#include <asm/uaccess.h>
41#include <asm/pgtable.h>
42#include "agp.h"
43
44static struct agp_front_data agp_fe;
45
46static struct agp_memory *agp_find_mem_by_key(int key)
47{
48 struct agp_memory *curr;
49
50 if (agp_fe.current_controller == NULL)
51 return NULL;
52
53 curr = agp_fe.current_controller->pool;
54
55 while (curr != NULL) {
56 if (curr->key == key)
57 break;
58 curr = curr->next;
59 }
60
61 DBG("key=%d -> mem=%p", key, curr);
62 return curr;
63}
64
65static void agp_remove_from_pool(struct agp_memory *temp)
66{
67 struct agp_memory *prev;
68 struct agp_memory *next;
69
70 /* Check to see if this is even in the memory pool */
71
72 DBG("mem=%p", temp);
73 if (agp_find_mem_by_key(temp->key) != NULL) {
74 next = temp->next;
75 prev = temp->prev;
76
77 if (prev != NULL) {
78 prev->next = next;
79 if (next != NULL)
80 next->prev = prev;
81
82 } else {
83 /* This is the first item on the list */
84 if (next != NULL)
85 next->prev = NULL;
86
87 agp_fe.current_controller->pool = next;
88 }
89 }
90}
91
92/*
93 * Routines for managing each client's segment list -
94 * These routines handle adding and removing segments
95 * to each auth'ed client.
96 */
97
98static struct
99agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
100 unsigned long offset,
101 int size, pgprot_t page_prot)
102{
103 struct agp_segment_priv *seg;
104 int num_segments, i;
105 off_t pg_start;
106 size_t pg_count;
107
108 pg_start = offset / 4096;
109 pg_count = size / 4096;
110 seg = *(client->segments);
111 num_segments = client->num_segments;
112
113 for (i = 0; i < client->num_segments; i++) {
114 if ((seg[i].pg_start == pg_start) &&
115 (seg[i].pg_count == pg_count) &&
116 (pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
117 return seg + i;
118 }
119 }
120
121 return NULL;
122}
123
124static void agp_remove_seg_from_client(struct agp_client *client)
125{
126 DBG("client=%p", client);
127
128 if (client->segments != NULL) {
129 if (*(client->segments) != NULL) {
130 DBG("Freeing %p from client %p", *(client->segments), client);
131 kfree(*(client->segments));
132 }
133 DBG("Freeing %p from client %p", client->segments, client);
134 kfree(client->segments);
135 client->segments = NULL;
136 }
137}
138
139static void agp_add_seg_to_client(struct agp_client *client,
140 struct agp_segment_priv ** seg, int num_segments)
141{
142 struct agp_segment_priv **prev_seg;
143
144 prev_seg = client->segments;
145
146 if (prev_seg != NULL)
147 agp_remove_seg_from_client(client);
148
149 DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
150 client->num_segments = num_segments;
151 client->segments = seg;
152}
153
/* Originally taken from linux/mm/mmap.c from the array
 * protection_map.
 * The original really should be exported to modules, or
 * some routine which does the conversion for you
 */

/* Indexed by the low four VM_{READ,WRITE,EXEC,SHARED} bits,
 * cf. agp_convert_mmap_flags(). */
static const pgprot_t my_protect_map[16] =
{
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
165
/* Translate mmap PROT_* flags into a pgprot_t via the copied
 * protection_map; the mapping is always treated as VM_SHARED. */
static pgprot_t agp_convert_mmap_flags(int prot)
{
/* _trans(x, bit1, bit2): if flag bit1 is set in x, yield bit2. */
#define _trans(x,bit1,bit2) \
((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)

	unsigned long prot_bits;
	pgprot_t temp;

	prot_bits = _trans(prot, PROT_READ, VM_READ) |
	    _trans(prot, PROT_WRITE, VM_WRITE) |
	    _trans(prot, PROT_EXEC, VM_EXEC);

	prot_bits |= VM_SHARED;

	temp = my_protect_map[prot_bits & 0x0000000f];

	return temp;
}
184
185static int agp_create_segment(struct agp_client *client, struct agp_region *region)
186{
187 struct agp_segment_priv **ret_seg;
188 struct agp_segment_priv *seg;
189 struct agp_segment *user_seg;
190 size_t i;
191
192 seg = kmalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
193 if (seg == NULL) {
194 kfree(region->seg_list);
195 region->seg_list = NULL;
196 return -ENOMEM;
197 }
198 memset(seg, 0, (sizeof(struct agp_segment_priv) * region->seg_count));
199 user_seg = region->seg_list;
200
201 for (i = 0; i < region->seg_count; i++) {
202 seg[i].pg_start = user_seg[i].pg_start;
203 seg[i].pg_count = user_seg[i].pg_count;
204 seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
205 }
206 kfree(region->seg_list);
207 region->seg_list = NULL;
208
209 ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
210 if (ret_seg == NULL) {
211 kfree(seg);
212 return -ENOMEM;
213 }
214 *ret_seg = seg;
215 agp_add_seg_to_client(client, ret_seg, region->seg_count);
216 return 0;
217}
218
219/* End - Routines for managing each client's segment list */
220
221/* This function must only be called when current_controller != NULL */
222static void agp_insert_into_pool(struct agp_memory * temp)
223{
224 struct agp_memory *prev;
225
226 prev = agp_fe.current_controller->pool;
227
228 if (prev != NULL) {
229 prev->prev = temp;
230 temp->next = prev;
231 }
232 agp_fe.current_controller->pool = temp;
233}
234
235
236/* File private list routines */
237
238struct agp_file_private *agp_find_private(pid_t pid)
239{
240 struct agp_file_private *curr;
241
242 curr = agp_fe.file_priv_list;
243
244 while (curr != NULL) {
245 if (curr->my_pid == pid)
246 return curr;
247 curr = curr->next;
248 }
249
250 return NULL;
251}
252
253void agp_insert_file_private(struct agp_file_private * priv)
254{
255 struct agp_file_private *prev;
256
257 prev = agp_fe.file_priv_list;
258
259 if (prev != NULL)
260 prev->prev = priv;
261 priv->next = prev;
262 agp_fe.file_priv_list = priv;
263}
264
265void agp_remove_file_private(struct agp_file_private * priv)
266{
267 struct agp_file_private *next;
268 struct agp_file_private *prev;
269
270 next = priv->next;
271 prev = priv->prev;
272
273 if (prev != NULL) {
274 prev->next = next;
275
276 if (next != NULL)
277 next->prev = prev;
278
279 } else {
280 if (next != NULL)
281 next->prev = NULL;
282
283 agp_fe.file_priv_list = next;
284 }
285}
286
287/* End - File flag list routines */
288
/*
 * Wrappers for agp_free_memory & agp_allocate_memory
 * These make sure that internal lists are kept updated.
 */
/* Remove @memory from the controller pool, then free it. */
static void agp_free_memory_wrap(struct agp_memory *memory)
{
	agp_remove_from_pool(memory);
	agp_free_memory(memory);
}
298
299static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
300{
301 struct agp_memory *memory;
302
303 memory = agp_allocate_memory(agp_bridge, pg_count, type);
304 if (memory == NULL)
305 return NULL;
306
307 agp_insert_into_pool(memory);
308 return memory;
309}
310
311/* Routines for managing the list of controllers -
312 * These routines manage the current controller, and the list of
313 * controllers
314 */
315
316static struct agp_controller *agp_find_controller_by_pid(pid_t id)
317{
318 struct agp_controller *controller;
319
320 controller = agp_fe.controllers;
321
322 while (controller != NULL) {
323 if (controller->pid == id)
324 return controller;
325 controller = controller->next;
326 }
327
328 return NULL;
329}
330
331static struct agp_controller *agp_create_controller(pid_t id)
332{
333 struct agp_controller *controller;
334
335 controller = kmalloc(sizeof(struct agp_controller), GFP_KERNEL);
336
337 if (controller == NULL)
338 return NULL;
339
340 memset(controller, 0, sizeof(struct agp_controller));
341 controller->pid = id;
342
343 return controller;
344}
345
346static int agp_insert_controller(struct agp_controller *controller)
347{
348 struct agp_controller *prev_controller;
349
350 prev_controller = agp_fe.controllers;
351 controller->next = prev_controller;
352
353 if (prev_controller != NULL)
354 prev_controller->prev = controller;
355
356 agp_fe.controllers = controller;
357
358 return 0;
359}
360
/*
 * Detach and free every client attached to @controller.  Each client's
 * reserved segment list is released and, if the owning process still has
 * the device open, its file-private loses the VALID/CLIENT bits so further
 * mmap/ioctl access is refused.
 */
static void agp_remove_all_clients(struct agp_controller *controller)
{
	struct agp_client *client;
	struct agp_client *temp;

	client = controller->clients;

	while (client) {
		struct agp_file_private *priv;

		temp = client;
		agp_remove_seg_from_client(temp);
		priv = agp_find_private(temp->pid);

		if (priv != NULL) {
			clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
			clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
		}
		/* advance before freeing the node we stand on */
		client = client->next;
		kfree(temp);
	}
}
383
384static void agp_remove_all_memory(struct agp_controller *controller)
385{
386 struct agp_memory *memory;
387 struct agp_memory *temp;
388
389 memory = controller->pool;
390
391 while (memory) {
392 temp = memory;
393 memory = memory->next;
394 agp_free_memory_wrap(temp);
395 }
396}
397
/*
 * Unlink @controller from the global list, release all of its memory and
 * clients, and free it.  If it was the active controller, the backend is
 * released as well.  Always returns 0.
 */
static int agp_remove_controller(struct agp_controller *controller)
{
	struct agp_controller *prev_controller;
	struct agp_controller *next_controller;

	prev_controller = controller->prev;
	next_controller = controller->next;

	if (prev_controller != NULL) {
		prev_controller->next = next_controller;
		if (next_controller != NULL)
			next_controller->prev = prev_controller;

	} else {
		/* removing the list head */
		if (next_controller != NULL)
			next_controller->prev = NULL;

		agp_fe.controllers = next_controller;
	}

	agp_remove_all_memory(controller);
	agp_remove_all_clients(controller);

	if (agp_fe.current_controller == controller) {
		agp_fe.current_controller = NULL;
		agp_fe.backend_acquired = FALSE;
		agp_backend_release(agp_bridge);
	}
	kfree(controller);
	return 0;
}
429
430static void agp_controller_make_current(struct agp_controller *controller)
431{
432 struct agp_client *clients;
433
434 clients = controller->clients;
435
436 while (clients != NULL) {
437 struct agp_file_private *priv;
438
439 priv = agp_find_private(clients->pid);
440
441 if (priv != NULL) {
442 set_bit(AGP_FF_IS_VALID, &priv->access_flags);
443 set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
444 }
445 clients = clients->next;
446 }
447
448 agp_fe.current_controller = controller;
449}
450
451static void agp_controller_release_current(struct agp_controller *controller,
452 struct agp_file_private *controller_priv)
453{
454 struct agp_client *clients;
455
456 clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
457 clients = controller->clients;
458
459 while (clients != NULL) {
460 struct agp_file_private *priv;
461
462 priv = agp_find_private(clients->pid);
463
464 if (priv != NULL)
465 clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
466
467 clients = clients->next;
468 }
469
470 agp_fe.current_controller = NULL;
471 agp_fe.used_by_controller = FALSE;
472 agp_backend_release(agp_bridge);
473}
474
475/*
476 * Routines for managing client lists -
477 * These routines are for managing the list of auth'ed clients.
478 */
479
480static struct agp_client
481*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
482{
483 struct agp_client *client;
484
485 if (controller == NULL)
486 return NULL;
487
488 client = controller->clients;
489
490 while (client != NULL) {
491 if (client->pid == id)
492 return client;
493 client = client->next;
494 }
495
496 return NULL;
497}
498
499static struct agp_controller *agp_find_controller_for_client(pid_t id)
500{
501 struct agp_controller *controller;
502
503 controller = agp_fe.controllers;
504
505 while (controller != NULL) {
506 if ((agp_find_client_in_controller(controller, id)) != NULL)
507 return controller;
508 controller = controller->next;
509 }
510
511 return NULL;
512}
513
514static struct agp_client *agp_find_client_by_pid(pid_t id)
515{
516 struct agp_client *temp;
517
518 if (agp_fe.current_controller == NULL)
519 return NULL;
520
521 temp = agp_find_client_in_controller(agp_fe.current_controller, id);
522 return temp;
523}
524
525static void agp_insert_client(struct agp_client *client)
526{
527 struct agp_client *prev_client;
528
529 prev_client = agp_fe.current_controller->clients;
530 client->next = prev_client;
531
532 if (prev_client != NULL)
533 prev_client->prev = client;
534
535 agp_fe.current_controller->clients = client;
536 agp_fe.current_controller->num_clients++;
537}
538
539static struct agp_client *agp_create_client(pid_t id)
540{
541 struct agp_client *new_client;
542
543 new_client = kmalloc(sizeof(struct agp_client), GFP_KERNEL);
544
545 if (new_client == NULL)
546 return NULL;
547
548 memset(new_client, 0, sizeof(struct agp_client));
549 new_client->pid = id;
550 agp_insert_client(new_client);
551 return new_client;
552}
553
/*
 * Remove the client with pid @id from whichever controller holds it:
 * unlink it from the controller's client list, release its reserved
 * segments, and free it.  Returns -EINVAL if no matching controller
 * or client exists.
 */
static int agp_remove_client(pid_t id)
{
	struct agp_client *client;
	struct agp_client *prev_client;
	struct agp_client *next_client;
	struct agp_controller *controller;

	controller = agp_find_controller_for_client(id);
	if (controller == NULL)
		return -EINVAL;

	client = agp_find_client_in_controller(controller, id);
	if (client == NULL)
		return -EINVAL;

	prev_client = client->prev;
	next_client = client->next;

	if (prev_client != NULL) {
		prev_client->next = next_client;
		if (next_client != NULL)
			next_client->prev = prev_client;

	} else {
		/* client was the list head */
		if (next_client != NULL)
			next_client->prev = NULL;
		controller->clients = next_client;
	}

	controller->num_clients--;
	agp_remove_seg_from_client(client);
	kfree(client);
	return 0;
}
588
589/* End - Routines for managing client lists */
590
591/* File Operations */
592
/*
 * mmap() for /dev/agpgart.  Maps (part of) the AGP aperture into the
 * caller's address space.  A client may map only a region it has had
 * reserved for it (AGPIOC_RESERVE); the controller must map the whole
 * aperture.  Requires the backend to be acquired and the file-private
 * to be marked valid.  Returns -EPERM / -EINVAL / -EAGAIN accordingly.
 */
static int agp_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int size, current_size;
	unsigned long offset;
	struct agp_client *client;
	struct agp_file_private *priv = file->private_data;
	struct agp_kern_info kerninfo;

	down(&(agp_fe.agp_mutex));

	if (agp_fe.backend_acquired != TRUE)
		goto out_eperm;

	if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
		goto out_eperm;

	agp_copy_info(agp_bridge, &kerninfo);
	size = vma->vm_end - vma->vm_start;
	/* aper_size is reported in megabytes; convert to bytes */
	current_size = kerninfo.aper_size;
	current_size = current_size * 0x100000;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	DBG("%lx:%lx", offset, offset+size);

	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
		if ((size + offset) > current_size)
			goto out_inval;

		client = agp_find_client_by_pid(current->pid);

		if (client == NULL)
			goto out_eperm;

		/* the requested window must lie inside a reserved segment */
		if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
			goto out_inval;

		DBG("client vm_ops=%p", kerninfo.vm_ops);
		if (kerninfo.vm_ops) {
			/* chipset supplies its own fault handling */
			vma->vm_ops = kerninfo.vm_ops;
		} else if (io_remap_pfn_range(vma, vma->vm_start,
				(kerninfo.aper_base + offset) >> PAGE_SHIFT,
				size, vma->vm_page_prot)) {
			goto out_again;
		}
		up(&(agp_fe.agp_mutex));
		return 0;
	}

	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
		/* controller must map the aperture exactly, in full */
		if (size != current_size)
			goto out_inval;

		DBG("controller vm_ops=%p", kerninfo.vm_ops);
		if (kerninfo.vm_ops) {
			vma->vm_ops = kerninfo.vm_ops;
		} else if (io_remap_pfn_range(vma, vma->vm_start,
				kerninfo.aper_base >> PAGE_SHIFT,
				size, vma->vm_page_prot)) {
			goto out_again;
		}
		up(&(agp_fe.agp_mutex));
		return 0;
	}

out_eperm:
	up(&(agp_fe.agp_mutex));
	return -EPERM;

out_inval:
	up(&(agp_fe.agp_mutex));
	return -EINVAL;

out_again:
	up(&(agp_fe.agp_mutex));
	return -EAGAIN;
}
668
/*
 * release() for /dev/agpgart.  If this descriptor owned a controller,
 * tear the controller down (releasing the backend if it was current);
 * if it was a client, deregister it.  Finally free the per-open state.
 */
static int agp_release(struct inode *inode, struct file *file)
{
	struct agp_file_private *priv = file->private_data;

	down(&(agp_fe.agp_mutex));

	DBG("priv=%p", priv);

	if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
		struct agp_controller *controller;

		controller = agp_find_controller_by_pid(priv->my_pid);

		if (controller != NULL) {
			if (controller == agp_fe.current_controller)
				agp_controller_release_current(controller, priv);
			agp_remove_controller(controller);
			controller = NULL;
		}
	}

	if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
		agp_remove_client(priv->my_pid);

	agp_remove_file_private(priv);
	kfree(priv);
	file->private_data = NULL;
	up(&(agp_fe.agp_mutex));
	return 0;
}
699
/*
 * open() for /dev/agpgart.  Allocates per-open state: every opener may
 * become a client; root (checked via 2.6-era current->uid/suid) may also
 * become the controller.  If the opening pid was already reserved as a
 * client via AGPIOC_RESERVE, the descriptor is marked valid immediately.
 */
static int agp_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct agp_file_private *priv;
	struct agp_client *client;
	int rc = -ENXIO;

	down(&(agp_fe.agp_mutex));

	if (minor != AGPGART_MINOR)
		goto err_out;

	priv = kmalloc(sizeof(struct agp_file_private), GFP_KERNEL);
	if (priv == NULL)
		goto err_out_nomem;

	memset(priv, 0, sizeof(struct agp_file_private));
	set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
	priv->my_pid = current->pid;

	if ((current->uid == 0) || (current->suid == 0)) {
		/* Root priv, can be controller */
		set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
	}
	client = agp_find_client_by_pid(current->pid);

	if (client != NULL) {
		/* pid was pre-authorized by the controller via AGPIOC_RESERVE */
		set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
		set_bit(AGP_FF_IS_VALID, &priv->access_flags);
	}
	file->private_data = (void *) priv;
	agp_insert_file_private(priv);
	DBG("private=%p, client=%p", priv, client);
	up(&(agp_fe.agp_mutex));
	return 0;

err_out_nomem:
	rc = -ENOMEM;
err_out:
	up(&(agp_fe.agp_mutex));
	return rc;
}
742
743
/* read() is not supported on /dev/agpgart; all access is ioctl/mmap. */
static ssize_t agp_read(struct file *file, char __user *buf,
			size_t count, loff_t * ppos)
{
	return -EINVAL;
}
749
/* write() is not supported on /dev/agpgart; all access is ioctl/mmap. */
static ssize_t agp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t * ppos)
{
	return -EINVAL;
}
755
756static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
757{
758 struct agp_info userinfo;
759 struct agp_kern_info kerninfo;
760
761 agp_copy_info(agp_bridge, &kerninfo);
762
763 userinfo.version.major = kerninfo.version.major;
764 userinfo.version.minor = kerninfo.version.minor;
765 userinfo.bridge_id = kerninfo.device->vendor |
766 (kerninfo.device->device << 16);
767 userinfo.agp_mode = kerninfo.mode;
768 userinfo.aper_base = kerninfo.aper_base;
769 userinfo.aper_size = kerninfo.aper_size;
770 userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
771 userinfo.pg_used = kerninfo.current_memory;
772
773 if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
774 return -EFAULT;
775
776 return 0;
777}
778
/*
 * AGPIOC_ACQUIRE: make the calling process the controller.  Requires the
 * ALLOW_CONTROLLER capability (root at open time), no current controller,
 * and an unused bridge.  Creates (or reuses) a controller for this pid
 * and marks the file-private as the valid controller.
 */
static int agpioc_acquire_wrap(struct agp_file_private *priv)
{
	struct agp_controller *controller;

	DBG("");

	if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
		return -EPERM;

	if (agp_fe.current_controller != NULL)
		return -EBUSY;

	if(!agp_bridge)
		return -ENODEV;

	/* claim the bridge directly via its in-use refcount */
	if (atomic_read(&agp_bridge->agp_in_use))
		return -EBUSY;

	atomic_inc(&agp_bridge->agp_in_use);

	/* set before controller creation; rolled back below on OOM */
	agp_fe.backend_acquired = TRUE;

	controller = agp_find_controller_by_pid(priv->my_pid);

	if (controller != NULL) {
		agp_controller_make_current(controller);
	} else {
		controller = agp_create_controller(priv->my_pid);

		if (controller == NULL) {
			agp_fe.backend_acquired = FALSE;
			agp_backend_release(agp_bridge);
			return -ENOMEM;
		}
		agp_insert_controller(controller);
		agp_controller_make_current(controller);
	}

	set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
	set_bit(AGP_FF_IS_VALID, &priv->access_flags);
	return 0;
}
821
/* AGPIOC_RELEASE: drop controller status.  agp_ioctl() only dispatches
 * here when a current controller exists, so the pointer is non-NULL. */
static int agpioc_release_wrap(struct agp_file_private *priv)
{
	DBG("");
	agp_controller_release_current(agp_fe.current_controller, priv);
	return 0;
}
828
829static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
830{
831 struct agp_setup mode;
832
833 DBG("");
834 if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
835 return -EFAULT;
836
837 agp_enable(agp_bridge, mode.agp_mode);
838 return 0;
839}
840
/*
 * AGPIOC_RESERVE: the controller authorizes a client pid and reserves a
 * list of aperture segments for it.  seg_count == 0 deauthorizes and
 * removes the client instead.  Returns -EFAULT on bad user pointers or
 * an oversized count, -EINVAL/-ENOMEM on other failures.
 */
static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
{
	struct agp_region reserve;
	struct agp_client *client;
	struct agp_file_private *client_priv;

	DBG("");
	if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
		return -EFAULT;

	/* guard the multiplication below against integer overflow */
	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
		return -EFAULT;

	client = agp_find_client_by_pid(reserve.pid);

	if (reserve.seg_count == 0) {
		/* remove a client */
		client_priv = agp_find_private(reserve.pid);

		if (client_priv != NULL) {
			set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
			set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
		}
		if (client == NULL) {
			/* client is already removed */
			return 0;
		}
		return agp_remove_client(reserve.pid);
	} else {
		struct agp_segment *segment;

		/* hard cap on the number of segments per reservation */
		if (reserve.seg_count >= 16384)
			return -EINVAL;

		segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
				  GFP_KERNEL);

		if (segment == NULL)
			return -ENOMEM;

		if (copy_from_user(segment, (void __user *) reserve.seg_list,
				   sizeof(struct agp_segment) * reserve.seg_count)) {
			kfree(segment);
			return -EFAULT;
		}
		/* hand the kernel copy to agp_create_segment via reserve;
		 * presumably agp_create_segment takes ownership of (or
		 * frees) the buffer — confirm against its definition */
		reserve.seg_list = segment;

		if (client == NULL) {
			/* Create the client and add the segment */
			client = agp_create_client(reserve.pid);

			if (client == NULL) {
				kfree(segment);
				return -ENOMEM;
			}
			client_priv = agp_find_private(reserve.pid);

			if (client_priv != NULL) {
				set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
				set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
			}
		}
		return agp_create_segment(client, &reserve);
	}
	/* Will never really happen */
	return -EINVAL;
}
908
/* AGPIOC_PROTECT: unimplemented; always fails with -EINVAL. */
static int agpioc_protect_wrap(struct agp_file_private *priv)
{
	DBG("");
	/* This function is not currently implemented */
	return -EINVAL;
}
915
916static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
917{
918 struct agp_memory *memory;
919 struct agp_allocate alloc;
920
921 DBG("");
922 if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
923 return -EFAULT;
924
925 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
926
927 if (memory == NULL)
928 return -ENOMEM;
929
930 alloc.key = memory->key;
931 alloc.physical = memory->physical;
932
933 if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
934 agp_free_memory_wrap(memory);
935 return -EFAULT;
936 }
937 return 0;
938}
939
940static int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
941{
942 struct agp_memory *memory;
943
944 DBG("");
945 memory = agp_find_mem_by_key(arg);
946
947 if (memory == NULL)
948 return -EINVAL;
949
950 agp_free_memory_wrap(memory);
951 return 0;
952}
953
954static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
955{
956 struct agp_bind bind_info;
957 struct agp_memory *memory;
958
959 DBG("");
960 if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
961 return -EFAULT;
962
963 memory = agp_find_mem_by_key(bind_info.key);
964
965 if (memory == NULL)
966 return -EINVAL;
967
968 return agp_bind_memory(memory, bind_info.pg_start);
969}
970
971static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
972{
973 struct agp_memory *memory;
974 struct agp_unbind unbind;
975
976 DBG("");
977 if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
978 return -EFAULT;
979
980 memory = agp_find_mem_by_key(unbind.key);
981
982 if (memory == NULL)
983 return -EINVAL;
984
985 return agp_unbind_memory(memory);
986}
987
/*
 * ioctl dispatcher for /dev/agpgart.  Everything except AGPIOC_ACQUIRE
 * requires an acquired backend and that the caller is the controller
 * process.  All commands run under agp_fe.agp_mutex.  Unknown commands
 * fall through and return -ENOTTY.
 */
static int agp_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct agp_file_private *curr_priv = file->private_data;
	int ret_val = -ENOTTY;

	DBG("priv=%p, cmd=%x", curr_priv, cmd);
	down(&(agp_fe.agp_mutex));

	if ((agp_fe.current_controller == NULL) &&
	    (cmd != AGPIOC_ACQUIRE)) {
		ret_val = -EINVAL;
		goto ioctl_out;
	}
	if ((agp_fe.backend_acquired != TRUE) &&
	    (cmd != AGPIOC_ACQUIRE)) {
		ret_val = -EBUSY;
		goto ioctl_out;
	}
	if (cmd != AGPIOC_ACQUIRE) {
		if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
			ret_val = -EPERM;
			goto ioctl_out;
		}
		/* Use the original pid of the controller,
		 * in case it's threaded */

		if (agp_fe.current_controller->pid != curr_priv->my_pid) {
			ret_val = -EBUSY;
			goto ioctl_out;
		}
	}

	switch (cmd) {
	case AGPIOC_INFO:
		ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
		break;

	case AGPIOC_ACQUIRE:
		ret_val = agpioc_acquire_wrap(curr_priv);
		break;

	case AGPIOC_RELEASE:
		ret_val = agpioc_release_wrap(curr_priv);
		break;

	case AGPIOC_SETUP:
		ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
		break;

	case AGPIOC_RESERVE:
		ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
		break;

	case AGPIOC_PROTECT:
		ret_val = agpioc_protect_wrap(curr_priv);
		break;

	case AGPIOC_ALLOCATE:
		ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
		break;

	case AGPIOC_DEALLOCATE:
		/* arg is the memory key itself, not a pointer */
		ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
		break;

	case AGPIOC_BIND:
		ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
		break;

	case AGPIOC_UNBIND:
		ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
		break;
	}

ioctl_out:
	DBG("ioctl returns %d\n", ret_val);
	up(&(agp_fe.agp_mutex));
	return ret_val;
}
1068
/* File operations for the /dev/agpgart character device; read/write are
 * -EINVAL stubs, all real work is done via ioctl and mmap. */
static struct file_operations agp_fops =
{
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= agp_read,
	.write		= agp_write,
	.ioctl		= agp_ioctl,
	.mmap		= agp_mmap,
	.open		= agp_open,
	.release	= agp_release,
};
1080
/* Misc-device registration record: /dev/agpgart at minor AGPGART_MINOR. */
static struct miscdevice agp_miscdev =
{
	.minor	= AGPGART_MINOR,
	.name	= "agpgart",
	.fops	= &agp_fops
};
1087
1088int agp_frontend_initialize(void)
1089{
1090 memset(&agp_fe, 0, sizeof(struct agp_front_data));
1091 sema_init(&(agp_fe.agp_mutex), 1);
1092
1093 if (misc_register(&agp_miscdev)) {
1094 printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
1095 return -EIO;
1096 }
1097 return 0;
1098}
1099
/* Unregister the /dev/agpgart misc device. */
void agp_frontend_cleanup(void)
{
	misc_deregister(&agp_miscdev);
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
new file mode 100644
index 000000000000..c321a924e38a
--- /dev/null
+++ b/drivers/char/agp/generic.c
@@ -0,0 +1,1222 @@
1/*
2 * AGPGART driver.
3 * Copyright (C) 2004 Silicon Graphics, Inc.
4 * Copyright (C) 2002-2005 Dave Jones.
5 * Copyright (C) 1999 Jeff Hartmann.
6 * Copyright (C) 1999 Precision Insight, Inc.
7 * Copyright (C) 1999 Xi Graphics, Inc.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice shall be included
17 * in all copies or substantial portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
25 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 * TODO:
28 * - Allocate more than order 0 pages to avoid too much linear map splitting.
29 */
30#include <linux/config.h>
31#include <linux/module.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/pagemap.h>
35#include <linux/miscdevice.h>
36#include <linux/pm.h>
37#include <linux/agp_backend.h>
38#include <linux/vmalloc.h>
39#include <linux/dma-mapping.h>
40#include <linux/mm.h>
41#include <asm/io.h>
42#include <asm/cacheflush.h>
43#include <asm/pgtable.h>
44#include "agp.h"
45
/* Kernel-virtual pointer to the GATT (GART translation table). */
__u32 *agp_gatt_table;
/* Bytes of aperture reserved by other users; subtracted from the sizes
 * reported by agp_return_size()/agp_num_entries() below. */
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);
54
#if defined(CONFIG_X86)
/* Remap @page's kernel mapping uncached before the GART uses it, and
 * flush the TLB so no stale cached mapping survives.  Returns the
 * change_page_attr() status. */
int map_page_into_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
	global_flush_tlb();
	return i;
}
EXPORT_SYMBOL_GPL(map_page_into_agp);

/* Restore @page's default (cached) kernel mapping after GART use. */
int unmap_page_from_agp(struct page *page)
{
	int i;
	i = change_page_attr(page, 1, PAGE_KERNEL);
	global_flush_tlb();
	return i;
}
EXPORT_SYMBOL_GPL(unmap_page_from_agp);
#endif
74
75/*
76 * Generic routines for handling agp_memory structures -
77 * They use the basic page allocation routines to do the brunt of the work.
78 */
79
80void agp_free_key(int key)
81{
82 if (key < 0)
83 return;
84
85 if (key < MAXKEY)
86 clear_bit(key, agp_bridge->key_list);
87}
88EXPORT_SYMBOL(agp_free_key);
89
90
91static int agp_get_key(void)
92{
93 int bit;
94
95 bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
96 if (bit < MAXKEY) {
97 set_bit(bit, agp_bridge->key_list);
98 return bit;
99 }
100 return -1;
101}
102
103
104struct agp_memory *agp_create_memory(int scratch_pages)
105{
106 struct agp_memory *new;
107
108 new = kmalloc(sizeof(struct agp_memory), GFP_KERNEL);
109
110 if (new == NULL)
111 return NULL;
112
113 memset(new, 0, sizeof(struct agp_memory));
114 new->key = agp_get_key();
115
116 if (new->key < 0) {
117 kfree(new);
118 return NULL;
119 }
120 new->memory = vmalloc(PAGE_SIZE * scratch_pages);
121
122 if (new->memory == NULL) {
123 agp_free_key(new->key);
124 kfree(new);
125 return NULL;
126 }
127 new->num_scratch_pages = scratch_pages;
128 return new;
129}
130EXPORT_SYMBOL(agp_create_memory);
131
/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller. (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
	size_t i;

	if (curr == NULL)
		return;

	/* unbind from the GATT first so the aperture no longer maps it */
	if (curr->is_bound == TRUE)
		agp_unbind_memory(curr);

	/* non-zero types belong to the chipset driver; let it free them */
	if (curr->type != 0) {
		curr->bridge->driver->free_by_type(curr);
		return;
	}
	/* type 0: memory[] holds per-page physical addresses from
	 * agp_alloc_page; destroy each page individually */
	if (curr->page_count != 0) {
		for (i = 0; i < curr->page_count; i++) {
			curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
		}
	}
	agp_free_key(curr->key);
	vfree(curr->memory);
	kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);
164
165#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
166
/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram. Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
					size_t page_count, u32 type)
{
	int scratch_pages;
	struct agp_memory *new;
	size_t i;

	if (!bridge)
		return NULL;

	/* enforce the bridge's total AGP memory budget */
	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
		return NULL;

	/* non-zero types are handled entirely by the chipset driver */
	if (type != 0) {
		new = bridge->driver->alloc_by_type(page_count, type);
		if (new)
			new->bridge = bridge;
		return new;
	}

	/* pages needed to hold page_count physical addresses */
	scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

	new = agp_create_memory(scratch_pages);

	if (new == NULL)
		return NULL;

	for (i = 0; i < page_count; i++) {
		void *addr = bridge->driver->agp_alloc_page(bridge);

		if (addr == NULL) {
			/* frees the pages allocated so far, too */
			agp_free_memory(new);
			return NULL;
		}
		new->memory[i] = virt_to_phys(addr);
		new->page_count++;
	}
	new->bridge = bridge;

	flush_agp_mappings();

	return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
222
223
224/* End - Generic routines for handling agp_memory structures */
225
226
/*
 * Return the current aperture size in megabytes, decoded from the
 * driver's size_type-specific current_size record, minus any reserved
 * memory (agp_memory_reserved, in bytes).  Clamped at zero.
 */
static int agp_return_size(void)
{
	int current_size;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		current_size = A_SIZE_8(temp)->size;
		break;
	case U16_APER_SIZE:
		current_size = A_SIZE_16(temp)->size;
		break;
	case U32_APER_SIZE:
		current_size = A_SIZE_32(temp)->size;
		break;
	case LVL2_APER_SIZE:
		current_size = A_SIZE_LVL2(temp)->size;
		break;
	case FIXED_APER_SIZE:
		current_size = A_SIZE_FIX(temp)->size;
		break;
	default:
		current_size = 0;
		break;
	}

	/* convert reserved bytes to MB before subtracting */
	current_size -= (agp_memory_reserved / (1024*1024));
	if (current_size <0)
		current_size = 0;
	return current_size;
}
260
261
/*
 * Return the number of usable GATT entries, decoded from the driver's
 * size_type-specific current_size record, minus the entries covered by
 * agp_memory_reserved (bytes -> pages).  Clamped at zero.
 */
int agp_num_entries(void)
{
	int num_entries;
	void *temp;

	temp = agp_bridge->current_size;

	switch (agp_bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		num_entries = A_SIZE_LVL2(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	default:
		num_entries = 0;
		break;
	}

	/* one GATT entry per reserved page */
	num_entries -= agp_memory_reserved>>PAGE_SHIFT;
	if (num_entries<0)
		num_entries = 0;
	return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);
296
297
298static int check_bridge_mode(struct pci_dev *dev)
299{
300 u32 agp3;
301 u8 cap_ptr;
302
303 cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
304 pci_read_config_dword(dev, cap_ptr+AGPSTAT, &agp3);
305 if (agp3 & AGPSTAT_MODE_3_0)
306 return 1;
307 return 0;
308}
309
310
311/**
312 * agp_copy_info - copy bridge state information
313 *
314 * @info: agp_kern_info pointer. The caller should insure that this pointer is valid.
315 *
316 * This function copies information about the agp bridge device and the state of
317 * the agp backend into an agp_kern_info pointer.
318 */
319int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
320{
321 memset(info, 0, sizeof(struct agp_kern_info));
322 if (!bridge) {
323 info->chipset = NOT_SUPPORTED;
324 return -EIO;
325 }
326
327 info->version.major = bridge->version->major;
328 info->version.minor = bridge->version->minor;
329 info->chipset = SUPPORTED;
330 info->device = bridge->dev;
331 if (check_bridge_mode(bridge->dev))
332 info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
333 else
334 info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
335 info->mode = bridge->mode;
336 info->aper_base = bridge->gart_bus_addr;
337 info->aper_size = agp_return_size();
338 info->max_memory = bridge->max_memory_agp;
339 info->current_memory = atomic_read(&bridge->current_memory_agp);
340 info->cant_use_aperture = bridge->driver->cant_use_aperture;
341 info->vm_ops = bridge->vm_ops;
342 info->page_mask = ~0UL;
343 return 0;
344}
345EXPORT_SYMBOL(agp_copy_info);
346
347/* End - Routine to copy over information structure */
348
349/*
350 * Routines for handling swapping of agp_memory into the GATT -
351 * These routines take agp_memory and insert them into the GATT.
352 * They call device specific routines to actually write to the GATT.
353 */
354
/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound == TRUE) {
		printk (KERN_INFO PFX "memory %p is already bound!\n", curr);
		return -EINVAL;
	}
	/* flush CPU caches once before first insertion into the GATT */
	if (curr->is_flushed == FALSE) {
		curr->bridge->driver->cache_flush();
		curr->is_flushed = TRUE;
	}
	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = TRUE;
	curr->pg_start = pg_start;
	return 0;
}
EXPORT_SYMBOL(agp_bind_memory);
389
390
/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
	int ret_val;

	if (curr == NULL)
		return -EINVAL;

	if (curr->is_bound != TRUE) {
		printk (KERN_INFO PFX "memory %p was not bound!\n", curr);
		return -EINVAL;
	}

	ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

	if (ret_val != 0)
		return ret_val;

	curr->is_bound = FALSE;
	curr->pg_start = 0;
	return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
421
422/* End - Routines for handling swapping of agp_memory into the GATT */
423
424
425/* Generic Agp routines - Start */
/*
 * Negotiate an AGP 2.x mode: sanitize the requested mode, intersect the
 * SBA/rate capabilities of bridge and card with the request, reduce the
 * rate bits to a single speed, and apply per-chipset errata.  All three
 * arguments are modified in place; the result lands in *bridge_agpstat.
 */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
	u32 tmp;

	if (*requested_mode & AGP2_RESERVED_MASK) {
		printk (KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
		*requested_mode &= ~AGP2_RESERVED_MASK;
	}

	/* Check the speed bits make sense. Only one should be set. */
	tmp = *requested_mode & 7;
	switch (tmp) {
	case 0:
		printk (KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
		*requested_mode |= AGPSTAT2_1X;
		break;
	case 1:
	case 2:
		break;
	case 3:
		*requested_mode &= ~(AGPSTAT2_1X);	/* rate=2 */
		break;
	case 4:
		break;
	case 5:
	case 6:
	case 7:
		*requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4*/
		break;
	}

	/* disable SBA if it's not supported */
	if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
		*bridge_agpstat &= ~AGPSTAT_SBA;

	/* Set rate: keep each rate bit only if bridge, card and request
	 * all advertise it. */
	if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
		*bridge_agpstat &= ~AGPSTAT2_4X;

	if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
		*bridge_agpstat &= ~AGPSTAT2_2X;

	if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
		*bridge_agpstat &= ~AGPSTAT2_1X;

	/* Now we know what mode it should be, clear out the unwanted bits.
	 * Keep only the fastest surviving rate. */
	if (*bridge_agpstat & AGPSTAT2_4X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);	/* 4X */

	if (*bridge_agpstat & AGPSTAT2_2X)
		*bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);	/* 2X */

	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);	/* 1X */

	/* Apply any errata. */
	if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
		*bridge_agpstat &= ~AGPSTAT_FW;

	if (agp_bridge->flags & AGP_ERRATA_SBA)
		*bridge_agpstat &= ~AGPSTAT_SBA;

	if (agp_bridge->flags & AGP_ERRATA_1X) {
		*bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
		*bridge_agpstat |= AGPSTAT2_1X;
	}

	/* If we've dropped down to 1X, disable fast writes. */
	if (*bridge_agpstat & AGPSTAT2_1X)
		*bridge_agpstat &= ~AGPSTAT_FW;
}
497
498/*
499 * requested_mode = Mode requested by (typically) X.
500 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
501 * vga_agpstat = PCI_AGP_STATUS from graphic card.
502 */
503static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
504{
505 u32 origbridge=*bridge_agpstat, origvga=*vga_agpstat;
506 u32 tmp;
507
508 if (*requested_mode & AGP3_RESERVED_MASK) {
509 printk (KERN_INFO PFX "reserved bits set in mode 0x%x. Fixed.\n", *requested_mode);
510 *requested_mode &= ~AGP3_RESERVED_MASK;
511 }
512
513 /* Check the speed bits make sense. */
514 tmp = *requested_mode & 7;
515 if (tmp == 0) {
516 printk (KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
517 *requested_mode |= AGPSTAT3_4X;
518 }
519 if (tmp >= 3) {
520 printk (KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
521 *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
522 }
523
524 /* ARQSZ - Set the value to the maximum one.
525 * Don't allow the mode register to override values. */
526 *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
527 max_t(u32,(*bridge_agpstat & AGPSTAT_ARQSZ),(*vga_agpstat & AGPSTAT_ARQSZ)));
528
529 /* Calibration cycle.
530 * Don't allow the mode register to override values. */
531 *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
532 min_t(u32,(*bridge_agpstat & AGPSTAT_CAL_MASK),(*vga_agpstat & AGPSTAT_CAL_MASK)));
533
534 /* SBA *must* be supported for AGP v3 */
535 *bridge_agpstat |= AGPSTAT_SBA;
536
537 /*
538 * Set speed.
539 * Check for invalid speeds. This can happen when applications
540 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
541 */
542 if (*requested_mode & AGPSTAT_MODE_3_0) {
543 /*
544 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
545 * have been passed a 3.0 mode, but with 2.x speed bits set.
546 * AGP2.x 4x -> AGP3.0 4x.
547 */
548 if (*requested_mode & AGPSTAT2_4X) {
549 printk (KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
550 current->comm, *requested_mode);
551 *requested_mode &= ~AGPSTAT2_4X;
552 *requested_mode |= AGPSTAT3_4X;
553 }
554 } else {
555 /*
556 * The caller doesn't know what they are doing. We are in 3.0 mode,
557 * but have been passed an AGP 2.x mode.
558 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
559 */
560 printk (KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
561 current->comm, *requested_mode);
562 *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
563 *requested_mode |= AGPSTAT3_4X;
564 }
565
566 if (*requested_mode & AGPSTAT3_8X) {
567 if (!(*bridge_agpstat & AGPSTAT3_8X)) {
568 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
569 *bridge_agpstat |= AGPSTAT3_4X;
570 printk ("%s requested AGPx8 but bridge not capable.\n", current->comm);
571 return;
572 }
573 if (!(*vga_agpstat & AGPSTAT3_8X)) {
574 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
575 *bridge_agpstat |= AGPSTAT3_4X;
576 printk ("%s requested AGPx8 but graphic card not capable.\n", current->comm);
577 return;
578 }
579 /* All set, bridge & device can do AGP x8*/
580 *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
581 goto done;
582
583 } else {
584
585 /*
586 * If we didn't specify AGPx8, we can only do x4.
587 * If the hardware can't do x4, we're up shit creek, and never
588 * should have got this far.
589 */
590 *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
591 if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X))
592 *bridge_agpstat |= AGPSTAT3_4X;
593 else {
594 printk (KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
595 "[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n",
596 origbridge, origvga, *bridge_agpstat, *vga_agpstat);
597 if (!(*bridge_agpstat & AGPSTAT3_4X))
598 printk (KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
599 if (!(*vga_agpstat & AGPSTAT3_4X))
600 printk (KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
601 return;
602 }
603 }
604
605done:
606 /* Apply any errata. */
607 if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
608 *bridge_agpstat &= ~AGPSTAT_FW;
609
610 if (agp_bridge->flags & AGP_ERRATA_SBA)
611 *bridge_agpstat &= ~AGPSTAT_SBA;
612
613 if (agp_bridge->flags & AGP_ERRATA_1X) {
614 *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
615 *bridge_agpstat |= AGPSTAT2_1X;
616 }
617}
618
619
620/**
621 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
622 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
623 * @requested_mode: requested agp_stat from userspace (Typically from X)
624 * @bridge_agpstat: current agp_stat from AGP bridge.
625 *
626 * This function will hunt for an AGP graphics card, and try to match
627 * the requested mode to the capabilities of both the bridge and the card.
628 */
629u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
630{
631 struct pci_dev *device = NULL;
632 u32 vga_agpstat;
633 u8 cap_ptr;
634
635 for (;;) {
636 device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
637 if (!device) {
638 printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
639 return 0;
640 }
641 cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
642 if (cap_ptr)
643 break;
644 }
645
646 /*
647 * Ok, here we have a AGP device. Disable impossible
648 * settings, and adjust the readqueue to the minimum.
649 */
650 pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);
651
652 /* adjust RQ depth */
653 bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
654 min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
655 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));
656
657 /* disable FW if it's not supported */
658 if (!((bridge_agpstat & AGPSTAT_FW) &&
659 (vga_agpstat & AGPSTAT_FW) &&
660 (requested_mode & AGPSTAT_FW)))
661 bridge_agpstat &= ~AGPSTAT_FW;
662
663 /* Check to see if we are operating in 3.0 mode */
664 if (check_bridge_mode(agp_bridge->dev))
665 agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
666 else
667 agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
668
669 pci_dev_put(device);
670 return bridge_agpstat;
671}
672EXPORT_SYMBOL(agp_collect_device_status);
673
674
675void agp_device_command(u32 bridge_agpstat, int agp_v3)
676{
677 struct pci_dev *device = NULL;
678 int mode;
679
680 mode = bridge_agpstat & 0x7;
681 if (agp_v3)
682 mode *= 4;
683
684 for_each_pci_dev(device) {
685 u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
686 if (!agp)
687 continue;
688
689 printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
690 agp_v3 ? 3 : 2, pci_name(device), mode);
691 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
692 }
693}
694EXPORT_SYMBOL(agp_device_command);
695
696
697void get_agp_version(struct agp_bridge_data *bridge)
698{
699 u32 ncapid;
700
701 /* Exit early if already set by errata workarounds. */
702 if (bridge->major_version != 0)
703 return;
704
705 pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
706 bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
707 bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
708}
709EXPORT_SYMBOL(get_agp_version);
710
711
712void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
713{
714 u32 bridge_agpstat, temp;
715
716 get_agp_version(agp_bridge);
717
718 printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
719 agp_bridge->major_version,
720 agp_bridge->minor_version,
721 pci_name(agp_bridge->dev));
722
723 pci_read_config_dword(agp_bridge->dev,
724 agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
725
726 bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
727 if (bridge_agpstat == 0)
728 /* Something bad happened. FIXME: Return error code? */
729 return;
730
731 bridge_agpstat |= AGPSTAT_AGP_ENABLE;
732
733 /* Do AGP version specific frobbing. */
734 if (bridge->major_version >= 3) {
735 if (check_bridge_mode(bridge->dev)) {
736 /* If we have 3.5, we can do the isoch stuff. */
737 if (bridge->minor_version >= 5)
738 agp_3_5_enable(bridge);
739 agp_device_command(bridge_agpstat, TRUE);
740 return;
741 } else {
742 /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
743 bridge_agpstat &= ~(7<<10) ;
744 pci_read_config_dword(bridge->dev,
745 bridge->capndx+AGPCTRL, &temp);
746 temp |= (1<<9);
747 pci_write_config_dword(bridge->dev,
748 bridge->capndx+AGPCTRL, temp);
749
750 printk (KERN_INFO PFX "Device is in legacy mode,"
751 " falling back to 2.x\n");
752 }
753 }
754
755 /* AGP v<3 */
756 agp_device_command(bridge_agpstat, FALSE);
757}
758EXPORT_SYMBOL(agp_generic_enable);
759
760
/*
 * Allocate and map the GATT (graphics aperture translation table).
 *
 * Picks the current aperture size, allocates physically-contiguous
 * pages for the table (retrying with ever-smaller aperture sizes while
 * allocation fails), marks the pages reserved, maps the table uncached,
 * and points every entry at the bridge's scratch page.
 *
 * Returns 0 on success, -EINVAL for two-level GATT drivers (not
 * supported here), -ENOMEM on allocation/mapping failure.
 */
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* The generic routines can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	if (bridge->driver->size_type != FIXED_APER_SIZE) {
		/* Retry loop: if the allocation for the current aperture size
		 * fails, step to the next (smaller) size in the driver table. */
		do {
			switch (bridge->driver->size_type) {
			case U8_APER_SIZE:
				size = A_SIZE_8(temp)->size;
				page_order =
				    A_SIZE_8(temp)->page_order;
				num_entries =
				    A_SIZE_8(temp)->num_entries;
				break;
			case U16_APER_SIZE:
				size = A_SIZE_16(temp)->size;
				page_order = A_SIZE_16(temp)->page_order;
				num_entries = A_SIZE_16(temp)->num_entries;
				break;
			case U32_APER_SIZE:
				size = A_SIZE_32(temp)->size;
				page_order = A_SIZE_32(temp)->page_order;
				num_entries = A_SIZE_32(temp)->num_entries;
				break;
				/* This case will never really happen. */
			case FIXED_APER_SIZE:
			case LVL2_APER_SIZE:
			default:
				size = page_order = num_entries = 0;
				break;
			}

			table = (char *) __get_free_pages(GFP_KERNEL,
							  page_order);

			if (table == NULL) {
				/* Allocation failed: advance to the next
				 * aperture size and recompute current_size. */
				i++;
				switch (bridge->driver->size_type) {
				case U8_APER_SIZE:
					bridge->current_size = A_IDX8(bridge);
					break;
				case U16_APER_SIZE:
					bridge->current_size = A_IDX16(bridge);
					break;
				case U32_APER_SIZE:
					bridge->current_size = A_IDX32(bridge);
					break;
					/* This case will never really happen. */
				case FIXED_APER_SIZE:
				case LVL2_APER_SIZE:
				default:
					bridge->current_size =
					    bridge->current_size;
					break;
				}
				temp = bridge->current_size;
			} else {
				bridge->aperture_size_idx = i;
			}
		} while (!table && (i < bridge->driver->num_aperture_sizes));
	} else {
		/* Fixed-size aperture: a single allocation attempt. */
		size = ((struct aper_size_info_fixed *) temp)->size;
		page_order = ((struct aper_size_info_fixed *) temp)->page_order;
		num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
		table = (char *) __get_free_pages(GFP_KERNEL, page_order);
	}

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Reserve the pages so they are never swapped/reclaimed. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	agp_gatt_table = (void *)table;

	/* Flush around the uncached remap so CPU caches and the table agree. */
	bridge->driver->cache_flush();
	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
					(PAGE_SIZE * (1 << page_order)));
	bridge->driver->cache_flush();

	if (bridge->gatt_table == NULL) {
		/* Undo the reservation and free the pages on remap failure. */
		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
			ClearPageReserved(page);

		free_pages((unsigned long) table, page_order);

		return -ENOMEM;
	}
	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);

	/* AK: bogus, should encode addresses > 4GB */
	for (i = 0; i < num_entries; i++) {
		writel(bridge->scratch_page, bridge->gatt_table+i);
		readl(bridge->gatt_table+i);	/* PCI Posting. */
	}

	return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
879
880int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
881{
882 int page_order;
883 char *table, *table_end;
884 void *temp;
885 struct page *page;
886
887 temp = bridge->current_size;
888
889 switch (bridge->driver->size_type) {
890 case U8_APER_SIZE:
891 page_order = A_SIZE_8(temp)->page_order;
892 break;
893 case U16_APER_SIZE:
894 page_order = A_SIZE_16(temp)->page_order;
895 break;
896 case U32_APER_SIZE:
897 page_order = A_SIZE_32(temp)->page_order;
898 break;
899 case FIXED_APER_SIZE:
900 page_order = A_SIZE_FIX(temp)->page_order;
901 break;
902 case LVL2_APER_SIZE:
903 /* The generic routines can't deal with 2 level gatt's */
904 return -EINVAL;
905 break;
906 default:
907 page_order = 0;
908 break;
909 }
910
911 /* Do not worry about freeing memory, because if this is
912 * called, then all agp memory is deallocated and removed
913 * from the table. */
914
915 iounmap(bridge->gatt_table);
916 table = (char *) bridge->gatt_table_real;
917 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
918
919 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
920 ClearPageReserved(page);
921
922 free_pages((unsigned long) bridge->gatt_table_real, page_order);
923
924 agp_gatt_table = NULL;
925 bridge->gatt_table = NULL;
926 bridge->gatt_table_real = NULL;
927 bridge->gatt_bus_addr = 0;
928
929 return 0;
930}
931EXPORT_SYMBOL(agp_generic_free_gatt_table);
932
933
/*
 * Bind an agp_memory chunk into the GATT at page offset @pg_start.
 *
 * Validates the range against the aperture, refuses non-empty slots,
 * flushes CPU caches once per chunk, then writes each masked physical
 * address into the table (reading back each write to force PCI posting)
 * and finally flushes the bridge TLB.
 *
 * Returns 0 on success, -EINVAL on bad bridge/type/range, -EBUSY if any
 * target entry is already in use.
 */
int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
	int num_entries;
	size_t i;
	off_t j;
	void *temp;
	struct agp_bridge_data *bridge;

	bridge = mem->bridge;
	if (!bridge)
		return -EINVAL;

	temp = bridge->current_size;

	switch (bridge->driver->size_type) {
	case U8_APER_SIZE:
		num_entries = A_SIZE_8(temp)->num_entries;
		break;
	case U16_APER_SIZE:
		num_entries = A_SIZE_16(temp)->num_entries;
		break;
	case U32_APER_SIZE:
		num_entries = A_SIZE_32(temp)->num_entries;
		break;
	case FIXED_APER_SIZE:
		num_entries = A_SIZE_FIX(temp)->num_entries;
		break;
	case LVL2_APER_SIZE:
		/* The generic routines can't deal with 2 level gatt's */
		return -EINVAL;
		break;
	default:
		num_entries = 0;
		break;
	}

	/* Entries stolen by the platform (e.g. IOMMU) are off limits. */
	num_entries -= agp_memory_reserved/PAGE_SIZE;
	if (num_entries < 0) num_entries = 0;

	if (type != 0 || mem->type != 0) {
		/* The generic routines know nothing of memory types */
		return -EINVAL;
	}

	/* AK: could wrap */
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* First pass: every target slot must still hold the scratch entry. */
	while (j < (pg_start + mem->page_count)) {
		if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
			return -EBUSY;
		j++;
	}

	/* One cache flush per chunk, not per page. */
	if (mem->is_flushed == FALSE) {
		bridge->driver->cache_flush();
		mem->is_flushed = TRUE;
	}

	/* Second pass: install the masked physical addresses. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
		readl(bridge->gatt_table+j);	/* PCI Posting. */
	}

	bridge->driver->tlb_flush(mem);
	return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);
1004
1005
1006int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1007{
1008 size_t i;
1009 struct agp_bridge_data *bridge;
1010
1011 bridge = mem->bridge;
1012 if (!bridge)
1013 return -EINVAL;
1014
1015 if (type != 0 || mem->type != 0) {
1016 /* The generic routines know nothing of memory types */
1017 return -EINVAL;
1018 }
1019
1020 /* AK: bogus, should encode addresses > 4GB */
1021 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1022 writel(bridge->scratch_page, bridge->gatt_table+i);
1023 readl(bridge->gatt_table+i); /* PCI Posting. */
1024 }
1025
1026 global_cache_flush();
1027 bridge->driver->tlb_flush(mem);
1028 return 0;
1029}
1030EXPORT_SYMBOL(agp_generic_remove_memory);
1031
1032
/* The generic layer supports no typed memory; drivers that do override
 * this hook.  Returning NULL makes agp_allocate_memory() fail cleanly. */
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
	return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);
1038
1039
/* Release a typed agp_memory chunk: the page-address array (vmalloc'd),
 * the key bitmap slot, and the structure itself.  Caller must already
 * have freed/unbound the actual pages. */
void agp_generic_free_by_type(struct agp_memory *curr)
{
	vfree(curr->memory);
	agp_free_key(curr->key);
	kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
1047
1048
1049/*
1050 * Basic Page Allocation Routines -
1051 * These routines handle page allocation and by default they reserve the allocated
1052 * memory. They also handle incrementing the current_memory_agp value, Which is checked
1053 * against a maximum value.
1054 */
1055
1056void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
1057{
1058 struct page * page;
1059
1060 page = alloc_page(GFP_KERNEL);
1061 if (page == NULL)
1062 return NULL;
1063
1064 map_page_into_agp(page);
1065
1066 get_page(page);
1067 SetPageLocked(page);
1068 atomic_inc(&agp_bridge->current_memory_agp);
1069 return page_address(page);
1070}
1071EXPORT_SYMBOL(agp_generic_alloc_page);
1072
1073
1074void agp_generic_destroy_page(void *addr)
1075{
1076 struct page *page;
1077
1078 if (addr == NULL)
1079 return;
1080
1081 page = virt_to_page(addr);
1082 unmap_page_from_agp(page);
1083 put_page(page);
1084 unlock_page(page);
1085 free_page((unsigned long)addr);
1086 atomic_dec(&agp_bridge->current_memory_agp);
1087}
1088EXPORT_SYMBOL(agp_generic_destroy_page);
1089
1090/* End Basic Page Allocation Routines */
1091
1092
1093/**
1094 * agp_enable - initialise the agp point-to-point connection.
1095 *
1096 * @mode: agp mode register value to configure with.
1097 */
1098void agp_enable(struct agp_bridge_data *bridge, u32 mode)
1099{
1100 if (!bridge)
1101 return;
1102 bridge->driver->agp_enable(bridge, mode);
1103}
1104EXPORT_SYMBOL(agp_enable);
1105
1106/* When we remove the global variable agp_bridge from all drivers
1107 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
1108 */
1109
1110struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
1111{
1112 if (list_empty(&agp_bridges))
1113 return NULL;
1114
1115 return agp_bridge;
1116}
1117
/* IPI callback: flush this CPU's caches of AGP-visible data. */
static void ipi_handler(void *null)
{
	flush_agp_cache();
}
1122
/* Run flush_agp_cache() on every CPU (including this one) and wait for
 * completion; a timeout here means another CPU never answered the IPI,
 * which is unrecoverable — hence the panic. */
void global_cache_flush(void)
{
	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
		panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);
1129
1130unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
1131 unsigned long addr, int type)
1132{
1133 /* memory type is ignored in the generic routine */
1134 if (bridge->driver->masks)
1135 return addr | bridge->driver->masks[0].mask;
1136 else
1137 return addr;
1138}
1139EXPORT_SYMBOL(agp_generic_mask_memory);
1140
1141/*
1142 * These functions are implemented according to the AGPv3 spec,
1143 * which covers implementation details that had previously been
1144 * left open.
1145 */
1146
1147int agp3_generic_fetch_size(void)
1148{
1149 u16 temp_size;
1150 int i;
1151 struct aper_size_info_16 *values;
1152
1153 pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
1154 values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
1155
1156 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
1157 if (temp_size == values[i].size_value) {
1158 agp_bridge->previous_size =
1159 agp_bridge->current_size = (void *) (values + i);
1160
1161 agp_bridge->aperture_size_idx = i;
1162 return values[i].size;
1163 }
1164 }
1165 return 0;
1166}
1167EXPORT_SYMBOL(agp3_generic_fetch_size);
1168
/* AGP3 TLB flush: briefly clear the GTLB-enable bit in AGPCTRL, then
 * restore the previous value.  The disable/re-enable toggle is what
 * invalidates the bridge's translation cache; write order matters. */
void agp3_generic_tlbflush(struct agp_memory *mem)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);
1177
/* AGP3 bridge setup: record the aperture bus address, program the
 * aperture size and GATT base into the capability registers, then turn
 * on the aperture and the GTLB.  Always returns 0. */
int agp3_generic_configure(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	/* Aperture base comes from the standard APBASE BAR. */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* set aperture size */
	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
	/* set gart pointer */
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
	/* enable aperture and GTLB */
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
	return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);
1198
/* AGP3 teardown: disable the aperture, leaving the other AGPCTRL bits
 * (e.g. GTLB enable) untouched. */
void agp3_generic_cleanup(void)
{
	u32 ctrl;
	pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
	pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);
1206
/* Standard AGP3 aperture table:
 * { size in MB, number of GATT entries, page order, APSIZE register value } */
struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
	{4096, 1048576, 10,0x000},
	{2048, 524288, 9, 0x800},
	{1024, 262144, 8, 0xc00},
	{ 512, 131072, 7, 0xe00},
	{ 256, 65536, 6, 0xf00},
	{ 128, 32768, 5, 0xf20},
	{ 64, 16384, 4, 0xf30},
	{ 32, 8192, 3, 0xf38},
	{ 16, 4096, 2, 0xf3c},
	{ 8, 2048, 1, 0xf3e},
	{ 4, 1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
1222
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
new file mode 100644
index 000000000000..6052bfa04c72
--- /dev/null
+++ b/drivers/char/agp/hp-agp.c
@@ -0,0 +1,552 @@
1/*
2 * HP zx1 AGPGART routines.
3 *
4 * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
5 * Bjorn Helgaas <bjorn.helgaas@hp.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/acpi.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/init.h>
16#include <linux/agp_backend.h>
17
18#include <asm/acpi-ext.h>
19
20#include "agp.h"
21
22#ifndef log2
23#define log2(x) ffz(~(x))
24#endif
25
26#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
27
28/* HP ZX1 IOC registers */
29#define HP_ZX1_IBASE 0x300
30#define HP_ZX1_IMASK 0x308
31#define HP_ZX1_PCOM 0x310
32#define HP_ZX1_TCNFG 0x318
33#define HP_ZX1_PDIR_BASE 0x320
34
35#define HP_ZX1_IOVA_BASE GB(1UL)
36#define HP_ZX1_IOVA_SIZE GB(1UL)
37#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
38#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
39
40#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
41#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
42
43#define AGP8X_MODE_BIT 3
44#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
45
/* AGP bridge need not be PCI device, but DRM thinks it is. */
static struct pci_dev fake_bridge_dev;

/* Non-zero once a ZX1 GART has been probed successfully
 * (presumably consulted by the module init path — confirm below). */
static int hp_zx1_gart_found;

static struct aper_size_info_fixed hp_zx1_sizes[] =
{
	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
};

static struct gatt_mask hp_zx1_masks[] =
{
	{.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
};

/* All driver state for the single supported IOC/LBA pair. */
static struct _hp_private {
	volatile u8 __iomem *ioc_regs;	/* mapped IOC register window */
	volatile u8 __iomem *lba_regs;	/* mapped LBA register window */
	int lba_cap_offset;		/* AGP capability offset in LBA space */
	u64 *io_pdir;		// PDIR for entire IOVA
	u64 *gatt;		// PDIR just for GART (subset of above)
	u64 gatt_entries;
	u64 iova_base;
	u64 gart_base;
	u64 gart_size;
	u64 io_pdir_size;
	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
	int io_page_size;	/* 1 << io_tlb_shift */
	int io_tlb_shift;
	int io_tlb_ps;		// IOC ps config
	int io_pages_per_kpage;	/* IO pages per kernel page */
} hp_private;
78
/*
 * IOC already owned by sba_iommu: adopt its configuration instead of
 * programming our own.  Reads the IOTLB page size, locates the live
 * IO PDIR and takes the upper half of the IOVA space as the GART.
 * Returns 0 on success, -ENODEV if the configuration is unusable or
 * sba_iommu left no reserved cookie entry for us.
 */
static int __init hp_zx1_ioc_shared(void)
{
	struct _hp_private *hp = &hp_private;

	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");

	/*
	 * IOC already configured by sba_iommu module; just use
	 * its setup. We assume:
	 * 	- IOVA space is 1Gb in size
	 * 	- first 512Mb is IOMMU, second 512Mb is GART
	 */
	hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
	switch (hp->io_tlb_ps) {
		case 0: hp->io_tlb_shift = 12; break;
		case 1: hp->io_tlb_shift = 13; break;
		case 2: hp->io_tlb_shift = 14; break;
		case 3: hp->io_tlb_shift = 16; break;
		default:
			printk(KERN_ERR PFX "Invalid IOTLB page size "
			       "configuration 0x%x\n", hp->io_tlb_ps);
			hp->gatt = NULL;
			hp->gatt_entries = 0;
			return -ENODEV;
	}
	hp->io_page_size = 1 << hp->io_tlb_shift;
	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

	/* Bit 0 of IBASE is the enable flag; mask it off the address. */
	hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;

	hp->gart_size = HP_ZX1_GART_SIZE;
	hp->gatt_entries = hp->gart_size / hp->io_page_size;

	hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

	/* sba_iommu marks the GART half with a magic cookie when it
	 * reserves it for us; its absence means no AGP setup exists. */
	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
		/* Normal case when no AGP device in system */
	    	hp->gatt = NULL;
		hp->gatt_entries = 0;
		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
		       "GART disabled\n");
		return -ENODEV;
	}

	return 0;
}
127
128static int __init
129hp_zx1_ioc_owner (void)
130{
131 struct _hp_private *hp = &hp_private;
132
133 printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
134
135 /*
136 * Select an IOV page size no larger than system page size.
137 */
138 if (PAGE_SIZE >= KB(64)) {
139 hp->io_tlb_shift = 16;
140 hp->io_tlb_ps = 3;
141 } else if (PAGE_SIZE >= KB(16)) {
142 hp->io_tlb_shift = 14;
143 hp->io_tlb_ps = 2;
144 } else if (PAGE_SIZE >= KB(8)) {
145 hp->io_tlb_shift = 13;
146 hp->io_tlb_ps = 1;
147 } else {
148 hp->io_tlb_shift = 12;
149 hp->io_tlb_ps = 0;
150 }
151 hp->io_page_size = 1 << hp->io_tlb_shift;
152 hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
153
154 hp->iova_base = HP_ZX1_IOVA_BASE;
155 hp->gart_size = HP_ZX1_GART_SIZE;
156 hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
157
158 hp->gatt_entries = hp->gart_size / hp->io_page_size;
159 hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
160
161 return 0;
162}
163
164static int __init
165hp_zx1_ioc_init (u64 hpa)
166{
167 struct _hp_private *hp = &hp_private;
168
169 hp->ioc_regs = ioremap(hpa, 1024);
170 if (!hp->ioc_regs)
171 return -ENOMEM;
172
173 /*
174 * If the IOTLB is currently disabled, we can take it over.
175 * Otherwise, we have to share with sba_iommu.
176 */
177 hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
178
179 if (hp->io_pdir_owner)
180 return hp_zx1_ioc_owner();
181
182 return hp_zx1_ioc_shared();
183}
184
185static int
186hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
187{
188 u16 status;
189 u8 pos, id;
190 int ttl = 48;
191
192 status = readw(hpa+PCI_STATUS);
193 if (!(status & PCI_STATUS_CAP_LIST))
194 return 0;
195 pos = readb(hpa+PCI_CAPABILITY_LIST);
196 while (ttl-- && pos >= 0x40) {
197 pos &= ~3;
198 id = readb(hpa+pos+PCI_CAP_LIST_ID);
199 if (id == 0xff)
200 break;
201 if (id == cap)
202 return pos;
203 pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
204 }
205 return 0;
206}
207
/*
 * Map the LBA register window and locate its AGP capability.
 * Returns 0 on success, -ENOMEM if the mapping fails, -ENODEV if no
 * AGP capability is present.
 *
 * NOTE(review): if the capability is missing, lba_cap_offset is 0 and
 * the readl below fetches the device/vendor ID, whose low byte will
 * not equal PCI_CAP_ID_AGP — so the error path still triggers.  The
 * mapped lba_regs is left for hp_zx1_cleanup() to unmap; confirm the
 * caller always runs cleanup on failure.
 */
static int __init
hp_zx1_lba_init (u64 hpa)
{
	struct _hp_private *hp = &hp_private;
	int cap;

	hp->lba_regs = ioremap(hpa, 256);
	if (!hp->lba_regs)
		return -ENOMEM;

	hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);

	cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
	if (cap != PCI_CAP_ID_AGP) {
		printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
		       cap, hp->lba_cap_offset);
		return -ENODEV;
	}

	return 0;
}
229
230static int
231hp_zx1_fetch_size(void)
232{
233 int size;
234
235 size = hp_private.gart_size / MB(1);
236 hp_zx1_sizes[0].size = size;
237 agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
238 return size;
239}
240
/*
 * Program the bridge bookkeeping and, if we own the IOC, write the
 * full IOTLB configuration: PDIR base, page size, IOVA mask/base, and
 * a final flush of the whole IOVA range.  Each write is followed by a
 * read-back to force posting before the next register is touched;
 * the order of these writes is hardware-mandated — do not reorder.
 */
static int
hp_zx1_configure (void)
{
	struct _hp_private *hp = &hp_private;

	agp_bridge->gart_bus_addr = hp->gart_base;
	agp_bridge->capndx = hp->lba_cap_offset;
	agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);

	if (hp->io_pdir_owner) {
		writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
		readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
		writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
		readl(hp->ioc_regs+HP_ZX1_TCNFG);
		writel(~(HP_ZX1_IOVA_SIZE-1), hp->ioc_regs+HP_ZX1_IMASK);
		readl(hp->ioc_regs+HP_ZX1_IMASK);
		/* Setting bit 0 of IBASE enables the IOTLB. */
		writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
		readl(hp->ioc_regs+HP_ZX1_IBASE);
		writel(hp->iova_base|log2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
		readl(hp->ioc_regs+HP_ZX1_PCOM);
	}

	return 0;
}
265
266static void
267hp_zx1_cleanup (void)
268{
269 struct _hp_private *hp = &hp_private;
270
271 if (hp->ioc_regs) {
272 if (hp->io_pdir_owner) {
273 writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
274 readq(hp->ioc_regs+HP_ZX1_IBASE);
275 }
276 iounmap(hp->ioc_regs);
277 }
278 if (hp->lba_regs)
279 iounmap(hp->lba_regs);
280}
281
/*
 * Flush the IOC's IOTLB for the whole GART range by writing the base
 * and log2(size) to the PCOM (purge) register; the readq forces the
 * posted write to complete before we return.
 */
static void
hp_zx1_tlbflush (struct agp_memory *mem)
{
	struct _hp_private *hp = &hp_private;

	writeq(hp->gart_base | log2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
	readq(hp->ioc_regs+HP_ZX1_PCOM);
}
290
/*
 * Create the GATT.  When we own the IOC, allocate and zero a
 * physically-contiguous IO PDIR for the entire IOVA space and carve
 * the GART's portion out of it; in shared mode hp_zx1_ioc_shared()
 * already located the live PDIR.  Either way, seed every GART entry
 * with the scratch page.  Returns 0 or -ENOMEM.
 */
static int
hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
{
	struct _hp_private *hp = &hp_private;
	int i;

	if (hp->io_pdir_owner) {
		hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
						get_order(hp->io_pdir_size));
		if (!hp->io_pdir) {
			printk(KERN_ERR PFX "Couldn't allocate contiguous "
				"memory for I/O PDIR\n");
			hp->gatt = NULL;
			hp->gatt_entries = 0;
			return -ENOMEM;
		}
		memset(hp->io_pdir, 0, hp->io_pdir_size);

		hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
	}

	/* Mark every GART entry empty (scratch page, no valid bit). */
	for (i = 0; i < hp->gatt_entries; i++) {
		hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
	}

	return 0;
}
318
319static int
320hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
321{
322 struct _hp_private *hp = &hp_private;
323
324 if (hp->io_pdir_owner)
325 free_pages((unsigned long) hp->io_pdir,
326 get_order(hp->io_pdir_size));
327 else
328 hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
329 return 0;
330}
331
/*
 * Bind an agp_memory chunk.  The IOC's IO page size may be smaller
 * than the kernel page size, so each kernel page expands to
 * io_pages_per_kpage consecutive GATT entries (the inner k-loop).
 * Verifies range and emptiness first, flushes caches once, writes the
 * masked entries, then purges the IOTLB.  Returns 0, -EINVAL or -EBUSY.
 */
static int
hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
{
	struct _hp_private *hp = &hp_private;
	int i, k;
	off_t j, io_pg_start;
	int io_pg_count;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	/* Translate kernel-page units into IO-page units. */
	io_pg_start = hp->io_pages_per_kpage * pg_start;
	io_pg_count = hp->io_pages_per_kpage * mem->page_count;
	if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
		return -EINVAL;
	}

	/* Refuse to overwrite any slot already in use. */
	j = io_pg_start;
	while (j < (io_pg_start + io_pg_count)) {
		if (hp->gatt[j]) {
			return -EBUSY;
		}
		j++;
	}

	/* One cache flush per chunk. */
	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
		unsigned long paddr;

		paddr = mem->memory[i];
		/* Fan one kernel page out over its IO-page-sized slices. */
		for (k = 0;
		     k < hp->io_pages_per_kpage;
		     k++, j++, paddr += hp->io_page_size) {
			hp->gatt[j] =
				agp_bridge->driver->mask_memory(agp_bridge,
					paddr, type);
		}
	}

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
379
380static int
381hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
382{
383 struct _hp_private *hp = &hp_private;
384 int i, io_pg_start, io_pg_count;
385
386 if (type != 0 || mem->type != 0) {
387 return -EINVAL;
388 }
389
390 io_pg_start = hp->io_pages_per_kpage * pg_start;
391 io_pg_count = hp->io_pages_per_kpage * mem->page_count;
392 for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
393 hp->gatt[i] = agp_bridge->scratch_page;
394 }
395
396 agp_bridge->driver->tlb_flush(mem);
397 return 0;
398}
399
400static unsigned long
401hp_zx1_mask_memory (struct agp_bridge_data *bridge,
402 unsigned long addr, int type)
403{
404 return HP_ZX1_PDIR_VALID_BIT | addr;
405}
406
/*
 * hp_zx1_enable - negotiate and enable AGP on the LBA.
 *
 * Combines the LBA's AGP status register with the requested mode, forces
 * the AGP enable bit, programs the LBA's command register, and then
 * propagates the negotiated command to the AGP device(s).
 */
static void
hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
{
	struct _hp_private *hp = &hp_private;
	u32 command;

	command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
	command = agp_collect_device_status(bridge, mode, command);
	command |= 0x00000100;	/* AGP enable */

	writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);

	agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
421
/* Bridge driver operations for the HP ZX1 chipset AGP implementation. */
struct agp_bridge_driver hp_zx1_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.configure		= hp_zx1_configure,
	.fetch_size		= hp_zx1_fetch_size,
	.cleanup		= hp_zx1_cleanup,
	.tlb_flush		= hp_zx1_tlbflush,
	.mask_memory		= hp_zx1_mask_memory,
	.masks			= hp_zx1_masks,
	.agp_enable		= hp_zx1_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= hp_zx1_create_gatt_table,
	.free_gatt_table	= hp_zx1_free_gatt_table,
	.insert_memory		= hp_zx1_insert_memory,
	.remove_memory		= hp_zx1_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.cant_use_aperture	= 1,	/* aperture accesses must go through the IOMMU */
};
443
/*
 * hp_zx1_setup - map the IOC and LBA register spaces and register an AGP
 * bridge for them.
 *
 * There is no real PCI device for the LBA, so a synthetic one
 * (fake_bridge_dev) is handed to the AGP core.  On any failure the
 * partially-initialized state is torn down via hp_zx1_cleanup().
 *
 * Returns 0 on success or a negative errno.
 */
static int __init
hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
{
	struct agp_bridge_data *bridge;
	int error = 0;

	error = hp_zx1_ioc_init(ioc_hpa);
	if (error)
		goto fail;

	error = hp_zx1_lba_init(lba_hpa);
	if (error)
		goto fail;

	bridge = agp_alloc_bridge();
	if (!bridge) {
		error = -ENOMEM;
		goto fail;
	}
	bridge->driver = &hp_zx1_driver;

	fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
	fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
	bridge->dev = &fake_bridge_dev;

	error = agp_add_bridge(bridge);
fail:
	if (error)
		hp_zx1_cleanup();	/* safe on partial init: checks each mapping */
	return error;
}
475
476static acpi_status __init
477zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
478{
479 acpi_handle handle, parent;
480 acpi_status status;
481 struct acpi_buffer buffer;
482 struct acpi_device_info *info;
483 u64 lba_hpa, sba_hpa, length;
484 int match;
485
486 status = hp_acpi_csr_space(obj, &lba_hpa, &length);
487 if (ACPI_FAILURE(status))
488 return AE_OK; /* keep looking for another bridge */
489
490 /* Look for an enclosing IOC scope and find its CSR space */
491 handle = obj;
492 do {
493 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
494 status = acpi_get_object_info(handle, &buffer);
495 if (ACPI_SUCCESS(status)) {
496 /* TBD check _CID also */
497 info = buffer.pointer;
498 info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
499 match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
500 ACPI_MEM_FREE(info);
501 if (match) {
502 status = hp_acpi_csr_space(handle, &sba_hpa, &length);
503 if (ACPI_SUCCESS(status))
504 break;
505 else {
506 printk(KERN_ERR PFX "Detected HP ZX1 "
507 "AGP LBA but no IOC.\n");
508 return AE_OK;
509 }
510 }
511 }
512
513 status = acpi_get_parent(handle, &parent);
514 handle = parent;
515 } while (ACPI_SUCCESS(status));
516
517 if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
518 return AE_OK;
519
520 printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n",
521 (char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
522
523 hp_zx1_gart_found = 1;
524 return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
525}
526
527static int __init
528agp_hp_init (void)
529{
530 if (agp_off)
531 return -EINVAL;
532
533 acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
534 if (hp_zx1_gart_found)
535 return 0;
536
537 acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
538 if (hp_zx1_gart_found)
539 return 0;
540
541 return -ENODEV;
542}
543
/*
 * Module-level cleanup is intentionally empty; per-bridge resources are
 * released by hp_zx1_cleanup(), the driver's .cleanup hook.
 */
static void __exit
agp_hp_cleanup (void)
{
}
548
/* This driver is probed via ACPI namespace walk, not the PCI bus. */
module_init(agp_hp_init);
module_exit(agp_hp_cleanup);

MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
new file mode 100644
index 000000000000..adbea896c0d2
--- /dev/null
+++ b/drivers/char/agp/i460-agp.c
@@ -0,0 +1,642 @@
1/*
2 * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
3 * the "Intel 460GTX Chipset Software Developer's Manual":
4 * http://developer.intel.com/design/itanium/downloads/24870401s.htm
5 */
6/*
7 * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
8 * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/agp_backend.h>
14
15#include "agp.h"
16
/* i460 PCI config-space registers and GATT-entry control bits */
#define INTEL_I460_BAPBASE		0x98
#define INTEL_I460_GXBCTL		0xa0
#define INTEL_I460_AGPSIZ		0xa2
#define INTEL_I460_ATTBASE		0xfe200000	/* fixed physical address of the GATT SRAMs */
#define INTEL_I460_GATT_VALID		(1UL << 24)
#define INTEL_I460_GATT_COHERENT	(1UL << 25)

/*
 * The i460 can operate with large (4MB) pages, but there is no sane way to support this
 * within the current kernel/DRM environment, so we disable the relevant code for now.
 * See also comments in ia64_alloc_page()...
 */
#define I460_LARGE_IO_PAGES		0

#if I460_LARGE_IO_PAGES
# define I460_IO_PAGE_SHIFT		i460.io_page_shift
#else
# define I460_IO_PAGE_SHIFT		12
#endif

#define I460_IOPAGES_PER_KPAGE		(PAGE_SIZE >> I460_IO_PAGE_SHIFT)
#define I460_KPAGES_PER_IOPAGE		(1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
#define I460_SRAM_IO_DISABLE		(1 << 4)
#define I460_BAPBASE_ENABLE		(1 << 3)
#define I460_AGPSIZ_MASK		0x7
#define I460_4M_PS			(1 << 1)

/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG		(1UL << 0)
#define I460_GXBCTL_BWC		(1UL << 2)

/*
 * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
 * gatt_table and gatt_table_real pointers a "void *"...
 */
#define RD_GATT(index)		readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val)	writel((val), (u32 *) i460.gatt + (index))
/*
 * The 460 spec says we have to read the last location written to make sure that all
 * writes have taken effect
 */
#define WR_FLUSH_GATT(index)	RD_GATT(index)

#define log2(x)			ffz(~(x))
61
/* Driver-private state; a single instance, as only one 460GX bridge is handled. */
static struct {
	void *gatt;			/* ioremap'd GATT area */

	/* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
	u8 io_page_shift;

	/* BIOS configures chipset to one of 2 possible apbase values: */
	u8 dynamic_apbase;

	/* structure for tracking partial use of 4MB GART pages: */
	struct lp_desc {
		unsigned long *alloced_map;	/* bitmap of kernel-pages in use */
		int refcount;			/* number of kernel pages using the large page */
		u64 paddr;			/* physical address of large page */
	} *lp_desc;
} i460;
78
/* Aperture table entries: {size, num_entries, page_order, AGPSIZ size_value}. */
static struct aper_size_info_8 i460_sizes[3] =
{
	/*
	 * The 32GB aperture is only available with a 4M GART page size.  Due to the
	 * dynamic GART page size, we can't figure out page_order or num_entries until
	 * runtime, so they are left 0 here and filled in by i460_fetch_size().
	 */
	{32768, 0, 0, 4},
	{1024, 0, 0, 2},
	{256, 0, 0, 1}
};
90
/* Single GATT mask: every entry is marked valid and cache-coherent. */
static struct gatt_mask i460_masks[] =
{
	{
		.mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
		.type = 0
	}
};
98
/*
 * i460_fetch_size - read the chipset's GART page size and aperture size.
 *
 * Verifies that the hardware's GART page size matches what the driver was
 * compiled for, that the GART SRAM I/O drivers are enabled, and then fills
 * in num_entries/page_order for each aperture table entry before matching
 * the configured AGPSIZ value against the table.
 *
 * Returns the aperture size on success, 0 on any failure.
 */
static int i460_fetch_size (void)
{
	int i;
	u8 temp;
	struct aper_size_info_8 *values;

	/* Determine the GART page size */
	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
	i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
	pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);

	if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
		printk(KERN_ERR PFX
		       "I/O (GART) page-size %ZuKB doesn't match expected size %ZuKB\n",
		       1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT));
		return 0;
	}

	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);

	/* Exit now if the IO drivers for the GART SRAMS are turned off */
	if (temp & I460_SRAM_IO_DISABLE) {
		printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
		printk(KERN_ERR PFX "AGPGART operation not possible\n");
		return 0;
	}

	/* Make sure we don't try to create an 2 ^ 23 entry GATT */
	/*
	 * NOTE(review): io_page_shift is set to either 12 or 22 above, so
	 * this "== 0" condition can never be true; the 4KB-page case was
	 * presumably meant ("== 12") — confirm before changing.
	 */
	if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
		printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
		return 0;
	}

	/* Determine the proper APBASE register */
	if (temp & I460_BAPBASE_ENABLE)
		i460.dynamic_apbase = INTEL_I460_BAPBASE;
	else
		i460.dynamic_apbase = AGP_APBASE;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		/*
		 * Dynamically calculate the proper num_entries and page_order values for
		 * the define aperture sizes. Take care not to shift off the end of
		 * values[i].size.
		 */
		values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
		values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
	}

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		/* Neglect control bits when matching up size_value */
		if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
			agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
161
/* The 460 has no GART TLB, so there is nothing to flush. */
static void i460_tlb_flush (struct agp_memory *mem)
{
}
167
/*
 * This utility function is needed to prevent corruption of the control bits
 * which are stored along with the aperture size in 460's AGPSIZ register:
 * it performs a read-modify-write so only the size field changes.
 */
static void i460_write_agpsiz (u8 size_value)
{
	u8 temp;

	pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
	pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
			      ((temp & ~I460_AGPSIZ_MASK) | size_value));
}
180
/* Restore the pre-init aperture size and free the large-page trackers, if any. */
static void i460_cleanup (void)
{
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	i460_write_agpsiz(previous_size->size_value);

	/* lp_desc is only allocated when GART pages are bigger than kernel pages */
	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
		kfree(i460.lp_desc);
}
191
/*
 * i460_configure - program the chipset for the selected aperture size and
 * record where the aperture lives.
 *
 * Returns 0 on success, -ENOMEM if the large-page tracking array cannot
 * be allocated.
 */
static int i460_configure (void)
{
	union {
		u32 small[2];
		u64 large;
	} temp;
	size_t size;
	u8 scratch;
	struct aper_size_info_8 *current_size;

	temp.large = 0;

	current_size = A_SIZE_8(agp_bridge->current_size);
	i460_write_agpsiz(current_size->size_value);

	/*
	 * Do the necessary rigmarole to read all eight bytes of APBASE.
	 * This has to be done since the AGP aperture can be above 4GB on
	 * 460 based systems.
	 */
	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
	pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));

	/* Clear BAR control bits */
	agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);

	/* Preserve bit 1, set the coherency and burst-write-combining bits. */
	pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
	pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
			      (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);

	/*
	 * Initialize partial allocation trackers if a GART page is bigger than a kernel
	 * page.
	 */
	if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
		size = current_size->num_entries * sizeof(i460.lp_desc[0]);
		i460.lp_desc = kmalloc(size, GFP_KERNEL);
		if (!i460.lp_desc)
			return -ENOMEM;
		memset(i460.lp_desc, 0, size);	/* every large page starts unallocated */
	}
	return 0;
}
235
236static int i460_create_gatt_table (struct agp_bridge_data *bridge)
237{
238 int page_order, num_entries, i;
239 void *temp;
240
241 /*
242 * Load up the fixed address of the GART SRAMS which hold our GATT table.
243 */
244 temp = agp_bridge->current_size;
245 page_order = A_SIZE_8(temp)->page_order;
246 num_entries = A_SIZE_8(temp)->num_entries;
247
248 i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
249
250 /* These are no good, the should be removed from the agp_bridge strucure... */
251 agp_bridge->gatt_table_real = NULL;
252 agp_bridge->gatt_table = NULL;
253 agp_bridge->gatt_bus_addr = 0;
254
255 for (i = 0; i < num_entries; ++i)
256 WR_GATT(i, 0);
257 WR_FLUSH_GATT(i - 1);
258 return 0;
259}
260
261static int i460_free_gatt_table (struct agp_bridge_data *bridge)
262{
263 int num_entries, i;
264 void *temp;
265
266 temp = agp_bridge->current_size;
267
268 num_entries = A_SIZE_8(temp)->num_entries;
269
270 for (i = 0; i < num_entries; ++i)
271 WR_GATT(i, 0);
272 WR_FLUSH_GATT(num_entries - 1);
273
274 iounmap(i460.gatt);
275 return 0;
276}
277
278/*
279 * The following functions are called when the I/O (GART) page size is smaller than
280 * PAGE_SIZE.
281 */
282
283static int i460_insert_memory_small_io_page (struct agp_memory *mem,
284 off_t pg_start, int type)
285{
286 unsigned long paddr, io_pg_start, io_page_size;
287 int i, j, k, num_entries;
288 void *temp;
289
290 pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
291 mem, pg_start, type, mem->memory[0]);
292
293 io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
294
295 temp = agp_bridge->current_size;
296 num_entries = A_SIZE_8(temp)->num_entries;
297
298 if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
299 printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
300 return -EINVAL;
301 }
302
303 j = io_pg_start;
304 while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
305 if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
306 pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
307 j, RD_GATT(j));
308 return -EBUSY;
309 }
310 j++;
311 }
312
313 io_page_size = 1UL << I460_IO_PAGE_SHIFT;
314 for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
315 paddr = mem->memory[i];
316 for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
317 WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
318 paddr, mem->type));
319 }
320 WR_FLUSH_GATT(j - 1);
321 return 0;
322}
323
324static int i460_remove_memory_small_io_page(struct agp_memory *mem,
325 off_t pg_start, int type)
326{
327 int i;
328
329 pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
330 mem, pg_start, type);
331
332 pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
333
334 for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
335 WR_GATT(i, 0);
336 WR_FLUSH_GATT(i - 1);
337 return 0;
338}
339
340#if I460_LARGE_IO_PAGES
341
342/*
343 * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
344 *
345 * This situation is interesting since AGP memory allocations that are smaller than a
346 * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
347 * large GART pages to work around this issue.
348 *
349 * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
350 * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
351 * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
352 */
353
/*
 * Allocate one large (4MB) GART page plus a bitmap that tracks which of
 * its kernel-sized sub-pages are handed out.  Returns 0 or -ENOMEM.
 */
static int i460_alloc_large_page (struct lp_desc *lp)
{
	unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
	size_t map_size;
	void *lpage;

	lpage = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!lpage) {
		printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
		return -ENOMEM;
	}

	/* one bit per kernel page, rounded up to a whole number of longs */
	map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
	lp->alloced_map = kmalloc(map_size, GFP_KERNEL);
	if (!lp->alloced_map) {
		free_pages((unsigned long) lpage, order);
		printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
		return -ENOMEM;
	}
	memset(lp->alloced_map, 0, map_size);

	lp->paddr = virt_to_phys(lpage);
	lp->refcount = 0;
	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
	return 0;
}
380
381static void i460_free_large_page (struct lp_desc *lp)
382{
383 kfree(lp->alloced_map);
384 lp->alloced_map = NULL;
385
386 free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
387 atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
388}
389
/*
 * Map @mem into the aperture using large GART pages: allocate large pages
 * on demand, mark the kernel-page slices in each page's bitmap, and fill
 * in mem->memory[] here (allocation was deferred, see i460_alloc_page).
 *
 * Returns 0, -EINVAL (out of aperture), -EBUSY (slice in use) or -ENOMEM.
 */
static int i460_insert_memory_large_io_page (struct agp_memory *mem,
					     off_t pg_start, int type)
{
	int i, start_offset, end_offset, idx, pg, num_entries;
	struct lp_desc *start, *end, *lp;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_8(temp)->num_entries;

	/* Figure out what pg_start means in terms of our large GART pages */
	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

	/*
	 * NOTE(review): the last valid descriptor is lp_desc[num_entries-1],
	 * so this bound looks like it should be ">=" — confirm before changing.
	 */
	if (end > i460.lp_desc + num_entries) {
		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
		return -EINVAL;
	}

	/* Check if the requested region of the aperture is free */
	for (lp = start; lp <= end; ++lp) {
		if (!lp->alloced_map)
			continue;	/* OK, the entire large page is available... */

		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++)
		{
			if (test_bit(idx, lp->alloced_map))
				return -EBUSY;
		}
	}

	for (lp = start, i = 0; lp <= end; ++lp) {
		if (!lp->alloced_map) {
			/* Allocate new GART pages... */
			if (i460_alloc_large_page(lp) < 0)
				return -ENOMEM;
			pg = lp - i460.lp_desc;
			WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
								    lp->paddr, 0));
			WR_FLUSH_GATT(pg);
		}

		for (idx = ((lp == start) ? start_offset : 0);
		     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
		     idx++, i++)
		{
			mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
			__set_bit(idx, lp->alloced_map);
			++lp->refcount;
		}
	}
	return 0;
}
447
448static int i460_remove_memory_large_io_page (struct agp_memory *mem,
449 off_t pg_start, int type)
450{
451 int i, pg, start_offset, end_offset, idx, num_entries;
452 struct lp_desc *start, *end, *lp;
453 void *temp;
454
455 temp = agp_bridge->driver->current_size;
456 num_entries = A_SIZE_8(temp)->num_entries;
457
458 /* Figure out what pg_start means in terms of our large GART pages */
459 start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
460 end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
461 start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
462 end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
463
464 for (i = 0, lp = start; lp <= end; ++lp) {
465 for (idx = ((lp == start) ? start_offset : 0);
466 idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
467 idx++, i++)
468 {
469 mem->memory[i] = 0;
470 __clear_bit(idx, lp->alloced_map);
471 --lp->refcount;
472 }
473
474 /* Free GART pages if they are unused */
475 if (lp->refcount == 0) {
476 pg = lp - i460.lp_desc;
477 WR_GATT(pg, 0);
478 WR_FLUSH_GATT(pg);
479 i460_free_large_page(lp);
480 }
481 }
482 return 0;
483}
484
485/* Wrapper routines to call the approriate {small_io_page,large_io_page} function */
486
487static int i460_insert_memory (struct agp_memory *mem,
488 off_t pg_start, int type)
489{
490 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
491 return i460_insert_memory_small_io_page(mem, pg_start, type);
492 else
493 return i460_insert_memory_large_io_page(mem, pg_start, type);
494}
495
496static int i460_remove_memory (struct agp_memory *mem,
497 off_t pg_start, int type)
498{
499 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
500 return i460_remove_memory_small_io_page(mem, pg_start, type);
501 else
502 return i460_remove_memory_large_io_page(mem, pg_start, type);
503}
504
/*
 * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
 * allocate memory until we know where it is to be bound in the aperture (a
 * multi-kernel-page alloc might fit inside of an already allocated GART page).
 *
 * Let's just hope nobody counts on the allocated AGP memory being there before bind time
 * (I don't think current drivers do)...
 */
static void *i460_alloc_page (struct agp_bridge_data *bridge)
{
	void *page;

	if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
		page = agp_generic_alloc_page(agp_bridge);
	else
		/* Returning NULL would cause problems */
		/* AK: really dubious code. */
		page = (void *)~0UL;	/* non-NULL placeholder; real memory arrives at bind time */
	return page;
}
525
526static void i460_destroy_page (void *page)
527{
528 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
529 agp_generic_destroy_page(page);
530}
531
532#endif /* I460_LARGE_IO_PAGES */
533
/*
 * Convert a physical address to a 32-bit GATT entry: keep the frame
 * number above the GART page size and OR in the VALID/COHERENT bits.
 *
 * NOTE(review): 0xffffff000 (9 hex digits) presumably limits the entry to
 * a 36-bit physical address — confirm against the 460GX manual.
 */
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
				       unsigned long addr, int type)
{
	/* Make sure the returned address is a valid GATT entry */
	return bridge->driver->masks[0].mask
		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12);
}
541
/*
 * Bridge driver operations for the Intel 460GX.  The memory hooks are
 * chosen at compile time: large-GART-page support is compiled out by
 * default (see I460_LARGE_IO_PAGES above), so the small-page paths and
 * generic page allocators are used directly.
 */
struct agp_bridge_driver intel_i460_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= i460_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 3,
	.configure		= i460_configure,
	.fetch_size		= i460_fetch_size,
	.cleanup		= i460_cleanup,
	.tlb_flush		= i460_tlb_flush,
	.mask_memory		= i460_mask_memory,
	.masks			= i460_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= i460_create_gatt_table,
	.free_gatt_table	= i460_free_gatt_table,
#if I460_LARGE_IO_PAGES
	.insert_memory		= i460_insert_memory,
	.remove_memory		= i460_remove_memory,
	.agp_alloc_page		= i460_alloc_page,
	.agp_destroy_page	= i460_destroy_page,
#else
	.insert_memory		= i460_insert_memory_small_io_page,
	.remove_memory		= i460_remove_memory_small_io_page,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
#endif
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.cant_use_aperture	= 1,
};
572
573static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
574 const struct pci_device_id *ent)
575{
576 struct agp_bridge_data *bridge;
577 u8 cap_ptr;
578
579 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
580 if (!cap_ptr)
581 return -ENODEV;
582
583 bridge = agp_alloc_bridge();
584 if (!bridge)
585 return -ENOMEM;
586
587 bridge->driver = &intel_i460_driver;
588 bridge->dev = pdev;
589 bridge->capndx = cap_ptr;
590
591 printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
592
593 pci_set_drvdata(pdev, bridge);
594 return agp_add_bridge(bridge);
595}
596
597static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
598{
599 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
600
601 agp_remove_bridge(bridge);
602 agp_put_bridge(bridge);
603}
604
/* Match the Intel 84460GX host bridge (host-bridge class, exact device id). */
static struct pci_device_id agp_intel_i460_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_INTEL,
	.device		= PCI_DEVICE_ID_INTEL_84460GX,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
618
/* PCI driver glue tying the device table to probe/remove. */
static struct pci_driver agp_intel_i460_pci_driver = {
	.name		= "agpgart-intel-i460",
	.id_table	= agp_intel_i460_pci_table,
	.probe		= agp_intel_i460_probe,
	.remove		= __devexit_p(agp_intel_i460_remove),
};
625
626static int __init agp_intel_i460_init(void)
627{
628 if (agp_off)
629 return -EINVAL;
630 return pci_register_driver(&agp_intel_i460_pci_driver);
631}
632
/* Unregister the PCI driver on module unload. */
static void __exit agp_intel_i460_cleanup(void)
{
	pci_unregister_driver(&agp_intel_i460_pci_driver);
}
637
638module_init(agp_intel_i460_init);
639module_exit(agp_intel_i460_cleanup);
640
641MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
642MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
new file mode 100644
index 000000000000..8c7d727432bb
--- /dev/null
+++ b/drivers/char/agp/intel-agp.c
@@ -0,0 +1,1833 @@
1/*
2 * Intel AGPGART routines.
3 */
4
5/*
6 * Intel(R) 855GM/852GM and 865G support added by David Dawes
7 * <dawes@tungstengraphics.com>.
8 *
9 * Intel(R) 915G/915GM support added by Alan Hourihane
10 * <alanh@tungstengraphics.com>.
11 */
12
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/init.h>
16#include <linux/pagemap.h>
17#include <linux/agp_backend.h>
18#include "agp.h"
19
20/* Intel 815 register */
21#define INTEL_815_APCONT 0x51
22#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
23
24/* Intel i820 registers */
25#define INTEL_I820_RDCR 0x51
26#define INTEL_I820_ERRSTS 0xc8
27
28/* Intel i840 registers */
29#define INTEL_I840_MCHCFG 0x50
30#define INTEL_I840_ERRSTS 0xc8
31
32/* Intel i850 registers */
33#define INTEL_I850_MCHCFG 0x50
34#define INTEL_I850_ERRSTS 0xc8
35
36/* intel 915G registers */
37#define I915_GMADDR 0x18
38#define I915_MMADDR 0x10
39#define I915_PTEADDR 0x1C
40#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
41#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
42
43
44/* Intel 7505 registers */
45#define INTEL_I7505_APSIZE 0x74
46#define INTEL_I7505_NCAPID 0x60
47#define INTEL_I7505_NISTAT 0x6c
48#define INTEL_I7505_ATTBASE 0x78
49#define INTEL_I7505_ERRSTS 0x42
50#define INTEL_I7505_AGPCTRL 0x70
51#define INTEL_I7505_MCHCFG 0x50
52
/* Fixed aperture descriptors for the i810: {size in MB, num entries,
 * page order}.  Index 0 = 64MB aperture, index 1 = 32MB aperture. */
static struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
59
60#define AGP_DCACHE_MEMORY 1
61#define AGP_PHYS_MEMORY 2
62
/* PTE flag masks per memory type: index 0 = normal, index
 * AGP_DCACHE_MEMORY = on-chip dcache (local bit set), index 2 = phys. */
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0}
};
69
/* Driver-private i810 state: the IGD PCI device, its ioremapped MMIO
 * register window, and the number of dedicated-video-ram GTT entries. */
static struct _intel_i810_private {
	struct pci_dev *i810_dev;	/* device one */
	volatile u8 __iomem *registers;
	int num_dcache_entries;
} intel_i810_private;
75
76static int intel_i810_fetch_size(void)
77{
78 u32 smram_miscc;
79 struct aper_size_info_fixed *values;
80
81 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
82 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
83
84 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
85 printk(KERN_WARNING PFX "i810 is disabled\n");
86 return 0;
87 }
88 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
89 agp_bridge->previous_size =
90 agp_bridge->current_size = (void *) (values + 1);
91 agp_bridge->aperture_size_idx = 1;
92 return values[1].size;
93 } else {
94 agp_bridge->previous_size =
95 agp_bridge->current_size = (void *) (values);
96 agp_bridge->aperture_size_idx = 0;
97 return values[0].size;
98 }
99
100 return 0;
101}
102
/*
 * Bring up the i810 IGD: map its MMIO registers, detect dedicated video
 * ram, record the aperture bus address, enable the page table, and (if
 * requested) point every GTT entry at the scratch page.
 * Returns 0 on success, -ENOMEM if the MMIO window cannot be mapped.
 */
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* MMADDR bits 31:19 give the MMIO register base. */
	pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
	temp &= 0xfff80000;

	intel_i810_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_i810_private.registers) {
		printk(KERN_ERR PFX "Unable to remap memory.\n");
		return -ENOMEM;
	}

	/* SDRAM in row 0 means the 4MB dedicated display cache exists. */
	if ((readl(intel_i810_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n");
		intel_i810_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_i810_private.i810_dev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_i810_private.registers+I810_PGETBL_CTL);
	readl(intel_i810_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Fill the whole GTT with the scratch page. */
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
			readl(intel_i810_private.registers+I810_PTE_BASE+(i*4));	/* PCI posting. */
		}
	}
	global_cache_flush();
	return 0;
}
140
/* Disable the page table and tear down the MMIO mapping set up in
 * intel_i810_configure(). */
static void intel_i810_cleanup(void)
{
	writel(0, intel_i810_private.registers+I810_PGETBL_CTL);
	readl(intel_i810_private.registers);	/* PCI Posting. */
	iounmap(intel_i810_private.registers);
}
147
/* The i810 needs no explicit TLB flush; this hook is a deliberate no-op. */
static void intel_i810_tlbflush(struct agp_memory *mem)
{
}
152
153static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
154{
155 return;
156}
157
158/* Exists to support ARGB cursors */
159static void *i8xx_alloc_pages(void)
160{
161 struct page * page;
162
163 page = alloc_pages(GFP_KERNEL, 2);
164 if (page == NULL)
165 return NULL;
166
167 if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) {
168 global_flush_tlb();
169 __free_page(page);
170 return NULL;
171 }
172 global_flush_tlb();
173 get_page(page);
174 SetPageLocked(page);
175 atomic_inc(&agp_bridge->current_memory_agp);
176 return page_address(page);
177}
178
/* Undo i8xx_alloc_pages(): restore the pages' caching attribute, drop
 * the extra reference and lock taken at allocation, and free the order-2
 * block.  Accepts NULL. */
static void i8xx_destroy_pages(void *addr)
{
	struct page *page;

	if (addr == NULL)
		return;

	page = virt_to_page(addr);
	change_page_attr(page, 4, PAGE_KERNEL);
	global_flush_tlb();
	put_page(page);
	unlock_page(page);
	free_pages((unsigned long)addr, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
194
/*
 * Write PTEs for 'mem' into the i810 GTT starting at pg_start.
 * Three paths: AGP_DCACHE_MEMORY maps the on-chip display cache with the
 * LOCAL bit, AGP_PHYS_MEMORY and type 0 map system pages via the
 * driver's mask_memory().  Returns 0 on success, -EINVAL on bad
 * range/type, -EBUSY if any target slot is already populated.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}
	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
			return -EBUSY;
	}

	if (type != 0 || mem->type != 0) {
		if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
			/* special insert: map the dcache pages themselves
			 * (i*4096) with the LOCAL|VALID bits set */
			global_cache_flush();
			for (i = pg_start; i < (pg_start + mem->page_count); i++) {
				writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
				readl(intel_i810_private.registers+I810_PTE_BASE+(i*4));	/* PCI Posting. */
			}
			global_cache_flush();
			agp_bridge->driver->tlb_flush(mem);
			return 0;
		}
		if((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
			goto insert;
		return -EINVAL;
	}

	/* NOTE: type 0 / mem->type 0 falls through to this label as well. */
insert:
	global_cache_flush();
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mem->type),
			intel_i810_private.registers+I810_PTE_BASE+(j*4));
		readl(intel_i810_private.registers+I810_PTE_BASE+(j*4));	/* PCI Posting. */
	}
	global_cache_flush();

	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
242
243static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
244 int type)
245{
246 int i;
247
248 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
249 writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
250 readl(intel_i810_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
251 }
252
253 global_cache_flush();
254 agp_bridge->driver->tlb_flush(mem);
255 return 0;
256}
257
258/*
259 * The i810/i830 requires a physical address to program its mouse
260 * pointer into hardware.
261 * However the Xserver still writes to it through the agp aperture.
262 */
263static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
264{
265 struct agp_memory *new;
266 void *addr;
267
268 if (pg_count != 1 && pg_count != 4)
269 return NULL;
270
271 switch (pg_count) {
272 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
273 break;
274 case 4:
275 /* kludge to get 4 physical pages for ARGB cursor */
276 addr = i8xx_alloc_pages();
277 break;
278 default:
279 return NULL;
280 }
281
282 if (addr == NULL)
283 return NULL;
284
285 new = agp_create_memory(pg_count);
286 if (new == NULL)
287 return NULL;
288
289 new->memory[0] = virt_to_phys(addr);
290 if (pg_count == 4) {
291 /* kludge to get 4 physical pages for ARGB cursor */
292 new->memory[1] = new->memory[0] + PAGE_SIZE;
293 new->memory[2] = new->memory[1] + PAGE_SIZE;
294 new->memory[3] = new->memory[2] + PAGE_SIZE;
295 }
296 new->page_count = pg_count;
297 new->num_scratch_pages = pg_count;
298 new->type = AGP_PHYS_MEMORY;
299 new->physical = new->memory[0];
300 return new;
301}
302
/*
 * Type-specific allocator.  AGP_DCACHE_MEMORY must request exactly the
 * detected dcache size; the backing page array is vfree'd because the
 * dcache needs no system pages (the PTEs reference on-chip memory).
 * AGP_PHYS_MEMORY is delegated to alloc_agpphysmem_i8xx(); all other
 * types are rejected.
 */
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_i810_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		/* no system pages back the dcache; drop the array */
		vfree(new->memory);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);

	return NULL;
}
326
/* Release memory obtained from intel_i810_alloc_by_type(): free the
 * backing pages for AGP_PHYS_MEMORY (4-page ARGB block or single page),
 * then the key and the agp_memory itself.  For dcache memory the page
 * array was already vfree'd at allocation time. */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if(curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
		else
			agp_bridge->driver->agp_destroy_page(
				 phys_to_virt(curr->memory[0]));
		vfree(curr->memory);
	}
	kfree(curr);
}
340
341static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
342 unsigned long addr, int type)
343{
344 /* Type checking must be done elsewhere */
345 return addr | bridge->driver->masks[type].mask;
346}
347
/* Fixed aperture descriptors for i830-class chips: {MB, entries, page
 * order}.  Index 0 = 128MB, 1 = 64MB, 2 = 256MB (i915). */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
};
355
/* Driver-private i830/i915 state: IGD device, MMIO register mapping,
 * separate GTT mapping (i915 only), and the count of GTT entries covering
 * BIOS-stolen/local memory that must never be remapped. */
static struct _intel_i830_private {
	struct pci_dev *i830_dev;	/* device one */
	volatile u8 __iomem *registers;
	volatile u32 __iomem *gtt;	/* I915G */
	int gtt_entries;
} intel_i830_private;
362
363static void intel_i830_init_gtt_entries(void)
364{
365 u16 gmch_ctrl;
366 int gtt_entries;
367 u8 rdct;
368 int local = 0;
369 static const int ddt[4] = { 0, 16, 32, 64 };
370 int size;
371
372 pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
373
374 /* We obtain the size of the GTT, which is also stored (for some
375 * reason) at the top of stolen memory. Then we add 4KB to that
376 * for the video BIOS popup, which is also stored in there. */
377 size = agp_bridge->driver->fetch_size() + 4;
378
379 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
380 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
381 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
382 case I830_GMCH_GMS_STOLEN_512:
383 gtt_entries = KB(512) - KB(size);
384 break;
385 case I830_GMCH_GMS_STOLEN_1024:
386 gtt_entries = MB(1) - KB(size);
387 break;
388 case I830_GMCH_GMS_STOLEN_8192:
389 gtt_entries = MB(8) - KB(size);
390 break;
391 case I830_GMCH_GMS_LOCAL:
392 rdct = readb(intel_i830_private.registers+I830_RDRAM_CHANNEL_TYPE);
393 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
394 MB(ddt[I830_RDRAM_DDT(rdct)]);
395 local = 1;
396 break;
397 default:
398 gtt_entries = 0;
399 break;
400 }
401 } else {
402 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
403 case I855_GMCH_GMS_STOLEN_1M:
404 gtt_entries = MB(1) - KB(size);
405 break;
406 case I855_GMCH_GMS_STOLEN_4M:
407 gtt_entries = MB(4) - KB(size);
408 break;
409 case I855_GMCH_GMS_STOLEN_8M:
410 gtt_entries = MB(8) - KB(size);
411 break;
412 case I855_GMCH_GMS_STOLEN_16M:
413 gtt_entries = MB(16) - KB(size);
414 break;
415 case I855_GMCH_GMS_STOLEN_32M:
416 gtt_entries = MB(32) - KB(size);
417 break;
418 case I915_GMCH_GMS_STOLEN_48M:
419 /* Check it's really I915G */
420 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
421 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
422 gtt_entries = MB(48) - KB(size);
423 else
424 gtt_entries = 0;
425 break;
426 case I915_GMCH_GMS_STOLEN_64M:
427 /* Check it's really I915G */
428 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
429 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
430 gtt_entries = MB(64) - KB(size);
431 else
432 gtt_entries = 0;
433 default:
434 gtt_entries = 0;
435 break;
436 }
437 }
438 if (gtt_entries > 0)
439 printk(KERN_INFO PFX "Detected %dK %s memory.\n",
440 gtt_entries / KB(1), local ? "local" : "stolen");
441 else
442 printk(KERN_INFO PFX
443 "No pre-allocated video memory detected.\n");
444 gtt_entries /= KB(4);
445
446 intel_i830_private.gtt_entries = gtt_entries;
447}
448
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 *
 * Maps the MMIO window, reads the BIOS-programmed GTT base out of
 * PGETBL_CTL, and records it as gatt_bus_addr; no table is allocated
 * (gatt_table/gatt_table_real stay NULL).  Returns 0 or -ENOMEM.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	/* MMADDR bits 31:19 give the MMIO register base. */
	pci_read_config_dword(intel_i830_private.i830_dev,I810_MMADDR,&temp);
	temp &= 0xfff80000;

	intel_i830_private.registers = ioremap(temp,128 * 4096);
	if (!intel_i830_private.registers)
		return -ENOMEM;

	temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
483
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 *
 * Nothing to free: the table lives in BIOS-owned stolen memory and was
 * never allocated by us.  Always returns 0. */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
491
492static int intel_i830_fetch_size(void)
493{
494 u16 gmch_ctrl;
495 struct aper_size_info_fixed *values;
496
497 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
498
499 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
500 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
501 /* 855GM/852GM/865G has 128MB aperture size */
502 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
503 agp_bridge->aperture_size_idx = 0;
504 return values[0].size;
505 }
506
507 pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
508
509 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
510 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
511 agp_bridge->aperture_size_idx = 0;
512 return values[0].size;
513 } else {
514 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
515 agp_bridge->aperture_size_idx = 1;
516 return values[1].size;
517 }
518
519 return 0;
520}
521
/*
 * Bring up the i830: record the aperture bus address, set the GMCH
 * enable bit, enable the page table, and scratch-fill every GTT entry
 * beyond the stolen-memory region.  Returns 0.
 */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_i830_private.i830_dev,I810_GMADDR,&temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
	readl(intel_i830_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Leave the stolen-memory entries alone; scratch the rest. */
		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
			readl(intel_i830_private.registers+I810_PTE_BASE+(i*4));	/* PCI Posting. */
		}
	}

	global_cache_flush();
	return 0;
}
551
/* Tear down the MMIO mapping created in intel_i830_create_gatt_table(). */
static void intel_i830_cleanup(void)
{
	iounmap(intel_i830_private.registers);
}
556
/*
 * Write PTEs for 'mem' into the i830 GTT (via the MMIO window) starting
 * at pg_start.  Refuses to touch the stolen/local-memory region and to
 * overrun the table; only type 0 and AGP_PHYS_MEMORY are accepted.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int type)
{
	int i,j,num_entries;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_i830_private.gtt_entries) {
		printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
				pg_start,intel_i830_private.gtt_entries);

		printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
		return -EINVAL;
	}

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
		(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
		return -EINVAL;

	global_cache_flush();	/* FIXME: Necessary ?*/

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mem->type),
			intel_i830_private.registers+I810_PTE_BASE+(j*4));
		readl(intel_i830_private.registers+I810_PTE_BASE+(j*4));	/* PCI Posting. */
	}

	global_cache_flush();
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
597
/*
 * Unmap 'mem' from the i830 GTT by scratch-filling the affected slots.
 * The stolen/local-memory region is protected.  Returns 0 or -EINVAL.
 */
static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
				int type)
{
	int i;

	global_cache_flush();

	if (pg_start < intel_i830_private.gtt_entries) {
		printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
		readl(intel_i830_private.registers+I810_PTE_BASE+(i*4));	/* PCI Posting. */
	}

	global_cache_flush();
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
619
620static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
621{
622 if (type == AGP_PHYS_MEMORY)
623 return alloc_agpphysmem_i8xx(pg_count, type);
624
625 /* always return NULL for other allocation types for now */
626 return NULL;
627}
628
/*
 * Bring up the i915: record the aperture bus address (from I915_GMADDR),
 * set the GMCH enable bit, enable the page table, and scratch-fill the
 * non-stolen GTT entries through the dedicated GTT mapping.  Returns 0.
 */
static int intel_i915_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
	readl(intel_i830_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		/* Unlike the i830, PTEs are written via the gtt mapping. */
		for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
			readl(intel_i830_private.gtt+i);	/* PCI Posting. */
		}
	}

	global_cache_flush();
	return 0;
}
659
/* Tear down both mappings created in intel_i915_create_gatt_table(). */
static void intel_i915_cleanup(void)
{
	iounmap(intel_i830_private.gtt);
	iounmap(intel_i830_private.registers);
}
665
/*
 * Write PTEs for 'mem' into the i915 GTT (via the dedicated GTT mapping,
 * not the MMIO window) starting at pg_start.  Same guards as the i830
 * variant: no stolen-memory writes, no table overrun, types 0 and
 * AGP_PHYS_MEMORY only.  Returns 0 or -EINVAL.
 */
static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
				int type)
{
	int i,j,num_entries;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_i830_private.gtt_entries) {
		printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_i830_private.gtt_entries == 0x%.8x\n",
				pg_start,intel_i830_private.gtt_entries);

		printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
		return -EINVAL;
	}

	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if ((type != 0 && type != AGP_PHYS_MEMORY) ||
		(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
		return -EINVAL;

	global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
			mem->memory[i], mem->type), intel_i830_private.gtt+j);
		readl(intel_i830_private.gtt+j);	/* PCI Posting. */
	}

	global_cache_flush();
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
706
/*
 * Unmap 'mem' from the i915 GTT by scratch-filling the affected slots
 * through the GTT mapping.  Stolen-memory region is protected.
 * Returns 0 or -EINVAL.
 */
static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
				int type)
{
	int i;

	global_cache_flush();

	if (pg_start < intel_i830_private.gtt_entries) {
		printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
		readl(intel_i830_private.gtt+i);
	}

	global_cache_flush();
	agp_bridge->driver->tlb_flush(mem);
	return 0;
}
728
/*
 * Determine the i915 aperture size from GMADDR bit 27: bit set means the
 * 128MB descriptor (index 0), clear means 256MB (index 2).  Records the
 * descriptor and returns the size in MB.
 */
static int intel_i915_fetch_size(void)
{
	struct aper_size_info_fixed *values;
	u32 temp, offset = 0;

#define I915_256MB_ADDRESS_MASK (1<<27)

	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
	if (temp & I915_256MB_ADDRESS_MASK)
		offset = 0;	/* 128MB aperture */
	else
		offset = 2;	/* 256MB aperture */
	agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
	return values[offset].size;
}
746
747/* The intel i915 automatically initializes the agp aperture during POST.
748 * Use the memory already set aside for in the GTT.
749 */
750static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
751{
752 int page_order;
753 struct aper_size_info_fixed *size;
754 int num_entries;
755 u32 temp, temp2;
756
757 size = agp_bridge->current_size;
758 page_order = size->page_order;
759 num_entries = size->num_entries;
760 agp_bridge->gatt_table_real = NULL;
761
762 pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
763 pci_read_config_dword(intel_i830_private.i830_dev, I915_PTEADDR,&temp2);
764
765 intel_i830_private.gtt = ioremap(temp2, 256 * 1024);
766 if (!intel_i830_private.gtt)
767 return -ENOMEM;
768
769 temp &= 0xfff80000;
770
771 intel_i830_private.registers = ioremap(temp,128 * 4096);
772 if (!intel_i830_private.registers)
773 return -ENOMEM;
774
775 temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
776 global_cache_flush(); /* FIXME: ? */
777
778 /* we have to call this as early as possible after the MMIO base address is known */
779 intel_i830_init_gtt_entries();
780
781 agp_bridge->gatt_table = NULL;
782
783 agp_bridge->gatt_bus_addr = temp;
784
785 return 0;
786}
787
788static int intel_fetch_size(void)
789{
790 int i;
791 u16 temp;
792 struct aper_size_info_16 *values;
793
794 pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
795 values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
796
797 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
798 if (temp == values[i].size_value) {
799 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
800 agp_bridge->aperture_size_idx = i;
801 return values[i].size;
802 }
803 }
804
805 return 0;
806}
807
808static int __intel_8xx_fetch_size(u8 temp)
809{
810 int i;
811 struct aper_size_info_8 *values;
812
813 values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
814
815 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
816 if (temp == values[i].size_value) {
817 agp_bridge->previous_size =
818 agp_bridge->current_size = (void *) (values + i);
819 agp_bridge->aperture_size_idx = i;
820 return values[i].size;
821 }
822 }
823 return 0;
824}
825
/* Read APSIZE and delegate the table lookup to __intel_8xx_fetch_size(). */
static int intel_8xx_fetch_size(void)
{
	u8 temp;

	pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
	return __intel_8xx_fetch_size(temp);
}
833
/* i815 variant of the APSIZE lookup; only bit 3 is meaningful. */
static int intel_815_fetch_size(void)
{
	u8 temp;

	/* Intel 815 chipsets have a _weird_ APSIZE register with only
	 * one non-reserved bit, so mask the others out ... */
	pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
	temp &= (1 << 3);

	return __intel_8xx_fetch_size(temp);
}
845
/* 440-class TLB flush: pulse AGPCTRL through the documented values. */
static void intel_tlbflush(struct agp_memory *mem)
{
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
851
852
/* 8xx-class TLB flush: toggle AGPCTRL bit 7 off, then on again. */
static void intel_8xx_tlbflush(struct agp_memory *mem)
{
	u32 temp;
	pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
	pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
}
861
862
/* Undo intel_configure(): clear the NBXCFG aperture-access bit and
 * restore the previous aperture size. */
static void intel_cleanup(void)
{
	u16 temp;
	struct aper_size_info_16 *previous_size;

	previous_size = A_SIZE_16(agp_bridge->previous_size);
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
}
873
874
/* Undo the generic 8xx configure: clear the NBXCFG aperture-access bit
 * and restore the previous (8-bit encoded) aperture size. */
static void intel_8xx_cleanup(void)
{
	u16 temp;
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
}
885
886
/*
 * Program a 440-class bridge: write the aperture size, record the
 * aperture bus address, point ATTBASE at the GATT, set AGPCTRL, enable
 * aperture access in NBXCFG, and clear stale error status.  Returns 0.
 */
static int intel_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);

	/* paccfg/nbxcfg */
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
			(temp2 & ~(1 << 10)) | (1 << 9));
	/* clear any possible error conditions */
	pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
	return 0;
}
916
/*
 * Program the i815 bridge.  ATTBASE bits 29-31 are reserved on this
 * chip, so the GATT address is merged in with read-modify-write and a
 * too-high address is rejected up front.  Returns 0 or -EINVAL.
 */
static int intel_815_configure(void)
{
	u32 temp, addr;
	u8 temp2;
	struct aper_size_info_8 *current_size;

	/* attbase - aperture base */
	/* the Intel 815 chipset spec. says that bits 29-31 in the
	* ATTBASE register are reserved -> try not to write them */
	if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
		printk (KERN_EMERG PFX "gatt bus addr too high");
		return -EINVAL;
	}

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
			current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
	addr &= INTEL_815_ATTBASE_MASK;
	addr |= agp_bridge->gatt_bus_addr;
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* apcont */
	pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
	pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));

	/* clear any possible error conditions */
	/* Oddness : this chipset seems to have no ERRSTS register ! */
	return 0;
}
957
/* The i820 needs no explicit TLB flush; this hook is a deliberate no-op. */
static void intel_820_tlbflush(struct agp_memory *mem)
{
}
962
/* Undo intel_820_configure(): clear the RDCR aperture-enable bit and
 * restore the previous aperture size. */
static void intel_820_cleanup(void)
{
	u8 temp;
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
	pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
			temp & ~(1 << 1));
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
			previous_size->size_value);
}
975
976
/*
 * Program the i820 bridge: aperture size, aperture bus address, ATTBASE,
 * AGPCTRL, aperture enable (via RDCR on this chip, not MCHCFG), and
 * clear stale AGP error status.  Returns 0.
 */
static int intel_820_configure(void)
{
	u32 temp;
	u8 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* global enable aperture access */
	/* This flag is not accessed through MCHCFG register as in */
	/* i850 chipset. */
	pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
	pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
	return 0;
}
1007
/*
 * Program the i840 bridge: aperture size, aperture bus address, ATTBASE,
 * AGPCTRL, aperture enable via MCHCFG bit 9, and clear stale error
 * status.  Returns 0.
 */
static int intel_840_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg */
	pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
	/* clear any possible error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
	return 0;
}
1036
/*
 * Program the i845 bridge: aperture size, aperture bus address, ATTBASE,
 * AGPCTRL, aperture enable via the AGPM register, and clear stale error
 * status.  Returns 0.
 */
static int intel_845_configure(void)
{
	u32 temp;
	u8 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* agpm */
	pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
	pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
	/* clear any possible error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
	return 0;
}
1065
/*
 * Program the i850 bridge: aperture size, aperture bus address, ATTBASE,
 * AGPCTRL, aperture enable via MCHCFG bit 9, and clear stale AGP error
 * status.  Returns 0.
 */
static int intel_850_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture base */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg */
	pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
	return 0;
}
1094
/*
 * intel_860_configure - program an i860 host bridge for AGP use.
 *
 * Identical shape to intel_850_configure() except for the i860-specific
 * register offsets and a different error-status clear mask.
 */
static int intel_860_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to: aperture base comes from BAR0 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - physical address of the GATT */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl - reset to a known state */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* mcgcfg: bit 9 presumably enables the aperture - confirm vs. i860 datasheet */
	pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
	return 0;
}
1123
/*
 * intel_830mp_configure - program an 830MP host bridge (discrete-AGP
 * mode, as opposed to the integrated-graphics i830 path) for AGP use.
 */
static int intel_830mp_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to: aperture base comes from BAR0 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - physical address of the GATT */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl - reset to a known state */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* gmch: bit 9 in NBXCFG presumably enables the aperture - confirm vs. datasheet */
	pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
	/* clear any possible AGP-related error conditions */
	pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
	return 0;
}
1152
/*
 * intel_7505_configure - program an E7505/E7205 host bridge for AGP use.
 *
 * Same sequence as the 8xx bridges; note that unlike those, no error
 * status register is cleared here.
 */
static int intel_7505_configure(void)
{
	u32 temp;
	u16 temp2;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);

	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);

	/* address to map to: aperture base comes from BAR0 */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - physical address of the GATT */
	pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);

	/* agpctrl - reset to a known state */
	pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);

	/* mchcfg: bit 9 presumably enables the aperture - confirm vs. E7505 datasheet */
	pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
	pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));

	return 0;
}
1180
/* GATT entry mask used by the non-integrated bridges below; the meaning
 * of the 0x17 low bits is chipset-specific - confirm vs. chipset docs. */
static struct gatt_mask intel_generic_masks[] =
{
	{.mask = 0x00000017, .type = 0}
};
1186
/* i815 aperture table: {size (MB), GATT entries, page order, APSIZE value}
 * - field meaning presumed from aper_size_info_8 usage; confirm in agp.h. */
static struct aper_size_info_8 intel_815_sizes[2] =
{
	{64, 16384, 4, 0},
	{32, 8192, 3, 8},
};
1192
/* Shared aperture table for the i8xx discrete bridges (820/840/845/850/
 * 860/7505 drivers below): {size (MB), GATT entries, page order, APSIZE}. */
static struct aper_size_info_8 intel_8xx_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 32},
	{64, 16384, 4, 48},
	{32, 8192, 3, 56},
	{16, 4096, 2, 60},
	{8, 2048, 1, 62},
	{4, 1024, 0, 63}
};
1203
/* Aperture table for the 44x generic driver; same values as
 * intel_8xx_sizes but with a 16-bit size_value field. */
static struct aper_size_info_16 intel_generic_sizes[7] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 32},
	{64, 16384, 4, 48},
	{32, 8192, 3, 56},
	{16, 4096, 2, 60},
	{8, 2048, 1, 62},
	{4, 1024, 0, 63}
};
1214
/* 830MP aperture table: only the four largest sizes of the 8xx set. */
static struct aper_size_info_8 intel_830mp_sizes[4] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 32},
	{64, 16384, 4, 48},
	{32, 8192, 3, 56}
};
1222
/* Ops for the 440LX/440BX/440GX bridges (see agp_intel_probe): fully
 * generic GATT handling, 16-bit aperture-size register. */
static struct agp_bridge_driver intel_generic_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_generic_sizes,
	.size_type		= U16_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_configure,
	.fetch_size		= intel_fetch_size,
	.cleanup		= intel_cleanup,
	.tlb_flush		= intel_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1245
/* Ops for i810-family integrated graphics (also used for i815 when the
 * CGC device is present): fixed apertures, needs a scratch page, and
 * i810-specific insert/remove/alloc paths. */
static struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= TRUE,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.tlb_flush		= intel_i810_tlbflush,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1269
/* Ops for i815 operating as a discrete AGP4X bridge (no integrated
 * graphics device found - see the i815 case in agp_intel_probe). */
static struct agp_bridge_driver intel_815_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_815_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 2,
	.configure		= intel_815_configure,
	.fetch_size		= intel_815_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1292
/* Ops for i830/845G/855/865-style integrated graphics: i830 GATT and
 * insert/remove paths, fixed apertures, scratch page required. */
static struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 3,
	.needs_scratch_page	= TRUE,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.tlb_flush		= intel_i810_tlbflush,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1316
/* Ops for the i820 bridge: generic memory handling with i820-specific
 * configure/cleanup/TLB-flush hooks. */
static struct agp_bridge_driver intel_820_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_8xx_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_820_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_820_cleanup,
	.tlb_flush		= intel_820_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1339
/* Ops for 830MP in discrete-AGP mode (no integrated graphics found). */
static struct agp_bridge_driver intel_830mp_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_830mp_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 4,
	.configure		= intel_830mp_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1362
/* Ops for the i840 bridge. */
static struct agp_bridge_driver intel_840_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_8xx_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_840_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1385
/* Ops for the i845 bridge; also reused for 855PM, i875 and as the
 * discrete-AGP fallback for 845G/855GM/865/915 (see agp_intel_probe). */
static struct agp_bridge_driver intel_845_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_8xx_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_845_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1408
/* Ops for the i850 bridge. */
static struct agp_bridge_driver intel_850_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_8xx_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_850_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1431
/* Ops for the i860 bridge. */
static struct agp_bridge_driver intel_860_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_8xx_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_860_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1454
/* Ops for i915G/i915GM integrated graphics: i915 GATT/configure paths,
 * otherwise shares the i830 aperture table and free path. */
static struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 3,
	.needs_scratch_page	= TRUE,
	.configure		= intel_i915_configure,
	.fetch_size		= intel_i915_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.tlb_flush		= intel_i810_tlbflush,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1478
1479
/* Ops for the E7505/E7205 server bridges. */
static struct agp_bridge_driver intel_7505_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_8xx_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= intel_7505_configure,
	.fetch_size		= intel_8xx_fetch_size,
	.cleanup		= intel_8xx_cleanup,
	.tlb_flush		= intel_8xx_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= intel_generic_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
1502
1503static int find_i810(u16 device)
1504{
1505 struct pci_dev *i810_dev;
1506
1507 i810_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1508 if (!i810_dev)
1509 return 0;
1510 intel_i810_private.i810_dev = i810_dev;
1511 return 1;
1512}
1513
1514static int find_i830(u16 device)
1515{
1516 struct pci_dev *i830_dev;
1517
1518 i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1519 if (i830_dev && PCI_FUNC(i830_dev->devfn) != 0) {
1520 i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1521 device, i830_dev);
1522 }
1523
1524 if (!i830_dev)
1525 return 0;
1526
1527 intel_i830_private.i830_dev = i830_dev;
1528 return 1;
1529}
1530
1531static int __devinit agp_intel_probe(struct pci_dev *pdev,
1532 const struct pci_device_id *ent)
1533{
1534 struct agp_bridge_data *bridge;
1535 char *name = "(unknown)";
1536 u8 cap_ptr = 0;
1537 struct resource *r;
1538
1539 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
1540
1541 bridge = agp_alloc_bridge();
1542 if (!bridge)
1543 return -ENOMEM;
1544
1545 switch (pdev->device) {
1546 case PCI_DEVICE_ID_INTEL_82443LX_0:
1547 bridge->driver = &intel_generic_driver;
1548 name = "440LX";
1549 break;
1550 case PCI_DEVICE_ID_INTEL_82443BX_0:
1551 bridge->driver = &intel_generic_driver;
1552 name = "440BX";
1553 break;
1554 case PCI_DEVICE_ID_INTEL_82443GX_0:
1555 bridge->driver = &intel_generic_driver;
1556 name = "440GX";
1557 break;
1558 case PCI_DEVICE_ID_INTEL_82810_MC1:
1559 name = "i810";
1560 if (!find_i810(PCI_DEVICE_ID_INTEL_82810_IG1))
1561 goto fail;
1562 bridge->driver = &intel_810_driver;
1563 break;
1564 case PCI_DEVICE_ID_INTEL_82810_MC3:
1565 name = "i810 DC100";
1566 if (!find_i810(PCI_DEVICE_ID_INTEL_82810_IG3))
1567 goto fail;
1568 bridge->driver = &intel_810_driver;
1569 break;
1570 case PCI_DEVICE_ID_INTEL_82810E_MC:
1571 name = "i810 E";
1572 if (!find_i810(PCI_DEVICE_ID_INTEL_82810E_IG))
1573 goto fail;
1574 bridge->driver = &intel_810_driver;
1575 break;
1576 case PCI_DEVICE_ID_INTEL_82815_MC:
1577 /*
1578 * The i815 can operate either as an i810 style
1579 * integrated device, or as an AGP4X motherboard.
1580 */
1581 if (find_i810(PCI_DEVICE_ID_INTEL_82815_CGC))
1582 bridge->driver = &intel_810_driver;
1583 else
1584 bridge->driver = &intel_815_driver;
1585 name = "i815";
1586 break;
1587 case PCI_DEVICE_ID_INTEL_82820_HB:
1588 case PCI_DEVICE_ID_INTEL_82820_UP_HB:
1589 bridge->driver = &intel_820_driver;
1590 name = "i820";
1591 break;
1592 case PCI_DEVICE_ID_INTEL_82830_HB:
1593 if (find_i830(PCI_DEVICE_ID_INTEL_82830_CGC)) {
1594 bridge->driver = &intel_830_driver;
1595 } else {
1596 bridge->driver = &intel_830mp_driver;
1597 }
1598 name = "830M";
1599 break;
1600 case PCI_DEVICE_ID_INTEL_82840_HB:
1601 bridge->driver = &intel_840_driver;
1602 name = "i840";
1603 break;
1604 case PCI_DEVICE_ID_INTEL_82845_HB:
1605 bridge->driver = &intel_845_driver;
1606 name = "i845";
1607 break;
1608 case PCI_DEVICE_ID_INTEL_82845G_HB:
1609 if (find_i830(PCI_DEVICE_ID_INTEL_82845G_IG)) {
1610 bridge->driver = &intel_830_driver;
1611 } else {
1612 bridge->driver = &intel_845_driver;
1613 }
1614 name = "845G";
1615 break;
1616 case PCI_DEVICE_ID_INTEL_82850_HB:
1617 bridge->driver = &intel_850_driver;
1618 name = "i850";
1619 break;
1620 case PCI_DEVICE_ID_INTEL_82855PM_HB:
1621 bridge->driver = &intel_845_driver;
1622 name = "855PM";
1623 break;
1624 case PCI_DEVICE_ID_INTEL_82855GM_HB:
1625 if (find_i830(PCI_DEVICE_ID_INTEL_82855GM_IG)) {
1626 bridge->driver = &intel_830_driver;
1627 name = "855";
1628 } else {
1629 bridge->driver = &intel_845_driver;
1630 name = "855GM";
1631 }
1632 break;
1633 case PCI_DEVICE_ID_INTEL_82860_HB:
1634 bridge->driver = &intel_860_driver;
1635 name = "i860";
1636 break;
1637 case PCI_DEVICE_ID_INTEL_82865_HB:
1638 if (find_i830(PCI_DEVICE_ID_INTEL_82865_IG)) {
1639 bridge->driver = &intel_830_driver;
1640 } else {
1641 bridge->driver = &intel_845_driver;
1642 }
1643 name = "865";
1644 break;
1645 case PCI_DEVICE_ID_INTEL_82875_HB:
1646 bridge->driver = &intel_845_driver;
1647 name = "i875";
1648 break;
1649 case PCI_DEVICE_ID_INTEL_82915G_HB:
1650 if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
1651 bridge->driver = &intel_915_driver;
1652 } else {
1653 bridge->driver = &intel_845_driver;
1654 }
1655 name = "915G";
1656 break;
1657 case PCI_DEVICE_ID_INTEL_82915GM_HB:
1658 if (find_i830(PCI_DEVICE_ID_INTEL_82915GM_IG)) {
1659 bridge->driver = &intel_915_driver;
1660 } else {
1661 bridge->driver = &intel_845_driver;
1662 }
1663 name = "915GM";
1664 break;
1665 case PCI_DEVICE_ID_INTEL_7505_0:
1666 bridge->driver = &intel_7505_driver;
1667 name = "E7505";
1668 break;
1669 case PCI_DEVICE_ID_INTEL_7205_0:
1670 bridge->driver = &intel_7505_driver;
1671 name = "E7205";
1672 break;
1673 default:
1674 if (cap_ptr)
1675 printk(KERN_WARNING PFX "Unsupported Intel chipset (device id: %04x)\n",
1676 pdev->device);
1677 agp_put_bridge(bridge);
1678 return -ENODEV;
1679 };
1680
1681 bridge->dev = pdev;
1682 bridge->capndx = cap_ptr;
1683
1684 if (bridge->driver == &intel_810_driver)
1685 bridge->dev_private_data = &intel_i810_private;
1686 else if (bridge->driver == &intel_830_driver)
1687 bridge->dev_private_data = &intel_i830_private;
1688
1689 printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", name);
1690
1691 /*
1692 * The following fixes the case where the BIOS has "forgotten" to
1693 * provide an address range for the GART.
1694 * 20030610 - hamish@zot.org
1695 */
1696 r = &pdev->resource[0];
1697 if (!r->start && r->end) {
1698 if(pci_assign_resource(pdev, 0)) {
1699 printk(KERN_ERR PFX "could not assign resource 0\n");
1700 agp_put_bridge(bridge);
1701 return -ENODEV;
1702 }
1703 }
1704
1705 /*
1706 * If the device has not been properly setup, the following will catch
1707 * the problem and should stop the system from crashing.
1708 * 20030610 - hamish@zot.org
1709 */
1710 if (pci_enable_device(pdev)) {
1711 printk(KERN_ERR PFX "Unable to Enable PCI device\n");
1712 agp_put_bridge(bridge);
1713 return -ENODEV;
1714 }
1715
1716 /* Fill in the mode register */
1717 if (cap_ptr) {
1718 pci_read_config_dword(pdev,
1719 bridge->capndx+PCI_AGP_STATUS,
1720 &bridge->mode);
1721 }
1722
1723 pci_set_drvdata(pdev, bridge);
1724 return agp_add_bridge(bridge);
1725
1726fail:
1727 printk(KERN_ERR PFX "Detected an Intel %s chipset, "
1728 "but could not find the secondary device.\n", name);
1729 agp_put_bridge(bridge);
1730 return -ENODEV;
1731}
1732
1733static void __devexit agp_intel_remove(struct pci_dev *pdev)
1734{
1735 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
1736
1737 agp_remove_bridge(bridge);
1738
1739 if (intel_i810_private.i810_dev)
1740 pci_dev_put(intel_i810_private.i810_dev);
1741 if (intel_i830_private.i830_dev)
1742 pci_dev_put(intel_i830_private.i830_dev);
1743
1744 agp_put_bridge(bridge);
1745}
1746
/*
 * agp_intel_resume - re-program the bridge after a suspend/resume cycle.
 *
 * Restores the PCI config space saved by the core, then re-runs the
 * chipset-specific configure routine for the driver bound at probe time.
 *
 * NOTE(review): drivers not listed here (815, 820, 840, 860, 7505) get
 * only pci_restore_state() on resume - presumably intentional, but worth
 * confirming.
 */
static int agp_intel_resume(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	pci_restore_state(pdev);

	if (bridge->driver == &intel_generic_driver)
		intel_configure();
	else if (bridge->driver == &intel_850_driver)
		intel_850_configure();
	else if (bridge->driver == &intel_845_driver)
		intel_845_configure();
	else if (bridge->driver == &intel_830mp_driver)
		intel_830mp_configure();
	else if (bridge->driver == &intel_915_driver)
		intel_i915_configure();
	else if (bridge->driver == &intel_830_driver)
		intel_i830_configure();
	else if (bridge->driver == &intel_810_driver)
		intel_i810_configure();

	return 0;
}
1770
/* PCI match table: every Intel host bridge handled by agp_intel_probe.
 * Matching is restricted to host-bridge class devices from Intel. */
static struct pci_device_id agp_intel_pci_table[] = {
#define ID(x)						\
	{						\
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),	\
	.class_mask	= ~0,				\
	.vendor		= PCI_VENDOR_ID_INTEL,		\
	.device		= x,				\
	.subvendor	= PCI_ANY_ID,			\
	.subdevice	= PCI_ANY_ID,			\
	}
	ID(PCI_DEVICE_ID_INTEL_82443LX_0),
	ID(PCI_DEVICE_ID_INTEL_82443BX_0),
	ID(PCI_DEVICE_ID_INTEL_82443GX_0),
	ID(PCI_DEVICE_ID_INTEL_82810_MC1),
	ID(PCI_DEVICE_ID_INTEL_82810_MC3),
	ID(PCI_DEVICE_ID_INTEL_82810E_MC),
	ID(PCI_DEVICE_ID_INTEL_82815_MC),
	ID(PCI_DEVICE_ID_INTEL_82820_HB),
	ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
	ID(PCI_DEVICE_ID_INTEL_82830_HB),
	ID(PCI_DEVICE_ID_INTEL_82840_HB),
	ID(PCI_DEVICE_ID_INTEL_82845_HB),
	ID(PCI_DEVICE_ID_INTEL_82845G_HB),
	ID(PCI_DEVICE_ID_INTEL_82850_HB),
	ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
	ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
	ID(PCI_DEVICE_ID_INTEL_82860_HB),
	ID(PCI_DEVICE_ID_INTEL_82865_HB),
	ID(PCI_DEVICE_ID_INTEL_82875_HB),
	ID(PCI_DEVICE_ID_INTEL_7505_0),
	ID(PCI_DEVICE_ID_INTEL_7205_0),
	ID(PCI_DEVICE_ID_INTEL_82915G_HB),
	ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
1808
/* PCI driver glue: note there is no .suspend hook; only config-space
 * restore plus reconfigure on resume. */
static struct pci_driver agp_intel_pci_driver = {
	.name		= "agpgart-intel",
	.id_table	= agp_intel_pci_table,
	.probe		= agp_intel_probe,
	.remove		= __devexit_p(agp_intel_remove),
	.resume		= agp_intel_resume,
};
1816
/* Module init: bail out if AGP was disabled on the kernel command line,
 * otherwise register the PCI driver. */
static int __init agp_intel_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_intel_pci_driver);
}
1823
/* Module exit: unregister the PCI driver (per-device teardown happens
 * in agp_intel_remove). */
static void __exit agp_intel_cleanup(void)
{
	pci_unregister_driver(&agp_intel_pci_driver);
}
1828
/* Module entry/exit points and metadata. */
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);

MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
new file mode 100644
index 000000000000..c9ac731504f2
--- /dev/null
+++ b/drivers/char/agp/isoch.c
@@ -0,0 +1,470 @@
1/*
2 * Setup routines for AGP 3.5 compliant bridges.
3 */
4
5#include <linux/list.h>
6#include <linux/pci.h>
7#include <linux/agp_backend.h>
8#include <linux/module.h>
9
10#include "agp.h"
11
12/* Generic AGP 3.5 enabling routines */
13
14struct agp_3_5_dev {	/* one AGP 3.x master discovered behind the bridge being enabled */
15	struct list_head list;	/* link in the per-bridge dev_list */
16	u8 capndx;	/* config-space offset of this device's AGP capability */
17	u32 maxbw;	/* bits 23:16 of AGPNISTAT, cached by agp_3_5_dev_list_sort() */
18	struct pci_dev *dev;
19};
20
21static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)	/* insert 'new' keeping the list sorted by ascending maxbw */
22{
23	struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
24	struct list_head *pos;
25
26	list_for_each(pos, head) {
27		cur = list_entry(pos, struct agp_3_5_dev, list);
28		if(cur->maxbw > n->maxbw)	/* stop at the first entry demanding more bandwidth */
29			break;
30	}
31	list_add_tail(new, pos);	/* i.e. insert just before 'pos' (list tail when the loop ran out) */
32}
33
34static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)	/* insertion-sort dev_list by each device's maxbw; note: ndevs is unused here */
35{
36	struct agp_3_5_dev *cur;
37	struct pci_dev *dev;
38	struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
39	u32 nistat;
40
41	INIT_LIST_HEAD(head);	/* detach all nodes from head; each is re-inserted in sorted order below */
42
43	for (pos=start; pos!=head; ) {
44		cur = list_entry(pos, struct agp_3_5_dev, list);
45		dev = cur->dev;
46
47		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
48		cur->maxbw = (nistat >> 16) & 0xff;	/* cache bits 23:16 of AGPNISTAT as the sort key */
49
50		tmp = pos;
51		pos = pos->next;	/* advance before re-linking 'tmp' into the sorted list */
52		agp_3_5_dev_list_insert(head, tmp);
53	}
54}
55
56/*
57 * Initialize all isochronous transfer parameters for an AGP 3.0
58 * node (i.e. a host bridge in combination with the adapters
59 * lying behind it...)
60 */
61
62static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,	/* divide the target's isochronous resources among all masters */
63	struct agp_3_5_dev *dev_list, unsigned int ndevs)
64{
65	/*
66	 * Convenience structure to make the calculations clearer
67	 * here. The field names come straight from the AGP 3.0 spec.
68	 */
69	struct isoch_data {
70		u32 maxbw;
71		u32 n;
72		u32 y;
73		u32 l;	/* NOTE(review): 'l' is never assigned or read in this function */
74		u32 rq;
75		struct agp_3_5_dev *dev;
76	};
77
78	struct pci_dev *td = bridge->dev, *dev;
79	struct list_head *head = &dev_list->list, *pos;
80	struct agp_3_5_dev *cur;
81	struct isoch_data *master, target;	/* 'master' is a per-device array; 'target' describes the bridge */
82	unsigned int cdev = 0;
83	u32 mnistat, tnistat, tstatus, mcmd;
84	u16 tnicmd, mnicmd;
85	u8 mcapndx;
86	u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
87	u32 step, rem, rem_isoch, rem_async;
88	int ret = 0;
89
90	/*
91	 * We'll work with an array of isoch_data's (one for each
92	 * device in dev_list) throughout this function.
93	 */
94	if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
95		ret = -ENOMEM;
96		goto get_out;
97	}
98
99	/*
100	 * Sort the device list by maxbw. We need to do this because the
101	 * spec suggests that the devices with the smallest requirements
102	 * have their resources allocated first, with all remaining resources
103	 * falling to the device with the largest requirement.
104	 *
105	 * We don't exactly do this, we divide target resources by ndevs
106	 * and split them amongst the AGP 3.0 devices. The remainder of such
107	 * division operations are dropped on the last device, sort of like
108	 * the spec mentions it should be done.
109	 *
110	 * We can't do this sort when we initially construct the dev_list
111	 * because we don't know until this function whether isochronous
112	 * transfers are enabled and consequently whether maxbw will mean
113	 * anything.
114	 */
115	agp_3_5_dev_list_sort(dev_list, ndevs);
116
117	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
118	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
119
120	/* Extract power-on defaults from the target */
121	target.maxbw = (tnistat >> 16) & 0xff;
122	target.n = (tnistat >> 8) & 0xff;
123	target.y = (tnistat >> 6) & 0x3;
124	target.l = (tnistat >> 3) & 0x7;
125	target.rq = (tstatus >> 24) & 0xff;
126
127	y_max = target.y;
128
129	/*
130	 * Extract power-on defaults for each device in dev_list. Along
131	 * the way, calculate the total isochronous bandwidth required
132	 * by these devices and the largest requested payload size.
133	 */
134	list_for_each(pos, head) {
135		cur = list_entry(pos, struct agp_3_5_dev, list);
136		dev = cur->dev;
137
138		mcapndx = cur->capndx;	/* NOTE(review): dead store — cur->capndx is used directly below */
139
140		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
141
142		master[cdev].maxbw = (mnistat >> 16) & 0xff;
143		master[cdev].n = (mnistat >> 8) & 0xff;
144		master[cdev].y = (mnistat >> 6) & 0x3;
145		master[cdev].dev = cur;
146
147		tot_bw += master[cdev].maxbw;
148		y_max = max(y_max, master[cdev].y);	/* track the largest payload-size code requested */
149
150		cdev++;
151	}
152
153	/* Check if this configuration has any chance of working */
154	if (tot_bw > target.maxbw) {
155		printk(KERN_ERR PFX "isochronous bandwidth required "
156			"by AGP 3.0 devices exceeds that which is supported by "
157			"the AGP 3.0 bridge!\n");
158		ret = -ENODEV;
159		goto free_and_exit;
160	}
161
162	target.y = y_max;	/* the bridge must use the largest payload size any master asked for */
163
164	/*
165	 * Write the calculated payload size into the target's NICMD
166	 * register. Doing this directly effects the ISOCH_N value
167	 * in the target's NISTAT register, so we need to do this now
168	 * to get an accurate value for ISOCH_N later.
169	 */
170	pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
171	tnicmd &= ~(0x3 << 6);
172	tnicmd |= target.y << 6;
173	pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
174
175	/* Reread the target's ISOCH_N */
176	pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
177	target.n = (tnistat >> 8) & 0xff;
178
179	/* Calculate the minimum ISOCH_N needed by each master */
180	for (cdev=0; cdev<ndevs; cdev++) {
181		master[cdev].y = target.y;	/* every master runs at the common payload size */
182		master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);	/* NOTE(review): y is the 2-bit payload code, not a byte count — presumably per AGP 3.0 spec arithmetic; confirm against the spec */
183
184		tot_n += master[cdev].n;
185	}
186
187	/* Exit if the minimal ISOCH_N allocation among the masters is more
188	 * than the target can handle. */
189	if (tot_n > target.n) {
190		printk(KERN_ERR PFX "number of isochronous "
191			"transactions per period required by AGP 3.0 devices "
192			"exceeds that which is supported by the AGP 3.0 "
193			"bridge!\n");
194		ret = -ENODEV;
195		goto free_and_exit;
196	}
197
198	/* Calculate left over ISOCH_N capability in the target. We'll give
199	 * this to the hungriest device (as per the spec) */
200	rem  = target.n - tot_n;
201
202	/*
203	 * Calculate the minimum isochronous RQ depth needed by each master.
204	 * Along the way, distribute the extra ISOCH_N capability calculated
205	 * above.
206	 */
207	for (cdev=0; cdev<ndevs; cdev++) {
208		/*
209		 * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
210		 * byte isochronous writes will be broken into 64B pieces.
211		 * This means we need to budget more RQ depth to account for
212		 * these kind of writes (each isochronous write is actually
213		 * many writes on the AGP bus).
214		 */
215		master[cdev].rq = master[cdev].n;
216		if(master[cdev].y > 0x1)
217			master[cdev].rq *= (1 << (master[cdev].y - 1));	/* 2^(y-1) 64B pieces per write */
218
219		tot_rq += master[cdev].rq;
220
221		if (cdev == ndevs-1)
222			master[cdev].n += rem;	/* NOTE(review): the bonus ISOCH_N is added after rq was computed, so it gets no extra RQ budget — verify this is intended */
223	}
224
225	/* Figure the number of isochronous and asynchronous RQ slots the
226	 * target is providing. */
227	rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
228	rq_async = target.rq - rq_isoch;
229
230	/* Exit if the minimal RQ needs of the masters exceeds what the target
231	 * can provide. */
232	if (tot_rq > rq_isoch) {
233		printk(KERN_ERR PFX "number of request queue slots "
234			"required by the isochronous bandwidth requested by "
235			"AGP 3.0 devices exceeds the number provided by the "
236			"AGP 3.0 bridge!\n");
237		ret = -ENODEV;
238		goto free_and_exit;
239	}
240
241	/* Calculate asynchronous RQ capability in the target (per master) as
242	 * well as the total number of leftover isochronous RQ slots. */
243	step = rq_async / ndevs;
244	rem_async = step + (rq_async % ndevs);	/* last device's async share includes the remainder */
245	rem_isoch = rq_isoch - tot_rq;
246
247	/* Distribute the extra RQ slots calculated above and write our
248	 * isochronous settings out to the actual devices. */
249	for (cdev=0; cdev<ndevs; cdev++) {
250		cur = master[cdev].dev;
251		dev = cur->dev;
252
253		mcapndx = cur->capndx;	/* NOTE(review): dead store, as above */
254
255		master[cdev].rq += (cdev == ndevs - 1)
256		              ? (rem_async + rem_isoch) : step;	/* last device absorbs all leftovers */
257
258		pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
259		pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
260
261		mnicmd &= ~(0xff << 8);	/* clear ISOCH_N field */
262		mnicmd &= ~(0x3  << 6);	/* clear ISOCH_Y field */
263		mcmd   &= ~(0xff << 24);	/* clear RQ depth field */
264
265		mnicmd |= master[cdev].n  << 8;
266		mnicmd |= master[cdev].y  << 6;
267		mcmd   |= master[cdev].rq << 24;
268
269		pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
270		pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
271	}
272
273free_and_exit:
274	kfree(master);
275
276get_out:
277	return ret;
278}
279
280/*
281 * This function basically allocates request queue slots among the
282 * AGP 3.0 systems in nonisochronous nodes. The algorithm is
283 * pretty stupid, divide the total number of RQ slots provided by the
284 * target by ndevs. Distribute this many slots to each AGP 3.0 device,
285 * giving any left over slots to the last device in dev_list.
286 */
287static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,	/* split the target's RQ slots evenly among the masters */
288		struct agp_3_5_dev *dev_list, unsigned int ndevs)
289{
290	struct agp_3_5_dev *cur;
291	struct list_head *head = &dev_list->list, *pos;
292	u32 tstatus, mcmd;
293	u32 trq, mrq, rem;
294	unsigned int cdev = 0;
295
296	pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
297
298	trq = (tstatus >> 24) & 0xff;	/* total RQ slots the target provides */
299	mrq = trq / ndevs;	/* even per-master share */
300
301	rem = mrq + (trq % ndevs);	/* the last device also absorbs the division remainder */
302
303	for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
304		cur = list_entry(pos, struct agp_3_5_dev, list);
305
306		pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
307		mcmd &= ~(0xff << 24);	/* replace the RQ depth field (bits 31:24) */
308		mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
309		pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
310	}
311}
312
313/*
314 * Fully configure and enable an AGP 3.0 host bridge and all the devices
315 * lying behind it.
316 */
317int agp_3_5_enable(struct agp_bridge_data *bridge)	/* enumerate AGP masters behind 'bridge', validate them, and program resources */
318{
319	struct pci_dev *td = bridge->dev, *dev = NULL;
320	u8 mcapndx;
321	u32 isoch, arqsz;	/* NOTE(review): arqsz is extracted below but never used afterwards */
322	u32 tstatus, mstatus, ncapid;
323	u32 mmajor;
324	u16 mpstat;
325	struct agp_3_5_dev *dev_list, *cur;
326	struct list_head *head, *pos;
327	unsigned int ndevs = 0;
328	int ret = 0;
329
330	/* Extract some power-on defaults from the target */
331	pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
332	isoch     = (tstatus >> 17) & 0x1;
333	if (isoch == 0)	/* isoch xfers not available, bail out. */
334		return -ENODEV;
335
336	arqsz     = (tstatus >> 13) & 0x7;
337
338	/*
339	 * Allocate a head for our AGP 3.5 device list
340	 * (multiple AGP v3 devices are allowed behind a single bridge).
341	 */
342	if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
343		ret = -ENOMEM;
344		goto get_out;
345	}
346	head = &dev_list->list;
347	INIT_LIST_HEAD(head);
348
349	/* Find all AGP devices, and add them to dev_list. */
350	for_each_pci_dev(dev) {	/* scans every PCI device in the system, not only this bus */
351		mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
352		if (mcapndx == 0)
353			continue;
354
355		switch ((dev->class >>8) & 0xff00) {
356			case 0x0600:    /* Bridge */
357				/* Skip bridges. We should call this function for each one. */
358				continue;
359
360			case 0x0001:    /* Unclassified device */
361				/* Don't know what this is, but log it for investigation. */
362				if (mcapndx != 0) {	/* NOTE(review): always true — mcapndx==0 was filtered above */
363					printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n",
364						dev->vendor, dev->device);
365				}
366				continue;
367
368			case 0x0300:    /* Display controller */
369			case 0x0400:    /* Multimedia controller */
370				if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
371					ret = -ENOMEM;
372					goto free_and_exit;
373				}
374				cur->dev = dev;
375
376				pos = &cur->list;
377				list_add(pos, head);
378				ndevs++;
379				continue;
380
381			default:
382				continue;
383		}
384	}
385
386	/*
387	 * Take an initial pass through the devices lying behind our host
388	 * bridge.  Make sure each one is actually an AGP 3.0 device, otherwise
389	 * exit with an error message.  Along the way store the AGP 3.0
390	 * cap_ptr for each device
391	 */
392	list_for_each(pos, head) {
393		cur = list_entry(pos, struct agp_3_5_dev, list);
394		dev = cur->dev;
395
396		pci_read_config_word(dev, PCI_STATUS, &mpstat);
397		if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
398			continue;	/* NOTE(review): skipping here leaves cur->capndx uninitialized for this device — later passes read it; confirm this can't happen for devices that passed pci_find_capability() above */
399
400		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
401		if (mcapndx != 0) {
402			do {	/* walk the capability chain looking for the AGP capability (id 2) */
403				pci_read_config_dword(dev, mcapndx, &ncapid);
404				if ((ncapid & 0xff) != 2)
405					mcapndx = (ncapid >> 8) & 0xff;	/* follow the next-pointer byte */
406			}
407			while (((ncapid & 0xff) != 2) && (mcapndx != 0));
408		}
409
410		if (mcapndx == 0) {
411			printk(KERN_ERR PFX "woah!  Non-AGP device "
412				"found on the secondary bus of an AGP 3.5 bridge!\n");
413			ret = -ENODEV;
414			goto free_and_exit;
415		}
416
417		mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
418		if (mmajor < 3) {
419			printk(KERN_ERR PFX "woah!  AGP 2.0 device "
420				"found on the secondary bus of an AGP 3.5 "
421				"bridge operating with AGP 3.0 electricals!\n");
422			ret = -ENODEV;
423			goto free_and_exit;
424		}
425
426		cur->capndx = mcapndx;	/* remember where this device's AGP capability lives */
427
428		pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
429
430		if (((mstatus >> 3) & 0x1) == 0) {	/* AGP_ENABLE-mode bit: device must already be in 3.x mode */
431			printk(KERN_ERR PFX "woah!  AGP 3.x device "
432				"not operating in AGP 3.x mode found on the "
433				"secondary bus of an AGP 3.5 bridge operating "
434				"with AGP 3.0 electricals!\n");
435			ret = -ENODEV;
436			goto free_and_exit;
437		}
438	}		
439
440	/*
441	 * Call functions to divide target resources amongst the AGP 3.0
442	 * masters.  This process is dramatically different depending on
443	 * whether isochronous transfers are supported.
444	 */
445	if (isoch) {
446		ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
447		if (ret) {	/* isoch setup failure is non-fatal: fall back to plain RQ split */
448			printk(KERN_INFO PFX "Something bad happened setting "
449			       "up isochronous xfers.  Falling back to "
450			       "non-isochronous xfer mode.\n");
451		} else {
452			goto free_and_exit;
453		}
454	}
455	agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
456
457free_and_exit:
458	/* Be sure to free the dev_list */
459	for (pos=head->next; pos!=head; ) {
460		cur = list_entry(pos, struct agp_3_5_dev, list);
461
462		pos = pos->next;	/* advance before freeing the node we stand on */
463		kfree(cur);
464	}
465	kfree(dev_list);
466
467get_out:
468	return ret;
469}
470
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
new file mode 100644
index 000000000000..4f7a3e8bc919
--- /dev/null
+++ b/drivers/char/agp/nvidia-agp.c
@@ -0,0 +1,424 @@
1/*
2 * Nvidia AGPGART routines.
3 * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
4 * to work in 2.5 by Dave Jones <davej@codemonkey.org.uk>
5 */
6
7#include <linux/module.h>
8#include <linux/pci.h>
9#include <linux/init.h>
10#include <linux/agp_backend.h>
11#include <linux/gfp.h>
12#include <linux/page-flags.h>
13#include <linux/mm.h>
14#include "agp.h"
15
16/* NVIDIA registers */
17#define NVIDIA_0_APSIZE 0x80
18#define NVIDIA_1_WBC 0xf0
19#define NVIDIA_2_GARTCTRL 0xd0
20#define NVIDIA_2_APBASE 0xd8
21#define NVIDIA_2_APLIMIT 0xdc
22#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
23#define NVIDIA_3_APBASE 0x50
24#define NVIDIA_3_APLIMIT 0x54
25
26
27static struct _nvidia_private {	/* per-chipset state; single instance — this driver supports one bridge at a time */
28	struct pci_dev *dev_1;	/* companion function 0:1 (holds the WBC register) */
29	struct pci_dev *dev_2;	/* companion function 0:2 (GART control / ATTBASE) */
30	struct pci_dev *dev_3;	/* companion device 30:0 (aperture base/limit mirror) */
31	volatile u32 __iomem *aperture;	/* mapping of the first 33 pages of the aperture, used for TLB flushing */
32	int num_active_entries;
33	off_t pg_offset;	/* GATT offset used when the aperture is smaller than the 64M directory span */
34	u32 wbc_mask;	/* chipset-specific write-buffer-flush bit(s); 0 disables the WBC flush */
35} nvidia_private;
36
37
38static int nvidia_fetch_size(void)	/* read APSIZE and translate it to an aperture size in MB */
39{
40	int i;
41	u8 size_value;
42	struct aper_size_info_8 *values;
43
44	pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
45	size_value &= 0x0f;	/* only the low nibble encodes the size */
46	values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
47
48	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
49		if (size_value == values[i].size_value) {
50			agp_bridge->previous_size =
51				agp_bridge->current_size = (void *) (values + i);
52			agp_bridge->aperture_size_idx = i;
53			return values[i].size;
54		}
55	}
56
57	return 0;	/* unrecognized size value */
58}
59
60#define SYSCFG 0xC0010010
61#define IORR_BASE0 0xC0010016
62#define IORR_MASK0 0xC0010017
63#define AMD_K7_NUM_IORR 2
64
65static int nvidia_init_iorr(u32 base, u32 size)	/* program an AMD K7 I/O range register (MSR pair) to cover the aperture */
66{
67	u32 base_hi, base_lo;
68	u32 mask_hi, mask_lo;
69	u32 sys_hi, sys_lo;
70	u32 iorr_addr, free_iorr_addr;
71
72	/* Find the iorr that is already used for the base */
73	/* If not found, determine the uppermost available iorr */
74	free_iorr_addr = AMD_K7_NUM_IORR;	/* sentinel: "none free yet" */
75	for(iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
76		rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
77		rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
78
79		if ((base_lo & 0xfffff000) == (base & 0xfffff000))	/* already mapped to this base — reuse it */
80			break;
81
82		if ((mask_lo & 0x00000800) == 0)	/* valid bit clear: this IORR is free */
83			free_iorr_addr = iorr_addr;
84	}
85	
86	if (iorr_addr >= AMD_K7_NUM_IORR) {
87		iorr_addr = free_iorr_addr;
88		if (iorr_addr >= AMD_K7_NUM_IORR)	/* no matching and no free IORR */
89			return -EINVAL;
90	}
91	base_hi = 0x0;
92	base_lo = (base & ~0xfff) | 0x18;	/* NOTE(review): 0x18 presumably sets access-type bits; confirm against AMD K7 IORR docs */
93	mask_hi = 0xf;
94	mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;	/* mask for 'size' bytes plus the valid bit */
95	wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
96	wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
97
98	rdmsr(SYSCFG, sys_lo, sys_hi);
99	sys_lo |= 0x00100000;	/* enable IORR usage in SYSCFG */
100	wrmsr(SYSCFG, sys_lo, sys_hi);
101
102	return 0;
103}
104
105static int nvidia_configure(void)	/* program aperture, GART directory pointers and control bits on all companion devices */
106{
107	int i, rc, num_dirs;
108	u32 apbase, aplimit;
109	struct aper_size_info_8 *current_size;
110	u32 temp;
111
112	current_size = A_SIZE_8(agp_bridge->current_size);
113
114	/* aperture size */
115	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
116		current_size->size_value);
117
118    /* address to map to */
119	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &apbase);
120	apbase &= PCI_BASE_ADDRESS_MEM_MASK;
121	agp_bridge->gart_bus_addr = apbase;
122	aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
123	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
124	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
125	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
126	pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
127	if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
128		return rc;
129
130	/* directory size is 64k */
131	num_dirs = current_size->size / 64;	/* one 64k directory per 64MB of aperture */
132	nvidia_private.num_active_entries = current_size->num_entries;
133	nvidia_private.pg_offset = 0;
134	if (num_dirs == 0) {	/* aperture < 64MB: one directory, only part of it active */
135		num_dirs = 1;
136		nvidia_private.num_active_entries /= (64 / current_size->size);
137		nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
138			~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;	/* aperture's page offset within its 64MB-aligned block */
139	}
140
141	/* attbase */
142	for(i = 0; i < 8; i++) {	/* 8 ATTBASE slots cycle over the available directories; low bit marks the entry valid */
143		pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
144			(agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
145	}
146
147	/* gtlb control */
148	pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
149	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);
150
151	/* gart control */
152	pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
153	pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
154
155	/* map aperture */
156	nvidia_private.aperture =
157		(volatile u32 __iomem *) ioremap(apbase, 33 * PAGE_SIZE);	/* NOTE(review): ioremap return is not checked — nvidia_tlbflush would deref NULL on failure */
158
159	return 0;
160}
161
162static void nvidia_cleanup(void)	/* undo nvidia_configure(): disable GART/GTLB, unmap aperture, restore old size */
163{
164	struct aper_size_info_8 *previous_size;
165	u32 temp;
166
167	/* gart control */
168	pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
169	pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));
170
171	/* gtlb control */
172	pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
173	pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));
174
175	/* unmap aperture */
176	iounmap((void __iomem *) nvidia_private.aperture);
177
178	/* restore previous aperture size */
179	previous_size = A_SIZE_8(agp_bridge->previous_size);
180	pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
181		previous_size->size_value);
182
183	/* restore iorr for previous aperture size */
184	nvidia_init_iorr(agp_bridge->gart_bus_addr,
185		previous_size->size * 1024 * 1024);
186}
187
188
189/*
190 * Note we can't use the generic routines, even though they are 99% the same.
191 * Aperture sizes <64M still requires a full 64k GART directory, but
192 * only use the portion of the TLB entries that correspond to the apertures
193 * alignment inside the surrounding 64M block.
194 */
195extern int agp_memory_reserved;
196
197static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)	/* bind pages into the GATT at pg_start, honouring the chipset's pg_offset */
198{
199	int i, j;
200	
201	if ((type != 0) || (mem->type != 0))	/* only plain memory is supported */
202		return -EINVAL;
203	
204	if ((pg_start + mem->page_count) >
205		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
206		return -EINVAL;
207	
208	for(j = pg_start; j < (pg_start + mem->page_count); j++) {
209		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
210			return -EBUSY;	/* refuse to overwrite an occupied entry */
211	}
212	
213	if (mem->is_flushed == FALSE) {
214		global_cache_flush();	/* make sure page contents are visible before mapping them */
215		mem->is_flushed = TRUE;
216	}
217	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
218		writel(agp_bridge->driver->mask_memory(agp_bridge,
219			mem->memory[i], mem->type),
220			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
221		readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j);	/* PCI Posting. */
222	}
223	agp_bridge->driver->tlb_flush(mem);
224	return 0;
225}
226
227
228static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)	/* unbind pages: point the entries back at the scratch page */
229{
230	int i;
231
232	if ((type != 0) || (mem->type != 0))	/* only plain memory is supported */
233		return -EINVAL;
234	
235	for (i = pg_start; i < (mem->page_count + pg_start); i++)
236		writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);	/* NOTE(review): no posting readl here, unlike the insert path — confirm the tlb_flush below suffices */
237
238	agp_bridge->driver->tlb_flush(mem);
239	return 0;
240}
241
242
243static void nvidia_tlbflush(struct agp_memory *mem)	/* flush chipset write buffers (if any) then force GART TLB eviction by touching the aperture */
244{
245	unsigned long end;
246	u32 wbc_reg, temp;
247	int i;
248
249	/* flush chipset */
250	if (nvidia_private.wbc_mask) {	/* wbc_mask is 0 unless probe detected a chipset with a WBC register */
251		pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
252		wbc_reg |= nvidia_private.wbc_mask;	/* setting the bit starts the flush; hardware clears it when done */
253		pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);
254
255		end = jiffies + 3*HZ;
256		do {
257			pci_read_config_dword(nvidia_private.dev_1,
258					NVIDIA_1_WBC, &wbc_reg);
259			if ((signed)(end - jiffies) <= 0) {	/* warn on timeout but keep spinning until hardware clears the bit */
260				printk(KERN_ERR PFX
261				    "TLB flush took more than 3 seconds.\n");
262			}
263		} while (wbc_reg & nvidia_private.wbc_mask);
264	}
265
266	/* flush TLB entries */
267	for(i = 0; i < 32 + 1; i++)	/* read one dword from each of the 33 mapped aperture pages */
268		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
269	for(i = 0; i < 32 + 1; i++)	/* NOTE(review): loop is repeated verbatim — presumably a deliberate double pass; confirm */
270		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
271}
272
273
274static struct aper_size_info_8 nvidia_generic_sizes[5] =	/* {size MB, GATT entries, order, APSIZE value} */
275{
276	{512, 131072, 7, 0},
277	{256, 65536, 6, 8},
278	{128, 32768, 5, 12},
279	{64, 16384, 4, 14},
280	/* The 32M mode still requires a 64k gatt */
281	{32, 16384, 4, 15}
282};
283
284
285static struct gatt_mask nvidia_generic_masks[] =
286{
287 { .mask = 1, .type = 0}
288};
289
290
291struct agp_bridge_driver nvidia_driver = {	/* driver ops: custom GATT access (pg_offset-aware) plus generic helpers */
292	.owner			= THIS_MODULE,
293	.aperture_sizes		= nvidia_generic_sizes,
294	.size_type		= U8_APER_SIZE,
295	.num_aperture_sizes	= 5,
296	.configure		= nvidia_configure,
297	.fetch_size		= nvidia_fetch_size,
298	.cleanup		= nvidia_cleanup,
299	.tlb_flush		= nvidia_tlbflush,
300	.mask_memory		= agp_generic_mask_memory,
301	.masks			= nvidia_generic_masks,
302	.agp_enable		= agp_generic_enable,
303	.cache_flush		= global_cache_flush,
304	.create_gatt_table	= agp_generic_create_gatt_table,
305	.free_gatt_table	= agp_generic_free_gatt_table,
306	.insert_memory		= nvidia_insert_memory,	/* custom: generic versions ignore pg_offset (see comment above) */
307	.remove_memory		= nvidia_remove_memory,
308	.alloc_by_type		= agp_generic_alloc_by_type,
309	.free_by_type		= agp_generic_free_by_type,
310	.agp_alloc_page		= agp_generic_alloc_page,
311	.agp_destroy_page	= agp_generic_destroy_page,
312};
313
314static int __devinit agp_nvidia_probe(struct pci_dev *pdev,	/* locate companion devices, identify the chipset, register a bridge */
315				      const struct pci_device_id *ent)
316{
317	struct agp_bridge_data *bridge;
318	u8 cap_ptr;
319
320	nvidia_private.dev_1 =	/* companion functions live at fixed devfns on the same bus */
321		pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1));
322	nvidia_private.dev_2 =
323		pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2));
324	nvidia_private.dev_3 =
325		pci_find_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0));
326	
327	if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
328		printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
329			"chipset, but could not find the secondary devices.\n");
330		return -ENODEV;
331	}
332
333	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
334	if (!cap_ptr)
335		return -ENODEV;
336
337	switch (pdev->device) {	/* WBC flush bit differs between nForce and nForce2 */
338	case PCI_DEVICE_ID_NVIDIA_NFORCE:
339		printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
340		nvidia_private.wbc_mask = 0x00010000;
341		break;
342	case PCI_DEVICE_ID_NVIDIA_NFORCE2:
343		printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
344		nvidia_private.wbc_mask = 0x80000000;
345		break;
346	default:
347		printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
348			    pdev->device);
349		return -ENODEV;
350	}
351
352	bridge = agp_alloc_bridge();
353	if (!bridge)
354		return -ENOMEM;
355
356	bridge->driver = &nvidia_driver;
357	bridge->dev_private_data = &nvidia_private,	/* NOTE(review): comma operator where a semicolon was presumably intended — harmless but confusing */
358	bridge->dev = pdev;
359	bridge->capndx = cap_ptr;
360	
361	/* Fill in the mode register */
362	pci_read_config_dword(pdev,
363			bridge->capndx+PCI_AGP_STATUS,
364			&bridge->mode);
365
366	pci_set_drvdata(pdev, bridge);
367	return agp_add_bridge(bridge);
368}
369
370static void __devexit agp_nvidia_remove(struct pci_dev *pdev)	/* tear down the bridge registered in probe */
371{
372	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
373
374	agp_remove_bridge(bridge);
375	agp_put_bridge(bridge);	/* drop the reference taken by agp_alloc_bridge() */
376}
377
378static struct pci_device_id agp_nvidia_pci_table[] = {
379 {
380 .class = (PCI_CLASS_BRIDGE_HOST << 8),
381 .class_mask = ~0,
382 .vendor = PCI_VENDOR_ID_NVIDIA,
383 .device = PCI_DEVICE_ID_NVIDIA_NFORCE,
384 .subvendor = PCI_ANY_ID,
385 .subdevice = PCI_ANY_ID,
386 },
387 {
388 .class = (PCI_CLASS_BRIDGE_HOST << 8),
389 .class_mask = ~0,
390 .vendor = PCI_VENDOR_ID_NVIDIA,
391 .device = PCI_DEVICE_ID_NVIDIA_NFORCE2,
392 .subvendor = PCI_ANY_ID,
393 .subdevice = PCI_ANY_ID,
394 },
395 { }
396};
397
398MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
399
400static struct pci_driver agp_nvidia_pci_driver = {
401 .name = "agpgart-nvidia",
402 .id_table = agp_nvidia_pci_table,
403 .probe = agp_nvidia_probe,
404 .remove = agp_nvidia_remove,
405};
406
407static int __init agp_nvidia_init(void)	/* module entry point */
408{
409	if (agp_off)	/* honour the global "agp=off" kernel parameter */
410		return -EINVAL;
411	return pci_register_driver(&agp_nvidia_pci_driver);
412}
413
414static void __exit agp_nvidia_cleanup(void)	/* module exit */
415{
416	pci_unregister_driver(&agp_nvidia_pci_driver);
417}
418
419module_init(agp_nvidia_init);
420module_exit(agp_nvidia_cleanup);
421
422MODULE_LICENSE("GPL and additional rights");
423MODULE_AUTHOR("NVIDIA Corporation");
424
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
new file mode 100644
index 000000000000..4b3eda267976
--- /dev/null
+++ b/drivers/char/agp/sgi-agp.c
@@ -0,0 +1,331 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9/*
10 * SGI TIOCA AGPGART routines.
11 *
12 */
13
14#include <linux/acpi.h>
15#include <linux/module.h>
16#include <linux/pci.h>
17#include <linux/init.h>
18#include <linux/agp_backend.h>
19#include <asm/sn/addrs.h>
20#include <asm/sn/pcidev.h>
21#include <asm/sn/pcibus_provider_defs.h>
22#include <asm/sn/tioca_provider.h>
23#include "agp.h"
24
25extern int agp_memory_reserved;
26extern uint32_t tioca_gart_found;
27extern struct list_head tioca_list;
28static struct agp_bridge_data **sgi_tioca_agp_bridges;
29
30/*
31 * The aperature size and related information is set up at TIOCA init time.
32 * Values for this table will be extracted and filled in at
33 * sgi_tioca_fetch_size() time.
34 */
35
36static struct aper_size_info_fixed sgi_tioca_sizes[] = {
37 {0, 0, 0},
38};
39
40static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)	/* node-local page allocation: prefer the node closest to the CA */
41{
42	struct page *page;
43	int nid;
44	struct tioca_kernel *info =
45	    (struct tioca_kernel *)bridge->dev_private_data;
46
47	nid = info->ca_closest_node;
48	page = alloc_pages_node(nid, GFP_KERNEL, 0);
49	if (page == NULL) {
50		return 0;	/* NOTE(review): returning 0 for a pointer — NULL would be idiomatic */
51	}
52
53	get_page(page);	/* extra ref + lock mirror what agp_generic_alloc_page does */
54	SetPageLocked(page);
55	atomic_inc(&agp_bridge->current_memory_agp);
56	return page_address(page);
57}
58
59/*
60 * Flush GART tlb's. Cannot selectively flush based on memory so the mem
61 * arg is ignored.
62 */
63
64static void sgi_tioca_tlbflush(struct agp_memory *mem)	/* delegate the whole-GART flush to the TIOCA provider */
65{
66	tioca_tlbflush(mem->bridge->dev_private_data);
67}
68
69/*
70 * Given an address of a host physical page, turn it into a valid gart
71 * entry.
72 */
73static unsigned long
74sgi_tioca_mask_memory(struct agp_bridge_data *bridge,	/* convert a host physical address to a TIOCA GART entry */
75		      unsigned long addr, int type)
76{
77	return tioca_physpage_to_gart(addr);	/* 'type' is ignored on this hardware */
78}
79
80static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)	/* 'mode' is ignored; only fast writes are toggled */
81{
82	tioca_fastwrite_enable(bridge->dev_private_data);
83}
84
85/*
86 * sgi_tioca_configure() doesn't have anything to do since the base CA driver
87 * has alreay set up the GART.
88 */
89
90static int sgi_tioca_configure(void)	/* nothing to do: the CA driver already set up the GART (see comment above) */
91{
92	return 0;
93}
94
95/*
96 * Determine gfx aperature size. This has already been determined by the
97 * CA driver init, so just need to set agp_bridge values accordingly.
98 */
99
100static int sgi_tioca_fetch_size(void)	/* report the aperture size the CA driver already chose */
101{
102	struct tioca_kernel *info =
103	    (struct tioca_kernel *)agp_bridge->dev_private_data;
104
105	sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);	/* fill the single fixed-size slot from CA state */
106	sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
107
108	return sgi_tioca_sizes[0].size;
109}
110
111static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)	/* adopt the GATT the CA driver already built — no allocation here */
112{
113	struct tioca_kernel *info =
114	    (struct tioca_kernel *)bridge->dev_private_data;
115
116	bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
117	bridge->gatt_table = bridge->gatt_table_real;
118	bridge->gatt_bus_addr = info->ca_gfxgart_base;
119
120	return 0;
121}
122
123static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)	/* no-op: the GATT is owned by the CA driver */
124{
125	return 0;
126}
127
128static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,	/* bind pages into the CA-owned GATT at pg_start */
129				   int type)
130{
131	int num_entries;
132	size_t i;
133	off_t j;
134	void *temp;
135	struct agp_bridge_data *bridge;
136
137	bridge = mem->bridge;
138	if (!bridge)
139		return -EINVAL;
140
141	temp = bridge->current_size;
142
143	switch (bridge->driver->size_type) {	/* decode num_entries according to the aperture-size record type */
144	case U8_APER_SIZE:
145		num_entries = A_SIZE_8(temp)->num_entries;
146		break;
147	case U16_APER_SIZE:
148		num_entries = A_SIZE_16(temp)->num_entries;
149		break;
150	case U32_APER_SIZE:
151		num_entries = A_SIZE_32(temp)->num_entries;
152		break;
153	case FIXED_APER_SIZE:
154		num_entries = A_SIZE_FIX(temp)->num_entries;
155		break;
156	case LVL2_APER_SIZE:
157		return -EINVAL;	/* two-level GATTs are not supported here */
158		break;
159	default:
160		num_entries = 0;
161		break;
162	}
163
164	num_entries -= agp_memory_reserved / PAGE_SIZE;
165	if (num_entries < 0)
166		num_entries = 0;
167
168	if (type != 0 || mem->type != 0) {	/* only plain memory is supported */
169		return -EINVAL;
170	}
171
172	if ((pg_start + mem->page_count) > num_entries)
173		return -EINVAL;
174
175	j = pg_start;
176
177	while (j < (pg_start + mem->page_count)) {	/* refuse to overwrite occupied entries */
178		if (*(bridge->gatt_table + j))
179			return -EBUSY;
180		j++;
181	}
182
183	if (mem->is_flushed == FALSE) {
184		bridge->driver->cache_flush();	/* make page contents visible before mapping */
185		mem->is_flushed = TRUE;
186	}
187
188	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
189		*(bridge->gatt_table + j) =
190		    bridge->driver->mask_memory(bridge, mem->memory[i],
191						mem->type);
192	}
193
194	bridge->driver->tlb_flush(mem);
195	return 0;
196}
197
198static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,	/* unbind pages: zero the GATT entries then flush */
199				   int type)
200{
201	size_t i;
202	struct agp_bridge_data *bridge;
203
204	bridge = mem->bridge;
205	if (!bridge)
206		return -EINVAL;
207
208	if (type != 0 || mem->type != 0) {	/* only plain memory is supported */
209		return -EINVAL;
210	}
211
212	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
213		*(bridge->gatt_table + i) = 0;
214	}
215
216	bridge->driver->tlb_flush(mem);
217	return 0;
218}
219
220static void sgi_tioca_cache_flush(void)	/* no-op: nothing to flush on this platform (cache coherent DMA) — TODO confirm */
221{
222}
223
224/*
225 * Cleanup. Nothing to do as the CA driver owns the GART.
226 */
227
228static void sgi_tioca_cleanup(void)	/* no-op: the CA driver owns the GART (see comment above) */
229{
230}
231
232static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)	/* pick the registered bridge that shares pdev's PCI bus */
233{
234	struct agp_bridge_data *bridge;
235
236	list_for_each_entry(bridge, &agp_bridges, list) {
237		if (bridge->dev->bus == pdev->bus)
238			break;	/* NOTE(review): if no bus matches, 'bridge' is left pointing at the list head — callers presumably guarantee a match; confirm */
239	}
240	return bridge;
241}
242
243struct agp_bridge_driver sgi_tioca_driver = {	/* driver ops: GART is owned by the TIOCA provider, so most hooks delegate or no-op */
244	.owner = THIS_MODULE,
245	.size_type = U16_APER_SIZE,
246	.configure = sgi_tioca_configure,
247	.fetch_size = sgi_tioca_fetch_size,
248	.cleanup = sgi_tioca_cleanup,
249	.tlb_flush = sgi_tioca_tlbflush,
250	.mask_memory = sgi_tioca_mask_memory,
251	.agp_enable = sgi_tioca_agp_enable,
252	.cache_flush = sgi_tioca_cache_flush,
253	.create_gatt_table = sgi_tioca_create_gatt_table,
254	.free_gatt_table = sgi_tioca_free_gatt_table,
255	.insert_memory = sgi_tioca_insert_memory,
256	.remove_memory = sgi_tioca_remove_memory,
257	.alloc_by_type = agp_generic_alloc_by_type,
258	.free_by_type = agp_generic_free_by_type,
259	.agp_alloc_page = sgi_tioca_alloc_page,
260	.agp_destroy_page = agp_generic_destroy_page,
261	.cant_use_aperture = 1,	/* CPU cannot access AGP memory through the aperture on this hardware */
262	.needs_scratch_page = 0,
263	.num_aperture_sizes = 1,
264};
265
266static int __devinit agp_sgi_init(void)	/* register one AGP bridge per TIOCA the platform code discovered */
267{
268	unsigned int j;
269	struct tioca_kernel *info;
270	struct pci_dev *pdev = NULL;
271
272	if (tioca_gart_found)
273		printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
274	else
275		return 0;	/* no TIOCA hardware: nothing to do, not an error */
276
277	sgi_tioca_agp_bridges =
278	    (struct agp_bridge_data **)kmalloc(tioca_gart_found *
279					       sizeof(struct agp_bridge_data *),
280					       GFP_KERNEL);	/* NOTE(review): result is not checked — the loop below dereferences it; NULL here would oops */
281
282	j = 0;
283	list_for_each_entry(info, &tioca_list, ca_list) {
284		struct list_head *tmp;
285		list_for_each(tmp, info->ca_devices) {	/* scan for a VGA device with an AGP capability behind this CA */
286			u8 cap_ptr;
287			pdev = pci_dev_b(tmp);
288			if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
289				continue;
290			cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
291			if (!cap_ptr)
292				continue;	/* NOTE(review): loop always runs to completion — cap_ptr/pdev from the last iteration win; confirm intended */
293		}
294		sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
295		printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
296		       sgi_tioca_agp_bridges[j]);
297		if (sgi_tioca_agp_bridges[j]) {	/* allocation failure just skips this CA */
298			sgi_tioca_agp_bridges[j]->dev = pdev;
299			sgi_tioca_agp_bridges[j]->dev_private_data = info;
300			sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
301			sgi_tioca_agp_bridges[j]->gart_bus_addr =
302			    info->ca_gfxap_base;
303			sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) |	/* 126 requests */
304			    (0x1 << 9) |	/* SBA supported */
305			    (0x1 << 5) |	/* 64-bit addresses supported */
306			    (0x1 << 4) |	/* FW supported */
307			    (0x1 << 3) |	/* AGP 3.0 mode */
308			    0x2;	/* 8x transfer only */
309			sgi_tioca_agp_bridges[j]->current_size =
310			    sgi_tioca_agp_bridges[j]->previous_size =
311			    (void *)&sgi_tioca_sizes[0];
312			agp_add_bridge(sgi_tioca_agp_bridges[j]);
313		}
314		j++;
315	}
316
317	agp_find_bridge = &sgi_tioca_find_bridge;	/* override the global device→bridge lookup */
318	return 0;
319}
320
321static void __devexit agp_sgi_cleanup(void)	/* free the bridge-pointer array; kfree(NULL) is safe so the guard is redundant */
322{
323	if(sgi_tioca_agp_bridges)
324		kfree(sgi_tioca_agp_bridges);
325	sgi_tioca_agp_bridges=NULL;
326}
327
328module_init(agp_sgi_init);
329module_exit(agp_sgi_cleanup);
330
331MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
new file mode 100644
index 000000000000..cfccacb2a647
--- /dev/null
+++ b/drivers/char/agp/sis-agp.c
@@ -0,0 +1,360 @@
1/*
2 * SiS AGPGART routines.
3 */
4
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/init.h>
8#include <linux/agp_backend.h>
9#include <linux/delay.h>
10#include "agp.h"
11
/* SiS host-bridge PCI configuration registers. */
#define SIS_ATTBASE	0x90	/* GATT (attribute table) base address */
#define SIS_APSIZE	0x94	/* aperture size */
#define SIS_TLBCNTRL	0x97	/* TLB control */
#define SIS_TLBFLUSH	0x98	/* TLB flush trigger */

/* Module parameters; descriptions are at the bottom of this file. */
static int __devinitdata agp_sis_force_delay = 0;
static int __devinitdata agp_sis_agp_spec = -1;
19
20static int sis_fetch_size(void)
21{
22 u8 temp_size;
23 int i;
24 struct aper_size_info_8 *values;
25
26 pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
27 values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
28 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
29 if ((temp_size == values[i].size_value) ||
30 ((temp_size & ~(0x03)) ==
31 (values[i].size_value & ~(0x03)))) {
32 agp_bridge->previous_size =
33 agp_bridge->current_size = (void *) (values + i);
34
35 agp_bridge->aperture_size_idx = i;
36 return values[i].size;
37 }
38 }
39
40 return 0;
41}
42
/* Flush the GART TLB by writing the flush bit to the TLB flush register. */
static void sis_tlbflush(struct agp_memory *mem)
{
	pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
}
47
/*
 * Program the bridge: enable the TLB, record the aperture bus address,
 * and point the chipset at the GATT built by the generic code.
 */
static int sis_configure(void)
{
	u32 temp;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);
	pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
			       agp_bridge->gatt_bus_addr);
	pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
			      current_size->size_value);
	return 0;
}
63
/* Restore the pre-driver aperture size (low control bits cleared). */
static void sis_cleanup(void)
{
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
			      (previous_size->size_value & ~(0x03)));
}
72
73static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
74{
75 struct pci_dev *device = NULL;
76 u32 command;
77 int rate;
78
79 printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
80 agp_bridge->major_version,
81 agp_bridge->minor_version,
82 pci_name(agp_bridge->dev));
83
84 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
85 command = agp_collect_device_status(bridge, mode, command);
86 command |= AGPSTAT_AGP_ENABLE;
87 rate = (command & 0x7) << 2;
88
89 for_each_pci_dev(device) {
90 u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
91 if (!agp)
92 continue;
93
94 printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n",
95 pci_name(device), rate);
96
97 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
98
99 /*
100 * Weird: on some sis chipsets any rate change in the target
101 * command register triggers a 5ms screwup during which the master
102 * cannot be configured
103 */
104 if (device->device == bridge->dev->device) {
105 printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
106 msleep(10);
107 }
108 }
109}
110
/* Supported apertures: {size in MB, GATT entries, page order, APSIZE value}. */
static struct aper_size_info_8 sis_generic_sizes[7] =
{
	{256, 65536, 6, 99},
	{128, 32768, 5, 83},
	{64, 16384, 4, 67},
	{32, 8192, 3, 51},
	{16, 4096, 2, 35},
	{8, 2048, 1, 19},
	{4, 1024, 0, 3}
};
121
/*
 * Native (pre-AGP3.5) SiS operations; sis_get_driver() may swap several
 * of these hooks for the generic AGP3 versions on newer chipsets.
 */
struct agp_bridge_driver sis_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= sis_generic_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= sis_configure,
	.fetch_size		= sis_fetch_size,
	.cleanup		= sis_cleanup,
	.tlb_flush		= sis_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
144
/* SiS host bridges known to work with this driver; probe() scans this list. */
static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_SI_5591_AGP,
		.chipset_name	= "5591",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_530,
		.chipset_name	= "530",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_540,
		.chipset_name	= "540",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_550,
		.chipset_name	= "550",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_620,
		.chipset_name	= "620",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_630,
		.chipset_name	= "630",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_635,
		.chipset_name	= "635",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_645,
		.chipset_name	= "645",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_646,
		.chipset_name	= "646",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_648,
		.chipset_name	= "648",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_650,
		.chipset_name	= "650",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_651,
		.chipset_name	= "651",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_655,
		.chipset_name	= "655",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_661,
		.chipset_name	= "661",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_730,
		.chipset_name	= "730",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_735,
		.chipset_name	= "735",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_740,
		.chipset_name	= "740",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_741,
		.chipset_name	= "741",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_745,
		.chipset_name	= "745",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_746,
		.chipset_name	= "746",
	},
	{
		.device_id	= PCI_DEVICE_ID_SI_760,
		.chipset_name	= "760",
	},
	{ }, /* dummy final entry, always present */
};
233
234
/* Chipsets that require the 'delay hack' (see sis_delayed_enable()). */
static int sis_broken_chipsets[] __devinitdata = {
	PCI_DEVICE_ID_SI_648,
	PCI_DEVICE_ID_SI_746,
	0 /* terminator */
};
241
242static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
243{
244 int i;
245
246 for(i=0; sis_broken_chipsets[i]!=0; ++i)
247 if(bridge->dev->device==sis_broken_chipsets[i])
248 break;
249
250 if(sis_broken_chipsets[i] || agp_sis_force_delay)
251 sis_driver.agp_enable=sis_delayed_enable;
252
253 // sis chipsets that indicate less than agp3.5
254 // are not actually fully agp3 compliant
255 if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
256 && agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) {
257 sis_driver.aperture_sizes = agp3_generic_sizes;
258 sis_driver.size_type = U16_APER_SIZE;
259 sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
260 sis_driver.configure = agp3_generic_configure;
261 sis_driver.fetch_size = agp3_generic_fetch_size;
262 sis_driver.cleanup = agp3_generic_cleanup;
263 sis_driver.tlb_flush = agp3_generic_tlbflush;
264 }
265}
266
267
/*
 * Probe callback: match the host bridge against the known-chipset
 * table, allocate an agp_bridge_data, fill in version/mode info and
 * register it with the AGP core.
 */
static int __devinit agp_sis_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct agp_device_ids *devs = sis_agp_device_ids;
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int j;

	/* No AGP capability means this is not an AGP bridge. */
	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* probe for known chipsets */
	for (j = 0; devs[j].chipset_name; j++) {
		if (pdev->device == devs[j].device_id) {
			printk(KERN_INFO PFX "Detected SiS %s chipset\n",
			       devs[j].chipset_name);
			goto found;
		}
	}

	printk(KERN_ERR PFX "Unsupported SiS chipset (device id: %04x)\n",
	       pdev->device);
	return -ENODEV;

found:
	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sis_driver;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	get_agp_version(bridge);

	/* Fill in the mode register */
	pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
	/* May install the delay hack and/or the generic AGP3 hooks. */
	sis_get_driver(bridge);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
311
/* Remove callback: unregister and release the bridge created in probe. */
static void __devexit agp_sis_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
319
/* Bind to every SiS host bridge; probe() filters for supported devices. */
static struct pci_device_id agp_sis_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SI,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
333
/* PCI driver glue for the SiS AGP bridge. */
static struct pci_driver agp_sis_pci_driver = {
	.name		= "agpgart-sis",
	.id_table	= agp_sis_pci_table,
	.probe		= agp_sis_probe,
	.remove		= agp_sis_remove,
};
340
341static int __init agp_sis_init(void)
342{
343 if (agp_off)
344 return -EINVAL;
345 return pci_register_driver(&agp_sis_pci_driver);
346}
347
/* Module exit: unregister the PCI driver. */
static void __exit agp_sis_cleanup(void)
{
	pci_unregister_driver(&agp_sis_pci_driver);
}
352
/* Module hooks and parameters for the SiS AGP driver. */
module_init(agp_sis_init);
module_exit(agp_sis_cleanup);

module_param(agp_sis_force_delay, bool, 0);
MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
module_param(agp_sis_agp_spec, int, 0);
MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
new file mode 100644
index 000000000000..bb338d9134e0
--- /dev/null
+++ b/drivers/char/agp/sworks-agp.c
@@ -0,0 +1,556 @@
1/*
2 * Serverworks AGPGART routines.
3 */
4
5#include <linux/module.h>
6#include <linux/pci.h>
7#include <linux/init.h>
8#include <linux/agp_backend.h>
9#include "agp.h"
10
/* PCI configuration-space registers on the ServerWorks AGP function. */
#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10	/* aperture BAR */
#define SVWRKS_MMBASE		0x14	/* MMIO register BAR */
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000	/* size-probing mask for the aperture BAR */

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c
26
27
/*
 * One page of GATT/page-directory memory: the kernel-virtual page plus
 * the uncached ioremap alias through which the table is written.
 */
struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

/* Driver-wide private state shared by the callbacks below. */
static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;		/* mapped MMIO register page */
	struct serverworks_page_map **gatt_pages;	/* one entry per GATT page */
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;	/* config offset of the aperture BAR */
	int mm_addr_ofs;	/* config offset of the MMIO BAR */
} serverworks_private;
43
/*
 * Allocate one page for use as a GATT page or page directory: reserve
 * it, remap it uncached (the GART hardware reads it directly), and fill
 * every entry with the scratch page. Returns 0 or -ENOMEM.
 */
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					    PAGE_SIZE);
	if (page_map->remapped == NULL) {
		/* Undo the reservation and allocation on failure. */
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	/* Initialize all entries to point at the scratch page. */
	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}
69
/* Release a page created by serverworks_create_page_map(). */
static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
76
77static void serverworks_free_gatt_pages(void)
78{
79 int i;
80 struct serverworks_page_map **tables;
81 struct serverworks_page_map *entry;
82
83 tables = serverworks_private.gatt_pages;
84 for(i = 0; i < serverworks_private.num_tables; i++) {
85 entry = tables[i];
86 if (entry != NULL) {
87 if (entry->real != NULL) {
88 serverworks_free_page_map(entry);
89 }
90 kfree(entry);
91 }
92 }
93 kfree(tables);
94}
95
96static int serverworks_create_gatt_pages(int nr_tables)
97{
98 struct serverworks_page_map **tables;
99 struct serverworks_page_map *entry;
100 int retval = 0;
101 int i;
102
103 tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
104 GFP_KERNEL);
105 if (tables == NULL) {
106 return -ENOMEM;
107 }
108 memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
109 for (i = 0; i < nr_tables; i++) {
110 entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
111 if (entry == NULL) {
112 retval = -ENOMEM;
113 break;
114 }
115 memset(entry, 0, sizeof(struct serverworks_page_map));
116 tables[i] = entry;
117 retval = serverworks_create_page_map(entry);
118 if (retval != 0) break;
119 }
120 serverworks_private.num_tables = nr_tables;
121 serverworks_private.gatt_pages = tables;
122
123 if (retval != 0) serverworks_free_gatt_pages();
124
125 return retval;
126}
127
/*
 * Split an AGP bus address into a page-directory index and a GATT
 * offset (two-level table: 10 bits directory, 10 bits table, 12-bit
 * pages). Arguments are parenthesized so expression arguments expand
 * safely.
 */
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) ((addr) >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) (((addr) & 0x003ff000) >> 12)
#endif
143
/*
 * Build the two-level GATT: a page directory plus one GATT page per
 * 1024 entries, with a scratch directory so unmapped directory slots
 * still point at valid (scratch) memory.
 */
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for(i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	/* One GATT page covers 1024 entries. */
	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */

	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Calculate the agp offset */

	/* Hook the real GATT pages into the directory (bit 0 = valid). */
	for(i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

	return 0;
}
194
/* Tear down the two-level GATT built by serverworks_create_gatt_table(). */
static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}
207
/*
 * Determine the aperture size by writing the size mask to the aperture
 * BAR, reading back which bits stick, then restoring the original
 * value. Returns the size in MB, or 0 if it matches no table entry.
 */
static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
			       SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
	/* Restore the original BAR contents. */
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
235
236/*
237 * This routine could be implemented by taking the addresses
238 * written to the GATT, and flushing them individually. However
239 * currently it just flushes the whole table. Which is probably
240 * more efficent, since agp_memory blocks can be a large number of
241 * entries.
242 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	/* Trigger a posted-write flush and wait for completion.
	 * NOTE(review): no timeout here — a wedged chip would spin forever. */
	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
		cpu_relax();

	/* Then flush the page-directory cache the same way. */
	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	while(readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
		cpu_relax();
}
253
/*
 * Program the chipset: map the MMIO register page, install the GATT
 * base, enable AGP on the function-1 device, and set caching/feature
 * bits. Returns 0 or -ENOMEM if the MMIO mapping fails.
 */
static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk (KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	/* Point the hardware at the page directory. */
	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);

	pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* Agp Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);

	return 0;
}
305
/* Undo serverworks_configure(): unmap the MMIO register page. */
static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}
310
/*
 * Bind mem->page_count pages into the GATT starting at pg_start.
 * Returns -EINVAL for unsupported types or out-of-range requests and
 * -EBUSY if any target entry is already in use.
 */
static int serverworks_insert_memory(struct agp_memory *mem,
				     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	/* First pass: verify the whole target range is free. */
	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	/* Second pass: write the masked physical addresses into the GATT. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}
349
/*
 * Unbind mem->page_count pages starting at pg_start, restoring the
 * scratch page in each GATT entry. Only type 0 memory is supported.
 */
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	/* Flush again so the hardware cannot use stale translations. */
	serverworks_tlbflush(mem);
	return 0;
}
373
/* GATT entry mask: bit 0 marks an entry valid. */
static struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};
378
/* Supported apertures: {size in MB, GATT entries, aperture BAR size bits}. */
static struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};
389
/*
 * agp_enable hook: negotiate a mode from the function-1 device's
 * status register, strip features the chipset cannot do, then program
 * the bridge and all AGP devices.
 */
static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;	/* NOTE(review): presumably the >4GB addressing bit — confirm against the AGP spec */

	command |= 0x100;	/* NOTE(review): looks like the SBA-enable bit — confirm */

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, 0);
}
411
/* Driver operations for ServerWorks bridges (two-level GATT). */
struct agp_bridge_driver sworks_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= serverworks_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= serverworks_configure,
	.fetch_size		= serverworks_fetch_size,
	.cleanup		= serverworks_cleanup,
	.tlb_flush		= serverworks_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= serverworks_masks,
	.agp_enable		= serverworks_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= serverworks_create_gatt_table,
	.free_gatt_table	= serverworks_free_gatt_table,
	.insert_memory		= serverworks_insert_memory,
	.remove_memory		= serverworks_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
434
435static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
436 const struct pci_device_id *ent)
437{
438 struct agp_bridge_data *bridge;
439 struct pci_dev *bridge_dev;
440 u32 temp, temp2;
441 u8 cap_ptr = 0;
442
443 /* Everything is on func 1 here so we are hardcoding function one */
444 bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
445 PCI_DEVFN(0, 1));
446 if (!bridge_dev) {
447 printk(KERN_INFO PFX "Detected a Serverworks chipset "
448 "but could not find the secondary device.\n");
449 return -ENODEV;
450 }
451
452 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
453
454 switch (pdev->device) {
455 case 0x0006:
456 /* ServerWorks CNB20HE
457 Fail silently.*/
458 printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
459 return -ENODEV;
460
461 case PCI_DEVICE_ID_SERVERWORKS_HE:
462 case PCI_DEVICE_ID_SERVERWORKS_LE:
463 case 0x0007:
464 break;
465
466 default:
467 if (cap_ptr)
468 printk(KERN_ERR PFX "Unsupported Serverworks chipset "
469 "(device id: %04x)\n", pdev->device);
470 return -ENODEV;
471 }
472
473 serverworks_private.svrwrks_dev = bridge_dev;
474 serverworks_private.gart_addr_ofs = 0x10;
475
476 pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
477 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
478 pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
479 if (temp2 != 0) {
480 printk(KERN_INFO PFX "Detected 64 bit aperture address, "
481 "but top bits are not zero. Disabling agp\n");
482 return -ENODEV;
483 }
484 serverworks_private.mm_addr_ofs = 0x18;
485 } else
486 serverworks_private.mm_addr_ofs = 0x14;
487
488 pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
489 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
490 pci_read_config_dword(pdev,
491 serverworks_private.mm_addr_ofs + 4, &temp2);
492 if (temp2 != 0) {
493 printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
494 "but top bits are not zero. Disabling agp\n");
495 return -ENODEV;
496 }
497 }
498
499 bridge = agp_alloc_bridge();
500 if (!bridge)
501 return -ENOMEM;
502
503 bridge->driver = &sworks_driver;
504 bridge->dev_private_data = &serverworks_private,
505 bridge->dev = pdev;
506
507 pci_set_drvdata(pdev, bridge);
508 return agp_add_bridge(bridge);
509}
510
/* Remove callback: unregister and release the bridge created in probe. */
static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
518
/* Bind to every ServerWorks host bridge; probe() filters further. */
static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
532
/* PCI driver glue for the ServerWorks AGP bridge. */
static struct pci_driver agp_serverworks_pci_driver = {
	.name		= "agpgart-serverworks",
	.id_table	= agp_serverworks_pci_table,
	.probe		= agp_serverworks_probe,
	.remove		= agp_serverworks_remove,
};
539
540static int __init agp_serverworks_init(void)
541{
542 if (agp_off)
543 return -EINVAL;
544 return pci_register_driver(&agp_serverworks_pci_driver);
545}
546
/* Module exit: unregister the PCI driver. */
static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}
551
/* Standard module hooks for the ServerWorks AGP driver. */
module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");
556
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
new file mode 100644
index 000000000000..0f248239b4ba
--- /dev/null
+++ b/drivers/char/agp/uninorth-agp.c
@@ -0,0 +1,647 @@
1/*
2 * UniNorth AGPGART routines.
3 */
4#include <linux/module.h>
5#include <linux/pci.h>
6#include <linux/init.h>
7#include <linux/pagemap.h>
8#include <linux/agp_backend.h>
9#include <linux/delay.h>
10#include <asm/uninorth.h>
11#include <asm/pci-bridge.h>
12#include <asm/prom.h>
13#include "agp.h"
14
15/*
16 * NOTES for uninorth3 (G5 AGP) supports :
17 *
18 * There maybe also possibility to have bigger cache line size for
19 * agp (see pmac_pci.c and look for cache line). Need to be investigated
20 * by someone.
21 *
22 * PAGE size are hardcoded but this may change, see asm/page.h.
23 *
24 * Jerome Glisse <j.glisse@gmail.com>
25 */
/* UniNorth/U3 "device-rev" from the OF tree (0 if unreadable); used to
 * gate chip-revision workarounds below. */
static int uninorth_rev;
/* Non-zero when running on a G5 "u3" bridge rather than core99 uni-n. */
static int is_u3;
28
29static int uninorth_fetch_size(void)
30{
31 int i;
32 u32 temp;
33 struct aper_size_info_32 *values;
34
35 pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_BASE, &temp);
36 temp &= ~(0xfffff000);
37 values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
38
39 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
40 if (temp == values[i].size_value) {
41 agp_bridge->previous_size =
42 agp_bridge->current_size = (void *) (values + i);
43 agp_bridge->aperture_size_idx = i;
44 return values[i].size;
45 }
46 }
47
48 agp_bridge->previous_size =
49 agp_bridge->current_size = (void *) (values + 1);
50 agp_bridge->aperture_size_idx = 1;
51 return values[1].size;
52
53 return 0;
54}
55
/*
 * Flush the GART TLB by pulsing the INVAL bit in the control register
 * (write with INVAL set, then without), keeping ENABLE asserted.
 * Revisions <= 0x30 additionally need a 2xRESET pulse afterwards.
 */
static void uninorth_tlbflush(struct agp_memory *mem)
{
	u32 ctrl = UNI_N_CFG_GART_ENABLE;

	/* U3 also keeps the perf-read bit set while flushing. */
	if (is_u3)
		ctrl |= U3_N_CFG_GART_PERFRD;
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
			       ctrl | UNI_N_CFG_GART_INVAL);
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);

	if (uninorth_rev <= 0x30) {
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       ctrl | UNI_N_CFG_GART_2xRESET);
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       ctrl);
	}
}
73
/*
 * Disable the GART: if it is enabled, invalidate the TLB, then clear
 * the control register entirely.  Old revisions (<= 0x30) get the same
 * 2xRESET pulse used in uninorth_tlbflush().
 */
static void uninorth_cleanup(void)
{
	u32 tmp;

	pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
	if (!(tmp & UNI_N_CFG_GART_ENABLE))
		return;		/* already off, nothing to do */
	tmp |= UNI_N_CFG_GART_INVAL;
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);

	if (uninorth_rev <= 0x30) {
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       UNI_N_CFG_GART_2xRESET);
		pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
				       0);
	}
}
92
/*
 * Program the bridge from agp_bridge->current_size: write the GATT bus
 * address plus size encoding, force the aperture to bus address 0
 * (chip quirk), and on U3 point the GART dummy page at the scratch
 * page.  Always returns 0.
 */
static int uninorth_configure(void)
{
	struct aper_size_info_32 *current_size;

	current_size = A_SIZE_32(agp_bridge->current_size);

	printk(KERN_INFO PFX "configuring for size idx: %d\n",
	       current_size->size_value);

	/* aperture size and gatt addr */
	pci_write_config_dword(agp_bridge->dev,
		UNI_N_CFG_GART_BASE,
		(agp_bridge->gatt_bus_addr & 0xfffff000)
		| current_size->size_value);

	/* HACK ALERT
	 * UniNorth seem to be buggy enough not to handle properly when
	 * the AGP aperture isn't mapped at bus physical address 0
	 */
	agp_bridge->gart_bus_addr = 0;
#ifdef CONFIG_PPC64
	/* Assume U3 or later on PPC64 systems */
	/* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
	pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
			       (agp_bridge->gatt_bus_addr >> 32) & 0xf);
#else
	pci_write_config_dword(agp_bridge->dev,
		UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
#endif

	if (is_u3) {
		/* Back invalid GART entries with the scratch page (page number,
		 * hence the >> 12). */
		pci_write_config_dword(agp_bridge->dev,
				       UNI_N_CFG_GART_DUMMY_PAGE,
				       agp_bridge->scratch_page_real >> 12);
	}

	return 0;
}
131
/*
 * Insert pages into the classic UniNorth GATT.  Entries are
 * little-endian words: the physical page address ORed with 0x1
 * (presumably a valid bit -- confirm against UniNorth docs).
 * Returns -EINVAL for unknown types or out-of-range inserts, -EBUSY if
 * any target slot is occupied.
 */
static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
				int type)
{
	int i, j, num_entries;
	void *temp;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_32(temp)->num_entries;

	if (type != 0 || mem->type != 0)
		/* We know nothing of memory types */
		return -EINVAL;
	if ((pg_start + mem->page_count) > num_entries)
		return -EINVAL;

	j = pg_start;

	/* Refuse to overwrite live entries. */
	while (j < (pg_start + mem->page_count)) {
		if (agp_bridge->gatt_table[j])
			return -EBUSY;
		j++;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		agp_bridge->gatt_table[j] =
		    cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL);
		/* Flush each mapped page out of the data cache. */
		flush_dcache_range((unsigned long)__va(mem->memory[i]),
				   (unsigned long)__va(mem->memory[i])+0x1000);
	}
	/* Read back one entry, then flush the updated GATT range itself so
	 * the bridge sees the new entries. */
	(void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
	mb();
	flush_dcache_range((unsigned long)&agp_bridge->gatt_table[pg_start],
		(unsigned long)&agp_bridge->gatt_table[pg_start + mem->page_count]);

	uninorth_tlbflush(mem);
	return 0;
}
169
170static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
171{
172 int i, num_entries;
173 void *temp;
174 u32 *gp;
175
176 temp = agp_bridge->current_size;
177 num_entries = A_SIZE_32(temp)->num_entries;
178
179 if (type != 0 || mem->type != 0)
180 /* We know nothing of memory types */
181 return -EINVAL;
182 if ((pg_start + mem->page_count) > num_entries)
183 return -EINVAL;
184
185 gp = (u32 *) &agp_bridge->gatt_table[pg_start];
186 for (i = 0; i < mem->page_count; ++i) {
187 if (gp[i]) {
188 printk("u3_insert_memory: entry 0x%x occupied (%x)\n",
189 i, gp[i]);
190 return -EBUSY;
191 }
192 }
193
194 for (i = 0; i < mem->page_count; i++) {
195 gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL;
196 flush_dcache_range((unsigned long)__va(mem->memory[i]),
197 (unsigned long)__va(mem->memory[i])+0x1000);
198 }
199 mb();
200 flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
201 uninorth_tlbflush(mem);
202
203 return 0;
204}
205
/*
 * Clear pages out of the U3 GATT and flush the TLB.
 *
 * NOTE(review): unlike u3_insert_memory(), there is no bounds check of
 * pg_start/page_count against num_entries here; callers are trusted.
 * Also note this is deliberately non-static: it is referenced by the
 * u3_agp_driver ops table below.
 */
int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
	size_t i;
	u32 *gp;

	if (type != 0 || mem->type != 0)
		/* We know nothing of memory types */
		return -EINVAL;

	gp = (u32 *) &agp_bridge->gatt_table[pg_start];
	for (i = 0; i < mem->page_count; ++i)
		gp[i] = 0;
	mb();
	/* Push the cleared entries to memory before invalidating the TLB. */
	flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
	uninorth_tlbflush(mem);

	return 0;
}
224
/*
 * Negotiate the AGP mode and write the command register.
 * Revision quirks: rev 0x21 has broken AGP 4x (Darwin disables it
 * too); U3 revs 0x30-0x33 need RQ_DEPTH capped at 7.  The command
 * write is retried (up to 1000 times) until the AGP enable bit reads
 * back, since it does not always stick on the first attempt.
 */
static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command, scratch, status;
	int timeout;

	pci_read_config_dword(bridge->dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &status);

	command = agp_collect_device_status(bridge, mode, status);
	command |= PCI_AGP_COMMAND_AGP;

	if (uninorth_rev == 0x21) {
		/*
		 * Darwin disable AGP 4x on this revision, thus we
		 * may assume it's broken. This is an AGP2 controller.
		 */
		command &= ~AGPSTAT2_4X;
	}

	if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
		/*
		 * We need to to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
		 * 2.2 and 2.3, Darwin do so.
		 */
		if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
			command = (command & ~AGPSTAT_RQ_DEPTH)
				| (7 << AGPSTAT_RQ_DEPTH_SHIFT);
	}

	uninorth_tlbflush(NULL);

	timeout = 0;
	do {
		pci_write_config_dword(bridge->dev,
				       bridge->capndx + PCI_AGP_COMMAND,
				       command);
		pci_read_config_dword(bridge->dev,
				      bridge->capndx + PCI_AGP_COMMAND,
				       &scratch);
	} while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
	if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
		printk(KERN_ERR PFX "failed to write UniNorth AGP command reg\n");

	/* Propagate the negotiated mode to the AGP device(s). */
	if (uninorth_rev >= 0x30) {
		/* This is an AGP V3 */
		agp_device_command(command, (status & AGPSTAT_MODE_3_0));
	} else {
		/* AGP V2 */
		agp_device_command(command, 0);
	}

	uninorth_tlbflush(NULL);
}
279
280#ifdef CONFIG_PM
/*
 * Suspend hook: disable AGP on every device that shares the bridge's
 * bus, then on the bridge itself, and finally shut the GART down.
 * Only acts on a real PMSG_SUSPEND transition.
 */
static int agp_uninorth_suspend(struct pci_dev *pdev, pm_message_t state)
{
	u32 cmd;
	u8 agp;
	struct pci_dev *device = NULL;

	if (state != PMSG_SUSPEND)
		return 0;

	/* turn off AGP on the video chip, if it was enabled */
	for_each_pci_dev(device) {
		/* Don't touch the bridge yet, device first */
		if (device == pdev)
			continue;
		/* Only deal with devices on the same bus here, no Mac has a P2P
		 * bridge on the AGP port, and mucking around the entire PCI
		 * tree is source of problems on some machines because of a bug
		 * in some versions of pci_find_capability() when hitting a dead
		 * device
		 */
		if (device->bus != pdev->bus)
			continue;
		agp = pci_find_capability(device, PCI_CAP_ID_AGP);
		if (!agp)
			continue;
		pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
		if (!(cmd & PCI_AGP_COMMAND_AGP))
			continue;
		printk("uninorth-agp: disabling AGP on device %s\n",
				pci_name(device));
		cmd &= ~PCI_AGP_COMMAND_AGP;
		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
	}

	/* turn off AGP on the bridge */
	agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	/* NOTE(review): 'agp' is not checked for 0 here; this relies on
	 * probe() only binding when the AGP capability exists. */
	pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
	if (cmd & PCI_AGP_COMMAND_AGP) {
		printk("uninorth-agp: disabling AGP on bridge %s\n",
				pci_name(pdev));
		cmd &= ~PCI_AGP_COMMAND_AGP;
		pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
	}
	/* turn off the GART */
	uninorth_cleanup();

	return 0;
}
329
/* Resume hook: nothing to do here; reconfiguration happens elsewhere. */
static int agp_uninorth_resume(struct pci_dev *pdev)
{
	return 0;
}
334#endif
335
/*
 * Allocate and initialize the GATT for the current aperture size.
 * On allocation failure, retries with successively smaller aperture
 * sizes from the driver table.  The backing pages are marked reserved,
 * the table is zeroed, and the whole range is flushed from the data
 * cache so the bridge sees consistent contents.
 * Returns 0, -EINVAL for two-level GATTs (unsupported), or -ENOMEM.
 */
static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
{
	char *table;
	char *table_end;
	int size;
	int page_order;
	int num_entries;
	int i;
	void *temp;
	struct page *page;

	/* We can't handle 2 level gatt's */
	if (bridge->driver->size_type == LVL2_APER_SIZE)
		return -EINVAL;

	table = NULL;
	i = bridge->aperture_size_idx;
	temp = bridge->current_size;
	size = page_order = num_entries = 0;

	/* Try the current size first, then fall back to smaller apertures
	 * until an allocation succeeds or the table is exhausted. */
	do {
		size = A_SIZE_32(temp)->size;
		page_order = A_SIZE_32(temp)->page_order;
		num_entries = A_SIZE_32(temp)->num_entries;

		table = (char *) __get_free_pages(GFP_KERNEL, page_order);

		if (table == NULL) {
			i++;
			bridge->current_size = A_IDX32(bridge);
		} else {
			bridge->aperture_size_idx = i;
		}
	} while (!table && (i < bridge->driver->num_aperture_sizes));

	if (table == NULL)
		return -ENOMEM;

	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	/* Reserve the pages so they are not swapped/freed behind our back. */
	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		SetPageReserved(page);

	bridge->gatt_table_real = (u32 *) table;
	bridge->gatt_table = (u32 *)table;
	bridge->gatt_bus_addr = virt_to_phys(table);

	for (i = 0; i < num_entries; i++)
		bridge->gatt_table[i] = 0;

	flush_dcache_range((unsigned long)table, (unsigned long)table_end);

	return 0;
}
390
/*
 * Free the GATT allocated by uninorth_create_gatt_table(): unreserve
 * the backing pages and return them to the page allocator.
 */
static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	char *table, *table_end;
	void *temp;
	struct page *page;

	temp = bridge->current_size;
	page_order = A_SIZE_32(temp)->page_order;

	/* Do not worry about freeing memory, because if this is
	 * called, then all agp memory is deallocated and removed
	 * from the table.
	 */

	table = (char *) bridge->gatt_table_real;
	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
		ClearPageReserved(page);

	free_pages((unsigned long) bridge->gatt_table_real, page_order);

	return 0;
}
416
/* cache_flush op: flushing is handled explicitly with flush_dcache_range
 * in the insert/remove paths, so only a memory barrier is needed here. */
void null_cache_flush(void)
{
	mb();
}
421
422/* Setup function */
423
/* Aperture sizes for classic UniNorth: {MB, entries, page_order,
 * register size encoding}.  Only the four smallest are enabled. */
static struct aper_size_info_32 uninorth_sizes[7] =
{
#if 0 /* Not sure uninorth supports that high aperture sizes */
	{256, 65536, 6, 64},
	{128, 32768, 5, 32},
	{64, 16384, 4, 16},
#endif
	{32, 8192, 3, 8},
	{16, 4096, 2, 4},
	{8, 2048, 1, 2},
	{4, 1024, 0, 1}
};
436
/*
 * Not sure that u3 supports aperture sizes this large, but it
 * would be strange if it did not :)
 */
static struct aper_size_info_32 u3_sizes[8] =
{
	{512, 131072, 7, 128},
	{256, 65536, 6, 64},
	{128, 32768, 5, 32},
	{64, 16384, 4, 16},
	{32, 8192, 3, 8},
	{16, 4096, 2, 4},
	{8, 2048, 1, 2},
	{4, 1024, 0, 1}
};
452
/* Driver ops for classic (core99) UniNorth bridges.  Note
 * num_aperture_sizes = 4 matches the entries left enabled in
 * uninorth_sizes[] above. */
struct agp_bridge_driver uninorth_agp_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= (void *)uninorth_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 4,
	.configure		= uninorth_configure,
	.fetch_size		= uninorth_fetch_size,
	.cleanup		= uninorth_cleanup,
	.tlb_flush		= uninorth_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.cache_flush		= null_cache_flush,
	.agp_enable		= uninorth_agp_enable,
	.create_gatt_table	= uninorth_create_gatt_table,
	.free_gatt_table	= uninorth_free_gatt_table,
	.insert_memory		= uninorth_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.cant_use_aperture	= 1,
};
476
/* Driver ops for G5 U3 bridges: different GATT entry format
 * (u3_insert/remove_memory) and a scratch page is required so invalid
 * entries have somewhere safe to point. */
struct agp_bridge_driver u3_agp_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= (void *)u3_sizes,
	.size_type		= U32_APER_SIZE,
	.num_aperture_sizes	= 8,
	.configure		= uninorth_configure,
	.fetch_size		= uninorth_fetch_size,
	.cleanup		= uninorth_cleanup,
	.tlb_flush		= uninorth_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.cache_flush		= null_cache_flush,
	.agp_enable		= uninorth_agp_enable,
	.create_gatt_table	= uninorth_create_gatt_table,
	.free_gatt_table	= uninorth_free_gatt_table,
	.insert_memory		= u3_insert_memory,
	.remove_memory		= u3_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
	.cant_use_aperture	= 1,
	.needs_scratch_page	= 1,
};
501
/* Known Apple AGP host bridges; probe() scans this list and rejects
 * anything not present here. */
static struct agp_device_ids uninorth_agp_device_ids[] __devinitdata = {
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP,
		.chipset_name	= "UniNorth",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
		.chipset_name	= "UniNorth/Pangea",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
		.chipset_name	= "UniNorth 1.5",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
		.chipset_name	= "UniNorth 2",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_U3_AGP,
		.chipset_name	= "U3",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_U3L_AGP,
		.chipset_name	= "U3L",
	},
	{
		.device_id	= PCI_DEVICE_ID_APPLE_U3H_AGP,
		.chipset_name	= "U3H",
	},
};
532
/*
 * Probe an Apple host bridge: verify the AGP capability, match the
 * device against uninorth_agp_device_ids, read the chip revision from
 * the Open Firmware tree ("uni-n" node, or "u3" on G5), then allocate
 * and register an agp_bridge_data with the matching driver ops.
 */
static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	struct agp_device_ids *devs = uninorth_agp_device_ids;
	struct agp_bridge_data *bridge;
	struct device_node *uninorth_node;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (cap_ptr == 0)
		return -ENODEV;

	/* probe for known chipsets */
	for (j = 0; devs[j].chipset_name != NULL; ++j) {
		if (pdev->device == devs[j].device_id) {
			printk(KERN_INFO PFX "Detected Apple %s chipset\n",
			       devs[j].chipset_name);
			goto found;
		}
	}

	printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n",
	       pdev->device);
	return -ENODEV;

 found:
	/* Set revision to 0 if we could not read it. */
	uninorth_rev = 0;
	is_u3 = 0;
	/* Locate core99 Uni-N */
	uninorth_node = of_find_node_by_name(NULL, "uni-n");
	/* Locate G5 u3 */
	if (uninorth_node == NULL) {
		is_u3 = 1;
		uninorth_node = of_find_node_by_name(NULL, "u3");
	}
	if (uninorth_node) {
		/* Low 6 bits of "device-rev" drive the quirks above. */
		int *revprop = (int *)
			get_property(uninorth_node, "device-rev", NULL);
		if (revprop != NULL)
			uninorth_rev = *revprop & 0x3f;
		of_node_put(uninorth_node);
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	if (is_u3)
		bridge->driver = &u3_agp_driver;
	else
		bridge->driver = &uninorth_agp_driver;

	bridge->dev = pdev;
	bridge->capndx = cap_ptr;
	bridge->flags = AGP_ERRATA_FASTWRITES;

	/* Fill in the mode register */
	pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
597
/* PCI remove hook: unregister and drop the bridge set up by probe(). */
static void __devexit agp_uninorth_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
605
/* Match any Apple host bridge; probe() filters against the device-id
 * list above. */
static struct pci_device_id agp_uninorth_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_APPLE,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
619
/* PCI driver glue for Apple UniNorth/U3 AGP bridges. */
static struct pci_driver agp_uninorth_pci_driver = {
	.name		= "agpgart-uninorth",
	.id_table	= agp_uninorth_pci_table,
	.probe		= agp_uninorth_probe,
	.remove		= agp_uninorth_remove,
#ifdef CONFIG_PM
	.suspend	= agp_uninorth_suspend,
	.resume		= agp_uninorth_resume,
#endif
};
630
631static int __init agp_uninorth_init(void)
632{
633 if (agp_off)
634 return -EINVAL;
635 return pci_register_driver(&agp_uninorth_pci_driver);
636}
637
/* Module unload: detach the PCI driver. */
static void __exit agp_uninorth_cleanup(void)
{
	pci_unregister_driver(&agp_uninorth_pci_driver);
}

module_init(agp_uninorth_init);
module_exit(agp_uninorth_cleanup);

MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
new file mode 100644
index 000000000000..e1451dd9b6a7
--- /dev/null
+++ b/drivers/char/agp/via-agp.c
@@ -0,0 +1,548 @@
1/*
2 * VIA AGPGART routines.
3 */
4
5#include <linux/types.h>
6#include <linux/module.h>
7#include <linux/pci.h>
8#include <linux/init.h>
9#include <linux/agp_backend.h>
10#include "agp.h"
11
/* Forward declaration; agp_via_probe() indexes into this table. */
static struct pci_device_id agp_via_pci_table[];

/* PCI config-space register offsets for legacy (AGP 2.x) VIA bridges. */
#define VIA_GARTCTRL	0x80
#define VIA_APSIZE	0x84
#define VIA_ATTBASE	0x88

/* Offsets for the AGP 3.0 register set, plus the mode-select register. */
#define VIA_AGP3_GARTCTRL	0x90
#define VIA_AGP3_APSIZE		0x94
#define VIA_AGP3_ATTBASE	0x98
#define VIA_AGPSEL		0xfd
22
23static int via_fetch_size(void)
24{
25 int i;
26 u8 temp;
27 struct aper_size_info_8 *values;
28
29 values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
30 pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
31 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
32 if (temp == values[i].size_value) {
33 agp_bridge->previous_size =
34 agp_bridge->current_size = (void *) (values + i);
35 agp_bridge->aperture_size_idx = i;
36 return values[i].size;
37 }
38 }
39 printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
40 return 0;
41}
42
43
/*
 * Program a legacy VIA bridge: aperture size, GART control enable
 * bits, and the GATT base (low 12 bits carry enable flags, value 3).
 * Always returns 0.
 */
static int via_configure(void)
{
	u32 temp;
	struct aper_size_info_8 *current_size;

	current_size = A_SIZE_8(agp_bridge->current_size);
	/* aperture size */
	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
			      current_size->size_value);
	/* address to map too */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* GART control register */
	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);

	/* attbase - aperture GATT base */
	pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
			    (agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
	return 0;
}
65
66
/* Restore the aperture size saved in previous_size; the GATT base is
 * deliberately left alone (see comment below). */
static void via_cleanup(void)
{
	struct aper_size_info_8 *previous_size;

	previous_size = A_SIZE_8(agp_bridge->previous_size);
	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
			      previous_size->size_value);
	/* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
	 * during reinitialization.
	 */
}
78
79
/* Flush the GART TLB by pulsing bit 7 of the GART control register. */
static void via_tlbflush(struct agp_memory *mem)
{
	u32 temp;

	pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
	temp |= (1<<7);
	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
	temp &= ~(1<<7);
	pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
}
90
91
/* Aperture sizes for legacy VIA bridges: {MB, entries, page_order,
 * VIA_APSIZE register encoding}. */
static struct aper_size_info_8 via_generic_sizes[9] =
{
	{256, 65536, 6, 0},
	{128, 32768, 5, 128},
	{64, 16384, 4, 192},
	{32, 8192, 3, 224},
	{16, 4096, 2, 240},
	{8, 2048, 1, 248},
	{4, 1024, 0, 252},
	{2, 512, 0, 254},
	{1, 256, 0, 255}
};
104
105
/*
 * AGP3 variant of via_fetch_size(): reads the 12-bit size field of
 * VIA_AGP3_APSIZE and matches it against the AGP3 aperture table.
 * Returns the size in MB, or 0 (silently) if unrecognized.
 */
static int via_fetch_size_agp3(void)
{
	int i;
	u16 temp;
	struct aper_size_info_16 *values;

	values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
	pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
	temp &= 0xfff;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp == values[i].size_value) {
			agp_bridge->previous_size =
				agp_bridge->current_size = (void *) (values + i);
			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}
	return 0;
}
126
127
/*
 * Program an AGP3-mode VIA bridge: record the aperture bus address,
 * write the GATT base, and enable the GTLB plus the aperture (the two
 * bits set by "| (3<<7)", per the comment below).  Always returns 0.
 */
static int via_configure_agp3(void)
{
	u32 temp;
	struct aper_size_info_16 *current_size;

	current_size = A_SIZE_16(agp_bridge->current_size);

	/* address to map too */
	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* attbase - aperture GATT base */
	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
		agp_bridge->gatt_bus_addr & 0xfffff000);

	/* 1. Enable GTLB in RX90<7>, all AGP aperture access needs to fetch
	 *    translation table first.
	 * 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
	 *    graphics AGP aperture for the AGP3.0 port.
	 */
	pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
	return 0;
}
152
153
/* Restore the previous aperture size on teardown.
 * NOTE(review): this writes the legacy VIA_APSIZE byte register with an
 * aper_size_info_16 value, not VIA_AGP3_APSIZE -- looks suspicious but
 * matches the shipped behaviour; confirm against VIA datasheets before
 * changing. */
static void via_cleanup_agp3(void)
{
	struct aper_size_info_16 *previous_size;

	previous_size = A_SIZE_16(agp_bridge->previous_size);
	pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
}
161
162
/* Flush the AGP3 GART TLB by toggling the GTLB enable bit (bit 7)
 * off and back to its previous value. */
static void via_tlbflush_agp3(struct agp_memory *mem)
{
	u32 temp;

	pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
	pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
}
171
172
/* Driver ops used when the bridge runs in AGP 3.0 mode (selected by
 * check_via_agp3() below); relies on the generic GATT helpers. */
struct agp_bridge_driver via_agp3_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= agp3_generic_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 10,
	.configure		= via_configure_agp3,
	.fetch_size		= via_fetch_size_agp3,
	.cleanup		= via_cleanup_agp3,
	.tlb_flush		= via_tlbflush_agp3,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
195
/* Default driver ops for legacy (AGP 2.x) VIA bridges; probe() may
 * swap in via_agp3_driver after checking the AGPSEL register. */
struct agp_bridge_driver via_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= via_generic_sizes,
	.size_type		= U8_APER_SIZE,
	.num_aperture_sizes	= 9,
	.configure		= via_configure,
	.fetch_size		= via_fetch_size,
	.cleanup		= via_cleanup,
	.tlb_flush		= via_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= NULL,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= agp_generic_insert_memory,
	.remove_memory		= agp_generic_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
218
/* Human-readable names for supported VIA bridges.  MUST stay in the
 * same order as agp_via_pci_table below: probe() computes the index as
 * (ent - agp_via_pci_table) and uses it to look up the name here. */
static struct agp_device_ids via_agp_device_ids[] __devinitdata =
{
	{
		.device_id	= PCI_DEVICE_ID_VIA_82C597_0,
		.chipset_name	= "Apollo VP3",
	},

	{
		.device_id	= PCI_DEVICE_ID_VIA_82C598_0,
		.chipset_name	= "Apollo MVP3",
	},

	{
		.device_id	= PCI_DEVICE_ID_VIA_8501_0,
		.chipset_name	= "Apollo MVP4",
	},

	/* VT8601 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8601_0,
		.chipset_name	= "Apollo ProMedia/PLE133Ta",
	},

	/* VT82C693A / VT28C694T */
	{
		.device_id	= PCI_DEVICE_ID_VIA_82C691_0,
		.chipset_name	= "Apollo Pro 133",
	},

	{
		.device_id	= PCI_DEVICE_ID_VIA_8371_0,
		.chipset_name	= "KX133",
	},

	/* VT8633 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8633_0,
		.chipset_name	= "Pro 266",
	},

	{
		.device_id	= PCI_DEVICE_ID_VIA_XN266,
		.chipset_name	= "Apollo Pro266",
	},

	/* VT8361 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8361,
		.chipset_name	= "KLE133",
	},

	/* VT8365 / VT8362 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8363_0,
		.chipset_name	= "Twister-K/KT133x/KM133",
	},

	/* VT8753A */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8753_0,
		.chipset_name	= "P4X266",
	},

	/* VT8366 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8367_0,
		.chipset_name	= "KT266/KY266x/KT333",
	},

	/* VT8633 (for CuMine/ Celeron) */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8653_0,
		.chipset_name	= "Pro266T",
	},

	/* KM266 / PM266 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_XM266,
		.chipset_name	= "PM266/KM266",
	},

	/* CLE266 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_862X_0,
		.chipset_name	= "CLE266",
	},

	{
		.device_id	= PCI_DEVICE_ID_VIA_8377_0,
		.chipset_name	= "KT400/KT400A/KT600",
	},

	/* VT8604 / VT8605 / VT8603
	 * (Apollo Pro133A chipset with S3 Savage4) */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8605_0,
		.chipset_name	= "ProSavage PM133/PL133/PN133"
	},

	/* P4M266x/P4N266 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8703_51_0,
		.chipset_name	= "P4M266x/P4N266",
	},

	/* VT8754 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8754C_0,
		.chipset_name	= "PT800",
	},

	/* P4X600 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8763_0,
		.chipset_name	= "P4X600"
	},

	/* KM400 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8378_0,
		.chipset_name	= "KM400/KM400A",
	},

	/* PT880 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_PT880,
		.chipset_name	= "PT880",
	},

	/* PT890 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_8783_0,
		.chipset_name	= "PT890",
	},

	/* PM800/PN800/PM880/PN880 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_PX8X0_0,
		.chipset_name	= "PM800/PN800/PM880/PN880",
	},
	/* KT880 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_3269_0,
		.chipset_name	= "KT880",
	},
	/* KTxxx/Px8xx */
	{
		.device_id	= PCI_DEVICE_ID_VIA_83_87XX_1,
		.chipset_name	= "VT83xx/VT87xx/KTxxx/Px8xx",
	},
	/* P4M800 */
	{
		.device_id	= PCI_DEVICE_ID_VIA_3296_0,
		.chipset_name	= "P4M800",
	},

	{ }, /* dummy final entry, always present */
};
377
378
/*
 * VIA's AGP3 chipsets do some magic to make the AGP bridge comply
 * with the same AGP standard version as the graphics card.
 */
383static void check_via_agp3 (struct agp_bridge_data *bridge)
384{
385 u8 reg;
386
387 pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
388 /* Check AGP 2.0 compatibility mode. */
389 if ((reg & (1<<1))==0)
390 bridge->driver = &via_agp3_driver;
391}
392
393
/*
 * Probe a VIA host bridge: verify the AGP capability, name the chipset
 * by indexing via_agp_device_ids with the PCI-table entry offset (both
 * tables must stay in the same order), pick legacy vs AGP3 ops, read
 * the mode register, and register the bridge.
 */
static int __devinit agp_via_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct agp_device_ids *devs = via_agp_device_ids;
	struct agp_bridge_data *bridge;
	int j = 0;
	u8 cap_ptr;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	/* Tables are parallel, so the entry offset is the name index. */
	j = ent - agp_via_pci_table;
	printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->dev = pdev;
	bridge->capndx = cap_ptr;
	bridge->driver = &via_driver;

	/*
	 * Garg, there are KT400s with KT266 IDs.
	 */
	if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
		/* Is there a KT400 subsystem ? */
		if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
			printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
			check_via_agp3(bridge);
		}
	}

	/* If this is an AGP3 bridge, check which mode its in and adjust. */
	get_agp_version(bridge);
	if (bridge->major_version >= 3)
		check_via_agp3(bridge);

	/* Fill in the mode register */
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS, &bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
440
/* PCI remove hook: unregister and drop the bridge set up by probe(). */
static void __devexit agp_via_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
448
449#ifdef CONFIG_PM
450
/* Suspend: save PCI config state and drop the bridge into D3hot. */
static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state (pdev);
	pci_set_power_state (pdev, PCI_D3hot);

	return 0;
}
458
/* Resume: restore power/config state, then re-run the configure op
 * matching whichever driver variant probe() selected. */
static int agp_via_resume(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	pci_set_power_state (pdev, PCI_D0);
	pci_restore_state(pdev);

	if (bridge->driver == &via_agp3_driver)
		return via_configure_agp3();
	else if (bridge->driver == &via_driver)
		return via_configure();

	return 0;
}
473
474#endif /* CONFIG_PM */
475
/* must be the same order as name table above */
static struct pci_device_id agp_via_pci_table[] = {
/* Expands to a host-bridge-class match entry for one VIA device id. */
#define ID(x) \
	{						\
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),	\
	.class_mask	= ~0,				\
	.vendor		= PCI_VENDOR_ID_VIA,		\
	.device		= x,				\
	.subvendor	= PCI_ANY_ID,			\
	.subdevice	= PCI_ANY_ID,			\
	}
	ID(PCI_DEVICE_ID_VIA_82C597_0),
	ID(PCI_DEVICE_ID_VIA_82C598_0),
	ID(PCI_DEVICE_ID_VIA_8501_0),
	ID(PCI_DEVICE_ID_VIA_8601_0),
	ID(PCI_DEVICE_ID_VIA_82C691_0),
	ID(PCI_DEVICE_ID_VIA_8371_0),
	ID(PCI_DEVICE_ID_VIA_8633_0),
	ID(PCI_DEVICE_ID_VIA_XN266),
	ID(PCI_DEVICE_ID_VIA_8361),
	ID(PCI_DEVICE_ID_VIA_8363_0),
	ID(PCI_DEVICE_ID_VIA_8753_0),
	ID(PCI_DEVICE_ID_VIA_8367_0),
	ID(PCI_DEVICE_ID_VIA_8653_0),
	ID(PCI_DEVICE_ID_VIA_XM266),
	ID(PCI_DEVICE_ID_VIA_862X_0),
	ID(PCI_DEVICE_ID_VIA_8377_0),
	ID(PCI_DEVICE_ID_VIA_8605_0),
	ID(PCI_DEVICE_ID_VIA_8703_51_0),
	ID(PCI_DEVICE_ID_VIA_8754C_0),
	ID(PCI_DEVICE_ID_VIA_8763_0),
	ID(PCI_DEVICE_ID_VIA_8378_0),
	ID(PCI_DEVICE_ID_VIA_PT880),
	ID(PCI_DEVICE_ID_VIA_8783_0),
	ID(PCI_DEVICE_ID_VIA_PX8X0_0),
	ID(PCI_DEVICE_ID_VIA_3269_0),
	ID(PCI_DEVICE_ID_VIA_83_87XX_1),
	ID(PCI_DEVICE_ID_VIA_3296_0),
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
518
519
/* PCI driver glue for VIA AGP bridges. */
static struct pci_driver agp_via_pci_driver = {
	.name		= "agpgart-via",
	.id_table	= agp_via_pci_table,
	.probe		= agp_via_probe,
	.remove		= agp_via_remove,
#ifdef CONFIG_PM
	.suspend	= agp_via_suspend,
	.resume		= agp_via_resume,
#endif
};
530
531
532static int __init agp_via_init(void)
533{
534 if (agp_off)
535 return -EINVAL;
536 return pci_register_driver(&agp_via_pci_driver);
537}
538
/* Module unload: detach the PCI driver. */
static void __exit agp_via_cleanup(void)
{
	pci_unregister_driver(&agp_via_pci_driver);
}

module_init(agp_via_init);
module_exit(agp_via_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");