Diffstat (limited to 'drivers/char/agp')
-rw-r--r--  drivers/char/agp/agp.h           |   80
-rw-r--r--  drivers/char/agp/efficeon-agp.c  |    1
-rw-r--r--  drivers/char/agp/intel-agp.c     | 1878
-rw-r--r--  drivers/char/agp/intel-agp.h     |  239
-rw-r--r--  drivers/char/agp/intel-gtt.c     | 1516
5 files changed, 1856 insertions, 1858 deletions
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 870f12cfed93..120490949997 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -178,86 +178,6 @@ struct agp_bridge_data {
178 178	#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
179 179
180 180
181/* Intel registers */
182#define INTEL_APSIZE 0xb4
183#define INTEL_ATTBASE 0xb8
184#define INTEL_AGPCTRL 0xb0
185#define INTEL_NBXCFG 0x50
186#define INTEL_ERRSTS 0x91
187
188/* Intel i830 registers */
189#define I830_GMCH_CTRL 0x52
190#define I830_GMCH_ENABLED 0x4
191#define I830_GMCH_MEM_MASK 0x1
192#define I830_GMCH_MEM_64M 0x1
193#define I830_GMCH_MEM_128M 0
194#define I830_GMCH_GMS_MASK 0x70
195#define I830_GMCH_GMS_DISABLED 0x00
196#define I830_GMCH_GMS_LOCAL 0x10
197#define I830_GMCH_GMS_STOLEN_512 0x20
198#define I830_GMCH_GMS_STOLEN_1024 0x30
199#define I830_GMCH_GMS_STOLEN_8192 0x40
200#define I830_RDRAM_CHANNEL_TYPE 0x03010
201#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
202#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
203
204/* This one is for I830MP w. an external graphic card */
205#define INTEL_I830_ERRSTS 0x92
206
207/* Intel 855GM/852GM registers */
208#define I855_GMCH_GMS_MASK 0xF0
209#define I855_GMCH_GMS_STOLEN_0M 0x0
210#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
211#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
212#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
213#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
214#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
215#define I85X_CAPID 0x44
216#define I85X_VARIANT_MASK 0x7
217#define I85X_VARIANT_SHIFT 5
218#define I855_GME 0x0
219#define I855_GM 0x4
220#define I852_GME 0x2
221#define I852_GM 0x5
222
223/* Intel i845 registers */
224#define INTEL_I845_AGPM 0x51
225#define INTEL_I845_ERRSTS 0xc8
226
227/* Intel i860 registers */
228#define INTEL_I860_MCHCFG 0x50
229#define INTEL_I860_ERRSTS 0xc8
230
231/* Intel i810 registers */
232#define I810_GMADDR 0x10
233#define I810_MMADDR 0x14
234#define I810_PTE_BASE 0x10000
235#define I810_PTE_MAIN_UNCACHED 0x00000000
236#define I810_PTE_LOCAL 0x00000002
237#define I810_PTE_VALID 0x00000001
238#define I830_PTE_SYSTEM_CACHED 0x00000006
239#define I810_SMRAM_MISCC 0x70
240#define I810_GFX_MEM_WIN_SIZE 0x00010000
241#define I810_GFX_MEM_WIN_32M 0x00010000
242#define I810_GMS 0x000000c0
243#define I810_GMS_DISABLE 0x00000000
244#define I810_PGETBL_CTL 0x2020
245#define I810_PGETBL_ENABLED 0x00000001
246#define I965_PGETBL_SIZE_MASK 0x0000000e
247#define I965_PGETBL_SIZE_512KB (0 << 1)
248#define I965_PGETBL_SIZE_256KB (1 << 1)
249#define I965_PGETBL_SIZE_128KB (2 << 1)
250#define I965_PGETBL_SIZE_1MB (3 << 1)
251#define I965_PGETBL_SIZE_2MB (4 << 1)
252#define I965_PGETBL_SIZE_1_5MB (5 << 1)
253#define G33_PGETBL_SIZE_MASK (3 << 8)
254#define G33_PGETBL_SIZE_1M (1 << 8)
255#define G33_PGETBL_SIZE_2M (2 << 8)
256
257#define I810_DRAM_CTL 0x3000
258#define I810_DRAM_ROW_0 0x00000001
259#define I810_DRAM_ROW_0_SDRAM 0x00000001
260
261 181	struct agp_device_ids {
262 182		unsigned short device_id; /* first, to make table easier to read */
263 183		enum chipset_type chipset;
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 793f39ea9618..aa109cbe0e6e 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -28,6 +28,7 @@
28 28	#include <linux/page-flags.h>
29 29	#include <linux/mm.h>
30 30	#include "agp.h"
31#include "intel-agp.h"
31 32
32 33	/*
33 34	 * The real differences to the generic AGP code is
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index aa4248efc5d8..07a9aad28c11 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,1531 +11,13 @@
11 11	#include <linux/agp_backend.h>
12 12	#include <asm/smp.h>
13 13	#include "agp.h"
14#include "intel-agp.h"
15
16#include "intel-gtt.c"
14 17
15 18	int intel_agp_enabled;
16 19	EXPORT_SYMBOL(intel_agp_enabled);
17 20
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
29#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
30#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
31#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
32#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
33#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
34#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
35#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
36#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
37#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
38#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
39#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
40#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
41#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
42#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
43#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
44#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
45#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
46#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
47#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
48#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
49#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
50#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
51#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
52#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
53#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
54#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
55#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
56#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
57#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
58#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
59#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
60#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
61#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
62#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
63#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
64#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
65#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
66#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
67#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
68#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
69#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
70#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
71#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
72#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
73#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
74#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
75#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
76
77/* cover 915 and 945 variants */
78#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
79 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
80 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
81 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
84
85#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
86 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
87 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
88 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
89 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
90 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
91
92#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
93 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
94 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
95 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
96 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
97
98#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
99 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
100
101#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
102 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
103
104#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
105 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
106 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
107 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
108 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
109 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
110 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
111 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
112 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
113 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
114 IS_SNB)
115
116extern int agp_memory_reserved;
117
118
119/* Intel 815 register */
120#define INTEL_815_APCONT 0x51
121#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
122
123/* Intel i820 registers */
124#define INTEL_I820_RDCR 0x51
125#define INTEL_I820_ERRSTS 0xc8
126
127/* Intel i840 registers */
128#define INTEL_I840_MCHCFG 0x50
129#define INTEL_I840_ERRSTS 0xc8
130
131/* Intel i850 registers */
132#define INTEL_I850_MCHCFG 0x50
133#define INTEL_I850_ERRSTS 0xc8
134
135/* intel 915G registers */
136#define I915_GMADDR 0x18
137#define I915_MMADDR 0x10
138#define I915_PTEADDR 0x1C
139#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
140#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
141#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
142#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
143#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
144#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
145#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
146#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
147
148#define I915_IFPADDR 0x60
149
150/* Intel 965G registers */
151#define I965_MSAC 0x62
152#define I965_IFPADDR 0x70
153
154/* Intel 7505 registers */
155#define INTEL_I7505_APSIZE 0x74
156#define INTEL_I7505_NCAPID 0x60
157#define INTEL_I7505_NISTAT 0x6c
158#define INTEL_I7505_ATTBASE 0x78
159#define INTEL_I7505_ERRSTS 0x42
160#define INTEL_I7505_AGPCTRL 0x70
161#define INTEL_I7505_MCHCFG 0x50
162
163#define SNB_GMCH_CTRL 0x50
164#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
165#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
166#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
167#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
168#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
169#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
170#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
171#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
172#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
173#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
174#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
175#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
176#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
177#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
178#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
179#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
180#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
181#define SNB_GTT_SIZE_0M (0 << 8)
182#define SNB_GTT_SIZE_1M (1 << 8)
183#define SNB_GTT_SIZE_2M (2 << 8)
184#define SNB_GTT_SIZE_MASK (3 << 8)
185
186static const struct aper_size_info_fixed intel_i810_sizes[] =
187{
188 {64, 16384, 4},
189 /* The 32M mode still requires a 64k gatt */
190 {32, 8192, 4}
191};
192
193#define AGP_DCACHE_MEMORY 1
194#define AGP_PHYS_MEMORY 2
195#define INTEL_AGP_CACHED_MEMORY 3
196
197static struct gatt_mask intel_i810_masks[] =
198{
199 {.mask = I810_PTE_VALID, .type = 0},
200 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
201 {.mask = I810_PTE_VALID, .type = 0},
202 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
203 .type = INTEL_AGP_CACHED_MEMORY}
204};
205
206static struct _intel_private {
207 struct pci_dev *pcidev; /* device one */
208 u8 __iomem *registers;
209 u32 __iomem *gtt; /* I915G */
210 int num_dcache_entries;
211 /* gtt_entries is the number of gtt entries that are already mapped
212 * to stolen memory. Stolen memory is larger than the memory mapped
213 * through gtt_entries, as it includes some reserved space for the BIOS
214 * popup and for the GTT.
215 */
216 int gtt_entries; /* i830+ */
217 int gtt_total_size;
218 union {
219 void __iomem *i9xx_flush_page;
220 void *i8xx_flush_page;
221 };
222 struct page *i8xx_page;
223 struct resource ifp_resource;
224 int resource_valid;
225} intel_private;
226
227#ifdef USE_PCI_DMA_API
228static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
229{
230 *ret = pci_map_page(intel_private.pcidev, page, 0,
231 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
232 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
233 return -EINVAL;
234 return 0;
235}
236
237static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
238{
239 pci_unmap_page(intel_private.pcidev, dma,
240 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
241}
242
243static void intel_agp_free_sglist(struct agp_memory *mem)
244{
245 struct sg_table st;
246
247 st.sgl = mem->sg_list;
248 st.orig_nents = st.nents = mem->page_count;
249
250 sg_free_table(&st);
251
252 mem->sg_list = NULL;
253 mem->num_sg = 0;
254}
255
256static int intel_agp_map_memory(struct agp_memory *mem)
257{
258 struct sg_table st;
259 struct scatterlist *sg;
260 int i;
261
262 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
263
264 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
265 return -ENOMEM;
266
267 mem->sg_list = sg = st.sgl;
268
269 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
270 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
271
272 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
273 mem->page_count, PCI_DMA_BIDIRECTIONAL);
274 if (unlikely(!mem->num_sg)) {
275 intel_agp_free_sglist(mem);
276 return -ENOMEM;
277 }
278 return 0;
279}
280
281static void intel_agp_unmap_memory(struct agp_memory *mem)
282{
283 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
284
285 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
286 mem->page_count, PCI_DMA_BIDIRECTIONAL);
287 intel_agp_free_sglist(mem);
288}
289
290static void intel_agp_insert_sg_entries(struct agp_memory *mem,
291 off_t pg_start, int mask_type)
292{
293 struct scatterlist *sg;
294 int i, j;
295
296 j = pg_start;
297
298 WARN_ON(!mem->num_sg);
299
300 if (mem->num_sg == mem->page_count) {
301 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
302 writel(agp_bridge->driver->mask_memory(agp_bridge,
303 sg_dma_address(sg), mask_type),
304 intel_private.gtt+j);
305 j++;
306 }
307 } else {
308 /* sg may merge pages, but we have to separate
309 * per-page addr for GTT */
310 unsigned int len, m;
311
312 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
313 len = sg_dma_len(sg) / PAGE_SIZE;
314 for (m = 0; m < len; m++) {
315 writel(agp_bridge->driver->mask_memory(agp_bridge,
316 sg_dma_address(sg) + m * PAGE_SIZE,
317 mask_type),
318 intel_private.gtt+j);
319 j++;
320 }
321 }
322 }
323 readl(intel_private.gtt+j-1);
324}
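/*
 * Illustrative note, not part of the patch: the per-page inner loop above
 * matters because pci_map_sg() may merge pages.  If four pages come back as
 * one 16K sg segment at bus address B, the GTT still needs four separate
 * entries -- B, B+4K, B+8K and B+12K -- since every PTE covers one 4K page.
 */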
325
326#else
327
328static void intel_agp_insert_sg_entries(struct agp_memory *mem,
329 off_t pg_start, int mask_type)
330{
331 int i, j;
332 u32 cache_bits = 0;
333
334 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
335 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
336 {
337 cache_bits = I830_PTE_SYSTEM_CACHED;
338 }
339
340 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
341 writel(agp_bridge->driver->mask_memory(agp_bridge,
342 page_to_phys(mem->pages[i]), mask_type),
343 intel_private.gtt+j);
344 }
345
346 readl(intel_private.gtt+j-1);
347}
348
349#endif
350
351static int intel_i810_fetch_size(void)
352{
353 u32 smram_miscc;
354 struct aper_size_info_fixed *values;
355
356 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
357 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
358
359 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
360 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
361 return 0;
362 }
363 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
364 agp_bridge->previous_size =
365 agp_bridge->current_size = (void *) (values + 1);
366 agp_bridge->aperture_size_idx = 1;
367 return values[1].size;
368 } else {
369 agp_bridge->previous_size =
370 agp_bridge->current_size = (void *) (values);
371 agp_bridge->aperture_size_idx = 0;
372 return values[0].size;
373 }
374
375 return 0;
376}
377
378static int intel_i810_configure(void)
379{
380 struct aper_size_info_fixed *current_size;
381 u32 temp;
382 int i;
383
384 current_size = A_SIZE_FIX(agp_bridge->current_size);
385
386 if (!intel_private.registers) {
387 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
388 temp &= 0xfff80000;
389
390 intel_private.registers = ioremap(temp, 128 * 4096);
391 if (!intel_private.registers) {
392 dev_err(&intel_private.pcidev->dev,
393 "can't remap memory\n");
394 return -ENOMEM;
395 }
396 }
397
398 if ((readl(intel_private.registers+I810_DRAM_CTL)
399 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
400 /* This will need to be dynamically assigned */
401 dev_info(&intel_private.pcidev->dev,
402 "detected 4MB dedicated video ram\n");
403 intel_private.num_dcache_entries = 1024;
404 }
405 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
406 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
407 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
408 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
409
410 if (agp_bridge->driver->needs_scratch_page) {
411 for (i = 0; i < current_size->num_entries; i++) {
412 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
413 }
414 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
415 }
416 global_cache_flush();
417 return 0;
418}
419
420static void intel_i810_cleanup(void)
421{
422 writel(0, intel_private.registers+I810_PGETBL_CTL);
423 readl(intel_private.registers); /* PCI Posting. */
424 iounmap(intel_private.registers);
425}
426
427static void intel_i810_tlbflush(struct agp_memory *mem)
428{
429 return;
430}
431
432static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
433{
434 return;
435}
436
437/* Exists to support ARGB cursors */
438static struct page *i8xx_alloc_pages(void)
439{
440 struct page *page;
441
442 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
443 if (page == NULL)
444 return NULL;
445
446 if (set_pages_uc(page, 4) < 0) {
447 set_pages_wb(page, 4);
448 __free_pages(page, 2);
449 return NULL;
450 }
451 get_page(page);
452 atomic_inc(&agp_bridge->current_memory_agp);
453 return page;
454}
455
456static void i8xx_destroy_pages(struct page *page)
457{
458 if (page == NULL)
459 return;
460
461 set_pages_wb(page, 4);
462 put_page(page);
463 __free_pages(page, 2);
464 atomic_dec(&agp_bridge->current_memory_agp);
465}
466
467static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
468 int type)
469{
470 if (type < AGP_USER_TYPES)
471 return type;
472 else if (type == AGP_USER_CACHED_MEMORY)
473 return INTEL_AGP_CACHED_MEMORY;
474 else
475 return 0;
476}
477
478static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
479 int type)
480{
481 int i, j, num_entries;
482 void *temp;
483 int ret = -EINVAL;
484 int mask_type;
485
486 if (mem->page_count == 0)
487 goto out;
488
489 temp = agp_bridge->current_size;
490 num_entries = A_SIZE_FIX(temp)->num_entries;
491
492 if ((pg_start + mem->page_count) > num_entries)
493 goto out_err;
494
495
496 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
497 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
498 ret = -EBUSY;
499 goto out_err;
500 }
501 }
502
503 if (type != mem->type)
504 goto out_err;
505
506 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
507
508 switch (mask_type) {
509 case AGP_DCACHE_MEMORY:
510 if (!mem->is_flushed)
511 global_cache_flush();
512 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
513 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
514 intel_private.registers+I810_PTE_BASE+(i*4));
515 }
516 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
517 break;
518 case AGP_PHYS_MEMORY:
519 case AGP_NORMAL_MEMORY:
520 if (!mem->is_flushed)
521 global_cache_flush();
522 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
523 writel(agp_bridge->driver->mask_memory(agp_bridge,
524 page_to_phys(mem->pages[i]), mask_type),
525 intel_private.registers+I810_PTE_BASE+(j*4));
526 }
527 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
528 break;
529 default:
530 goto out_err;
531 }
532
533 agp_bridge->driver->tlb_flush(mem);
534out:
535 ret = 0;
536out_err:
537 mem->is_flushed = true;
538 return ret;
539}
540
541static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
542 int type)
543{
544 int i;
545
546 if (mem->page_count == 0)
547 return 0;
548
549 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
550 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
551 }
552 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
553
554 agp_bridge->driver->tlb_flush(mem);
555 return 0;
556}
557
558/*
559 * The i810/i830 requires a physical address to program its mouse
560 * pointer into hardware.
561 * However the Xserver still writes to it through the agp aperture.
562 */
563static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
564{
565 struct agp_memory *new;
566 struct page *page;
567
568 switch (pg_count) {
569 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
570 break;
571 case 4:
572 /* kludge to get 4 physical pages for ARGB cursor */
573 page = i8xx_alloc_pages();
574 break;
575 default:
576 return NULL;
577 }
578
579 if (page == NULL)
580 return NULL;
581
582 new = agp_create_memory(pg_count);
583 if (new == NULL)
584 return NULL;
585
586 new->pages[0] = page;
587 if (pg_count == 4) {
588 /* kludge to get 4 physical pages for ARGB cursor */
589 new->pages[1] = new->pages[0] + 1;
590 new->pages[2] = new->pages[1] + 1;
591 new->pages[3] = new->pages[2] + 1;
592 }
593 new->page_count = pg_count;
594 new->num_scratch_pages = pg_count;
595 new->type = AGP_PHYS_MEMORY;
596 new->physical = page_to_phys(new->pages[0]);
597 return new;
598}
599
600static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
601{
602 struct agp_memory *new;
603
604 if (type == AGP_DCACHE_MEMORY) {
605 if (pg_count != intel_private.num_dcache_entries)
606 return NULL;
607
608 new = agp_create_memory(1);
609 if (new == NULL)
610 return NULL;
611
612 new->type = AGP_DCACHE_MEMORY;
613 new->page_count = pg_count;
614 new->num_scratch_pages = 0;
615 agp_free_page_array(new);
616 return new;
617 }
618 if (type == AGP_PHYS_MEMORY)
619 return alloc_agpphysmem_i8xx(pg_count, type);
620 return NULL;
621}
622
623static void intel_i810_free_by_type(struct agp_memory *curr)
624{
625 agp_free_key(curr->key);
626 if (curr->type == AGP_PHYS_MEMORY) {
627 if (curr->page_count == 4)
628 i8xx_destroy_pages(curr->pages[0]);
629 else {
630 agp_bridge->driver->agp_destroy_page(curr->pages[0],
631 AGP_PAGE_DESTROY_UNMAP);
632 agp_bridge->driver->agp_destroy_page(curr->pages[0],
633 AGP_PAGE_DESTROY_FREE);
634 }
635 agp_free_page_array(curr);
636 }
637 kfree(curr);
638}
639
640static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
641 dma_addr_t addr, int type)
642{
643 /* Type checking must be done elsewhere */
644 return addr | bridge->driver->masks[type].mask;
645}
646
647static struct aper_size_info_fixed intel_i830_sizes[] =
648{
649 {128, 32768, 5},
650 /* The 64M mode still requires a 128k gatt */
651 {64, 16384, 5},
652 {256, 65536, 6},
653 {512, 131072, 7},
654};
655
656static void intel_i830_init_gtt_entries(void)
657{
658 u16 gmch_ctrl;
659 int gtt_entries = 0;
660 u8 rdct;
661 int local = 0;
662 static const int ddt[4] = { 0, 16, 32, 64 };
663 int size; /* reserved space (in kb) at the top of stolen memory */
664
665 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
666
667 if (IS_I965) {
668 u32 pgetbl_ctl;
669 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
670
671 /* The 965 has a field telling us the size of the GTT,
672 * which may be larger than what is necessary to map the
673 * aperture.
674 */
675 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
676 case I965_PGETBL_SIZE_128KB:
677 size = 128;
678 break;
679 case I965_PGETBL_SIZE_256KB:
680 size = 256;
681 break;
682 case I965_PGETBL_SIZE_512KB:
683 size = 512;
684 break;
685 case I965_PGETBL_SIZE_1MB:
686 size = 1024;
687 break;
688 case I965_PGETBL_SIZE_2MB:
689 size = 2048;
690 break;
691 case I965_PGETBL_SIZE_1_5MB:
692 size = 1024 + 512;
693 break;
694 default:
695 dev_info(&intel_private.pcidev->dev,
696 "unknown page table size, assuming 512KB\n");
697 size = 512;
698 }
699 size += 4; /* add in BIOS popup space */
700 } else if (IS_G33 && !IS_PINEVIEW) {
701 /* G33's GTT size defined in gmch_ctrl */
702 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
703 case G33_PGETBL_SIZE_1M:
704 size = 1024;
705 break;
706 case G33_PGETBL_SIZE_2M:
707 size = 2048;
708 break;
709 default:
710 dev_info(&agp_bridge->dev->dev,
711 "unknown page table size 0x%x, assuming 512KB\n",
712 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
713 size = 512;
714 }
715 size += 4;
716 } else if (IS_G4X || IS_PINEVIEW) {
717 /* On 4 series hardware, GTT stolen is separate from graphics
718 * stolen, ignore it in stolen gtt entries counting. However,
719 * 4KB of the stolen memory doesn't get mapped to the GTT.
720 */
721 size = 4;
722 } else {
723 /* On previous hardware, the GTT size was just what was
724 * required to map the aperture.
725 */
726 size = agp_bridge->driver->fetch_size() + 4;
727 }
728
729 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
730 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
731 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
732 case I830_GMCH_GMS_STOLEN_512:
733 gtt_entries = KB(512) - KB(size);
734 break;
735 case I830_GMCH_GMS_STOLEN_1024:
736 gtt_entries = MB(1) - KB(size);
737 break;
738 case I830_GMCH_GMS_STOLEN_8192:
739 gtt_entries = MB(8) - KB(size);
740 break;
741 case I830_GMCH_GMS_LOCAL:
742 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
743 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
744 MB(ddt[I830_RDRAM_DDT(rdct)]);
745 local = 1;
746 break;
747 default:
748 gtt_entries = 0;
749 break;
750 }
751 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
752 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
753 /*
754 * SandyBridge has new memory control reg at 0x50.w
755 */
756 u16 snb_gmch_ctl;
757 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
758 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
759 case SNB_GMCH_GMS_STOLEN_32M:
760 gtt_entries = MB(32) - KB(size);
761 break;
762 case SNB_GMCH_GMS_STOLEN_64M:
763 gtt_entries = MB(64) - KB(size);
764 break;
765 case SNB_GMCH_GMS_STOLEN_96M:
766 gtt_entries = MB(96) - KB(size);
767 break;
768 case SNB_GMCH_GMS_STOLEN_128M:
769 gtt_entries = MB(128) - KB(size);
770 break;
771 case SNB_GMCH_GMS_STOLEN_160M:
772 gtt_entries = MB(160) - KB(size);
773 break;
774 case SNB_GMCH_GMS_STOLEN_192M:
775 gtt_entries = MB(192) - KB(size);
776 break;
777 case SNB_GMCH_GMS_STOLEN_224M:
778 gtt_entries = MB(224) - KB(size);
779 break;
780 case SNB_GMCH_GMS_STOLEN_256M:
781 gtt_entries = MB(256) - KB(size);
782 break;
783 case SNB_GMCH_GMS_STOLEN_288M:
784 gtt_entries = MB(288) - KB(size);
785 break;
786 case SNB_GMCH_GMS_STOLEN_320M:
787 gtt_entries = MB(320) - KB(size);
788 break;
789 case SNB_GMCH_GMS_STOLEN_352M:
790 gtt_entries = MB(352) - KB(size);
791 break;
792 case SNB_GMCH_GMS_STOLEN_384M:
793 gtt_entries = MB(384) - KB(size);
794 break;
795 case SNB_GMCH_GMS_STOLEN_416M:
796 gtt_entries = MB(416) - KB(size);
797 break;
798 case SNB_GMCH_GMS_STOLEN_448M:
799 gtt_entries = MB(448) - KB(size);
800 break;
801 case SNB_GMCH_GMS_STOLEN_480M:
802 gtt_entries = MB(480) - KB(size);
803 break;
804 case SNB_GMCH_GMS_STOLEN_512M:
805 gtt_entries = MB(512) - KB(size);
806 break;
807 }
808 } else {
809 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
810 case I855_GMCH_GMS_STOLEN_1M:
811 gtt_entries = MB(1) - KB(size);
812 break;
813 case I855_GMCH_GMS_STOLEN_4M:
814 gtt_entries = MB(4) - KB(size);
815 break;
816 case I855_GMCH_GMS_STOLEN_8M:
817 gtt_entries = MB(8) - KB(size);
818 break;
819 case I855_GMCH_GMS_STOLEN_16M:
820 gtt_entries = MB(16) - KB(size);
821 break;
822 case I855_GMCH_GMS_STOLEN_32M:
823 gtt_entries = MB(32) - KB(size);
824 break;
825 case I915_GMCH_GMS_STOLEN_48M:
826 /* Check it's really I915G */
827 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
828 gtt_entries = MB(48) - KB(size);
829 else
830 gtt_entries = 0;
831 break;
832 case I915_GMCH_GMS_STOLEN_64M:
833 /* Check it's really I915G */
834 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
835 gtt_entries = MB(64) - KB(size);
836 else
837 gtt_entries = 0;
838 break;
839 case G33_GMCH_GMS_STOLEN_128M:
840 if (IS_G33 || IS_I965 || IS_G4X)
841 gtt_entries = MB(128) - KB(size);
842 else
843 gtt_entries = 0;
844 break;
845 case G33_GMCH_GMS_STOLEN_256M:
846 if (IS_G33 || IS_I965 || IS_G4X)
847 gtt_entries = MB(256) - KB(size);
848 else
849 gtt_entries = 0;
850 break;
851 case INTEL_GMCH_GMS_STOLEN_96M:
852 if (IS_I965 || IS_G4X)
853 gtt_entries = MB(96) - KB(size);
854 else
855 gtt_entries = 0;
856 break;
857 case INTEL_GMCH_GMS_STOLEN_160M:
858 if (IS_I965 || IS_G4X)
859 gtt_entries = MB(160) - KB(size);
860 else
861 gtt_entries = 0;
862 break;
863 case INTEL_GMCH_GMS_STOLEN_224M:
864 if (IS_I965 || IS_G4X)
865 gtt_entries = MB(224) - KB(size);
866 else
867 gtt_entries = 0;
868 break;
869 case INTEL_GMCH_GMS_STOLEN_352M:
870 if (IS_I965 || IS_G4X)
871 gtt_entries = MB(352) - KB(size);
872 else
873 gtt_entries = 0;
874 break;
875 default:
876 gtt_entries = 0;
877 break;
878 }
879 }
880 if (gtt_entries > 0) {
881 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
882 gtt_entries / KB(1), local ? "local" : "stolen");
883 gtt_entries /= KB(4);
884 } else {
885 dev_info(&agp_bridge->dev->dev,
886 "no pre-allocated video memory detected\n");
887 gtt_entries = 0;
888 }
889
890 intel_private.gtt_entries = gtt_entries;
891}
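/*
 * Worked example (an editor's sketch, assuming the KB()/MB() byte macros
 * from agp.h, i.e. KB(x) = x * 1024 and MB(x) = KB(KB(x))): a 965 with a
 * 128K GTT (size = 128 + 4 for the BIOS popup) and 64M of stolen memory:
 *
 *	gtt_entries = MB(64) - KB(132) = 66973696 bytes
 *	66973696 / KB(1) = 65404	-> "detected 65404K stolen memory"
 *	66973696 / KB(4) = 16351	-> intel_private.gtt_entries (4K pages)
 */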
892
893static void intel_i830_fini_flush(void)
894{
895 kunmap(intel_private.i8xx_page);
896 intel_private.i8xx_flush_page = NULL;
897 unmap_page_from_agp(intel_private.i8xx_page);
898
899 __free_page(intel_private.i8xx_page);
900 intel_private.i8xx_page = NULL;
901}
902
903static void intel_i830_setup_flush(void)
904{
905 /* return if we've already set the flush mechanism up */
906 if (intel_private.i8xx_page)
907 return;
908
909 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
910 if (!intel_private.i8xx_page)
911 return;
912
913 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
914 if (!intel_private.i8xx_flush_page)
915 intel_i830_fini_flush();
916}
917
918/* The chipset_flush interface needs to get data that has already been
919 * flushed out of the CPU all the way out to main memory, because the GPU
920 * doesn't snoop those buffers.
921 *
922 * The 8xx series doesn't have the same lovely interface for flushing the
923 * chipset write buffers that the later chips do. According to the 865
924 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
925 * that buffer out, we just fill 1KB and clflush it out, on the assumption
926 * that it'll push whatever was in there out. It appears to work.
927 */
928static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
929{
930 unsigned int *pg = intel_private.i8xx_flush_page;
931
932 memset(pg, 0, 1024);
933
934 if (cpu_has_clflush)
935 clflush_cache_range(pg, 1024);
936 else if (wbinvd_on_all_cpus() != 0)
937 printk(KERN_ERR "Timed out waiting for cache flush.\n");
938}
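/*
 * Arithmetic behind the 1024 above (illustrative, not from the patch):
 * the 865 write buffer is 64 octwords and an octword is 16 bytes, so
 * 64 * 16 = 1024 bytes must be written and clflushed to push it out.
 */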
939
940/* The intel i830 automatically initializes the agp aperture during POST.
941 * Use the memory already set aside for in the GTT.
942 */
943static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
944{
945 int page_order;
946 struct aper_size_info_fixed *size;
947 int num_entries;
948 u32 temp;
949
950 size = agp_bridge->current_size;
951 page_order = size->page_order;
952 num_entries = size->num_entries;
953 agp_bridge->gatt_table_real = NULL;
954
955 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
956 temp &= 0xfff80000;
957
958 intel_private.registers = ioremap(temp, 128 * 4096);
959 if (!intel_private.registers)
960 return -ENOMEM;
961
962 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
963 global_cache_flush(); /* FIXME: ?? */
964
965 /* we have to call this as early as possible after the MMIO base address is known */
966 intel_i830_init_gtt_entries();
967
968 agp_bridge->gatt_table = NULL;
969
970 agp_bridge->gatt_bus_addr = temp;
971
972 return 0;
973}
974
975/* Return the gatt table to a sane state. Use the top of stolen
976 * memory for the GTT.
977 */
978static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
979{
980 return 0;
981}
982
983static int intel_i830_fetch_size(void)
984{
985 u16 gmch_ctrl;
986 struct aper_size_info_fixed *values;
987
988 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
989
990 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
991 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
992 /* 855GM/852GM/865G has 128MB aperture size */
993 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
994 agp_bridge->aperture_size_idx = 0;
995 return values[0].size;
996 }
997
998 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
999
1000 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
1001 agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
1002 agp_bridge->aperture_size_idx = 0;
1003 return values[0].size;
1004 } else {
1005 agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + 1);
1006 agp_bridge->aperture_size_idx = 1;
1007 return values[1].size;
1008 }
1009
1010 return 0;
1011}
1012
1013static int intel_i830_configure(void)
1014{
1015 struct aper_size_info_fixed *current_size;
1016 u32 temp;
1017 u16 gmch_ctrl;
1018 int i;
1019
1020 current_size = A_SIZE_FIX(agp_bridge->current_size);
1021
1022 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
1023 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1024
1025 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1026 gmch_ctrl |= I830_GMCH_ENABLED;
1027 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1028
1029 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1030 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1031
1032 if (agp_bridge->driver->needs_scratch_page) {
1033 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
1034 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
1035 }
1036 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
1037 }
1038
1039 global_cache_flush();
1040
1041 intel_i830_setup_flush();
1042 return 0;
1043}
1044
1045static void intel_i830_cleanup(void)
1046{
1047 iounmap(intel_private.registers);
1048}
1049
1050static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
1051 int type)
1052{
1053 int i, j, num_entries;
1054 void *temp;
1055 int ret = -EINVAL;
1056 int mask_type;
1057
1058 if (mem->page_count == 0)
1059 goto out;
1060
1061 temp = agp_bridge->current_size;
1062 num_entries = A_SIZE_FIX(temp)->num_entries;
1063
1064 if (pg_start < intel_private.gtt_entries) {
1065 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1066 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1067 pg_start, intel_private.gtt_entries);
1068
1069 dev_info(&intel_private.pcidev->dev,
1070 "trying to insert into local/stolen memory\n");
1071 goto out_err;
1072 }
1073
1074 if ((pg_start + mem->page_count) > num_entries)
1075 goto out_err;
1076
1077 /* The i830 can't check the GTT for entries since its read only,
1078 * depend on the caller to make the correct offset decisions.
1079 */
1080
1081 if (type != mem->type)
1082 goto out_err;
1083
1084 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1085
1086 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1087 mask_type != INTEL_AGP_CACHED_MEMORY)
1088 goto out_err;
1089
1090 if (!mem->is_flushed)
1091 global_cache_flush();
1092
1093 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1094 writel(agp_bridge->driver->mask_memory(agp_bridge,
1095 page_to_phys(mem->pages[i]), mask_type),
1096 intel_private.registers+I810_PTE_BASE+(j*4));
1097 }
1098 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
1099 agp_bridge->driver->tlb_flush(mem);
1100
1101out:
1102 ret = 0;
1103out_err:
1104 mem->is_flushed = true;
1105 return ret;
1106}
1107
1108static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
1109 int type)
1110{
1111 int i;
1112
1113 if (mem->page_count == 0)
1114 return 0;
1115
1116 if (pg_start < intel_private.gtt_entries) {
1117 dev_info(&intel_private.pcidev->dev,
1118 "trying to disable local/stolen memory\n");
1119 return -EINVAL;
1120 }
1121
1122 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1123 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
1124 }
1125 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
1126
1127 agp_bridge->driver->tlb_flush(mem);
1128 return 0;
1129}
1130
1131static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
1132{
1133 if (type == AGP_PHYS_MEMORY)
1134 return alloc_agpphysmem_i8xx(pg_count, type);
1135 /* always return NULL for other allocation types for now */
1136 return NULL;
1137}
1138
1139static int intel_alloc_chipset_flush_resource(void)
1140{
1141 int ret;
1142 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1143 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1144 pcibios_align_resource, agp_bridge->dev);
1145
1146 return ret;
1147}
1148
1149static void intel_i915_setup_chipset_flush(void)
1150{
1151 int ret;
1152 u32 temp;
1153
1154 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
1155 if (!(temp & 0x1)) {
1156 intel_alloc_chipset_flush_resource();
1157 intel_private.resource_valid = 1;
1158 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1159 } else {
1160 temp &= ~1;
1161
1162 intel_private.resource_valid = 1;
1163 intel_private.ifp_resource.start = temp;
1164 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1165 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1166 /* some BIOSes reserve this area in a pnp some don't */
1167 if (ret)
1168 intel_private.resource_valid = 0;
1169 }
1170}
1171
1172static void intel_i965_g33_setup_chipset_flush(void)
1173{
1174 u32 temp_hi, temp_lo;
1175 int ret;
1176
1177 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1178 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1179
1180 if (!(temp_lo & 0x1)) {
1181
1182 intel_alloc_chipset_flush_resource();
1183
1184 intel_private.resource_valid = 1;
1185 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1186 upper_32_bits(intel_private.ifp_resource.start));
1187 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1188 } else {
1189 u64 l64;
1190
1191 temp_lo &= ~0x1;
1192 l64 = ((u64)temp_hi << 32) | temp_lo;
1193
1194 intel_private.resource_valid = 1;
1195 intel_private.ifp_resource.start = l64;
1196 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1197 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1198 /* some BIOSes reserve this area in a pnp some don't */
1199 if (ret)
1200 intel_private.resource_valid = 0;
1201 }
1202}
1203
1204static void intel_i9xx_setup_flush(void)
1205{
1206 /* return if already configured */
1207 if (intel_private.ifp_resource.start)
1208 return;
1209
1210 if (IS_SNB)
1211 return;
1212
1213 /* setup a resource for this object */
1214 intel_private.ifp_resource.name = "Intel Flush Page";
1215 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1216
1217 /* Setup chipset flush for 915 */
1218 if (IS_I965 || IS_G33 || IS_G4X) {
1219 intel_i965_g33_setup_chipset_flush();
1220 } else {
1221 intel_i915_setup_chipset_flush();
1222 }
1223
1224 if (intel_private.ifp_resource.start) {
1225 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1226 if (!intel_private.i9xx_flush_page)
1227 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
1228 }
1229}
1230
1231static int intel_i915_configure(void)
1232{
1233 struct aper_size_info_fixed *current_size;
1234 u32 temp;
1235 u16 gmch_ctrl;
1236 int i;
1237
1238 current_size = A_SIZE_FIX(agp_bridge->current_size);
1239
1240 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1241
1242 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1243
1244 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1245 gmch_ctrl |= I830_GMCH_ENABLED;
1246 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1247
1248 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1249 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1250
1251 if (agp_bridge->driver->needs_scratch_page) {
1252 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1253 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1254 }
1255 readl(intel_private.gtt+i-1); /* PCI Posting. */
1256 }
1257
1258 global_cache_flush();
1259
1260 intel_i9xx_setup_flush();
1261
1262 return 0;
1263}
1264
1265static void intel_i915_cleanup(void)
1266{
1267 if (intel_private.i9xx_flush_page)
1268 iounmap(intel_private.i9xx_flush_page);
1269 if (intel_private.resource_valid)
1270 release_resource(&intel_private.ifp_resource);
1271 intel_private.ifp_resource.start = 0;
1272 intel_private.resource_valid = 0;
1273 iounmap(intel_private.gtt);
1274 iounmap(intel_private.registers);
1275}
1276
1277static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1278{
1279 if (intel_private.i9xx_flush_page)
1280 writel(1, intel_private.i9xx_flush_page);
1281}
1282
1283static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1284 int type)
1285{
1286 int num_entries;
1287 void *temp;
1288 int ret = -EINVAL;
1289 int mask_type;
1290
1291 if (mem->page_count == 0)
1292 goto out;
1293
1294 temp = agp_bridge->current_size;
1295 num_entries = A_SIZE_FIX(temp)->num_entries;
1296
1297 if (pg_start < intel_private.gtt_entries) {
1298 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1299 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1300 pg_start, intel_private.gtt_entries);
1301
1302 dev_info(&intel_private.pcidev->dev,
1303 "trying to insert into local/stolen memory\n");
1304 goto out_err;
1305 }
1306
1307 if ((pg_start + mem->page_count) > num_entries)
1308 goto out_err;
1309
1310 /* The i915 can't check the GTT for entries since it's read only;
1311 * depend on the caller to make the correct offset decisions.
1312 */
1313
1314 if (type != mem->type)
1315 goto out_err;
1316
1317 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1318
1319 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1320 mask_type != INTEL_AGP_CACHED_MEMORY)
1321 goto out_err;
1322
1323 if (!mem->is_flushed)
1324 global_cache_flush();
1325
1326 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1327 agp_bridge->driver->tlb_flush(mem);
1328
1329 out:
1330 ret = 0;
1331 out_err:
1332 mem->is_flushed = true;
1333 return ret;
1334}
1335
1336static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1337 int type)
1338{
1339 int i;
1340
1341 if (mem->page_count == 0)
1342 return 0;
1343
1344 if (pg_start < intel_private.gtt_entries) {
1345 dev_info(&intel_private.pcidev->dev,
1346 "trying to disable local/stolen memory\n");
1347 return -EINVAL;
1348 }
1349
1350 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1351 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1352
1353 readl(intel_private.gtt+i-1);
1354
1355 agp_bridge->driver->tlb_flush(mem);
1356 return 0;
1357}
1358
1359/* Return the aperture size by just checking the resource length. The effect
1360 * described in the spec of the MSAC registers is just changing of the
1361 * resource size.
1362 */
1363static int intel_i9xx_fetch_size(void)
1364{
1365 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1366 int aper_size; /* size in megabytes */
1367 int i;
1368
1369 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1370
1371 for (i = 0; i < num_sizes; i++) {
1372 if (aper_size == intel_i830_sizes[i].size) {
1373 agp_bridge->current_size = intel_i830_sizes + i;
1374 agp_bridge->previous_size = agp_bridge->current_size;
1375 return aper_size;
1376 }
1377 }
1378
1379 return 0;
1380}
1381
1382/* The intel i915 automatically initializes the agp aperture during POST.
1383 * Use the memory already set aside for in the GTT.
1384 */
1385static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1386{
1387 int page_order;
1388 struct aper_size_info_fixed *size;
1389 int num_entries;
1390 u32 temp, temp2;
1391 int gtt_map_size = 256 * 1024;
1392
1393 size = agp_bridge->current_size;
1394 page_order = size->page_order;
1395 num_entries = size->num_entries;
1396 agp_bridge->gatt_table_real = NULL;
1397
1398 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1399 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1400
1401 if (IS_G33)
1402 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1403 intel_private.gtt = ioremap(temp2, gtt_map_size);
1404 if (!intel_private.gtt)
1405 return -ENOMEM;
1406
1407 intel_private.gtt_total_size = gtt_map_size / 4;
1408
1409 temp &= 0xfff80000;
1410
1411 intel_private.registers = ioremap(temp, 128 * 4096);
1412 if (!intel_private.registers) {
1413 iounmap(intel_private.gtt);
1414 return -ENOMEM;
1415 }
1416
1417 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1418 global_cache_flush(); /* FIXME: ? */
1419
1420 /* we have to call this as early as possible after the MMIO base address is known */
1421 intel_i830_init_gtt_entries();
1422
1423 agp_bridge->gatt_table = NULL;
1424
1425 agp_bridge->gatt_bus_addr = temp;
1426
1427 return 0;
1428}
1429
1430/*
1431 * The i965 supports 36-bit physical addresses, but to keep
1432 * the format of the GTT the same, the bits that don't fit
1433 * in a 32-bit word are shifted down to bits 4..7.
1434 *
1435 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1436 * is always zero on 32-bit architectures, so no need to make
1437 * this conditional.
1438 */
1439static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1440 dma_addr_t addr, int type)
1441{
1442 /* Shift high bits down */
1443 addr |= (addr >> 28) & 0xf0;
1444
1445 /* Type checking must be done elsewhere */
1446 return addr | bridge->driver->masks[type].mask;
1447}
1448
1449static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1450{
1451 u16 snb_gmch_ctl;
1452
1453 switch (agp_bridge->dev->device) {
1454 case PCI_DEVICE_ID_INTEL_GM45_HB:
1455 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1456 case PCI_DEVICE_ID_INTEL_Q45_HB:
1457 case PCI_DEVICE_ID_INTEL_G45_HB:
1458 case PCI_DEVICE_ID_INTEL_G41_HB:
1459 case PCI_DEVICE_ID_INTEL_B43_HB:
1460 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1461 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1462 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1463 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1464 *gtt_offset = *gtt_size = MB(2);
1465 break;
1466 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1467 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1468 *gtt_offset = MB(2);
1469
1470 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1471 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1472 default:
1473 case SNB_GTT_SIZE_0M:
1474 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1475 *gtt_size = MB(0);
1476 break;
1477 case SNB_GTT_SIZE_1M:
1478 *gtt_size = MB(1);
1479 break;
1480 case SNB_GTT_SIZE_2M:
1481 *gtt_size = MB(2);
1482 break;
1483 }
1484 break;
1485 default:
1486 *gtt_offset = *gtt_size = KB(512);
1487 }
1488}
1489
1490/* The intel i965 automatically initializes the agp aperture during POST.
1491 * Use the memory already set aside for in the GTT.
1492 */
1493static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1494{
1495 int page_order;
1496 struct aper_size_info_fixed *size;
1497 int num_entries;
1498 u32 temp;
1499 int gtt_offset, gtt_size;
1500
1501 size = agp_bridge->current_size;
1502 page_order = size->page_order;
1503 num_entries = size->num_entries;
1504 agp_bridge->gatt_table_real = NULL;
1505
1506 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1507
1508 temp &= 0xfff00000;
1509
1510 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1511
1512 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);
1513
1514 if (!intel_private.gtt)
1515 return -ENOMEM;
1516
1517 intel_private.gtt_total_size = gtt_size / 4;
1518
1519 intel_private.registers = ioremap(temp, 128 * 4096);
1520 if (!intel_private.registers) {
1521 iounmap(intel_private.gtt);
1522 return -ENOMEM;
1523 }
1524
1525 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1526 global_cache_flush(); /* FIXME: ? */
1527
1528 /* we have to call this as early as possible after the MMIO base address is known */
1529 intel_i830_init_gtt_entries();
1530
1531 agp_bridge->gatt_table = NULL;
1532
1533 agp_bridge->gatt_bus_addr = temp;
1534
1535 return 0;
1536}
1537
1538
1539static int intel_fetch_size(void) 21static int intel_fetch_size(void)
1540{ 22{
1541 int i; 23 int i;
@@ -2003,33 +485,6 @@ static const struct agp_bridge_driver intel_generic_driver = {
2003 485		.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2004 486	};
2005 487
2006static const struct agp_bridge_driver intel_810_driver = {
2007 .owner = THIS_MODULE,
2008 .aperture_sizes = intel_i810_sizes,
2009 .size_type = FIXED_APER_SIZE,
2010 .num_aperture_sizes = 2,
2011 .needs_scratch_page = true,
2012 .configure = intel_i810_configure,
2013 .fetch_size = intel_i810_fetch_size,
2014 .cleanup = intel_i810_cleanup,
2015 .tlb_flush = intel_i810_tlbflush,
2016 .mask_memory = intel_i810_mask_memory,
2017 .masks = intel_i810_masks,
2018 .agp_enable = intel_i810_agp_enable,
2019 .cache_flush = global_cache_flush,
2020 .create_gatt_table = agp_generic_create_gatt_table,
2021 .free_gatt_table = agp_generic_free_gatt_table,
2022 .insert_memory = intel_i810_insert_entries,
2023 .remove_memory = intel_i810_remove_entries,
2024 .alloc_by_type = intel_i810_alloc_by_type,
2025 .free_by_type = intel_i810_free_by_type,
2026 .agp_alloc_page = agp_generic_alloc_page,
2027 .agp_alloc_pages = agp_generic_alloc_pages,
2028 .agp_destroy_page = agp_generic_destroy_page,
2029 .agp_destroy_pages = agp_generic_destroy_pages,
2030 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2031};
2032
2033 488	static const struct agp_bridge_driver intel_815_driver = {
2034 489		.owner = THIS_MODULE,
2035 490		.aperture_sizes = intel_815_sizes,
@@ -2056,34 +511,6 @@ static const struct agp_bridge_driver intel_815_driver = {
2056 511		.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2057 512	};
2058 513
2059static const struct agp_bridge_driver intel_830_driver = {
2060 .owner = THIS_MODULE,
2061 .aperture_sizes = intel_i830_sizes,
2062 .size_type = FIXED_APER_SIZE,
2063 .num_aperture_sizes = 4,
2064 .needs_scratch_page = true,
2065 .configure = intel_i830_configure,
2066 .fetch_size = intel_i830_fetch_size,
2067 .cleanup = intel_i830_cleanup,
2068 .tlb_flush = intel_i810_tlbflush,
2069 .mask_memory = intel_i810_mask_memory,
2070 .masks = intel_i810_masks,
2071 .agp_enable = intel_i810_agp_enable,
2072 .cache_flush = global_cache_flush,
2073 .create_gatt_table = intel_i830_create_gatt_table,
2074 .free_gatt_table = intel_i830_free_gatt_table,
2075 .insert_memory = intel_i830_insert_entries,
2076 .remove_memory = intel_i830_remove_entries,
2077 .alloc_by_type = intel_i830_alloc_by_type,
2078 .free_by_type = intel_i810_free_by_type,
2079 .agp_alloc_page = agp_generic_alloc_page,
2080 .agp_alloc_pages = agp_generic_alloc_pages,
2081 .agp_destroy_page = agp_generic_destroy_page,
2082 .agp_destroy_pages = agp_generic_destroy_pages,
2083 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2084 .chipset_flush = intel_i830_chipset_flush,
2085};
2086
2087 514	static const struct agp_bridge_driver intel_820_driver = {
2088 515		.owner = THIS_MODULE,
2089 516		.aperture_sizes = intel_8xx_sizes,
@@ -2240,74 +667,6 @@ static const struct agp_bridge_driver intel_860_driver = {
2240 667		.agp_type_to_mask_type = agp_generic_type_to_mask_type,
2241 668	};
2242 669
2243static const struct agp_bridge_driver intel_915_driver = {
2244 .owner = THIS_MODULE,
2245 .aperture_sizes = intel_i830_sizes,
2246 .size_type = FIXED_APER_SIZE,
2247 .num_aperture_sizes = 4,
2248 .needs_scratch_page = true,
2249 .configure = intel_i915_configure,
2250 .fetch_size = intel_i9xx_fetch_size,
2251 .cleanup = intel_i915_cleanup,
2252 .tlb_flush = intel_i810_tlbflush,
2253 .mask_memory = intel_i810_mask_memory,
2254 .masks = intel_i810_masks,
2255 .agp_enable = intel_i810_agp_enable,
2256 .cache_flush = global_cache_flush,
2257 .create_gatt_table = intel_i915_create_gatt_table,
2258 .free_gatt_table = intel_i830_free_gatt_table,
2259 .insert_memory = intel_i915_insert_entries,
2260 .remove_memory = intel_i915_remove_entries,
2261 .alloc_by_type = intel_i830_alloc_by_type,
2262 .free_by_type = intel_i810_free_by_type,
2263 .agp_alloc_page = agp_generic_alloc_page,
2264 .agp_alloc_pages = agp_generic_alloc_pages,
2265 .agp_destroy_page = agp_generic_destroy_page,
2266 .agp_destroy_pages = agp_generic_destroy_pages,
2267 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2268 .chipset_flush = intel_i915_chipset_flush,
2269#ifdef USE_PCI_DMA_API
2270 .agp_map_page = intel_agp_map_page,
2271 .agp_unmap_page = intel_agp_unmap_page,
2272 .agp_map_memory = intel_agp_map_memory,
2273 .agp_unmap_memory = intel_agp_unmap_memory,
2274#endif
2275};
2276
2277static const struct agp_bridge_driver intel_i965_driver = {
2278 .owner = THIS_MODULE,
2279 .aperture_sizes = intel_i830_sizes,
2280 .size_type = FIXED_APER_SIZE,
2281 .num_aperture_sizes = 4,
2282 .needs_scratch_page = true,
2283 .configure = intel_i915_configure,
2284 .fetch_size = intel_i9xx_fetch_size,
2285 .cleanup = intel_i915_cleanup,
2286 .tlb_flush = intel_i810_tlbflush,
2287 .mask_memory = intel_i965_mask_memory,
2288 .masks = intel_i810_masks,
2289 .agp_enable = intel_i810_agp_enable,
2290 .cache_flush = global_cache_flush,
2291 .create_gatt_table = intel_i965_create_gatt_table,
2292 .free_gatt_table = intel_i830_free_gatt_table,
2293 .insert_memory = intel_i915_insert_entries,
2294 .remove_memory = intel_i915_remove_entries,
2295 .alloc_by_type = intel_i830_alloc_by_type,
2296 .free_by_type = intel_i810_free_by_type,
2297 .agp_alloc_page = agp_generic_alloc_page,
2298 .agp_alloc_pages = agp_generic_alloc_pages,
2299 .agp_destroy_page = agp_generic_destroy_page,
2300 .agp_destroy_pages = agp_generic_destroy_pages,
2301 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2302 .chipset_flush = intel_i915_chipset_flush,
2303#ifdef USE_PCI_DMA_API
2304 .agp_map_page = intel_agp_map_page,
2305 .agp_unmap_page = intel_agp_unmap_page,
2306 .agp_map_memory = intel_agp_map_memory,
2307 .agp_unmap_memory = intel_agp_unmap_memory,
2308#endif
2309};
2310
2311static const struct agp_bridge_driver intel_7505_driver = { 670static const struct agp_bridge_driver intel_7505_driver = {
2312 .owner = THIS_MODULE, 671 .owner = THIS_MODULE,
2313 .aperture_sizes = intel_8xx_sizes, 672 .aperture_sizes = intel_8xx_sizes,
@@ -2334,40 +693,6 @@ static const struct agp_bridge_driver intel_7505_driver = {
2334 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 693 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
2335}; 694};
2336 695
2337static const struct agp_bridge_driver intel_g33_driver = {
2338 .owner = THIS_MODULE,
2339 .aperture_sizes = intel_i830_sizes,
2340 .size_type = FIXED_APER_SIZE,
2341 .num_aperture_sizes = 4,
2342 .needs_scratch_page = true,
2343 .configure = intel_i915_configure,
2344 .fetch_size = intel_i9xx_fetch_size,
2345 .cleanup = intel_i915_cleanup,
2346 .tlb_flush = intel_i810_tlbflush,
2347 .mask_memory = intel_i965_mask_memory,
2348 .masks = intel_i810_masks,
2349 .agp_enable = intel_i810_agp_enable,
2350 .cache_flush = global_cache_flush,
2351 .create_gatt_table = intel_i915_create_gatt_table,
2352 .free_gatt_table = intel_i830_free_gatt_table,
2353 .insert_memory = intel_i915_insert_entries,
2354 .remove_memory = intel_i915_remove_entries,
2355 .alloc_by_type = intel_i830_alloc_by_type,
2356 .free_by_type = intel_i810_free_by_type,
2357 .agp_alloc_page = agp_generic_alloc_page,
2358 .agp_alloc_pages = agp_generic_alloc_pages,
2359 .agp_destroy_page = agp_generic_destroy_page,
2360 .agp_destroy_pages = agp_generic_destroy_pages,
2361 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
2362 .chipset_flush = intel_i915_chipset_flush,
2363#ifdef USE_PCI_DMA_API
2364 .agp_map_page = intel_agp_map_page,
2365 .agp_unmap_page = intel_agp_unmap_page,
2366 .agp_map_memory = intel_agp_map_memory,
2367 .agp_unmap_memory = intel_agp_unmap_memory,
2368#endif
2369};
2370
2371static int find_gmch(u16 device) 696static int find_gmch(u16 device)
2372{ 697{
2373 struct pci_dev *gmch_device; 698 struct pci_dev *gmch_device;
@@ -2392,103 +717,137 @@ static int find_gmch(u16 device)
2392static const struct intel_driver_description { 717static const struct intel_driver_description {
2393 unsigned int chip_id; 718 unsigned int chip_id;
2394 unsigned int gmch_chip_id; 719 unsigned int gmch_chip_id;
2395 unsigned int multi_gmch_chip; /* if we have more gfx chip type on this HB. */
2396 char *name; 720 char *name;
2397 const struct agp_bridge_driver *driver; 721 const struct agp_bridge_driver *driver;
2398 const struct agp_bridge_driver *gmch_driver; 722 const struct agp_bridge_driver *gmch_driver;
2399} intel_agp_chipsets[] = { 723} intel_agp_chipsets[] = {
2400 { PCI_DEVICE_ID_INTEL_82443LX_0, 0, 0, "440LX", &intel_generic_driver, NULL }, 724 { PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
2401 { PCI_DEVICE_ID_INTEL_82443BX_0, 0, 0, "440BX", &intel_generic_driver, NULL }, 725 { PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
2402 { PCI_DEVICE_ID_INTEL_82443GX_0, 0, 0, "440GX", &intel_generic_driver, NULL }, 726 { PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
2403 { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, 0, "i810", 727 { PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
2404 NULL, &intel_810_driver }, 728 NULL, &intel_810_driver },
2405 { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, 0, "i810", 729 { PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
2406 NULL, &intel_810_driver }, 730 NULL, &intel_810_driver },
2407 { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, 0, "i810", 731 { PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
2408 NULL, &intel_810_driver }, 732 NULL, &intel_810_driver },
2409 { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, 0, "i815", 733 { PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
2410 &intel_815_driver, &intel_810_driver }, 734 &intel_815_driver, &intel_810_driver },
2411 { PCI_DEVICE_ID_INTEL_82820_HB, 0, 0, "i820", &intel_820_driver, NULL }, 735 { PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
2412 { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, 0, "i820", &intel_820_driver, NULL }, 736 { PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
2413 { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, 0, "830M", 737 { PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
2414 &intel_830mp_driver, &intel_830_driver }, 738 &intel_830mp_driver, &intel_830_driver },
2415 { PCI_DEVICE_ID_INTEL_82840_HB, 0, 0, "i840", &intel_840_driver, NULL }, 739 { PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
2416 { PCI_DEVICE_ID_INTEL_82845_HB, 0, 0, "845G", &intel_845_driver, NULL }, 740 { PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
2417 { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", 741 { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
2418 &intel_845_driver, &intel_830_driver }, 742 &intel_845_driver, &intel_830_driver },
2419 { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, 743 { PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
2420 { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", 744 { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
2421 &intel_845_driver, &intel_830_driver }, 745 &intel_845_driver, &intel_830_driver },
2422 { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, 746 { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
2423 { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", 747 { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
2424 &intel_845_driver, &intel_830_driver }, 748 &intel_845_driver, &intel_830_driver },
2425 { PCI_DEVICE_ID_INTEL_82860_HB, 0, 0, "i860", &intel_860_driver, NULL }, 749 { PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
2426 { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, 0, "865", 750 { PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
2427 &intel_845_driver, &intel_830_driver }, 751 &intel_845_driver, &intel_830_driver },
2428 { PCI_DEVICE_ID_INTEL_82875_HB, 0, 0, "i875", &intel_845_driver, NULL }, 752 { PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
2429 { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, 0, "E7221 (i915)", 753 { PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
2430 NULL, &intel_915_driver }, 754 NULL, &intel_915_driver },
2431 { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, 0, "915G", 755 { PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
2432 NULL, &intel_915_driver }, 756 NULL, &intel_915_driver },
2433 { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, 0, "915GM", 757 { PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
2434 NULL, &intel_915_driver }, 758 NULL, &intel_915_driver },
2435 { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", 759 { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
2436 NULL, &intel_915_driver }, 760 NULL, &intel_915_driver },
2437 { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", 761 { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
2438 NULL, &intel_915_driver }, 762 NULL, &intel_915_driver },
2439 { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", 763 { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
2440 NULL, &intel_915_driver }, 764 NULL, &intel_915_driver },
2441 { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", 765 { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
2442 NULL, &intel_i965_driver }, 766 NULL, &intel_i965_driver },
2443 { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35", 767 { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
2444 NULL, &intel_i965_driver }, 768 NULL, &intel_i965_driver },
2445 { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q", 769 { PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
2446 NULL, &intel_i965_driver }, 770 NULL, &intel_i965_driver },
2447 { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", 771 { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
2448 NULL, &intel_i965_driver }, 772 NULL, &intel_i965_driver },
2449 { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", 773 { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
2450 NULL, &intel_i965_driver }, 774 NULL, &intel_i965_driver },
2451 { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", 775 { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
2452 NULL, &intel_i965_driver }, 776 NULL, &intel_i965_driver },
2453 { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, 777 { PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
2454 { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, 778 { PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
2455 { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, 0, "G33", 779 { PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
2456 NULL, &intel_g33_driver }, 780 NULL, &intel_g33_driver },
2457 { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, 0, "Q35", 781 { PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
2458 NULL, &intel_g33_driver }, 782 NULL, &intel_g33_driver },
2459 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 783 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
2460 NULL, &intel_g33_driver }, 784 NULL, &intel_g33_driver },
2461 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", 785 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
2462 NULL, &intel_g33_driver }, 786 NULL, &intel_g33_driver },
2463 { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", 787 { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
2464 NULL, &intel_g33_driver }, 788 NULL, &intel_g33_driver },
2465 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, 789 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
2466 "GM45", NULL, &intel_i965_driver }, 790 "GM45", NULL, &intel_i965_driver },
2467 { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0, 791 { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
2468 "Eaglelake", NULL, &intel_i965_driver }, 792 "Eaglelake", NULL, &intel_i965_driver },
2469 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, 793 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
2470 "Q45/Q43", NULL, &intel_i965_driver }, 794 "Q45/Q43", NULL, &intel_i965_driver },
2471 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, 795 { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
2472 "G45/G43", NULL, &intel_i965_driver }, 796 "G45/G43", NULL, &intel_i965_driver },
2473 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, 797 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
2474 "B43", NULL, &intel_i965_driver }, 798 "B43", NULL, &intel_i965_driver },
2475 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, 799 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
2476 "G41", NULL, &intel_i965_driver }, 800 "G41", NULL, &intel_i965_driver },
2477 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, 801 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
2478 "HD Graphics", NULL, &intel_i965_driver }, 802 "HD Graphics", NULL, &intel_i965_driver },
2479 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, 803 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
2480 "HD Graphics", NULL, &intel_i965_driver }, 804 "HD Graphics", NULL, &intel_i965_driver },
2481 { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, 805 { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
2482 "HD Graphics", NULL, &intel_i965_driver }, 806 "HD Graphics", NULL, &intel_i965_driver },
2483 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, 807 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
2484 "HD Graphics", NULL, &intel_i965_driver }, 808 "HD Graphics", NULL, &intel_i965_driver },
2485 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, 809 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
2486 "Sandybridge", NULL, &intel_i965_driver }, 810 "Sandybridge", NULL, &intel_i965_driver },
2487 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, 811 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
2488 "Sandybridge", NULL, &intel_i965_driver }, 812 "Sandybridge", NULL, &intel_i965_driver },
2489 { 0, 0, 0, NULL, NULL, NULL } 813 { 0, 0, NULL, NULL, NULL }
2490}; 814};
2491 815
816static int __devinit intel_gmch_probe(struct pci_dev *pdev,
817 struct agp_bridge_data *bridge)
818{
819 int i;
820 bridge->driver = NULL;
821
822 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
823 if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
824 find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
825 bridge->driver =
826 intel_agp_chipsets[i].gmch_driver;
827 break;
828 }
829 }
830
831 if (!bridge->driver)
832 return 0;
833
834 bridge->dev_private_data = &intel_private;
835 bridge->dev = pdev;
836
837 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
838
839 if (bridge->driver->mask_memory == intel_i965_mask_memory) {
840 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
841 dev_err(&intel_private.pcidev->dev,
842 "set gfx device dma mask 36bit failed!\n");
843 else
844 pci_set_consistent_dma_mask(intel_private.pcidev,
845 DMA_BIT_MASK(36));
846 }
847
848 return 1;
849}
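
As an aside on the DMA-mask block above: a minimal sketch of the same 36-bit setup using the consolidated DMA API that later kernels prefer (the helper name is hypothetical; this tree itself sticks with the pci_set_*_dma_mask wrappers):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical helper, for illustration only: cap the gfx device's
 * streaming and coherent DMA to 36 bits, as the probe above does. */
static int example_set_gtt_dma_mask(struct pci_dev *gfx_pdev)
{
	return dma_set_mask_and_coherent(&gfx_pdev->dev, DMA_BIT_MASK(36));
}
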
850
2492static int __devinit agp_intel_probe(struct pci_dev *pdev, 851static int __devinit agp_intel_probe(struct pci_dev *pdev,
2493 const struct pci_device_id *ent) 852 const struct pci_device_id *ent)
2494{ 853{
@@ -2503,22 +862,18 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2503 if (!bridge) 862 if (!bridge)
2504 return -ENOMEM; 863 return -ENOMEM;
2505 864
865 bridge->capndx = cap_ptr;
866
867 if (intel_gmch_probe(pdev, bridge))
868 goto found_gmch;
869
2506 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { 870 for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
2507		/* In case multiple models of gfx chip sit on		  871
2508		   the same host bridge type, this makes sure		  872
2509		   we detect the right IGD. */				  873
2510 if (pdev->device == intel_agp_chipsets[i].chip_id) { 874 if (pdev->device == intel_agp_chipsets[i].chip_id) {
2511 if ((intel_agp_chipsets[i].gmch_chip_id != 0) && 875 bridge->driver = intel_agp_chipsets[i].driver;
2512 find_gmch(intel_agp_chipsets[i].gmch_chip_id)) { 876 break;
2513 bridge->driver =
2514 intel_agp_chipsets[i].gmch_driver;
2515 break;
2516 } else if (intel_agp_chipsets[i].multi_gmch_chip) {
2517 continue;
2518 } else {
2519 bridge->driver = intel_agp_chipsets[i].driver;
2520 break;
2521 }
2522 } 877 }
2523 } 878 }
2524 879
@@ -2530,18 +885,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2530 return -ENODEV; 885 return -ENODEV;
2531 } 886 }
2532 887
2533 if (bridge->driver == NULL) {
2534 /* bridge has no AGP and no IGD detected */
2535 if (cap_ptr)
2536 dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
2537 intel_agp_chipsets[i].gmch_chip_id);
2538 agp_put_bridge(bridge);
2539 return -ENODEV;
2540 }
2541
2542 bridge->dev = pdev; 888 bridge->dev = pdev;
2543 bridge->capndx = cap_ptr; 889 bridge->dev_private_data = NULL;
2544 bridge->dev_private_data = &intel_private;
2545 890
2546 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); 891 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
2547 892
@@ -2577,15 +922,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2577 &bridge->mode); 922 &bridge->mode);
2578 } 923 }
2579 924
2580 if (bridge->driver->mask_memory == intel_i965_mask_memory) { 925found_gmch:
2581 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
2582 dev_err(&intel_private.pcidev->dev,
2583 "set gfx device dma mask 36bit failed!\n");
2584 else
2585 pci_set_consistent_dma_mask(intel_private.pcidev,
2586 DMA_BIT_MASK(36));
2587 }
2588
2589 pci_set_drvdata(pdev, bridge); 926 pci_set_drvdata(pdev, bridge);
2590 err = agp_add_bridge(bridge); 927 err = agp_add_bridge(bridge);
2591 if (!err) 928 if (!err)
@@ -2611,22 +948,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
2611 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 948 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
2612 int ret_val; 949 int ret_val;
2613 950
2614 if (bridge->driver == &intel_generic_driver) 951 bridge->driver->configure();
2615 intel_configure();
2616 else if (bridge->driver == &intel_850_driver)
2617 intel_850_configure();
2618 else if (bridge->driver == &intel_845_driver)
2619 intel_845_configure();
2620 else if (bridge->driver == &intel_830mp_driver)
2621 intel_830mp_configure();
2622 else if (bridge->driver == &intel_915_driver)
2623 intel_i915_configure();
2624 else if (bridge->driver == &intel_830_driver)
2625 intel_i830_configure();
2626 else if (bridge->driver == &intel_810_driver)
2627 intel_i810_configure();
2628 else if (bridge->driver == &intel_i965_driver)
2629 intel_i915_configure();
2630 952
2631 ret_val = agp_rebind_memory(); 953 ret_val = agp_rebind_memory();
2632 if (ret_val != 0) 954 if (ret_val != 0)
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
new file mode 100644
index 000000000000..2547465d4658
--- /dev/null
+++ b/drivers/char/agp/intel-agp.h
@@ -0,0 +1,239 @@
1/*
2 * Common Intel AGPGART and GTT definitions.
3 */
4
5/* Intel registers */
6#define INTEL_APSIZE 0xb4
7#define INTEL_ATTBASE 0xb8
8#define INTEL_AGPCTRL 0xb0
9#define INTEL_NBXCFG 0x50
10#define INTEL_ERRSTS 0x91
11
12/* Intel i830 registers */
13#define I830_GMCH_CTRL 0x52
14#define I830_GMCH_ENABLED 0x4
15#define I830_GMCH_MEM_MASK 0x1
16#define I830_GMCH_MEM_64M 0x1
17#define I830_GMCH_MEM_128M 0
18#define I830_GMCH_GMS_MASK 0x70
19#define I830_GMCH_GMS_DISABLED 0x00
20#define I830_GMCH_GMS_LOCAL 0x10
21#define I830_GMCH_GMS_STOLEN_512 0x20
22#define I830_GMCH_GMS_STOLEN_1024 0x30
23#define I830_GMCH_GMS_STOLEN_8192 0x40
24#define I830_RDRAM_CHANNEL_TYPE 0x03010
25#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
26#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
27
28/* This one is for I830MP w. an external graphic card */
29#define INTEL_I830_ERRSTS 0x92
30
31/* Intel 855GM/852GM registers */
32#define I855_GMCH_GMS_MASK 0xF0
33#define I855_GMCH_GMS_STOLEN_0M 0x0
34#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
35#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
36#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
37#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
38#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
39#define I85X_CAPID 0x44
40#define I85X_VARIANT_MASK 0x7
41#define I85X_VARIANT_SHIFT 5
42#define I855_GME 0x0
43#define I855_GM 0x4
44#define I852_GME 0x2
45#define I852_GM 0x5
46
47/* Intel i845 registers */
48#define INTEL_I845_AGPM 0x51
49#define INTEL_I845_ERRSTS 0xc8
50
51/* Intel i860 registers */
52#define INTEL_I860_MCHCFG 0x50
53#define INTEL_I860_ERRSTS 0xc8
54
55/* Intel i810 registers */
56#define I810_GMADDR 0x10
57#define I810_MMADDR 0x14
58#define I810_PTE_BASE 0x10000
59#define I810_PTE_MAIN_UNCACHED 0x00000000
60#define I810_PTE_LOCAL 0x00000002
61#define I810_PTE_VALID 0x00000001
62#define I830_PTE_SYSTEM_CACHED 0x00000006
63#define I810_SMRAM_MISCC 0x70
64#define I810_GFX_MEM_WIN_SIZE 0x00010000
65#define I810_GFX_MEM_WIN_32M 0x00010000
66#define I810_GMS 0x000000c0
67#define I810_GMS_DISABLE 0x00000000
68#define I810_PGETBL_CTL 0x2020
69#define I810_PGETBL_ENABLED 0x00000001
70#define I965_PGETBL_SIZE_MASK 0x0000000e
71#define I965_PGETBL_SIZE_512KB (0 << 1)
72#define I965_PGETBL_SIZE_256KB (1 << 1)
73#define I965_PGETBL_SIZE_128KB (2 << 1)
74#define I965_PGETBL_SIZE_1MB (3 << 1)
75#define I965_PGETBL_SIZE_2MB (4 << 1)
76#define I965_PGETBL_SIZE_1_5MB (5 << 1)
77#define G33_PGETBL_SIZE_MASK (3 << 8)
78#define G33_PGETBL_SIZE_1M (1 << 8)
79#define G33_PGETBL_SIZE_2M (2 << 8)
80
81#define I810_DRAM_CTL 0x3000
82#define I810_DRAM_ROW_0 0x00000001
83#define I810_DRAM_ROW_0_SDRAM 0x00000001
84
85/* Intel 815 register */
86#define INTEL_815_APCONT 0x51
87#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
88
89/* Intel i820 registers */
90#define INTEL_I820_RDCR 0x51
91#define INTEL_I820_ERRSTS 0xc8
92
93/* Intel i840 registers */
94#define INTEL_I840_MCHCFG 0x50
95#define INTEL_I840_ERRSTS 0xc8
96
97/* Intel i850 registers */
98#define INTEL_I850_MCHCFG 0x50
99#define INTEL_I850_ERRSTS 0xc8
100
101/* intel 915G registers */
102#define I915_GMADDR 0x18
103#define I915_MMADDR 0x10
104#define I915_PTEADDR 0x1C
105#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
106#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
107#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
108#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
109#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
110#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
111#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
112#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
113
114#define I915_IFPADDR 0x60
115
116/* Intel 965G registers */
117#define I965_MSAC 0x62
118#define I965_IFPADDR 0x70
119
120/* Intel 7505 registers */
121#define INTEL_I7505_APSIZE 0x74
122#define INTEL_I7505_NCAPID 0x60
123#define INTEL_I7505_NISTAT 0x6c
124#define INTEL_I7505_ATTBASE 0x78
125#define INTEL_I7505_ERRSTS 0x42
126#define INTEL_I7505_AGPCTRL 0x70
127#define INTEL_I7505_MCHCFG 0x50
128
129#define SNB_GMCH_CTRL 0x50
130#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
131#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
132#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
133#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
134#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
135#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
136#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
137#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
138#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
139#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
140#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
141#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
142#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
143#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
144#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
145#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
146#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
147#define SNB_GTT_SIZE_0M (0 << 8)
148#define SNB_GTT_SIZE_1M (1 << 8)
149#define SNB_GTT_SIZE_2M (2 << 8)
150#define SNB_GTT_SIZE_MASK (3 << 8)
151
152/* pci devices ids */
153#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
154#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
155#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
156#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
157#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
158#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
159#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
160#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
161#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
162#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
163#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
164#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
165#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
166#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
167#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
168#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
169#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
170#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
171#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
172#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
173#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
174#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
175#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
176#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
177#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
178#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
179#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
180#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
181#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
182#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
183#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
184#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
185#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
186#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
187#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
188#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
189#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
190#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
191#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
192#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
193#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
194#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
195#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
196#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
197#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
198#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
199#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
200#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
201
202/* cover 915 and 945 variants */
203#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
204 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
205 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
206 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
207 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
208 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
209
210#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
211 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
212 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
213 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
214 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
215 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
216
217#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
218 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
219 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
220 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
221 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
222
223#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
224 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
225
226#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
227 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
228
229#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
230 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
231 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
232 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
233 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
234 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
235 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
236 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
237 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
238 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
239 IS_SNB)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
new file mode 100644
index 000000000000..e8ea6825822c
--- /dev/null
+++ b/drivers/char/agp/intel-gtt.c
@@ -0,0 +1,1516 @@
1/*
2 * Intel GTT (Graphics Translation Table) routines
3 *
4 * Caveat: This driver implements the linux agp interface, but this is far from
  5 * an agp driver! GTT support ended up here for purely historical reasons: the
  6 * old userspace intel graphics drivers needed an interface to map memory into
  7 * the GTT. And the drm provides a default interface for graphics devices sitting
  8 * on an agp port. So it made sense to fake the GTT support as an agp port to
  9 * avoid having to create a new api.
 10 *
 11 * With gem this does not make much sense anymore, it just needlessly complicates
 12 * the code. But as long as the old graphics stack is still supported, it's stuck
13 * here.
14 *
15 * /fairy-tale-mode off
16 */
17
18/*
19 * If we have Intel graphics, we're not going to have anything other than
20 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21 * on the Intel IOMMU support (CONFIG_DMAR).
22 * Only newer chipsets need to bother with this, of course.
23 */
24#ifdef CONFIG_DMAR
25#define USE_PCI_DMA_API 1
26#endif
27
28static const struct aper_size_info_fixed intel_i810_sizes[] =
29{
30 {64, 16384, 4},
31 /* The 32M mode still requires a 64k gatt */
32 {32, 8192, 4}
33};
34
35#define AGP_DCACHE_MEMORY 1
36#define AGP_PHYS_MEMORY 2
37#define INTEL_AGP_CACHED_MEMORY 3
38
39static struct gatt_mask intel_i810_masks[] =
40{
41 {.mask = I810_PTE_VALID, .type = 0},
42 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
43 {.mask = I810_PTE_VALID, .type = 0},
44 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
45 .type = INTEL_AGP_CACHED_MEMORY}
46};
47
48static struct _intel_private {
49 struct pci_dev *pcidev; /* device one */
50 u8 __iomem *registers;
51 u32 __iomem *gtt; /* I915G */
52 int num_dcache_entries;
53 /* gtt_entries is the number of gtt entries that are already mapped
54 * to stolen memory. Stolen memory is larger than the memory mapped
55 * through gtt_entries, as it includes some reserved space for the BIOS
56 * popup and for the GTT.
57 */
58 int gtt_entries; /* i830+ */
59 int gtt_total_size;
60 union {
61 void __iomem *i9xx_flush_page;
62 void *i8xx_flush_page;
63 };
64 struct page *i8xx_page;
65 struct resource ifp_resource;
66 int resource_valid;
67} intel_private;
68
69#ifdef USE_PCI_DMA_API
70static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
71{
72 *ret = pci_map_page(intel_private.pcidev, page, 0,
73 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
74 if (pci_dma_mapping_error(intel_private.pcidev, *ret))
75 return -EINVAL;
76 return 0;
77}
78
79static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
80{
81 pci_unmap_page(intel_private.pcidev, dma,
82 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
83}
84
85static void intel_agp_free_sglist(struct agp_memory *mem)
86{
87 struct sg_table st;
88
89 st.sgl = mem->sg_list;
90 st.orig_nents = st.nents = mem->page_count;
91
92 sg_free_table(&st);
93
94 mem->sg_list = NULL;
95 mem->num_sg = 0;
96}
97
98static int intel_agp_map_memory(struct agp_memory *mem)
99{
100 struct sg_table st;
101 struct scatterlist *sg;
102 int i;
103
104 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
105
106 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
107 return -ENOMEM;
108
109 mem->sg_list = sg = st.sgl;
110
111 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
112 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
113
114 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
115 mem->page_count, PCI_DMA_BIDIRECTIONAL);
116 if (unlikely(!mem->num_sg)) {
117 intel_agp_free_sglist(mem);
118 return -ENOMEM;
119 }
120 return 0;
121}
122
123static void intel_agp_unmap_memory(struct agp_memory *mem)
124{
125 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
126
127 pci_unmap_sg(intel_private.pcidev, mem->sg_list,
128 mem->page_count, PCI_DMA_BIDIRECTIONAL);
129 intel_agp_free_sglist(mem);
130}
131
132static void intel_agp_insert_sg_entries(struct agp_memory *mem,
133 off_t pg_start, int mask_type)
134{
135 struct scatterlist *sg;
136 int i, j;
137
138 j = pg_start;
139
140 WARN_ON(!mem->num_sg);
141
142 if (mem->num_sg == mem->page_count) {
143 for_each_sg(mem->sg_list, sg, mem->page_count, i) {
144 writel(agp_bridge->driver->mask_memory(agp_bridge,
145 sg_dma_address(sg), mask_type),
146 intel_private.gtt+j);
147 j++;
148 }
149 } else {
150 /* sg may merge pages, but we have to separate
151 * per-page addr for GTT */
152 unsigned int len, m;
153
154 for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
155 len = sg_dma_len(sg) / PAGE_SIZE;
156 for (m = 0; m < len; m++) {
157 writel(agp_bridge->driver->mask_memory(agp_bridge,
158 sg_dma_address(sg) + m * PAGE_SIZE,
159 mask_type),
160 intel_private.gtt+j);
161 j++;
162 }
163 }
164 }
165 readl(intel_private.gtt+j-1);
166}
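
A minimal sketch of the per-page expansion described in the comment above, assuming a coalesced scatterlist segment; the helper and its callback are hypothetical and only restate what the second loop does:

#include <linux/scatterlist.h>

/* Hypothetical helper: a merged segment of, say, 16 KiB still produces
 * four 4 KiB GTT entries, one per page-sized slice of the DMA range. */
static void example_expand_segment(struct scatterlist *sg,
				   void (*emit_pte)(dma_addr_t addr))
{
	unsigned int m, pages = sg_dma_len(sg) / PAGE_SIZE;

	for (m = 0; m < pages; m++)
		emit_pte(sg_dma_address(sg) + m * PAGE_SIZE);
}
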
167
168#else
169
170static void intel_agp_insert_sg_entries(struct agp_memory *mem,
171 off_t pg_start, int mask_type)
172{
173 int i, j;
174 u32 cache_bits = 0;
175
176 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
177 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
178 {
179 cache_bits = I830_PTE_SYSTEM_CACHED;
180 }
181
182 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
183 writel(agp_bridge->driver->mask_memory(agp_bridge,
184 page_to_phys(mem->pages[i]), mask_type),
185 intel_private.gtt+j);
186 }
187
188 readl(intel_private.gtt+j-1);
189}
190
191#endif
192
193static int intel_i810_fetch_size(void)
194{
195 u32 smram_miscc;
196 struct aper_size_info_fixed *values;
197
198 pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
199 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
200
201 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
202 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
203 return 0;
204 }
205 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
206 agp_bridge->current_size = (void *) (values + 1);
207 agp_bridge->aperture_size_idx = 1;
208 return values[1].size;
209 } else {
210 agp_bridge->current_size = (void *) (values);
211 agp_bridge->aperture_size_idx = 0;
212 return values[0].size;
213 }
214
215 return 0;
216}
217
218static int intel_i810_configure(void)
219{
220 struct aper_size_info_fixed *current_size;
221 u32 temp;
222 int i;
223
224 current_size = A_SIZE_FIX(agp_bridge->current_size);
225
226 if (!intel_private.registers) {
227 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
228 temp &= 0xfff80000;
229
230 intel_private.registers = ioremap(temp, 128 * 4096);
231 if (!intel_private.registers) {
232 dev_err(&intel_private.pcidev->dev,
233 "can't remap memory\n");
234 return -ENOMEM;
235 }
236 }
237
238 if ((readl(intel_private.registers+I810_DRAM_CTL)
239 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
240 /* This will need to be dynamically assigned */
241 dev_info(&intel_private.pcidev->dev,
242 "detected 4MB dedicated video ram\n");
243 intel_private.num_dcache_entries = 1024;
244 }
245 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
246 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
247 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
248 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
249
250 if (agp_bridge->driver->needs_scratch_page) {
251 for (i = 0; i < current_size->num_entries; i++) {
252 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
253 }
254 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
255 }
256 global_cache_flush();
257 return 0;
258}
259
260static void intel_i810_cleanup(void)
261{
262 writel(0, intel_private.registers+I810_PGETBL_CTL);
263 readl(intel_private.registers); /* PCI Posting. */
264 iounmap(intel_private.registers);
265}
266
267static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
268{
269 return;
270}
271
272/* Exists to support ARGB cursors */
273static struct page *i8xx_alloc_pages(void)
274{
275 struct page *page;
276
277 page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
278 if (page == NULL)
279 return NULL;
280
281 if (set_pages_uc(page, 4) < 0) {
282 set_pages_wb(page, 4);
283 __free_pages(page, 2);
284 return NULL;
285 }
286 get_page(page);
287 atomic_inc(&agp_bridge->current_memory_agp);
288 return page;
289}
290
291static void i8xx_destroy_pages(struct page *page)
292{
293 if (page == NULL)
294 return;
295
296 set_pages_wb(page, 4);
297 put_page(page);
298 __free_pages(page, 2);
299 atomic_dec(&agp_bridge->current_memory_agp);
300}
301
302static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
303 int type)
304{
305 if (type < AGP_USER_TYPES)
306 return type;
307 else if (type == AGP_USER_CACHED_MEMORY)
308 return INTEL_AGP_CACHED_MEMORY;
309 else
310 return 0;
311}
312
313static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
314 int type)
315{
316 int i, j, num_entries;
317 void *temp;
318 int ret = -EINVAL;
319 int mask_type;
320
321 if (mem->page_count == 0)
322 goto out;
323
324 temp = agp_bridge->current_size;
325 num_entries = A_SIZE_FIX(temp)->num_entries;
326
327 if ((pg_start + mem->page_count) > num_entries)
328 goto out_err;
329
330
331 for (j = pg_start; j < (pg_start + mem->page_count); j++) {
332 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
333 ret = -EBUSY;
334 goto out_err;
335 }
336 }
337
338 if (type != mem->type)
339 goto out_err;
340
341 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
342
343 switch (mask_type) {
344 case AGP_DCACHE_MEMORY:
345 if (!mem->is_flushed)
346 global_cache_flush();
347 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
348 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
349 intel_private.registers+I810_PTE_BASE+(i*4));
350 }
351 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
352 break;
353 case AGP_PHYS_MEMORY:
354 case AGP_NORMAL_MEMORY:
355 if (!mem->is_flushed)
356 global_cache_flush();
357 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
358 writel(agp_bridge->driver->mask_memory(agp_bridge,
359 page_to_phys(mem->pages[i]), mask_type),
360 intel_private.registers+I810_PTE_BASE+(j*4));
361 }
362 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
363 break;
364 default:
365 goto out_err;
366 }
367
368out:
369 ret = 0;
370out_err:
371 mem->is_flushed = true;
372 return ret;
373}
374
375static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
376 int type)
377{
378 int i;
379
380 if (mem->page_count == 0)
381 return 0;
382
383 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
384 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
385 }
386 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
387
388 return 0;
389}
390
391/*
392 * The i810/i830 requires a physical address to program its mouse
393 * pointer into hardware.
394 * However the Xserver still writes to it through the agp aperture.
395 */
396static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
397{
398 struct agp_memory *new;
399 struct page *page;
400
401 switch (pg_count) {
402 case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
403 break;
404 case 4:
405 /* kludge to get 4 physical pages for ARGB cursor */
406 page = i8xx_alloc_pages();
407 break;
408 default:
409 return NULL;
410 }
411
412 if (page == NULL)
413 return NULL;
414
415 new = agp_create_memory(pg_count);
416 if (new == NULL)
417 return NULL;
418
419 new->pages[0] = page;
420 if (pg_count == 4) {
421 /* kludge to get 4 physical pages for ARGB cursor */
422 new->pages[1] = new->pages[0] + 1;
423 new->pages[2] = new->pages[1] + 1;
424 new->pages[3] = new->pages[2] + 1;
425 }
426 new->page_count = pg_count;
427 new->num_scratch_pages = pg_count;
428 new->type = AGP_PHYS_MEMORY;
429 new->physical = page_to_phys(new->pages[0]);
430 return new;
431}
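
A hedged usage sketch (not code from this tree): a consumer such as the X server's cursor code would reach the allocator above through the generic AGP API by requesting four pages of AGP_PHYS_MEMORY; the helper name is made up for illustration:

#include <linux/agp_backend.h>

/* Hypothetical helper: allocate the 4-page ARGB cursor buffer. The
 * returned memory's ->physical field is what gets programmed into the
 * hardware cursor base, per the comment above alloc_agpphysmem_i8xx(). */
static struct agp_memory *example_alloc_argb_cursor(struct agp_bridge_data *bridge)
{
	return agp_allocate_memory(bridge, 4, AGP_PHYS_MEMORY);
}
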
432
433static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
434{
435 struct agp_memory *new;
436
437 if (type == AGP_DCACHE_MEMORY) {
438 if (pg_count != intel_private.num_dcache_entries)
439 return NULL;
440
441 new = agp_create_memory(1);
442 if (new == NULL)
443 return NULL;
444
445 new->type = AGP_DCACHE_MEMORY;
446 new->page_count = pg_count;
447 new->num_scratch_pages = 0;
448 agp_free_page_array(new);
449 return new;
450 }
451 if (type == AGP_PHYS_MEMORY)
452 return alloc_agpphysmem_i8xx(pg_count, type);
453 return NULL;
454}
455
456static void intel_i810_free_by_type(struct agp_memory *curr)
457{
458 agp_free_key(curr->key);
459 if (curr->type == AGP_PHYS_MEMORY) {
460 if (curr->page_count == 4)
461 i8xx_destroy_pages(curr->pages[0]);
462 else {
463 agp_bridge->driver->agp_destroy_page(curr->pages[0],
464 AGP_PAGE_DESTROY_UNMAP);
465 agp_bridge->driver->agp_destroy_page(curr->pages[0],
466 AGP_PAGE_DESTROY_FREE);
467 }
468 agp_free_page_array(curr);
469 }
470 kfree(curr);
471}
472
473static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
474 dma_addr_t addr, int type)
475{
476 /* Type checking must be done elsewhere */
477 return addr | bridge->driver->masks[type].mask;
478}
479
480static struct aper_size_info_fixed intel_i830_sizes[] =
481{
482 {128, 32768, 5},
483 /* The 64M mode still requires a 128k gatt */
484 {64, 16384, 5},
485 {256, 65536, 6},
486 {512, 131072, 7},
487};
488
489static void intel_i830_init_gtt_entries(void)
490{
491 u16 gmch_ctrl;
492 int gtt_entries = 0;
493 u8 rdct;
494 int local = 0;
495 static const int ddt[4] = { 0, 16, 32, 64 };
496 int size; /* reserved space (in kb) at the top of stolen memory */
497
498 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
499
500 if (IS_I965) {
501 u32 pgetbl_ctl;
502 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
503
504 /* The 965 has a field telling us the size of the GTT,
505 * which may be larger than what is necessary to map the
506 * aperture.
507 */
508 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
509 case I965_PGETBL_SIZE_128KB:
510 size = 128;
511 break;
512 case I965_PGETBL_SIZE_256KB:
513 size = 256;
514 break;
515 case I965_PGETBL_SIZE_512KB:
516 size = 512;
517 break;
518 case I965_PGETBL_SIZE_1MB:
519 size = 1024;
520 break;
521 case I965_PGETBL_SIZE_2MB:
522 size = 2048;
523 break;
524 case I965_PGETBL_SIZE_1_5MB:
525 size = 1024 + 512;
526 break;
527 default:
528 dev_info(&intel_private.pcidev->dev,
529 "unknown page table size, assuming 512KB\n");
530 size = 512;
531 }
532 size += 4; /* add in BIOS popup space */
533 } else if (IS_G33 && !IS_PINEVIEW) {
534 /* G33's GTT size defined in gmch_ctrl */
535 switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
536 case G33_PGETBL_SIZE_1M:
537 size = 1024;
538 break;
539 case G33_PGETBL_SIZE_2M:
540 size = 2048;
541 break;
542 default:
543 dev_info(&agp_bridge->dev->dev,
544 "unknown page table size 0x%x, assuming 512KB\n",
545 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
546 size = 512;
547 }
548 size += 4;
549 } else if (IS_G4X || IS_PINEVIEW) {
550 /* On 4 series hardware, GTT stolen is separate from graphics
551		 * stolen, so ignore it when counting stolen gtt entries. However,
552 * 4KB of the stolen memory doesn't get mapped to the GTT.
553 */
554 size = 4;
555 } else {
556 /* On previous hardware, the GTT size was just what was
557 * required to map the aperture.
558 */
559 size = agp_bridge->driver->fetch_size() + 4;
560 }
561
562 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
563 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
564 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
565 case I830_GMCH_GMS_STOLEN_512:
566 gtt_entries = KB(512) - KB(size);
567 break;
568 case I830_GMCH_GMS_STOLEN_1024:
569 gtt_entries = MB(1) - KB(size);
570 break;
571 case I830_GMCH_GMS_STOLEN_8192:
572 gtt_entries = MB(8) - KB(size);
573 break;
574 case I830_GMCH_GMS_LOCAL:
575 rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
576 gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
577 MB(ddt[I830_RDRAM_DDT(rdct)]);
578 local = 1;
579 break;
580 default:
581 gtt_entries = 0;
582 break;
583 }
584 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
585 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
586 /*
587 * SandyBridge has new memory control reg at 0x50.w
588 */
589 u16 snb_gmch_ctl;
590 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
591 switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
592 case SNB_GMCH_GMS_STOLEN_32M:
593 gtt_entries = MB(32) - KB(size);
594 break;
595 case SNB_GMCH_GMS_STOLEN_64M:
596 gtt_entries = MB(64) - KB(size);
597 break;
598 case SNB_GMCH_GMS_STOLEN_96M:
599 gtt_entries = MB(96) - KB(size);
600 break;
601 case SNB_GMCH_GMS_STOLEN_128M:
602 gtt_entries = MB(128) - KB(size);
603 break;
604 case SNB_GMCH_GMS_STOLEN_160M:
605 gtt_entries = MB(160) - KB(size);
606 break;
607 case SNB_GMCH_GMS_STOLEN_192M:
608 gtt_entries = MB(192) - KB(size);
609 break;
610 case SNB_GMCH_GMS_STOLEN_224M:
611 gtt_entries = MB(224) - KB(size);
612 break;
613 case SNB_GMCH_GMS_STOLEN_256M:
614 gtt_entries = MB(256) - KB(size);
615 break;
616 case SNB_GMCH_GMS_STOLEN_288M:
617 gtt_entries = MB(288) - KB(size);
618 break;
619 case SNB_GMCH_GMS_STOLEN_320M:
620 gtt_entries = MB(320) - KB(size);
621 break;
622 case SNB_GMCH_GMS_STOLEN_352M:
623 gtt_entries = MB(352) - KB(size);
624 break;
625 case SNB_GMCH_GMS_STOLEN_384M:
626 gtt_entries = MB(384) - KB(size);
627 break;
628 case SNB_GMCH_GMS_STOLEN_416M:
629 gtt_entries = MB(416) - KB(size);
630 break;
631 case SNB_GMCH_GMS_STOLEN_448M:
632 gtt_entries = MB(448) - KB(size);
633 break;
634 case SNB_GMCH_GMS_STOLEN_480M:
635 gtt_entries = MB(480) - KB(size);
636 break;
637 case SNB_GMCH_GMS_STOLEN_512M:
638 gtt_entries = MB(512) - KB(size);
639 break;
640 }
641 } else {
642 switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
643 case I855_GMCH_GMS_STOLEN_1M:
644 gtt_entries = MB(1) - KB(size);
645 break;
646 case I855_GMCH_GMS_STOLEN_4M:
647 gtt_entries = MB(4) - KB(size);
648 break;
649 case I855_GMCH_GMS_STOLEN_8M:
650 gtt_entries = MB(8) - KB(size);
651 break;
652 case I855_GMCH_GMS_STOLEN_16M:
653 gtt_entries = MB(16) - KB(size);
654 break;
655 case I855_GMCH_GMS_STOLEN_32M:
656 gtt_entries = MB(32) - KB(size);
657 break;
658 case I915_GMCH_GMS_STOLEN_48M:
659 /* Check it's really I915G */
660 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
661 gtt_entries = MB(48) - KB(size);
662 else
663 gtt_entries = 0;
664 break;
665 case I915_GMCH_GMS_STOLEN_64M:
666 /* Check it's really I915G */
667 if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
668 gtt_entries = MB(64) - KB(size);
669 else
670 gtt_entries = 0;
671 break;
672 case G33_GMCH_GMS_STOLEN_128M:
673 if (IS_G33 || IS_I965 || IS_G4X)
674 gtt_entries = MB(128) - KB(size);
675 else
676 gtt_entries = 0;
677 break;
678 case G33_GMCH_GMS_STOLEN_256M:
679 if (IS_G33 || IS_I965 || IS_G4X)
680 gtt_entries = MB(256) - KB(size);
681 else
682 gtt_entries = 0;
683 break;
684 case INTEL_GMCH_GMS_STOLEN_96M:
685 if (IS_I965 || IS_G4X)
686 gtt_entries = MB(96) - KB(size);
687 else
688 gtt_entries = 0;
689 break;
690 case INTEL_GMCH_GMS_STOLEN_160M:
691 if (IS_I965 || IS_G4X)
692 gtt_entries = MB(160) - KB(size);
693 else
694 gtt_entries = 0;
695 break;
696 case INTEL_GMCH_GMS_STOLEN_224M:
697 if (IS_I965 || IS_G4X)
698 gtt_entries = MB(224) - KB(size);
699 else
700 gtt_entries = 0;
701 break;
702 case INTEL_GMCH_GMS_STOLEN_352M:
703 if (IS_I965 || IS_G4X)
704 gtt_entries = MB(352) - KB(size);
705 else
706 gtt_entries = 0;
707 break;
708 default:
709 gtt_entries = 0;
710 break;
711 }
712 }
713 if (gtt_entries > 0) {
714 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
715 gtt_entries / KB(1), local ? "local" : "stolen");
716 gtt_entries /= KB(4);
717 } else {
718 dev_info(&agp_bridge->dev->dev,
719 "no pre-allocated video memory detected\n");
720 gtt_entries = 0;
721 }
722
723 intel_private.gtt_entries = gtt_entries;
724}
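
A worked example of the arithmetic above, under the assumption of an i965 whose PGETBL_CTL reports a 512 KB GTT and whose GMCH control reports 32 MB of stolen memory:

/*
 * size        = 512 KB GTT + 4 KB BIOS popup        = 516 KB reserved
 * gtt_entries = MB(32) - KB(516)                    = 33,026,048 bytes
 *               (logged as "detected 32252K stolen memory")
 * gtt_entries /= KB(4)                              -> 8063 usable GTT entries
 */
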
725
726static void intel_i830_fini_flush(void)
727{
728 kunmap(intel_private.i8xx_page);
729 intel_private.i8xx_flush_page = NULL;
730 unmap_page_from_agp(intel_private.i8xx_page);
731
732 __free_page(intel_private.i8xx_page);
733 intel_private.i8xx_page = NULL;
734}
735
736static void intel_i830_setup_flush(void)
737{
738 /* return if we've already set the flush mechanism up */
739 if (intel_private.i8xx_page)
740 return;
741
742 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
743 if (!intel_private.i8xx_page)
744 return;
745
746 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
747 if (!intel_private.i8xx_flush_page)
748 intel_i830_fini_flush();
749}
750
751/* The chipset_flush interface needs to get data that has already been
752 * flushed out of the CPU all the way out to main memory, because the GPU
753 * doesn't snoop those buffers.
754 *
755 * The 8xx series doesn't have the same lovely interface for flushing the
756 * chipset write buffers that the later chips do. According to the 865
757 * specs, it's 64 octwords, or 1KB. So, to force out anything previously
758 * sitting in that buffer, we just fill 1KB and clflush it, on the assumption
759 * that it'll push whatever was in there out. It appears to work.
760 */
761static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
762{
763 unsigned int *pg = intel_private.i8xx_flush_page;
764
765 memset(pg, 0, 1024);
766
767 if (cpu_has_clflush)
768 clflush_cache_range(pg, 1024);
769 else if (wbinvd_on_all_cpus() != 0)
770 printk(KERN_ERR "Timed out waiting for cache flush.\n");
771}
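
For reference, the 1KB figure quoted above follows directly from the write-buffer size, taking an octword as 16 bytes:

/* 64 octwords * 16 bytes/octword = 1024 bytes, hence the 1KB fill + clflush. */
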
772
773/* The intel i830 automatically initializes the agp aperture during POST.
774 * Use the memory the BIOS already set aside for the GTT.
775 */
776static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
777{
778 int page_order;
779 struct aper_size_info_fixed *size;
780 int num_entries;
781 u32 temp;
782
783 size = agp_bridge->current_size;
784 page_order = size->page_order;
785 num_entries = size->num_entries;
786 agp_bridge->gatt_table_real = NULL;
787
788 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
789 temp &= 0xfff80000;
790
791 intel_private.registers = ioremap(temp, 128 * 4096);
792 if (!intel_private.registers)
793 return -ENOMEM;
794
795 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
796 global_cache_flush(); /* FIXME: ?? */
797
798 /* we have to call this as early as possible after the MMIO base address is known */
799 intel_i830_init_gtt_entries();
800
801 agp_bridge->gatt_table = NULL;
802
803 agp_bridge->gatt_bus_addr = temp;
804
805 return 0;
806}
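
As a hedged aside (illustrative only, and assuming MMADDR at config offset 0x14 is BAR 1 on these parts), the same MMIO base that the function above digs out of config space by hand is also available through the PCI resource API:

/* Hypothetical helper: map the i830 GTT register window via the BAR. */
static int example_map_i830_regs(struct _intel_private *priv)
{
	resource_size_t base = pci_resource_start(priv->pcidev, 1);

	priv->registers = ioremap(base, 128 * 4096);
	return priv->registers ? 0 : -ENOMEM;
}
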
807
808/* Return the gatt table to a sane state. Use the top of stolen
809 * memory for the GTT.
810 */
811static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
812{
813 return 0;
814}
815
816static int intel_i830_fetch_size(void)
817{
818 u16 gmch_ctrl;
819 struct aper_size_info_fixed *values;
820
821 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
822
823 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
824 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
825 /* 855GM/852GM/865G has 128MB aperture size */
826 agp_bridge->current_size = (void *) values;
827 agp_bridge->aperture_size_idx = 0;
828 return values[0].size;
829 }
830
831 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
832
833 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
834 agp_bridge->current_size = (void *) values;
835 agp_bridge->aperture_size_idx = 0;
836 return values[0].size;
837 } else {
838 agp_bridge->current_size = (void *) (values + 1);
839 agp_bridge->aperture_size_idx = 1;
840 return values[1].size;
841 }
842
843 return 0;
844}
845
846static int intel_i830_configure(void)
847{
848 struct aper_size_info_fixed *current_size;
849 u32 temp;
850 u16 gmch_ctrl;
851 int i;
852
853 current_size = A_SIZE_FIX(agp_bridge->current_size);
854
855 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
856 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
857
858 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
859 gmch_ctrl |= I830_GMCH_ENABLED;
860 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
861
862 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
863 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
864
865 if (agp_bridge->driver->needs_scratch_page) {
866 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
867 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
868 }
869 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
870 }
871
872 global_cache_flush();
873
874 intel_i830_setup_flush();
875 return 0;
876}
877
878static void intel_i830_cleanup(void)
879{
880 iounmap(intel_private.registers);
881}
882
883static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
884 int type)
885{
886 int i, j, num_entries;
887 void *temp;
888 int ret = -EINVAL;
889 int mask_type;
890
891 if (mem->page_count == 0)
892 goto out;
893
894 temp = agp_bridge->current_size;
895 num_entries = A_SIZE_FIX(temp)->num_entries;
896
897 if (pg_start < intel_private.gtt_entries) {
898 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
899 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
900 pg_start, intel_private.gtt_entries);
901
902 dev_info(&intel_private.pcidev->dev,
903 "trying to insert into local/stolen memory\n");
904 goto out_err;
905 }
906
907 if ((pg_start + mem->page_count) > num_entries)
908 goto out_err;
909
910	/* The i830 can't check the GTT for entries since it's read-only,
911	 * so depend on the caller to make the correct offset decisions.
912 */
913
914 if (type != mem->type)
915 goto out_err;
916
917 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
918
919 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
920 mask_type != INTEL_AGP_CACHED_MEMORY)
921 goto out_err;
922
923 if (!mem->is_flushed)
924 global_cache_flush();
925
926 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
927 writel(agp_bridge->driver->mask_memory(agp_bridge,
928 page_to_phys(mem->pages[i]), mask_type),
929 intel_private.registers+I810_PTE_BASE+(j*4));
930 }
931 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
932
933out:
934 ret = 0;
935out_err:
936 mem->is_flushed = true;
937 return ret;
938}
939
940static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
941 int type)
942{
943 int i;
944
945 if (mem->page_count == 0)
946 return 0;
947
948 if (pg_start < intel_private.gtt_entries) {
949 dev_info(&intel_private.pcidev->dev,
950 "trying to disable local/stolen memory\n");
951 return -EINVAL;
952 }
953
954 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
955 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
956 }
957 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
958
959 return 0;
960}
961
962static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
963{
964 if (type == AGP_PHYS_MEMORY)
965 return alloc_agpphysmem_i8xx(pg_count, type);
966 /* always return NULL for other allocation types for now */
967 return NULL;
968}
969
970static int intel_alloc_chipset_flush_resource(void)
971{
972 int ret;
973 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
974 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
975 pcibios_align_resource, agp_bridge->dev);
976
977 return ret;
978}
979
980static void intel_i915_setup_chipset_flush(void)
981{
982 int ret;
983 u32 temp;
984
985 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
986 if (!(temp & 0x1)) {
987 intel_alloc_chipset_flush_resource();
988 intel_private.resource_valid = 1;
989 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
990 } else {
991 temp &= ~1;
992
993 intel_private.resource_valid = 1;
994 intel_private.ifp_resource.start = temp;
995 intel_private.ifp_resource.end = temp + PAGE_SIZE;
996 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
997		/* some BIOSes reserve this area as a PnP resource, some don't */
998 if (ret)
999 intel_private.resource_valid = 0;
1000 }
1001}
1002
1003static void intel_i965_g33_setup_chipset_flush(void)
1004{
1005 u32 temp_hi, temp_lo;
1006 int ret;
1007
1008 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1009 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1010
1011 if (!(temp_lo & 0x1)) {
1012
1013 intel_alloc_chipset_flush_resource();
1014
1015 intel_private.resource_valid = 1;
1016 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1017 upper_32_bits(intel_private.ifp_resource.start));
1018 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1019 } else {
1020 u64 l64;
1021
1022 temp_lo &= ~0x1;
1023 l64 = ((u64)temp_hi << 32) | temp_lo;
1024
1025 intel_private.resource_valid = 1;
1026 intel_private.ifp_resource.start = l64;
1027 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1028 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1029		/* some BIOSes reserve this area in a PnP resource, some don't */
1030 if (ret)
1031 intel_private.resource_valid = 0;
1032 }
1033}
1034
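/*
 * Pick the flush-page setup path for this chipset and ioremap the page;
 * nothing is set up on Sandybridge (the IS_SNB check returns early).
 */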
1035static void intel_i9xx_setup_flush(void)
1036{
1037 /* return if already configured */
1038 if (intel_private.ifp_resource.start)
1039 return;
1040
1041 if (IS_SNB)
1042 return;
1043
1044 /* setup a resource for this object */
1045 intel_private.ifp_resource.name = "Intel Flush Page";
1046 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1047
1048	/* Set up the chipset flush page for this generation */
1049 if (IS_I965 || IS_G33 || IS_G4X) {
1050 intel_i965_g33_setup_chipset_flush();
1051 } else {
1052 intel_i915_setup_chipset_flush();
1053 }
1054
1055 if (intel_private.ifp_resource.start) {
1056 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1057 if (!intel_private.i9xx_flush_page)
1058			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing\n");
1059 }
1060}
1061
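/*
 * Program the aperture base from GMADDR, enable the GMCH, point
 * PGETBL_CTL at the GATT, scrub every non-stolen GTT entry with the
 * scratch page, then set up the chipset flush page.
 */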
1062static int intel_i915_configure(void)
1063{
1064 struct aper_size_info_fixed *current_size;
1065 u32 temp;
1066 u16 gmch_ctrl;
1067 int i;
1068
1069 current_size = A_SIZE_FIX(agp_bridge->current_size);
1070
1071 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1072
1073 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1074
1075 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1076 gmch_ctrl |= I830_GMCH_ENABLED;
1077 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1078
1079 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1080 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1081
1082 if (agp_bridge->driver->needs_scratch_page) {
1083 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1084 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1085 }
1086 readl(intel_private.gtt+i-1); /* PCI Posting. */
1087 }
1088
1089 global_cache_flush();
1090
1091 intel_i9xx_setup_flush();
1092
1093 return 0;
1094}
1095
1096static void intel_i915_cleanup(void)
1097{
1098 if (intel_private.i9xx_flush_page)
1099 iounmap(intel_private.i9xx_flush_page);
1100 if (intel_private.resource_valid)
1101 release_resource(&intel_private.ifp_resource);
1102 intel_private.ifp_resource.start = 0;
1103 intel_private.resource_valid = 0;
1104 iounmap(intel_private.gtt);
1105 iounmap(intel_private.registers);
1106}
1107
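/*
 * Writing to the flush page makes the GMCH flush its internal write
 * buffers, so CPU writes to GTT-mapped pages become visible to the GPU.
 */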
1108static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
1109{
1110 if (intel_private.i9xx_flush_page)
1111 writel(1, intel_private.i9xx_flush_page);
1112}
1113
1114static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1115 int type)
1116{
1117 int num_entries;
1118 void *temp;
1119 int ret = -EINVAL;
1120 int mask_type;
1121
1122 if (mem->page_count == 0)
1123 goto out;
1124
1125 temp = agp_bridge->current_size;
1126 num_entries = A_SIZE_FIX(temp)->num_entries;
1127
1128 if (pg_start < intel_private.gtt_entries) {
1129 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1130 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1131 pg_start, intel_private.gtt_entries);
1132
1133 dev_info(&intel_private.pcidev->dev,
1134 "trying to insert into local/stolen memory\n");
1135 goto out_err;
1136 }
1137
1138 if ((pg_start + mem->page_count) > num_entries)
1139 goto out_err;
1140
1141 /* The i915 can't check the GTT for entries since it's read only;
1142 * depend on the caller to make the correct offset decisions.
1143 */
1144
1145 if (type != mem->type)
1146 goto out_err;
1147
1148 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1149
1150 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1151 mask_type != INTEL_AGP_CACHED_MEMORY)
1152 goto out_err;
1153
1154 if (!mem->is_flushed)
1155 global_cache_flush();
1156
1157 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1158
1159 out:
1160 ret = 0;
1161 out_err:
1162 mem->is_flushed = true;
1163 return ret;
1164}
1165
1166static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1167 int type)
1168{
1169 int i;
1170
1171 if (mem->page_count == 0)
1172 return 0;
1173
1174 if (pg_start < intel_private.gtt_entries) {
1175 dev_info(&intel_private.pcidev->dev,
1176 "trying to disable local/stolen memory\n");
1177 return -EINVAL;
1178 }
1179
1180 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1181 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1182
1183 readl(intel_private.gtt+i-1);
1184
1185 return 0;
1186}
1187
1188	/* Return the aperture size by just checking the resource length.  The only
1189	 * effect of the MSAC registers described in the spec is a change of the
1190	 * resource size.
1191	 */
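/*
 * Illustrative example: with a 256 MB aperture, BAR 2 of the IGD is
 * 256 MB long, so aper_size comes out as 256 and is then looked up in
 * the intel_i830_sizes[] table.
 */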
1192static int intel_i9xx_fetch_size(void)
1193{
1194 int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1195 int aper_size; /* size in megabytes */
1196 int i;
1197
1198 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1199
1200 for (i = 0; i < num_sizes; i++) {
1201 if (aper_size == intel_i830_sizes[i].size) {
1202 agp_bridge->current_size = intel_i830_sizes + i;
1203 return aper_size;
1204 }
1205 }
1206
1207 return 0;
1208}
1209
1210	/* The intel i915 automatically initializes the agp aperture during POST.
1211	 * Use the memory already set aside for the GTT.
1212	 */
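/*
 * I915_MMADDR gives the register MMIO base and I915_PTEADDR the GTT base;
 * 256 KB of GTT is mapped (1 MB on G33), i.e. gtt_map_size / 4 entries of
 * 4 bytes each.
 */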
1213static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1214{
1215 int page_order;
1216 struct aper_size_info_fixed *size;
1217 int num_entries;
1218 u32 temp, temp2;
1219 int gtt_map_size = 256 * 1024;
1220
1221 size = agp_bridge->current_size;
1222 page_order = size->page_order;
1223 num_entries = size->num_entries;
1224 agp_bridge->gatt_table_real = NULL;
1225
1226 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1227 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1228
1229 if (IS_G33)
1230 gtt_map_size = 1024 * 1024; /* 1M on G33 */
1231 intel_private.gtt = ioremap(temp2, gtt_map_size);
1232 if (!intel_private.gtt)
1233 return -ENOMEM;
1234
1235 intel_private.gtt_total_size = gtt_map_size / 4;
1236
1237 temp &= 0xfff80000;
1238
1239 intel_private.registers = ioremap(temp, 128 * 4096);
1240 if (!intel_private.registers) {
1241 iounmap(intel_private.gtt);
1242 return -ENOMEM;
1243 }
1244
1245 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1246 global_cache_flush(); /* FIXME: ? */
1247
1248 /* we have to call this as early as possible after the MMIO base address is known */
1249 intel_i830_init_gtt_entries();
1250
1251 agp_bridge->gatt_table = NULL;
1252
1253 agp_bridge->gatt_bus_addr = temp;
1254
1255 return 0;
1256}
1257
1258/*
1259 * The i965 supports 36-bit physical addresses, but to keep
1260 * the format of the GTT the same, the bits that don't fit
1261 * in a 32-bit word are shifted down to bits 4..7.
1262 *
1263 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1264 * is always zero on 32-bit architectures, so no need to make
1265 * this conditional.
1266 */
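/*
 * Worked example (illustrative address): page_to_phys() == 0x100000000
 * gives (addr >> 28) & 0xf0 == 0x10, i.e. PTE bit 4 carries physical
 * address bit 32; bits 4..7 hold address bits 32..35.
 */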
1267static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1268 dma_addr_t addr, int type)
1269{
1270 /* Shift high bits down */
1271 addr |= (addr >> 28) & 0xf0;
1272
1273 /* Type checking must be done elsewhere */
1274 return addr | bridge->driver->masks[type].mask;
1275}
1276
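/*
 * The GTT lives at a fixed offset above the MMIO base: G4x and Ironlake
 * map 2 MB of GTT at +2 MB, Sandybridge keeps the 2 MB offset but decodes
 * the GTT size from SNB_GMCH_CTRL, and everything else falls back to
 * 512 KB at +512 KB.
 */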
1277static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1278{
1279 u16 snb_gmch_ctl;
1280
1281 switch (agp_bridge->dev->device) {
1282 case PCI_DEVICE_ID_INTEL_GM45_HB:
1283 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1284 case PCI_DEVICE_ID_INTEL_Q45_HB:
1285 case PCI_DEVICE_ID_INTEL_G45_HB:
1286 case PCI_DEVICE_ID_INTEL_G41_HB:
1287 case PCI_DEVICE_ID_INTEL_B43_HB:
1288 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1289 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1290 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1291 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1292 *gtt_offset = *gtt_size = MB(2);
1293 break;
1294 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1295 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1296 *gtt_offset = MB(2);
1297
1298 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1299 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
1300 default:
1301 case SNB_GTT_SIZE_0M:
1302 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1303 *gtt_size = MB(0);
1304 break;
1305 case SNB_GTT_SIZE_1M:
1306 *gtt_size = MB(1);
1307 break;
1308 case SNB_GTT_SIZE_2M:
1309 *gtt_size = MB(2);
1310 break;
1311 }
1312 break;
1313 default:
1314 *gtt_offset = *gtt_size = KB(512);
1315 }
1316}
1317
1318	/* The intel i965 automatically initializes the agp aperture during POST.
1319	 * Use the memory already set aside for the GTT.
1320	 */
1321static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1322{
1323 int page_order;
1324 struct aper_size_info_fixed *size;
1325 int num_entries;
1326 u32 temp;
1327 int gtt_offset, gtt_size;
1328
1329 size = agp_bridge->current_size;
1330 page_order = size->page_order;
1331 num_entries = size->num_entries;
1332 agp_bridge->gatt_table_real = NULL;
1333
1334 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1335
1336 temp &= 0xfff00000;
1337
1338 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1339
1340	intel_private.gtt = ioremap((temp + gtt_offset), gtt_size);
1341
1342 if (!intel_private.gtt)
1343 return -ENOMEM;
1344
1345 intel_private.gtt_total_size = gtt_size / 4;
1346
1347 intel_private.registers = ioremap(temp, 128 * 4096);
1348 if (!intel_private.registers) {
1349 iounmap(intel_private.gtt);
1350 return -ENOMEM;
1351 }
1352
1353 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1354 global_cache_flush(); /* FIXME: ? */
1355
1356 /* we have to call this as early as possible after the MMIO base address is known */
1357 intel_i830_init_gtt_entries();
1358
1359 agp_bridge->gatt_table = NULL;
1360
1361 agp_bridge->gatt_bus_addr = temp;
1362
1363 return 0;
1364}
1365
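/*
 * Per-generation agp_bridge_driver tables.  Later chipsets reuse the
 * i810/i830 helpers wherever the programming model is unchanged and only
 * override what differs (GATT setup, PTE masking, chipset flush, etc.).
 */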
1366static const struct agp_bridge_driver intel_810_driver = {
1367 .owner = THIS_MODULE,
1368 .aperture_sizes = intel_i810_sizes,
1369 .size_type = FIXED_APER_SIZE,
1370 .num_aperture_sizes = 2,
1371 .needs_scratch_page = true,
1372 .configure = intel_i810_configure,
1373 .fetch_size = intel_i810_fetch_size,
1374 .cleanup = intel_i810_cleanup,
1375 .mask_memory = intel_i810_mask_memory,
1376 .masks = intel_i810_masks,
1377 .agp_enable = intel_i810_agp_enable,
1378 .cache_flush = global_cache_flush,
1379 .create_gatt_table = agp_generic_create_gatt_table,
1380 .free_gatt_table = agp_generic_free_gatt_table,
1381 .insert_memory = intel_i810_insert_entries,
1382 .remove_memory = intel_i810_remove_entries,
1383 .alloc_by_type = intel_i810_alloc_by_type,
1384 .free_by_type = intel_i810_free_by_type,
1385 .agp_alloc_page = agp_generic_alloc_page,
1386 .agp_alloc_pages = agp_generic_alloc_pages,
1387 .agp_destroy_page = agp_generic_destroy_page,
1388 .agp_destroy_pages = agp_generic_destroy_pages,
1389 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1390};
1391
1392static const struct agp_bridge_driver intel_830_driver = {
1393 .owner = THIS_MODULE,
1394 .aperture_sizes = intel_i830_sizes,
1395 .size_type = FIXED_APER_SIZE,
1396 .num_aperture_sizes = 4,
1397 .needs_scratch_page = true,
1398 .configure = intel_i830_configure,
1399 .fetch_size = intel_i830_fetch_size,
1400 .cleanup = intel_i830_cleanup,
1401 .mask_memory = intel_i810_mask_memory,
1402 .masks = intel_i810_masks,
1403 .agp_enable = intel_i810_agp_enable,
1404 .cache_flush = global_cache_flush,
1405 .create_gatt_table = intel_i830_create_gatt_table,
1406 .free_gatt_table = intel_i830_free_gatt_table,
1407 .insert_memory = intel_i830_insert_entries,
1408 .remove_memory = intel_i830_remove_entries,
1409 .alloc_by_type = intel_i830_alloc_by_type,
1410 .free_by_type = intel_i810_free_by_type,
1411 .agp_alloc_page = agp_generic_alloc_page,
1412 .agp_alloc_pages = agp_generic_alloc_pages,
1413 .agp_destroy_page = agp_generic_destroy_page,
1414 .agp_destroy_pages = agp_generic_destroy_pages,
1415 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1416 .chipset_flush = intel_i830_chipset_flush,
1417};
1418
1419static const struct agp_bridge_driver intel_915_driver = {
1420 .owner = THIS_MODULE,
1421 .aperture_sizes = intel_i830_sizes,
1422 .size_type = FIXED_APER_SIZE,
1423 .num_aperture_sizes = 4,
1424 .needs_scratch_page = true,
1425 .configure = intel_i915_configure,
1426 .fetch_size = intel_i9xx_fetch_size,
1427 .cleanup = intel_i915_cleanup,
1428 .mask_memory = intel_i810_mask_memory,
1429 .masks = intel_i810_masks,
1430 .agp_enable = intel_i810_agp_enable,
1431 .cache_flush = global_cache_flush,
1432 .create_gatt_table = intel_i915_create_gatt_table,
1433 .free_gatt_table = intel_i830_free_gatt_table,
1434 .insert_memory = intel_i915_insert_entries,
1435 .remove_memory = intel_i915_remove_entries,
1436 .alloc_by_type = intel_i830_alloc_by_type,
1437 .free_by_type = intel_i810_free_by_type,
1438 .agp_alloc_page = agp_generic_alloc_page,
1439 .agp_alloc_pages = agp_generic_alloc_pages,
1440 .agp_destroy_page = agp_generic_destroy_page,
1441 .agp_destroy_pages = agp_generic_destroy_pages,
1442 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1443 .chipset_flush = intel_i915_chipset_flush,
1444#ifdef USE_PCI_DMA_API
1445 .agp_map_page = intel_agp_map_page,
1446 .agp_unmap_page = intel_agp_unmap_page,
1447 .agp_map_memory = intel_agp_map_memory,
1448 .agp_unmap_memory = intel_agp_unmap_memory,
1449#endif
1450};
1451
1452static const struct agp_bridge_driver intel_i965_driver = {
1453 .owner = THIS_MODULE,
1454 .aperture_sizes = intel_i830_sizes,
1455 .size_type = FIXED_APER_SIZE,
1456 .num_aperture_sizes = 4,
1457 .needs_scratch_page = true,
1458 .configure = intel_i915_configure,
1459 .fetch_size = intel_i9xx_fetch_size,
1460 .cleanup = intel_i915_cleanup,
1461 .mask_memory = intel_i965_mask_memory,
1462 .masks = intel_i810_masks,
1463 .agp_enable = intel_i810_agp_enable,
1464 .cache_flush = global_cache_flush,
1465 .create_gatt_table = intel_i965_create_gatt_table,
1466 .free_gatt_table = intel_i830_free_gatt_table,
1467 .insert_memory = intel_i915_insert_entries,
1468 .remove_memory = intel_i915_remove_entries,
1469 .alloc_by_type = intel_i830_alloc_by_type,
1470 .free_by_type = intel_i810_free_by_type,
1471 .agp_alloc_page = agp_generic_alloc_page,
1472 .agp_alloc_pages = agp_generic_alloc_pages,
1473 .agp_destroy_page = agp_generic_destroy_page,
1474 .agp_destroy_pages = agp_generic_destroy_pages,
1475 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1476 .chipset_flush = intel_i915_chipset_flush,
1477#ifdef USE_PCI_DMA_API
1478 .agp_map_page = intel_agp_map_page,
1479 .agp_unmap_page = intel_agp_unmap_page,
1480 .agp_map_memory = intel_agp_map_memory,
1481 .agp_unmap_memory = intel_agp_unmap_memory,
1482#endif
1483};
1484
1485static const struct agp_bridge_driver intel_g33_driver = {
1486 .owner = THIS_MODULE,
1487 .aperture_sizes = intel_i830_sizes,
1488 .size_type = FIXED_APER_SIZE,
1489 .num_aperture_sizes = 4,
1490 .needs_scratch_page = true,
1491 .configure = intel_i915_configure,
1492 .fetch_size = intel_i9xx_fetch_size,
1493 .cleanup = intel_i915_cleanup,
1494 .mask_memory = intel_i965_mask_memory,
1495 .masks = intel_i810_masks,
1496 .agp_enable = intel_i810_agp_enable,
1497 .cache_flush = global_cache_flush,
1498 .create_gatt_table = intel_i915_create_gatt_table,
1499 .free_gatt_table = intel_i830_free_gatt_table,
1500 .insert_memory = intel_i915_insert_entries,
1501 .remove_memory = intel_i915_remove_entries,
1502 .alloc_by_type = intel_i830_alloc_by_type,
1503 .free_by_type = intel_i810_free_by_type,
1504 .agp_alloc_page = agp_generic_alloc_page,
1505 .agp_alloc_pages = agp_generic_alloc_pages,
1506 .agp_destroy_page = agp_generic_destroy_page,
1507 .agp_destroy_pages = agp_generic_destroy_pages,
1508 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1509 .chipset_flush = intel_i915_chipset_flush,
1510#ifdef USE_PCI_DMA_API
1511 .agp_map_page = intel_agp_map_page,
1512 .agp_unmap_page = intel_agp_unmap_page,
1513 .agp_map_memory = intel_agp_map_memory,
1514 .agp_unmap_memory = intel_agp_unmap_memory,
1515#endif
1516};