Diffstat (limited to 'drivers/char/agp')
-rw-r--r--  drivers/char/agp/Kconfig       |    4
-rw-r--r--  drivers/char/agp/Makefile      |    1
-rw-r--r--  drivers/char/agp/agp.h         |    5
-rw-r--r--  drivers/char/agp/amd-k7-agp.c  |    6
-rw-r--r--  drivers/char/agp/amd64-agp.c   |   39
-rw-r--r--  drivers/char/agp/backend.c     |   22
-rw-r--r--  drivers/char/agp/generic.c     |   12
-rw-r--r--  drivers/char/agp/i460-agp.c    |    2
-rw-r--r--  drivers/char/agp/intel-agp.c   |  201
-rw-r--r--  drivers/char/agp/intel-agp.h   |   43
-rw-r--r--  drivers/char/agp/intel-gtt.c   | 1612
11 files changed, 836 insertions(+), 1111 deletions(-)
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 4b66c69eaf57..fcd867d923ba 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -34,7 +34,7 @@ config AGP_ALI
	  X on the following ALi chipsets. The supported chipsets
	  include M1541, M1621, M1631, M1632, M1641,M1647,and M1651.
	  For the ALi-chipset question, ALi suggests you refer to
-	  <http://www.ali.com.tw/eng/support/index.shtml>.
+	  <http://www.ali.com.tw/>.
 
	  The M1541 chipset can do AGP 1x and 2x, but note that there is an
	  acknowledged incompatibility with Matrox G200 cards. Due to
@@ -57,7 +57,7 @@ config AGP_AMD
 
 config AGP_AMD64
 	tristate "AMD Opteron/Athlon64 on-CPU GART support"
-	depends on AGP && X86 && K8_NB
+	depends on AGP && X86 && AMD_NB
 	help
 	  This option gives you AGP support for the GLX component of
 	  X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 627f542827c7..8eb56e273e75 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
+obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 120490949997..5259065f3c79 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -121,11 +121,6 @@ struct agp_bridge_driver {
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 	void (*chipset_flush)(struct agp_bridge_data *);
-
-	int (*agp_map_page)(struct page *page, dma_addr_t *ret);
-	void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
-	int (*agp_map_memory)(struct agp_memory *mem);
-	void (*agp_unmap_memory)(struct agp_memory *mem);
 };
 
 struct agp_bridge_data {
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b6b1568314c8..b1b4362bc648 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
 	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
 		return -EINVAL;
 
 	if ((pg_start + mem->page_count) > num_entries)
@@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
 		return -EINVAL;
 
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 70312da4c968..42396df55556 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -15,7 +15,7 @@
 #include <linux/mmzone.h>
 #include <asm/page.h>		/* PAGE_SIZE */
 #include <asm/e820.h>
-#include <asm/k8.h>
+#include <asm/amd_nb.h>
 #include <asm/gart.h>
 #include "agp.h"
 
@@ -124,7 +124,7 @@ static int amd64_fetch_size(void)
 	u32 temp;
 	struct aper_size_info_32 *values;
 
-	dev = k8_northbridges[0];
+	dev = k8_northbridges.nb_misc[0];
 	if (dev==NULL)
 		return 0;
 
@@ -181,10 +181,14 @@ static int amd_8151_configure(void)
 	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
 	int i;
 
+	if (!k8_northbridges.gart_supported)
+		return 0;
+
 	/* Configure AGP regs in each x86-64 host bridge. */
-	for (i = 0; i < num_k8_northbridges; i++) {
+	for (i = 0; i < k8_northbridges.num; i++) {
 		agp_bridge->gart_bus_addr =
-			amd64_configure(k8_northbridges[i], gatt_bus);
+			amd64_configure(k8_northbridges.nb_misc[i],
+					gatt_bus);
 	}
 	k8_flush_garts();
 	return 0;
@@ -195,11 +199,15 @@ static void amd64_cleanup(void)
 {
 	u32 tmp;
 	int i;
-	for (i = 0; i < num_k8_northbridges; i++) {
-		struct pci_dev *dev = k8_northbridges[i];
+
+	if (!k8_northbridges.gart_supported)
+		return;
+
+	for (i = 0; i < k8_northbridges.num; i++) {
+		struct pci_dev *dev = k8_northbridges.nb_misc[i];
 		/* disable gart translation */
 		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
-		tmp &= ~AMD64_GARTEN;
+		tmp &= ~GARTEN;
 		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
 	}
 }
@@ -313,22 +321,25 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
 	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
 		return -1;
 
-	pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1);
+	gart_set_size_and_enable(nb, order);
 	pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
 
 	return 0;
 }
 
-static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
+static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
 {
 	int i;
 
 	if (cache_k8_northbridges() < 0)
 		return -ENODEV;
 
+	if (!k8_northbridges.gart_supported)
+		return -ENODEV;
+
 	i = 0;
-	for (i = 0; i < num_k8_northbridges; i++) {
-		struct pci_dev *dev = k8_northbridges[i];
+	for (i = 0; i < k8_northbridges.num; i++) {
+		struct pci_dev *dev = k8_northbridges.nb_misc[i];
 		if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
 			dev_err(&dev->dev, "no usable aperture found\n");
 #ifdef __x86_64__
@@ -405,7 +416,8 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
 	}
 
 	/* shadow x86-64 registers into ULi registers */
-	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
+	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+			       &httfea);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ((httfea & 0x7fff) >> (32 - 25)) {
@@ -472,7 +484,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
 	pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
 
 	/* shadow x86-64 registers into NVIDIA registers */
-	pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
+	pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE,
+			       &apbase);
 
 	/* if x86-64 aperture base is beyond 4G, exit here */
 	if ( (apbase & 0x7fff) >> (32 - 25) ) {
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index ee4f855611b6..f27d0d0816d3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	}
 
 	bridge->scratch_page_page = page;
-	if (bridge->driver->agp_map_page) {
-		if (bridge->driver->agp_map_page(page,
-						 &bridge->scratch_page_dma)) {
-			dev_err(&bridge->dev->dev,
-				"unable to dma-map scratch page\n");
-			rc = -ENOMEM;
-			goto err_out_nounmap;
-		}
-	} else {
-		bridge->scratch_page_dma = page_to_phys(page);
-	}
+	bridge->scratch_page_dma = page_to_phys(page);
 
 	bridge->scratch_page = bridge->driver->mask_memory(bridge,
 			bridge->scratch_page_dma, 0);
@@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	return 0;
 
 err_out:
-	if (bridge->driver->needs_scratch_page &&
-	    bridge->driver->agp_unmap_page) {
-		bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-					       bridge->scratch_page_dma);
-	}
-err_out_nounmap:
 	if (bridge->driver->needs_scratch_page) {
 		void *va = page_address(bridge->scratch_page_page);
 
@@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 	    bridge->driver->needs_scratch_page) {
 		void *va = page_address(bridge->scratch_page_page);
 
-		if (bridge->driver->agp_unmap_page)
-			bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-						       bridge->scratch_page_dma);
-
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 	}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index d2abf5143983..4956f1c8f9d5 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 		curr->is_flushed = true;
 	}
 
-	if (curr->bridge->driver->agp_map_memory) {
-		ret_val = curr->bridge->driver->agp_map_memory(curr);
-		if (ret_val)
-			return ret_val;
-	}
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
@@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
-	if (curr->bridge->driver->agp_unmap_memory)
-		curr->bridge->driver->agp_unmap_memory(curr);
-
 	curr->is_bound = false;
 	curr->pg_start = 0;
 	spin_lock(&curr->bridge->mapped_lock);
@@ -984,7 +976,9 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 
 	bridge->driver->cache_flush();
 #ifdef CONFIG_X86
-	set_memory_uc((unsigned long)table, 1 << page_order);
+	if (set_memory_uc((unsigned long)table, 1 << page_order))
+		printk(KERN_WARNING "Could not set GATT table memory to UC!");
+
 	bridge->gatt_table = (void *)table;
 #else
 	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index e763d3312ce7..75b763cb3ea1 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -1,7 +1,7 @@
 /*
  * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
  * the "Intel 460GTX Chipset Software Developer's Manual":
- *	http://developer.intel.com/design/itanium/downloads/24870401s.htm
+ *	http://www.intel.com/design/archives/itanium/downloads/248704.htm
  */
 /*
  * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index cd18493c9527..e72f49d52202 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,9 +12,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
-
-#include "intel-gtt.c"
 
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
 	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
-static int find_gmch(u16 device)
-{
-	struct pci_dev *gmch_device;
-
-	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
-	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
-		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
-					     device, gmch_device);
-	}
-
-	if (!gmch_device)
-		return 0;
-
-	intel_private.pcidev = gmch_device;
-	return 1;
-}
-
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
  * which one should be used if a gmch_chip_id is present.
  */
-static const struct intel_driver_description {
+static const struct intel_agp_driver_description {
 	unsigned int chip_id;
-	unsigned int gmch_chip_id;
 	char *name;
 	const struct agp_bridge_driver *driver;
-	const struct agp_bridge_driver *gmch_driver;
 } intel_agp_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
-		&intel_815_driver, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_830mp_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
-	    "GM45", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
-	    "Eaglelake", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
-	    "Q45/Q43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
-	    "G45/G43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
-	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
-	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
-	    "G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ 0, 0, NULL, NULL, NULL }
+	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+	{ 0, NULL, NULL }
 };
 
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
-				      struct agp_bridge_data *bridge)
-{
-	int i, mask;
-
-	bridge->driver = NULL;
-
-	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
-		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-			bridge->driver =
-				intel_agp_chipsets[i].gmch_driver;
-			break;
-		}
-	}
-
-	if (!bridge->driver)
-		return 0;
-
-	bridge->dev_private_data = &intel_private;
-	bridge->dev = pdev;
-
-	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
-	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
-		mask = 40;
-	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
-		mask = 36;
-	else
-		mask = 32;
-
-	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-		dev_err(&intel_private.pcidev->dev,
-			"set gfx device dma mask %d-bit failed!\n", mask);
-	else
-		pci_set_consistent_dma_mask(intel_private.pcidev,
-					    DMA_BIT_MASK(mask));
-
-	return 1;
-}
-
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		}
 	}
 
-	if (intel_agp_chipsets[i].name == NULL) {
+	if (!bridge->driver) {
 		if (cap_ptr)
 			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
 				 pdev->vendor, pdev->device);
@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
-	if (!bridge->driver) {
-		if (cap_ptr)
-			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-				 intel_agp_chipsets[i].gmch_chip_id);
-		agp_put_bridge(bridge);
-		return -ENODEV;
-	}
-
 	bridge->dev = pdev;
 	bridge->dev_private_data = NULL;
 
@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 
 	agp_remove_bridge(bridge);
 
-	if (intel_private.pcidev)
-		pci_dev_put(intel_private.pcidev);
+	intel_gmch_remove(pdev);
 
 	agp_put_bridge(bridge);
 }
@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index d09b1ab7e8ab..90539df02504 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -215,44 +215,7 @@
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB	0x0108	/* Server */
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG	0x010A
 
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
-		IS_SNB)
-
+int intel_gmch_probe(struct pci_dev *pdev,
+		     struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
 #endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 75e0a3497888..6b6760ea2435 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -15,6 +15,18 @@
  * /fairy-tale-mode off
  */
 
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <linux/intel-gtt.h>
+#include <drm/intel-gtt.h>
+
 /*
  * If we have Intel graphics, we're not going to have anything other than
  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -23,11 +35,12 @@
  */
 #ifdef CONFIG_DMAR
 #define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
 #endif
 
 /* Max amount of stolen space, anything above will be returned to Linux */
 int intel_max_stolen = 32 * 1024 * 1024;
-EXPORT_SYMBOL(intel_max_stolen);
 
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC     3
 #define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
 
-static struct gatt_mask intel_gen6_masks[] =
-{
-	{.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
-	 .type = INTEL_AGP_UNCACHED_MEMORY },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
-	{.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
-	 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+struct intel_gtt_driver {
+	unsigned int gen : 8;
+	unsigned int is_g33 : 1;
+	unsigned int is_pineview : 1;
+	unsigned int is_ironlake : 1;
+	unsigned int dma_mask_size : 8;
+	/* Chipset specific GTT setup */
+	int (*setup)(void);
+	/* This should undo anything done in ->setup() save the unmapping
+	 * of the mmio register file, that's done in the generic code. */
+	void (*cleanup)(void);
+	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+	/* Flags is a more or less chipset specific opaque value.
+	 * For chipsets that need to support old ums (non-gem) code, this
+	 * needs to be identical to the various supported agp memory types! */
+	bool (*check_flags)(unsigned int flags);
+	void (*chipset_flush)(void);
 };
 
 static struct _intel_private {
+	struct intel_gtt base;
+	const struct intel_gtt_driver *driver;
 	struct pci_dev *pcidev;	/* device one */
+	struct pci_dev *bridge_dev;
 	u8 __iomem *registers;
+	phys_addr_t gtt_bus_addr;
+	phys_addr_t gma_bus_addr;
+	phys_addr_t pte_bus_addr;
 	u32 __iomem *gtt;		/* I915G */
 	int num_dcache_entries;
-	/* gtt_entries is the number of gtt entries that are already mapped
-	 * to stolen memory. Stolen memory is larger than the memory mapped
-	 * through gtt_entries, as it includes some reserved space for the BIOS
-	 * popup and for the GTT.
-	 */
-	int gtt_entries;			/* i830+ */
-	int gtt_total_size;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
@@ -88,23 +105,14 @@ static struct _intel_private {
 	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
+	struct page *scratch_page;
+	dma_addr_t scratch_page_dma;
 } intel_private;
 
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
-	*ret = pci_map_page(intel_private.pcidev, page, 0,
-			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
-		return -EINVAL;
-	return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
-	pci_unmap_page(intel_private.pcidev, dma,
-		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
+#define INTEL_GTT_GEN	intel_private.driver->gen
+#define IS_G33		intel_private.driver->is_g33
+#define IS_PINEVIEW	intel_private.driver->is_pineview
+#define IS_IRONLAKE	intel_private.driver->is_ironlake
 
 static void intel_agp_free_sglist(struct agp_memory *mem)
 {
@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
 	struct scatterlist *sg;
 	int i;
 
+	if (mem->sg_list)
+		return 0; /* already mapped (for e.g. resume */
+
 	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
 
 	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
 	intel_agp_free_sglist(mem);
 }
 
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-					off_t pg_start, int mask_type)
-{
-	struct scatterlist *sg;
-	int i, j;
-
-	j = pg_start;
-
-	WARN_ON(!mem->num_sg);
-
-	if (mem->num_sg == mem->page_count) {
-		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					sg_dma_address(sg), mask_type),
-					intel_private.gtt+j);
-			j++;
-		}
-	} else {
-		/* sg may merge pages, but we have to separate
-		 * per-page addr for GTT */
-		unsigned int len, m;
-
-		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
-			len = sg_dma_len(sg) / PAGE_SIZE;
-			for (m = 0; m < len; m++) {
-				writel(agp_bridge->driver->mask_memory(agp_bridge,
-					sg_dma_address(sg) + m * PAGE_SIZE,
-					mask_type),
-				       intel_private.gtt+j);
-				j++;
-			}
-		}
-	}
-	readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
-					off_t pg_start, int mask_type)
-{
-	int i, j;
-
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				page_to_phys(mem->pages[i]), mask_type),
-		       intel_private.gtt+j);
-	}
-
-	readl(intel_private.gtt+j-1);
-}
-
-#endif
-
 static int intel_i810_fetch_size(void)
 {
 	u32 smram_miscc;
 	struct aper_size_info_fixed *values;
 
-	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+	pci_read_config_dword(intel_private.bridge_dev,
+			      I810_SMRAM_MISCC, &smram_miscc);
 	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
 
 	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
 		return 0;
 	}
 	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
 	iounmap(intel_private.registers);
 }
 
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
 	return;
 }
@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
 
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
-					int type)
-{
-	if (type < AGP_USER_TYPES)
-		return type;
-	else if (type == AGP_USER_CACHED_MEMORY)
-		return INTEL_AGP_CACHED_MEMORY;
-	else
-		return 0;
-}
-
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
-					int type)
-{
-	unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
-		return INTEL_AGP_UNCACHED_MEMORY;
-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
-		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
-			      INTEL_AGP_CACHED_MEMORY_LLC_MLC;
-	else /* set 'normal'/'cached' to LLC by default */
-		return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
-			      INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 				     int type)
 {
@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
 	return addr | bridge->driver->masks[type].mask;
 }
 
-static struct aper_size_info_fixed intel_i830_sizes[] =
+static int intel_gtt_setup_scratch_page(void)
 {
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+	get_page(page);
+	set_pages_uc(page, 1);
+
+	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+			return -EINVAL;
+
+		intel_private.scratch_page_dma = dma_addr;
+	} else
+		intel_private.scratch_page_dma = page_to_phys(page);
+
+	intel_private.scratch_page = page;
+
+	return 0;
+}
+
+static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
 	{128, 32768, 5},
 	/* The 64M mode still requires a 128k gatt */
 	{64, 16384, 5},
@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
 	{512, 131072, 7},
 };
 
-static void intel_i830_init_gtt_entries(void)
+static unsigned int intel_gtt_stolen_entries(void)
 {
 	u16 gmch_ctrl;
-	int gtt_entries = 0;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
-	int size; /* reserved space (in kb) at the top of stolen memory */
+	unsigned int overhead_entries, stolen_entries;
+	unsigned int stolen_size = 0;
 
-	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctrl);
 
-	if (IS_I965) {
-		u32 pgetbl_ctl;
-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
+		overhead_entries = 0;
+	else
+		overhead_entries = intel_private.base.gtt_mappable_entries
+			/ 1024;
 
-		/* The 965 has a field telling us the size of the GTT,
-		 * which may be larger than what is necessary to map the
-		 * aperture.
-		 */
-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-		case I965_PGETBL_SIZE_128KB:
-			size = 128;
-			break;
-		case I965_PGETBL_SIZE_256KB:
-			size = 256;
-			break;
-		case I965_PGETBL_SIZE_512KB:
-			size = 512;
-			break;
-		case I965_PGETBL_SIZE_1MB:
-			size = 1024;
-			break;
-		case I965_PGETBL_SIZE_2MB:
-			size = 2048;
-			break;
-		case I965_PGETBL_SIZE_1_5MB:
-			size = 1024 + 512;
-			break;
-		default:
-			dev_info(&intel_private.pcidev->dev,
-				 "unknown page table size, assuming 512KB\n");
-			size = 512;
-		}
-		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33 && !IS_PINEVIEW) {
-	/* G33's GTT size defined in gmch_ctrl */
-		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
-		case G33_PGETBL_SIZE_1M:
-			size = 1024;
-			break;
-		case G33_PGETBL_SIZE_2M:
-			size = 2048;
-			break;
-		default:
-			dev_info(&agp_bridge->dev->dev,
-				 "unknown page table size 0x%x, assuming 512KB\n",
-				(gmch_ctrl & G33_PGETBL_SIZE_MASK));
-			size = 512;
-		}
-		size += 4;
-	} else if (IS_G4X || IS_PINEVIEW) {
-		/* On 4 series hardware, GTT stolen is separate from graphics
-		 * stolen, ignore it in stolen gtt entries counting. However,
-		 * 4KB of the stolen memory doesn't get mapped to the GTT.
-		 */
-		size = 4;
-	} else {
-		/* On previous hardware, the GTT size was just what was
-		 * required to map the aperture.
-		 */
-		size = agp_bridge->driver->fetch_size() + 4;
-	}
+	overhead_entries += 1; /* BIOS popup */
 
-	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
-	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
 		case I830_GMCH_GMS_STOLEN_512:
-			gtt_entries = KB(512) - KB(size);
+			stolen_size = KB(512);
 			break;
 		case I830_GMCH_GMS_STOLEN_1024:
-			gtt_entries = MB(1) - KB(size);
+			stolen_size = MB(1);
 			break;
 		case I830_GMCH_GMS_STOLEN_8192:
-			gtt_entries = MB(8) - KB(size);
+			stolen_size = MB(8);
 			break;
 		case I830_GMCH_GMS_LOCAL:
 			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
-			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
 				MB(ddt[I830_RDRAM_DDT(rdct)]);
 			local = 1;
 			break;
 		default:
-			gtt_entries = 0;
+			stolen_size = 0;
 			break;
 		}
-	} else if (IS_SNB) {
+	} else if (INTEL_GTT_GEN == 6) {
 		/*
 		 * SandyBridge has new memory control reg at 0x50.w
 		 */
@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
 		case SNB_GMCH_GMS_STOLEN_32M:
-			gtt_entries = MB(32) - KB(size);
+			stolen_size = MB(32);
 			break;
 		case SNB_GMCH_GMS_STOLEN_64M:
-			gtt_entries = MB(64) - KB(size);
+			stolen_size = MB(64);
 			break;
 		case SNB_GMCH_GMS_STOLEN_96M:
-			gtt_entries = MB(96) - KB(size);
+			stolen_size = MB(96);
 			break;
 		case SNB_GMCH_GMS_STOLEN_128M:
-			gtt_entries = MB(128) - KB(size);
+			stolen_size = MB(128);
 			break;
 		case SNB_GMCH_GMS_STOLEN_160M:
-			gtt_entries = MB(160) - KB(size);
+			stolen_size = MB(160);
 			break;
 		case SNB_GMCH_GMS_STOLEN_192M:
-			gtt_entries = MB(192) - KB(size);
+			stolen_size = MB(192);
 			break;
 		case SNB_GMCH_GMS_STOLEN_224M:
-			gtt_entries = MB(224) - KB(size);
+			stolen_size = MB(224);
 			break;
 		case SNB_GMCH_GMS_STOLEN_256M:
-			gtt_entries = MB(256) - KB(size);
+			stolen_size = MB(256);
 			break;
 		case SNB_GMCH_GMS_STOLEN_288M:
-			gtt_entries = MB(288) - KB(size);
+			stolen_size = MB(288);
 			break;
 		case SNB_GMCH_GMS_STOLEN_320M:
-			gtt_entries = MB(320) - KB(size);
+			stolen_size = MB(320);
 			break;
 		case SNB_GMCH_GMS_STOLEN_352M:
-			gtt_entries = MB(352) - KB(size);
+			stolen_size = MB(352);
 			break;
 		case SNB_GMCH_GMS_STOLEN_384M:
-			gtt_entries = MB(384) - KB(size);
+			stolen_size = MB(384);
 			break;
 		case SNB_GMCH_GMS_STOLEN_416M:
-			gtt_entries = MB(416) - KB(size);
+			stolen_size = MB(416);
 			break;
 		case SNB_GMCH_GMS_STOLEN_448M:
-			gtt_entries = MB(448) - KB(size);
+			stolen_size = MB(448);
 			break;
 		case SNB_GMCH_GMS_STOLEN_480M:
-			gtt_entries = MB(480) - KB(size);
+			stolen_size = MB(480);
 			break;
 		case SNB_GMCH_GMS_STOLEN_512M:
-			gtt_entries = MB(512) - KB(size);
+			stolen_size = MB(512);
 			break;
 		}
 	} else {
 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
 		case I855_GMCH_GMS_STOLEN_1M:
-			gtt_entries = MB(1) - KB(size);
+			stolen_size = MB(1);
 			break;
 		case I855_GMCH_GMS_STOLEN_4M:
-			gtt_entries = MB(4) - KB(size);
+			stolen_size = MB(4);
 			break;
 		case I855_GMCH_GMS_STOLEN_8M:
-			gtt_entries = MB(8) - KB(size);
+			stolen_size = MB(8);
 			break;
 		case I855_GMCH_GMS_STOLEN_16M:
-			gtt_entries = MB(16) - KB(size);
+			stolen_size = MB(16);
 			break;
 		case I855_GMCH_GMS_STOLEN_32M:
-			gtt_entries = MB(32) - KB(size);
+			stolen_size = MB(32);
 			break;
 		case I915_GMCH_GMS_STOLEN_48M:
-			/* Check it's really I915G */
-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-				gtt_entries = MB(48) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(48);
 			break;
 		case I915_GMCH_GMS_STOLEN_64M:
-			/* Check it's really I915G */
-			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
-				gtt_entries = MB(64) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(64);
 			break;
 		case G33_GMCH_GMS_STOLEN_128M:
-			if (IS_G33 || IS_I965 || IS_G4X)
-				gtt_entries = MB(128) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(128);
 			break;
 		case G33_GMCH_GMS_STOLEN_256M:
-			if (IS_G33 || IS_I965 || IS_G4X)
-				gtt_entries = MB(256) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(256);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_96M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(96) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(96);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_160M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(160) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(160);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_224M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(224) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(224);
 			break;
 		case INTEL_GMCH_GMS_STOLEN_352M:
-			if (IS_I965 || IS_G4X)
-				gtt_entries = MB(352) - KB(size);
-			else
-				gtt_entries = 0;
+			stolen_size = MB(352);
 			break;
 		default:
-			gtt_entries = 0;
+			stolen_size = 0;
 			break;
 		}
 	}
-	if (!local && gtt_entries > intel_max_stolen) {
-		dev_info(&agp_bridge->dev->dev,
+
+	if (!local && stolen_size > intel_max_stolen) {
+		dev_info(&intel_private.bridge_dev->dev,
 			 "detected %dK stolen memory, trimming to %dK\n",
-			 gtt_entries / KB(1), intel_max_stolen / KB(1));
-		gtt_entries = intel_max_stolen / KB(4);
-	} else if (gtt_entries > 0) {
-		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
-			 gtt_entries / KB(1), local ? "local" : "stolen");
-		gtt_entries /= KB(4);
+			 stolen_size / KB(1), intel_max_stolen / KB(1));
+		stolen_size = intel_max_stolen;
+	} else if (stolen_size > 0) {
+		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+			 stolen_size / KB(1), local ? "local" : "stolen");
 	} else {
-		dev_info(&agp_bridge->dev->dev,
+		dev_info(&intel_private.bridge_dev->dev,
 			 "no pre-allocated video memory detected\n");
-		gtt_entries = 0;
+		stolen_size = 0;
+	}
+
+	stolen_entries = stolen_size/KB(4) - overhead_entries;
+
+	return stolen_entries;
+}
+
+static unsigned int intel_gtt_total_entries(void)
+{
+	int size;
+
+	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
+		u32 pgetbl_ctl;
+		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+		case I965_PGETBL_SIZE_128KB:
+			size = KB(128);
+			break;
+		case I965_PGETBL_SIZE_256KB:
+			size = KB(256);
+			break;
+		case I965_PGETBL_SIZE_512KB:
+			size = KB(512);
+			break;
+		case I965_PGETBL_SIZE_1MB:
+			size = KB(1024);
+			break;
+		case I965_PGETBL_SIZE_2MB:
+			size = KB(2048);
+			break;
+		case I965_PGETBL_SIZE_1_5MB:
+			size = KB(1024 + 512);
+			break;
+		default:
+			dev_info(&intel_private.pcidev->dev,
+				 "unknown page table size, assuming 512KB\n");
+			size = KB(512);
+		}
+
+		return size/4;
+	} else if (INTEL_GTT_GEN == 6) {
+		u16 snb_gmch_ctl;
+
+		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+		default:
+		case SNB_GTT_SIZE_0M:
+			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+			size = MB(0);
+			break;
+		case SNB_GTT_SIZE_1M:
+			size = MB(1);
+			break;
+		case SNB_GTT_SIZE_2M:
+			size = MB(2);
+			break;
+		}
+		return size/4;
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		return intel_private.base.gtt_mappable_entries;
+	}
+}
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+	unsigned int aperture_size;
+
+	if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+
+		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+			aperture_size = MB(64);
+		else
+			aperture_size = MB(128);
+	} else {
+		/* 9xx supports large sizes, just look at the length */
+		aperture_size = pci_resource_len(intel_private.pcidev, 2);
 	}
 
-	intel_private.gtt_entries = gtt_entries;
+	return aperture_size >> PAGE_SHIFT;
 }
 
-static void intel_i830_fini_flush(void)
+static void intel_gtt_teardown_scratch_page(void)
+{
+	set_pages_wb(intel_private.scratch_page, 1);
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	put_page(intel_private.scratch_page);
+	__free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+	intel_private.driver->cleanup();
+
+	iounmap(intel_private.gtt);
+	iounmap(intel_private.registers);
+
+	intel_gtt_teardown_scratch_page();
+}
+
+static int intel_gtt_init(void)
+{
+	u32 gtt_map_size;
+	int ret;
+
+	ret = intel_private.driver->setup();
+	if (ret != 0)
+		return ret;
+
+	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+
+	dev_info(&intel_private.bridge_dev->dev,
+			"detected gtt size: %dK total, %dK mappable\n",
+			intel_private.base.gtt_total_entries * 4,
+			intel_private.base.gtt_mappable_entries * 4);
+
+	gtt_map_size = intel_private.base.gtt_total_entries * 4;
+
+	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+				    gtt_map_size);
+	if (!intel_private.gtt) {
+		intel_private.driver->cleanup();
+		iounmap(intel_private.registers);
+		return -ENOMEM;
+	}
+
+	global_cache_flush();   /* FIXME: ? */
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
+	if (intel_private.base.gtt_stolen_entries == 0) {
+		intel_private.driver->cleanup();
+		iounmap(intel_private.registers);
+		iounmap(intel_private.gtt);
+		return -ENOMEM;
+	}
+
+	ret = intel_gtt_setup_scratch_page();
+	if (ret != 0) {
+		intel_gtt_cleanup();
+		return ret;
+	}
+
+	return 0;
+}
+
+static int intel_fake_agp_fetch_size(void)
+{
+	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+	unsigned int aper_size;
+	int i;
+
+	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
+		    / MB(1);
+
+	for (i = 0; i < num_sizes; i++) {
+		if (aper_size == intel_fake_agp_sizes[i].size) {
+			agp_bridge->current_size =
+				(void *) (intel_fake_agp_sizes + i);
+			return aper_size;
+		}
+	}
+
+	return 0;
+}
+
+static void i830_cleanup(void)
 {
 	kunmap(intel_private.i8xx_page);
 	intel_private.i8xx_flush_page = NULL;
-	unmap_page_from_agp(intel_private.i8xx_page);
 
 	__free_page(intel_private.i8xx_page);
 	intel_private.i8xx_page = NULL;
@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
780 if (intel_private.i8xx_page) 825 if (intel_private.i8xx_page)
781 return; 826 return;
782 827
783 intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); 828 intel_private.i8xx_page = alloc_page(GFP_KERNEL);
784 if (!intel_private.i8xx_page) 829 if (!intel_private.i8xx_page)
785 return; 830 return;
786 831
787 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); 832 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
788 if (!intel_private.i8xx_flush_page) 833 if (!intel_private.i8xx_flush_page)
789 intel_i830_fini_flush(); 834 i830_cleanup();
790} 835}
791 836
792/* The chipset_flush interface needs to get data that has already been 837/* The chipset_flush interface needs to get data that has already been
@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
799 * that buffer out, we just fill 1KB and clflush it out, on the assumption 844 * that buffer out, we just fill 1KB and clflush it out, on the assumption
800 * that it'll push whatever was in there out. It appears to work. 845 * that it'll push whatever was in there out. It appears to work.
801 */ 846 */
802static void intel_i830_chipset_flush(struct agp_bridge_data *bridge) 847static void i830_chipset_flush(void)
803{ 848{
804 unsigned int *pg = intel_private.i8xx_flush_page; 849 unsigned int *pg = intel_private.i8xx_flush_page;
805 850
@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
811 printk(KERN_ERR "Timed out waiting for cache flush.\n"); 856 printk(KERN_ERR "Timed out waiting for cache flush.\n");
812} 857}
813 858
814/* The intel i830 automatically initializes the agp aperture during POST. 859static void i830_write_entry(dma_addr_t addr, unsigned int entry,
815  * Use the memory already set aside for the GTT.				 860				     unsigned int flags)
816 */
817static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
818{ 861{
819 int page_order; 862 u32 pte_flags = I810_PTE_VALID;
820 struct aper_size_info_fixed *size; 863
821 int num_entries; 864 switch (flags) {
822 u32 temp; 865 case AGP_DCACHE_MEMORY:
866 pte_flags |= I810_PTE_LOCAL;
867 break;
868 case AGP_USER_CACHED_MEMORY:
869 pte_flags |= I830_PTE_SYSTEM_CACHED;
870 break;
871 }
823 872
824 size = agp_bridge->current_size; 873 writel(addr | pte_flags, intel_private.gtt + entry);
825 page_order = size->page_order; 874}
826 num_entries = size->num_entries;
827 agp_bridge->gatt_table_real = NULL;
828 875
829 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); 876static void intel_enable_gtt(void)
830 temp &= 0xfff80000; 877{
878 u32 gma_addr;
879 u16 gmch_ctrl;
831 880
832 intel_private.registers = ioremap(temp, 128 * 4096); 881 if (INTEL_GTT_GEN == 2)
833 if (!intel_private.registers) 882 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
834 return -ENOMEM; 883 &gma_addr);
884 else
885 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
886 &gma_addr);
835 887
836 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; 888 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
837 global_cache_flush(); /* FIXME: ?? */
838 889
839 /* we have to call this as early as possible after the MMIO base address is known */ 890 pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
840 intel_i830_init_gtt_entries(); 891 gmch_ctrl |= I830_GMCH_ENABLED;
841 if (intel_private.gtt_entries == 0) { 892 pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
842 iounmap(intel_private.registers); 893
894 writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
895 intel_private.registers+I810_PGETBL_CTL);
896 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
897}
898
899static int i830_setup(void)
900{
901 u32 reg_addr;
902
903 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
904 reg_addr &= 0xfff80000;
905
906 intel_private.registers = ioremap(reg_addr, KB(64));
907 if (!intel_private.registers)
843 return -ENOMEM; 908 return -ENOMEM;
844 }
845 909
846 agp_bridge->gatt_table = NULL; 910 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
911 intel_private.pte_bus_addr =
912 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
847 913
848 agp_bridge->gatt_bus_addr = temp; 914 intel_i830_setup_flush();
849 915
850 return 0; 916 return 0;
851} 917}
852 918
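i830_write_entry above builds a PTE by OR-ing the page-aligned physical address with a valid bit plus optional cacheability bits. The standalone sketch below mirrors that encoding; the numeric flag values are placeholders standing in for the I810_*/I830_* macros defined in intel-agp.h:

/* Standalone illustration of the i830-style PTE layout: page-aligned
 * physical address | valid bit | optional cache bits. Flag values are
 * placeholders for the real intel-agp.h macros. */
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID          0x1u   /* stands in for I810_PTE_VALID */
#define PTE_LOCAL          0x2u   /* stands in for I810_PTE_LOCAL */
#define PTE_SYSTEM_CACHED  0x6u   /* stands in for I830_PTE_SYSTEM_CACHED */

enum mem_flags { MEM_NORMAL, MEM_DCACHE, MEM_CACHED };

static uint32_t encode_i830_pte(uint32_t phys_addr, enum mem_flags flags)
{
	uint32_t pte = PTE_VALID;

	if (flags == MEM_DCACHE)
		pte |= PTE_LOCAL;
	else if (flags == MEM_CACHED)
		pte |= PTE_SYSTEM_CACHED;

	return phys_addr | pte;	/* phys_addr must be 4 KiB aligned */
}

int main(void)
{
	printf("0x%08x\n", (unsigned int)encode_i830_pte(0x12345000u, MEM_CACHED));
	return 0;
}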
853/* Return the gatt table to a sane state. Use the top of stolen 919static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
854 * memory for the GTT.
855 */
856static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
857{ 920{
921 agp_bridge->gatt_table_real = NULL;
922 agp_bridge->gatt_table = NULL;
923 agp_bridge->gatt_bus_addr = 0;
924
858 return 0; 925 return 0;
859} 926}
860 927
861static int intel_i830_fetch_size(void) 928static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
862{ 929{
863 u16 gmch_ctrl;
864 struct aper_size_info_fixed *values;
865
866 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
867
868 if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
869 agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
870 /* 855GM/852GM/865G has 128MB aperture size */
871 agp_bridge->current_size = (void *) values;
872 agp_bridge->aperture_size_idx = 0;
873 return values[0].size;
874 }
875
876 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
877
878 if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
879 agp_bridge->current_size = (void *) values;
880 agp_bridge->aperture_size_idx = 0;
881 return values[0].size;
882 } else {
883 agp_bridge->current_size = (void *) (values + 1);
884 agp_bridge->aperture_size_idx = 1;
885 return values[1].size;
886 }
887
888 return 0; 930 return 0;
889} 931}
890 932
891static int intel_i830_configure(void) 933static int intel_fake_agp_configure(void)
892{ 934{
893 struct aper_size_info_fixed *current_size;
894 u32 temp;
895 u16 gmch_ctrl;
896 int i; 935 int i;
897 936
898 current_size = A_SIZE_FIX(agp_bridge->current_size); 937 intel_enable_gtt();
899
900 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
901 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
902
903 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
904 gmch_ctrl |= I830_GMCH_ENABLED;
905 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
906 938
907 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); 939 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
908 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
909 940
910 if (agp_bridge->driver->needs_scratch_page) { 941 for (i = intel_private.base.gtt_stolen_entries;
911 for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { 942 i < intel_private.base.gtt_total_entries; i++) {
912 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 943 intel_private.driver->write_entry(intel_private.scratch_page_dma,
913 } 944 i, 0);
914 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
915 } 945 }
946 readl(intel_private.gtt+i-1); /* PCI Posting. */
916 947
917 global_cache_flush(); 948 global_cache_flush();
918 949
919 intel_i830_setup_flush();
920 return 0; 950 return 0;
921} 951}
922 952
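intel_fake_agp_configure points every GTT slot that is not backed by stolen memory at the scratch page and then reads the last entry back to flush the posted writes. A userspace mock of that loop, with invented sizes and a symbolic read-back in place of the readl(), looks like this:

/* Userspace mock of the configure loop: entries not covered by stolen
 * memory are pointed at a scratch PTE. Sizes and values are invented. */
#include <stdint.h>
#include <stdio.h>

#define TOTAL_ENTRIES   128
#define STOLEN_ENTRIES  32
#define SCRATCH_PTE     0xdeadb001u	/* scratch page address | valid bit */

static uint32_t mock_gtt[TOTAL_ENTRIES];

int main(void)
{
	unsigned int i;

	for (i = STOLEN_ENTRIES; i < TOTAL_ENTRIES; i++)
		mock_gtt[i] = SCRATCH_PTE;

	/* the driver follows the loop with a readl() of the last slot to
	 * flush posted PCI writes; reading it here is only symbolic */
	printf("last entry = 0x%08x\n", mock_gtt[TOTAL_ENTRIES - 1]);
	return 0;
}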
923static void intel_i830_cleanup(void) 953static bool i830_check_flags(unsigned int flags)
924{ 954{
925 iounmap(intel_private.registers); 955 switch (flags) {
956 case 0:
957 case AGP_PHYS_MEMORY:
958 case AGP_USER_CACHED_MEMORY:
959 case AGP_USER_MEMORY:
960 return true;
961 }
962
963 return false;
926} 964}
927 965
928static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start, 966static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
929 int type) 967 unsigned int sg_len,
968 unsigned int pg_start,
969 unsigned int flags)
930{ 970{
931 int i, j, num_entries; 971 struct scatterlist *sg;
932 void *temp; 972 unsigned int len, m;
973 int i, j;
974
975 j = pg_start;
976
977 /* sg may merge pages, but we have to separate
978 * per-page addr for GTT */
979 for_each_sg(sg_list, sg, sg_len, i) {
980 len = sg_dma_len(sg) >> PAGE_SHIFT;
981 for (m = 0; m < len; m++) {
982 dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
983 intel_private.driver->write_entry(addr,
984 j, flags);
985 j++;
986 }
987 }
988 readl(intel_private.gtt+j-1);
989}
990
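As the comment in intel_gtt_insert_sg_entries notes, one scatterlist segment may cover several contiguous pages, so the loop emits one GTT entry per 4 KiB page inside each segment. A standalone model of that per-page splitting, with invented segment addresses and lengths, is:

/* Standalone model of per-page splitting of merged scatterlist segments.
 * The segment values and starting slot are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

struct fake_sg { uint64_t dma_addr; unsigned int len; };

int main(void)
{
	struct fake_sg sgl[] = {
		{ 0x10000000, 3 * PAGE_SIZE },	/* merged 3-page segment */
		{ 0x20000000, 1 * PAGE_SIZE },
	};
	unsigned int pg = 16;	/* first GTT slot to fill (pg_start) */

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int pages = sgl[i].len >> PAGE_SHIFT;

		for (unsigned int m = 0; m < pages; m++) {
			uint64_t addr = sgl[i].dma_addr +
					((uint64_t)m << PAGE_SHIFT);

			printf("slot %u <- 0x%llx\n", pg++,
			       (unsigned long long)addr);
		}
	}
	return 0;
}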
991static int intel_fake_agp_insert_entries(struct agp_memory *mem,
992 off_t pg_start, int type)
993{
994 int i, j;
933 int ret = -EINVAL; 995 int ret = -EINVAL;
934 int mask_type;
935 996
936 if (mem->page_count == 0) 997 if (mem->page_count == 0)
937 goto out; 998 goto out;
938 999
939 temp = agp_bridge->current_size; 1000 if (pg_start < intel_private.base.gtt_stolen_entries) {
940 num_entries = A_SIZE_FIX(temp)->num_entries;
941
942 if (pg_start < intel_private.gtt_entries) {
943 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, 1001 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
944 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n", 1002 "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
945 pg_start, intel_private.gtt_entries); 1003 pg_start, intel_private.base.gtt_stolen_entries);
946 1004
947 dev_info(&intel_private.pcidev->dev, 1005 dev_info(&intel_private.pcidev->dev,
948 "trying to insert into local/stolen memory\n"); 1006 "trying to insert into local/stolen memory\n");
949 goto out_err; 1007 goto out_err;
950 } 1008 }
951 1009
952 if ((pg_start + mem->page_count) > num_entries) 1010 if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
953 goto out_err; 1011 goto out_err;
954 1012
955 /* The i830 can't check the GTT for entries since it's read only;
956 * depend on the caller to make the correct offset decisions.
957 */
958
959 if (type != mem->type) 1013 if (type != mem->type)
960 goto out_err; 1014 goto out_err;
961 1015
962 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 1016 if (!intel_private.driver->check_flags(type))
963
964 if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
965 mask_type != INTEL_AGP_CACHED_MEMORY)
966 goto out_err; 1017 goto out_err;
967 1018
968 if (!mem->is_flushed) 1019 if (!mem->is_flushed)
969 global_cache_flush(); 1020 global_cache_flush();
970 1021
971 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 1022 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
972 writel(agp_bridge->driver->mask_memory(agp_bridge, 1023 ret = intel_agp_map_memory(mem);
973 page_to_phys(mem->pages[i]), mask_type), 1024 if (ret != 0)
974 intel_private.registers+I810_PTE_BASE+(j*4)); 1025 return ret;
1026
1027 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
1028 pg_start, type);
1029 } else {
1030 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
1031 dma_addr_t addr = page_to_phys(mem->pages[i]);
1032 intel_private.driver->write_entry(addr,
1033 j, type);
1034 }
1035 readl(intel_private.gtt+j-1);
975 } 1036 }
976 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
977 1037
978out: 1038out:
979 ret = 0; 1039 ret = 0;
@@ -982,29 +1042,39 @@ out_err:
982 return ret; 1042 return ret;
983} 1043}
984 1044
985static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start, 1045static int intel_fake_agp_remove_entries(struct agp_memory *mem,
986 int type) 1046 off_t pg_start, int type)
987{ 1047{
988 int i; 1048 int i;
989 1049
990 if (mem->page_count == 0) 1050 if (mem->page_count == 0)
991 return 0; 1051 return 0;
992 1052
993 if (pg_start < intel_private.gtt_entries) { 1053 if (pg_start < intel_private.base.gtt_stolen_entries) {
994 dev_info(&intel_private.pcidev->dev, 1054 dev_info(&intel_private.pcidev->dev,
995 "trying to disable local/stolen memory\n"); 1055 "trying to disable local/stolen memory\n");
996 return -EINVAL; 1056 return -EINVAL;
997 } 1057 }
998 1058
1059 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
1060 intel_agp_unmap_memory(mem);
1061
999 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 1062 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1000 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 1063 intel_private.driver->write_entry(intel_private.scratch_page_dma,
1064 i, 0);
1001 } 1065 }
1002 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); 1066 readl(intel_private.gtt+i-1);
1003 1067
1004 return 0; 1068 return 0;
1005} 1069}
1006 1070
1007static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type) 1071static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
1072{
1073 intel_private.driver->chipset_flush();
1074}
1075
1076static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1077 int type)
1008{ 1078{
1009 if (type == AGP_PHYS_MEMORY) 1079 if (type == AGP_PHYS_MEMORY)
1010 return alloc_agpphysmem_i8xx(pg_count, type); 1080 return alloc_agpphysmem_i8xx(pg_count, type);
@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
1015static int intel_alloc_chipset_flush_resource(void) 1085static int intel_alloc_chipset_flush_resource(void)
1016{ 1086{
1017 int ret; 1087 int ret;
1018 ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE, 1088 ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1019 PAGE_SIZE, PCIBIOS_MIN_MEM, 0, 1089 PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1020 pcibios_align_resource, agp_bridge->dev); 1090 pcibios_align_resource, intel_private.bridge_dev);
1021 1091
1022 return ret; 1092 return ret;
1023} 1093}
@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
1027 int ret; 1097 int ret;
1028 u32 temp; 1098 u32 temp;
1029 1099
1030 pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp); 1100 pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1031 if (!(temp & 0x1)) { 1101 if (!(temp & 0x1)) {
1032 intel_alloc_chipset_flush_resource(); 1102 intel_alloc_chipset_flush_resource();
1033 intel_private.resource_valid = 1; 1103 intel_private.resource_valid = 1;
1034 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); 1104 pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1035 } else { 1105 } else {
1036 temp &= ~1; 1106 temp &= ~1;
1037 1107
@@ -1050,17 +1120,17 @@ static void intel_i965_g33_setup_chipset_flush(void)
1050 u32 temp_hi, temp_lo; 1120 u32 temp_hi, temp_lo;
1051 int ret; 1121 int ret;
1052 1122
1053 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi); 1123 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1054 pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo); 1124 pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1055 1125
1056 if (!(temp_lo & 0x1)) { 1126 if (!(temp_lo & 0x1)) {
1057 1127
1058 intel_alloc_chipset_flush_resource(); 1128 intel_alloc_chipset_flush_resource();
1059 1129
1060 intel_private.resource_valid = 1; 1130 intel_private.resource_valid = 1;
1061 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4, 1131 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1062 upper_32_bits(intel_private.ifp_resource.start)); 1132 upper_32_bits(intel_private.ifp_resource.start));
1063 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); 1133 pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1064 } else { 1134 } else {
1065 u64 l64; 1135 u64 l64;
1066 1136
@@ -1083,7 +1153,7 @@ static void intel_i9xx_setup_flush(void)
1083 if (intel_private.ifp_resource.start) 1153 if (intel_private.ifp_resource.start)
1084 return; 1154 return;
1085 1155
1086 if (IS_SNB) 1156 if (INTEL_GTT_GEN == 6)
1087 return; 1157 return;
1088 1158
1089 /* setup a resource for this object */ 1159 /* setup a resource for this object */
@@ -1091,7 +1161,7 @@ static void intel_i9xx_setup_flush(void)
1091 intel_private.ifp_resource.flags = IORESOURCE_MEM; 1161 intel_private.ifp_resource.flags = IORESOURCE_MEM;
1092 1162
1093 /* Setup chipset flush for 915 */ 1163 /* Setup chipset flush for 915 */
1094 if (IS_I965 || IS_G33 || IS_G4X) { 1164 if (IS_G33 || INTEL_GTT_GEN >= 4) {
1095 intel_i965_g33_setup_chipset_flush(); 1165 intel_i965_g33_setup_chipset_flush();
1096 } else { 1166 } else {
1097 intel_i915_setup_chipset_flush(); 1167 intel_i915_setup_chipset_flush();
@@ -1104,41 +1174,7 @@ static void intel_i9xx_setup_flush(void)
1104 "can't ioremap flush page - no chipset flushing\n"); 1174 "can't ioremap flush page - no chipset flushing\n");
1105} 1175}
1106 1176
1107static int intel_i9xx_configure(void) 1177static void i9xx_cleanup(void)
1108{
1109 struct aper_size_info_fixed *current_size;
1110 u32 temp;
1111 u16 gmch_ctrl;
1112 int i;
1113
1114 current_size = A_SIZE_FIX(agp_bridge->current_size);
1115
1116 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);
1117
1118 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1119
1120 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1121 gmch_ctrl |= I830_GMCH_ENABLED;
1122 pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
1123
1124 writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
1125 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
1126
1127 if (agp_bridge->driver->needs_scratch_page) {
1128 for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
1129 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1130 }
1131 readl(intel_private.gtt+i-1); /* PCI Posting. */
1132 }
1133
1134 global_cache_flush();
1135
1136 intel_i9xx_setup_flush();
1137
1138 return 0;
1139}
1140
1141static void intel_i915_cleanup(void)
1142{ 1178{
1143 if (intel_private.i9xx_flush_page) 1179 if (intel_private.i9xx_flush_page)
1144 iounmap(intel_private.i9xx_flush_page); 1180 iounmap(intel_private.i9xx_flush_page);
@@ -1146,320 +1182,93 @@ static void intel_i915_cleanup(void)
1146 release_resource(&intel_private.ifp_resource); 1182 release_resource(&intel_private.ifp_resource);
1147 intel_private.ifp_resource.start = 0; 1183 intel_private.ifp_resource.start = 0;
1148 intel_private.resource_valid = 0; 1184 intel_private.resource_valid = 0;
1149 iounmap(intel_private.gtt);
1150 iounmap(intel_private.registers);
1151} 1185}
1152 1186
1153static void intel_i915_chipset_flush(struct agp_bridge_data *bridge) 1187static void i9xx_chipset_flush(void)
1154{ 1188{
1155 if (intel_private.i9xx_flush_page) 1189 if (intel_private.i9xx_flush_page)
1156 writel(1, intel_private.i9xx_flush_page); 1190 writel(1, intel_private.i9xx_flush_page);
1157} 1191}
1158 1192
1159static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, 1193static void i965_write_entry(dma_addr_t addr, unsigned int entry,
1160 int type) 1194 unsigned int flags)
1161{ 1195{
1162 int num_entries; 1196 /* Shift high bits down */
1163 void *temp; 1197 addr |= (addr >> 28) & 0xf0;
1164 int ret = -EINVAL; 1198 writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
1165 int mask_type;
1166
1167 if (mem->page_count == 0)
1168 goto out;
1169
1170 temp = agp_bridge->current_size;
1171 num_entries = A_SIZE_FIX(temp)->num_entries;
1172
1173 if (pg_start < intel_private.gtt_entries) {
1174 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1175 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1176 pg_start, intel_private.gtt_entries);
1177
1178 dev_info(&intel_private.pcidev->dev,
1179 "trying to insert into local/stolen memory\n");
1180 goto out_err;
1181 }
1182
1183 if ((pg_start + mem->page_count) > num_entries)
1184 goto out_err;
1185
1186 /* The i915 can't check the GTT for entries since it's read only;
1187 * depend on the caller to make the correct offset decisions.
1188 */
1189
1190 if (type != mem->type)
1191 goto out_err;
1192
1193 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
1194
1195 if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
1196 mask_type != INTEL_AGP_CACHED_MEMORY)
1197 goto out_err;
1198
1199 if (!mem->is_flushed)
1200 global_cache_flush();
1201
1202 intel_agp_insert_sg_entries(mem, pg_start, mask_type);
1203
1204 out:
1205 ret = 0;
1206 out_err:
1207 mem->is_flushed = true;
1208 return ret;
1209} 1199}
1210 1200
1211static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start, 1201static bool gen6_check_flags(unsigned int flags)
1212 int type)
1213{ 1202{
1214 int i; 1203 return true;
1215
1216 if (mem->page_count == 0)
1217 return 0;
1218
1219 if (pg_start < intel_private.gtt_entries) {
1220 dev_info(&intel_private.pcidev->dev,
1221 "trying to disable local/stolen memory\n");
1222 return -EINVAL;
1223 }
1224
1225 for (i = pg_start; i < (mem->page_count + pg_start); i++)
1226 writel(agp_bridge->scratch_page, intel_private.gtt+i);
1227
1228 readl(intel_private.gtt+i-1);
1229
1230 return 0;
1231} 1204}
1232 1205
1233/* Return the aperture size by just checking the resource length. The effect 1206static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1234 * described in the spec of the MSAC registers is just changing of the 1207 unsigned int flags)
1235 * resource size.
1236 */
1237static int intel_i9xx_fetch_size(void)
1238{ 1208{
1239 int num_sizes = ARRAY_SIZE(intel_i830_sizes); 1209 unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
1240 int aper_size; /* size in megabytes */ 1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1241 int i; 1211 u32 pte_flags;
1242
1243 aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1244 1212
1245 for (i = 0; i < num_sizes; i++) { 1213 if (type_mask == AGP_USER_UNCACHED_MEMORY)
1246 if (aper_size == intel_i830_sizes[i].size) { 1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1247 agp_bridge->current_size = intel_i830_sizes + i; 1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1248 return aper_size; 1216 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1249 } 1217 if (gfdt)
1218 pte_flags |= GEN6_PTE_GFDT;
1219 } else { /* set 'normal'/'cached' to LLC by default */
1220 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1221 if (gfdt)
1222 pte_flags |= GEN6_PTE_GFDT;
1250 } 1223 }
1251 1224
1252 return 0; 1225 /* gen6 has bit11-4 for physical addr bit39-32 */
1226 addr |= (addr >> 28) & 0xff0;
1227 writel(addr | pte_flags, intel_private.gtt + entry);
1253} 1228}
1254 1229
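i965_write_entry and gen6_write_entry fold physical-address bits above bit 31 down into the low PTE bits (4..7 for the 36-bit gen4/5 parts, 4..11 for the 40-bit gen6 parts, matching the dma_mask_size values in the driver descriptors below). A standalone demo of the folding with arbitrary sample addresses:

/* Standalone demo of high-address folding into the low PTE bits. The
 * sample addresses are arbitrary page-aligned values. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fold_i965(uint64_t addr)
{
	addr |= (addr >> 28) & 0xf0;	/* bits 32..35 -> PTE bits 4..7 */
	return (uint32_t)addr;
}

static uint32_t fold_gen6(uint64_t addr)
{
	addr |= (addr >> 28) & 0xff0;	/* bits 32..39 -> PTE bits 4..11 */
	return (uint32_t)addr;
}

int main(void)
{
	uint64_t addr36 = 0x323456000ull;	/* fits a 36-bit DMA mask */
	uint64_t addr40 = 0x7323456000ull;	/* needs the 40-bit gen6 mask */

	printf("i965 pte bits: 0x%08x\n", fold_i965(addr36));
	printf("gen6 pte bits: 0x%08x\n", fold_gen6(addr40));
	return 0;
}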
1255static int intel_i915_get_gtt_size(void) 1230static void gen6_cleanup(void)
1256{ 1231{
1257 int size;
1258
1259 if (IS_G33) {
1260 u16 gmch_ctrl;
1261
1262 /* G33's GTT size defined in gmch_ctrl */
1263 pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
1264 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
1265 case I830_GMCH_GMS_STOLEN_512:
1266 size = 512;
1267 break;
1268 case I830_GMCH_GMS_STOLEN_1024:
1269 size = 1024;
1270 break;
1271 case I830_GMCH_GMS_STOLEN_8192:
1272 size = 8*1024;
1273 break;
1274 default:
1275 dev_info(&agp_bridge->dev->dev,
1276 "unknown page table size 0x%x, assuming 512KB\n",
1277 (gmch_ctrl & I830_GMCH_GMS_MASK));
1278 size = 512;
1279 }
1280 } else {
1281 /* On previous hardware, the GTT size was just what was
1282 * required to map the aperture.
1283 */
1284 size = agp_bridge->driver->fetch_size();
1285 }
1286
1287 return KB(size);
1288} 1232}
1289 1233
1290/* The intel i915 automatically initializes the agp aperture during POST. 1234static int i9xx_setup(void)
1291 * Use the memory already set aside for the GTT.
1292 */
1293static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
1294{ 1235{
1295 int page_order; 1236 u32 reg_addr;
1296 struct aper_size_info_fixed *size;
1297 int num_entries;
1298 u32 temp, temp2;
1299 int gtt_map_size;
1300
1301 size = agp_bridge->current_size;
1302 page_order = size->page_order;
1303 num_entries = size->num_entries;
1304 agp_bridge->gatt_table_real = NULL;
1305
1306 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1307 pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
1308 1237
1309 gtt_map_size = intel_i915_get_gtt_size(); 1238 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
1310
1311 intel_private.gtt = ioremap(temp2, gtt_map_size);
1312 if (!intel_private.gtt)
1313 return -ENOMEM;
1314
1315 intel_private.gtt_total_size = gtt_map_size / 4;
1316
1317 temp &= 0xfff80000;
1318
1319 intel_private.registers = ioremap(temp, 128 * 4096);
1320 if (!intel_private.registers) {
1321 iounmap(intel_private.gtt);
1322 return -ENOMEM;
1323 }
1324 1239
1325 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; 1240 reg_addr &= 0xfff80000;
1326 global_cache_flush(); /* FIXME: ? */
1327 1241
1328 /* we have to call this as early as possible after the MMIO base address is known */ 1242 intel_private.registers = ioremap(reg_addr, 128 * 4096);
1329 intel_i830_init_gtt_entries(); 1243 if (!intel_private.registers)
1330 if (intel_private.gtt_entries == 0) {
1331 iounmap(intel_private.gtt);
1332 iounmap(intel_private.registers);
1333 return -ENOMEM; 1244 return -ENOMEM;
1334 }
1335
1336 agp_bridge->gatt_table = NULL;
1337
1338 agp_bridge->gatt_bus_addr = temp;
1339
1340 return 0;
1341}
1342
1343/*
1344 * The i965 supports 36-bit physical addresses, but to keep
1345 * the format of the GTT the same, the bits that don't fit
1346 * in a 32-bit word are shifted down to bits 4..7.
1347 *
1348 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
1349 * is always zero on 32-bit architectures, so no need to make
1350 * this conditional.
1351 */
1352static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1353 dma_addr_t addr, int type)
1354{
1355 /* Shift high bits down */
1356 addr |= (addr >> 28) & 0xf0;
1357
1358 /* Type checking must be done elsewhere */
1359 return addr | bridge->driver->masks[type].mask;
1360}
1361 1245
1362static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, 1246 if (INTEL_GTT_GEN == 3) {
1363 dma_addr_t addr, int type) 1247 u32 gtt_addr;
1364{
1365 /* gen6 has bit11-4 for physical addr bit39-32 */
1366 addr |= (addr >> 28) & 0xff0;
1367 1248
1368 /* Type checking must be done elsewhere */ 1249 pci_read_config_dword(intel_private.pcidev,
1369 return addr | bridge->driver->masks[type].mask; 1250 I915_PTEADDR, &gtt_addr);
1370} 1251 intel_private.gtt_bus_addr = gtt_addr;
1371 1252 } else {
1372static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) 1253 u32 gtt_offset;
1373{
1374 u16 snb_gmch_ctl;
1375
1376 switch (agp_bridge->dev->device) {
1377 case PCI_DEVICE_ID_INTEL_GM45_HB:
1378 case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
1379 case PCI_DEVICE_ID_INTEL_Q45_HB:
1380 case PCI_DEVICE_ID_INTEL_G45_HB:
1381 case PCI_DEVICE_ID_INTEL_G41_HB:
1382 case PCI_DEVICE_ID_INTEL_B43_HB:
1383 case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
1384 case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
1385 case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
1386 case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
1387 *gtt_offset = *gtt_size = MB(2);
1388 break;
1389 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
1390 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
1391 case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
1392 *gtt_offset = MB(2);
1393 1254
1394 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); 1255 switch (INTEL_GTT_GEN) {
1395 switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) { 1256 case 5:
1396 default: 1257 case 6:
1397 case SNB_GTT_SIZE_0M: 1258 gtt_offset = MB(2);
1398 printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
1399 *gtt_size = MB(0);
1400 break; 1259 break;
1401 case SNB_GTT_SIZE_1M: 1260 case 4:
1402 *gtt_size = MB(1); 1261 default:
1403 break; 1262 gtt_offset = KB(512);
1404 case SNB_GTT_SIZE_2M:
1405 *gtt_size = MB(2);
1406 break; 1263 break;
1407 } 1264 }
1408 break; 1265 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1409 default:
1410 *gtt_offset = *gtt_size = KB(512);
1411 } 1266 }
1412}
1413
1414/* The intel i965 automatically initializes the agp aperture during POST.
1415 * Use the memory already set aside for the GTT.
1416 */
1417static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
1418{
1419 int page_order;
1420 struct aper_size_info_fixed *size;
1421 int num_entries;
1422 u32 temp;
1423 int gtt_offset, gtt_size;
1424
1425 size = agp_bridge->current_size;
1426 page_order = size->page_order;
1427 num_entries = size->num_entries;
1428 agp_bridge->gatt_table_real = NULL;
1429
1430 pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
1431
1432 temp &= 0xfff00000;
1433
1434 intel_i965_get_gtt_range(&gtt_offset, &gtt_size);
1435 1267
1436 intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size); 1268 intel_private.pte_bus_addr =
1269 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1437 1270
1438 if (!intel_private.gtt) 1271 intel_i9xx_setup_flush();
1439 return -ENOMEM;
1440
1441 intel_private.gtt_total_size = gtt_size / 4;
1442
1443 intel_private.registers = ioremap(temp, 128 * 4096);
1444 if (!intel_private.registers) {
1445 iounmap(intel_private.gtt);
1446 return -ENOMEM;
1447 }
1448
1449 temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1450 global_cache_flush(); /* FIXME: ? */
1451
1452 /* we have to call this as early as possible after the MMIO base address is known */
1453 intel_i830_init_gtt_entries();
1454 if (intel_private.gtt_entries == 0) {
1455 iounmap(intel_private.gtt);
1456 iounmap(intel_private.registers);
1457 return -ENOMEM;
1458 }
1459
1460 agp_bridge->gatt_table = NULL;
1461
1462 agp_bridge->gatt_bus_addr = temp;
1463 1272
1464 return 0; 1273 return 0;
1465} 1274}
@@ -1475,7 +1284,7 @@ static const struct agp_bridge_driver intel_810_driver = {
1475 .cleanup = intel_i810_cleanup, 1284 .cleanup = intel_i810_cleanup,
1476 .mask_memory = intel_i810_mask_memory, 1285 .mask_memory = intel_i810_mask_memory,
1477 .masks = intel_i810_masks, 1286 .masks = intel_i810_masks,
1478 .agp_enable = intel_i810_agp_enable, 1287 .agp_enable = intel_fake_agp_enable,
1479 .cache_flush = global_cache_flush, 1288 .cache_flush = global_cache_flush,
1480 .create_gatt_table = agp_generic_create_gatt_table, 1289 .create_gatt_table = agp_generic_create_gatt_table,
1481 .free_gatt_table = agp_generic_free_gatt_table, 1290 .free_gatt_table = agp_generic_free_gatt_table,
@@ -1490,161 +1299,282 @@ static const struct agp_bridge_driver intel_810_driver = {
1490 .agp_type_to_mask_type = agp_generic_type_to_mask_type, 1299 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1491}; 1300};
1492 1301
1493static const struct agp_bridge_driver intel_830_driver = { 1302static const struct agp_bridge_driver intel_fake_agp_driver = {
1494 .owner = THIS_MODULE, 1303 .owner = THIS_MODULE,
1495 .aperture_sizes = intel_i830_sizes,
1496 .size_type = FIXED_APER_SIZE, 1304 .size_type = FIXED_APER_SIZE,
1497 .num_aperture_sizes = 4, 1305 .aperture_sizes = intel_fake_agp_sizes,
1498 .needs_scratch_page = true, 1306 .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes),
1499 .configure = intel_i830_configure, 1307 .configure = intel_fake_agp_configure,
1500 .fetch_size = intel_i830_fetch_size, 1308 .fetch_size = intel_fake_agp_fetch_size,
1501 .cleanup = intel_i830_cleanup, 1309 .cleanup = intel_gtt_cleanup,
1502 .mask_memory = intel_i810_mask_memory, 1310 .agp_enable = intel_fake_agp_enable,
1503 .masks = intel_i810_masks,
1504 .agp_enable = intel_i810_agp_enable,
1505 .cache_flush = global_cache_flush, 1311 .cache_flush = global_cache_flush,
1506 .create_gatt_table = intel_i830_create_gatt_table, 1312 .create_gatt_table = intel_fake_agp_create_gatt_table,
1507 .free_gatt_table = intel_i830_free_gatt_table, 1313 .free_gatt_table = intel_fake_agp_free_gatt_table,
1508 .insert_memory = intel_i830_insert_entries, 1314 .insert_memory = intel_fake_agp_insert_entries,
1509 .remove_memory = intel_i830_remove_entries, 1315 .remove_memory = intel_fake_agp_remove_entries,
1510 .alloc_by_type = intel_i830_alloc_by_type, 1316 .alloc_by_type = intel_fake_agp_alloc_by_type,
1511 .free_by_type = intel_i810_free_by_type, 1317 .free_by_type = intel_i810_free_by_type,
1512 .agp_alloc_page = agp_generic_alloc_page, 1318 .agp_alloc_page = agp_generic_alloc_page,
1513 .agp_alloc_pages = agp_generic_alloc_pages, 1319 .agp_alloc_pages = agp_generic_alloc_pages,
1514 .agp_destroy_page = agp_generic_destroy_page, 1320 .agp_destroy_page = agp_generic_destroy_page,
1515 .agp_destroy_pages = agp_generic_destroy_pages, 1321 .agp_destroy_pages = agp_generic_destroy_pages,
1516 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1322 .chipset_flush = intel_fake_agp_chipset_flush,
1517 .chipset_flush = intel_i830_chipset_flush,
1518}; 1323};
1519 1324
1520static const struct agp_bridge_driver intel_915_driver = { 1325static const struct intel_gtt_driver i81x_gtt_driver = {
1521 .owner = THIS_MODULE, 1326 .gen = 1,
1522 .aperture_sizes = intel_i830_sizes, 1327 .dma_mask_size = 32,
1523 .size_type = FIXED_APER_SIZE,
1524 .num_aperture_sizes = 4,
1525 .needs_scratch_page = true,
1526 .configure = intel_i9xx_configure,
1527 .fetch_size = intel_i9xx_fetch_size,
1528 .cleanup = intel_i915_cleanup,
1529 .mask_memory = intel_i810_mask_memory,
1530 .masks = intel_i810_masks,
1531 .agp_enable = intel_i810_agp_enable,
1532 .cache_flush = global_cache_flush,
1533 .create_gatt_table = intel_i915_create_gatt_table,
1534 .free_gatt_table = intel_i830_free_gatt_table,
1535 .insert_memory = intel_i915_insert_entries,
1536 .remove_memory = intel_i915_remove_entries,
1537 .alloc_by_type = intel_i830_alloc_by_type,
1538 .free_by_type = intel_i810_free_by_type,
1539 .agp_alloc_page = agp_generic_alloc_page,
1540 .agp_alloc_pages = agp_generic_alloc_pages,
1541 .agp_destroy_page = agp_generic_destroy_page,
1542 .agp_destroy_pages = agp_generic_destroy_pages,
1543 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1544 .chipset_flush = intel_i915_chipset_flush,
1545#ifdef USE_PCI_DMA_API
1546 .agp_map_page = intel_agp_map_page,
1547 .agp_unmap_page = intel_agp_unmap_page,
1548 .agp_map_memory = intel_agp_map_memory,
1549 .agp_unmap_memory = intel_agp_unmap_memory,
1550#endif
1551}; 1328};
1552 1329static const struct intel_gtt_driver i8xx_gtt_driver = {
1553static const struct agp_bridge_driver intel_i965_driver = { 1330 .gen = 2,
1554 .owner = THIS_MODULE, 1331 .setup = i830_setup,
1555 .aperture_sizes = intel_i830_sizes, 1332 .cleanup = i830_cleanup,
1556 .size_type = FIXED_APER_SIZE, 1333 .write_entry = i830_write_entry,
1557 .num_aperture_sizes = 4, 1334 .dma_mask_size = 32,
1558 .needs_scratch_page = true, 1335 .check_flags = i830_check_flags,
1559 .configure = intel_i9xx_configure, 1336 .chipset_flush = i830_chipset_flush,
1560 .fetch_size = intel_i9xx_fetch_size,
1561 .cleanup = intel_i915_cleanup,
1562 .mask_memory = intel_i965_mask_memory,
1563 .masks = intel_i810_masks,
1564 .agp_enable = intel_i810_agp_enable,
1565 .cache_flush = global_cache_flush,
1566 .create_gatt_table = intel_i965_create_gatt_table,
1567 .free_gatt_table = intel_i830_free_gatt_table,
1568 .insert_memory = intel_i915_insert_entries,
1569 .remove_memory = intel_i915_remove_entries,
1570 .alloc_by_type = intel_i830_alloc_by_type,
1571 .free_by_type = intel_i810_free_by_type,
1572 .agp_alloc_page = agp_generic_alloc_page,
1573 .agp_alloc_pages = agp_generic_alloc_pages,
1574 .agp_destroy_page = agp_generic_destroy_page,
1575 .agp_destroy_pages = agp_generic_destroy_pages,
1576 .agp_type_to_mask_type = intel_i830_type_to_mask_type,
1577 .chipset_flush = intel_i915_chipset_flush,
1578#ifdef USE_PCI_DMA_API
1579 .agp_map_page = intel_agp_map_page,
1580 .agp_unmap_page = intel_agp_unmap_page,
1581 .agp_map_memory = intel_agp_map_memory,
1582 .agp_unmap_memory = intel_agp_unmap_memory,
1583#endif
1584}; 1337};
1585 1338static const struct intel_gtt_driver i915_gtt_driver = {
1586static const struct agp_bridge_driver intel_gen6_driver = { 1339 .gen = 3,
1587 .owner = THIS_MODULE, 1340 .setup = i9xx_setup,
1588 .aperture_sizes = intel_i830_sizes, 1341 .cleanup = i9xx_cleanup,
1589 .size_type = FIXED_APER_SIZE, 1342 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1590 .num_aperture_sizes = 4, 1343 .write_entry = i830_write_entry,
1591 .needs_scratch_page = true, 1344 .dma_mask_size = 32,
1592 .configure = intel_i9xx_configure, 1345 .check_flags = i830_check_flags,
1593 .fetch_size = intel_i9xx_fetch_size, 1346 .chipset_flush = i9xx_chipset_flush,
1594 .cleanup = intel_i915_cleanup, 1347};
1595 .mask_memory = intel_gen6_mask_memory, 1348static const struct intel_gtt_driver g33_gtt_driver = {
1596 .masks = intel_gen6_masks, 1349 .gen = 3,
1597 .agp_enable = intel_i810_agp_enable, 1350 .is_g33 = 1,
1598 .cache_flush = global_cache_flush, 1351 .setup = i9xx_setup,
1599 .create_gatt_table = intel_i965_create_gatt_table, 1352 .cleanup = i9xx_cleanup,
1600 .free_gatt_table = intel_i830_free_gatt_table, 1353 .write_entry = i965_write_entry,
1601 .insert_memory = intel_i915_insert_entries, 1354 .dma_mask_size = 36,
1602 .remove_memory = intel_i915_remove_entries, 1355 .check_flags = i830_check_flags,
1603 .alloc_by_type = intel_i830_alloc_by_type, 1356 .chipset_flush = i9xx_chipset_flush,
1604 .free_by_type = intel_i810_free_by_type, 1357};
1605 .agp_alloc_page = agp_generic_alloc_page, 1358static const struct intel_gtt_driver pineview_gtt_driver = {
1606 .agp_alloc_pages = agp_generic_alloc_pages, 1359 .gen = 3,
1607 .agp_destroy_page = agp_generic_destroy_page, 1360 .is_pineview = 1, .is_g33 = 1,
1608 .agp_destroy_pages = agp_generic_destroy_pages, 1361 .setup = i9xx_setup,
1609 .agp_type_to_mask_type = intel_gen6_type_to_mask_type, 1362 .cleanup = i9xx_cleanup,
1610 .chipset_flush = intel_i915_chipset_flush, 1363 .write_entry = i965_write_entry,
1611#ifdef USE_PCI_DMA_API 1364 .dma_mask_size = 36,
1612 .agp_map_page = intel_agp_map_page, 1365 .check_flags = i830_check_flags,
1613 .agp_unmap_page = intel_agp_unmap_page, 1366 .chipset_flush = i9xx_chipset_flush,
1614 .agp_map_memory = intel_agp_map_memory, 1367};
1615 .agp_unmap_memory = intel_agp_unmap_memory, 1368static const struct intel_gtt_driver i965_gtt_driver = {
1616#endif 1369 .gen = 4,
1370 .setup = i9xx_setup,
1371 .cleanup = i9xx_cleanup,
1372 .write_entry = i965_write_entry,
1373 .dma_mask_size = 36,
1374 .check_flags = i830_check_flags,
1375 .chipset_flush = i9xx_chipset_flush,
1376};
1377static const struct intel_gtt_driver g4x_gtt_driver = {
1378 .gen = 5,
1379 .setup = i9xx_setup,
1380 .cleanup = i9xx_cleanup,
1381 .write_entry = i965_write_entry,
1382 .dma_mask_size = 36,
1383 .check_flags = i830_check_flags,
1384 .chipset_flush = i9xx_chipset_flush,
1385};
1386static const struct intel_gtt_driver ironlake_gtt_driver = {
1387 .gen = 5,
1388 .is_ironlake = 1,
1389 .setup = i9xx_setup,
1390 .cleanup = i9xx_cleanup,
1391 .write_entry = i965_write_entry,
1392 .dma_mask_size = 36,
1393 .check_flags = i830_check_flags,
1394 .chipset_flush = i9xx_chipset_flush,
1395};
1396static const struct intel_gtt_driver sandybridge_gtt_driver = {
1397 .gen = 6,
1398 .setup = i9xx_setup,
1399 .cleanup = gen6_cleanup,
1400 .write_entry = gen6_write_entry,
1401 .dma_mask_size = 40,
1402 .check_flags = gen6_check_flags,
1403 .chipset_flush = i9xx_chipset_flush,
1617}; 1404};
1618 1405
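Each chipset generation's behaviour is collected into a const struct intel_gtt_driver above and invoked through intel_private.driver->setup()/write_entry()/check_flags()/etc. The minimal standalone mock below shows just that vtable pattern; the hook names and the single "generation" are invented:

/* Minimal standalone mock of the per-generation driver vtable: a const
 * descriptor with function pointers, dispatched through a pointer. */
#include <stdio.h>

struct toy_gtt_driver {
	unsigned int gen;
	int  (*setup)(void);
	void (*write_entry)(unsigned long addr, unsigned int entry);
};

static int toy_setup(void) { puts("setup"); return 0; }

static void toy_write(unsigned long addr, unsigned int entry)
{
	printf("entry %u <- 0x%lx\n", entry, addr);
}

static const struct toy_gtt_driver toy_gen2_driver = {
	.gen = 2,
	.setup = toy_setup,
	.write_entry = toy_write,
};

int main(void)
{
	const struct toy_gtt_driver *drv = &toy_gen2_driver;

	if (drv->setup() == 0)
		drv->write_entry(0x12345000ul, 0);
	return 0;
}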
1619static const struct agp_bridge_driver intel_g33_driver = { 1406/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
1620 .owner = THIS_MODULE, 1407 * driver and gmch_driver must be non-null, and find_gmch will determine
1621 .aperture_sizes = intel_i830_sizes, 1408 * which one should be used if a gmch_chip_id is present.
1622 .size_type = FIXED_APER_SIZE, 1409 */
1623 .num_aperture_sizes = 4, 1410static const struct intel_gtt_driver_description {
1624 .needs_scratch_page = true, 1411 unsigned int gmch_chip_id;
1625 .configure = intel_i9xx_configure, 1412 char *name;
1626 .fetch_size = intel_i9xx_fetch_size, 1413 const struct agp_bridge_driver *gmch_driver;
1627 .cleanup = intel_i915_cleanup, 1414 const struct intel_gtt_driver *gtt_driver;
1628 .mask_memory = intel_i965_mask_memory, 1415} intel_gtt_chipsets[] = {
1629 .masks = intel_i810_masks, 1416 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
1630 .agp_enable = intel_i810_agp_enable, 1417 &i81x_gtt_driver},
1631 .cache_flush = global_cache_flush, 1418 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
1632 .create_gatt_table = intel_i915_create_gatt_table, 1419 &i81x_gtt_driver},
1633 .free_gatt_table = intel_i830_free_gatt_table, 1420 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
1634 .insert_memory = intel_i915_insert_entries, 1421 &i81x_gtt_driver},
1635 .remove_memory = intel_i915_remove_entries, 1422 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
1636 .alloc_by_type = intel_i830_alloc_by_type, 1423 &i81x_gtt_driver},
1637 .free_by_type = intel_i810_free_by_type, 1424 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1638 .agp_alloc_page = agp_generic_alloc_page, 1425 &intel_fake_agp_driver, &i8xx_gtt_driver},
1639 .agp_alloc_pages = agp_generic_alloc_pages, 1426 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
1640 .agp_destroy_page = agp_generic_destroy_page, 1427 &intel_fake_agp_driver, &i8xx_gtt_driver},
1641 .agp_destroy_pages = agp_generic_destroy_pages, 1428 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1642 .agp_type_to_mask_type = intel_i830_type_to_mask_type, 1429 &intel_fake_agp_driver, &i8xx_gtt_driver},
1643 .chipset_flush = intel_i915_chipset_flush, 1430 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1644#ifdef USE_PCI_DMA_API 1431 &intel_fake_agp_driver, &i8xx_gtt_driver},
1645 .agp_map_page = intel_agp_map_page, 1432 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1646 .agp_unmap_page = intel_agp_unmap_page, 1433 &intel_fake_agp_driver, &i8xx_gtt_driver},
1647 .agp_map_memory = intel_agp_map_memory, 1434 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1648 .agp_unmap_memory = intel_agp_unmap_memory, 1435 &intel_fake_agp_driver, &i915_gtt_driver },
1649#endif 1436 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1437 &intel_fake_agp_driver, &i915_gtt_driver },
1438 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1439 &intel_fake_agp_driver, &i915_gtt_driver },
1440 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1441 &intel_fake_agp_driver, &i915_gtt_driver },
1442 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1443 &intel_fake_agp_driver, &i915_gtt_driver },
1444 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1445 &intel_fake_agp_driver, &i915_gtt_driver },
1446 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1447 &intel_fake_agp_driver, &i965_gtt_driver },
1448 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1449 &intel_fake_agp_driver, &i965_gtt_driver },
1450 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1451 &intel_fake_agp_driver, &i965_gtt_driver },
1452 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1453 &intel_fake_agp_driver, &i965_gtt_driver },
1454 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1455 &intel_fake_agp_driver, &i965_gtt_driver },
1456 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1457 &intel_fake_agp_driver, &i965_gtt_driver },
1458 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1459 &intel_fake_agp_driver, &g33_gtt_driver },
1460 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1461 &intel_fake_agp_driver, &g33_gtt_driver },
1462 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1463 &intel_fake_agp_driver, &g33_gtt_driver },
1464 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1465 &intel_fake_agp_driver, &pineview_gtt_driver },
1466 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1467 &intel_fake_agp_driver, &pineview_gtt_driver },
1468 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1469 &intel_fake_agp_driver, &g4x_gtt_driver },
1470 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1471 &intel_fake_agp_driver, &g4x_gtt_driver },
1472 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1473 &intel_fake_agp_driver, &g4x_gtt_driver },
1474 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1475 &intel_fake_agp_driver, &g4x_gtt_driver },
1476 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1477 &intel_fake_agp_driver, &g4x_gtt_driver },
1478 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1479 &intel_fake_agp_driver, &g4x_gtt_driver },
1480 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1481 &intel_fake_agp_driver, &g4x_gtt_driver },
1482 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1483 "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
1484 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1485 "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
1486 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1487 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1488 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1489 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1490 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1491 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1492 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1493 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1494 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1495 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1496 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1497 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1498 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1499 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
1500 { 0, NULL, NULL }
1650}; 1501};
1502
1503static int find_gmch(u16 device)
1504{
1505 struct pci_dev *gmch_device;
1506
1507 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1508 if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1509 gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1510 device, gmch_device);
1511 }
1512
1513 if (!gmch_device)
1514 return 0;
1515
1516 intel_private.pcidev = gmch_device;
1517 return 1;
1518}
1519
1520int intel_gmch_probe(struct pci_dev *pdev,
1521 struct agp_bridge_data *bridge)
1522{
1523 int i, mask;
1524 bridge->driver = NULL;
1525
1526 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1527 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1528 bridge->driver =
1529 intel_gtt_chipsets[i].gmch_driver;
1530 intel_private.driver =
1531 intel_gtt_chipsets[i].gtt_driver;
1532 break;
1533 }
1534 }
1535
1536 if (!bridge->driver)
1537 return 0;
1538
1539 bridge->dev_private_data = &intel_private;
1540 bridge->dev = pdev;
1541
1542 intel_private.bridge_dev = pci_dev_get(pdev);
1543
1544 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1545
1546 mask = intel_private.driver->dma_mask_size;
1547 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1548 dev_err(&intel_private.pcidev->dev,
1549 "set gfx device dma mask %d-bit failed!\n", mask);
1550 else
1551 pci_set_consistent_dma_mask(intel_private.pcidev,
1552 DMA_BIT_MASK(mask));
1553
1554 if (bridge->driver == &intel_810_driver)
1555 return 1;
1556
1557 if (intel_gtt_init() != 0)
1558 return 0;
1559
1560 return 1;
1561}
1562EXPORT_SYMBOL(intel_gmch_probe);
1563
1564struct intel_gtt *intel_gtt_get(void)
1565{
1566 return &intel_private.base;
1567}
1568EXPORT_SYMBOL(intel_gtt_get);
1569
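intel_gtt_get() hands a GPU driver a pointer to the shared intel_private.base so it can read the probed entry counts. The userspace mock below models only that consumer side, reducing the struct to the three fields used in this file and filling it with invented sample values:

/* Userspace mock of an intel_gtt_get() consumer; the real struct and
 * accessor live in the kernel, and the numbers here are made up. */
#include <stdio.h>

struct toy_intel_gtt {
	unsigned int gtt_stolen_entries;
	unsigned int gtt_total_entries;
	unsigned int gtt_mappable_entries;
};

static struct toy_intel_gtt toy_base = {
	.gtt_stolen_entries   = 2048,
	.gtt_total_entries    = 131072,
	.gtt_mappable_entries = 65536,
};

static struct toy_intel_gtt *toy_intel_gtt_get(void)
{
	return &toy_base;
}

int main(void)
{
	struct toy_intel_gtt *gtt = toy_intel_gtt_get();

	printf("usable GTT range: entries %u..%u (%uK mappable)\n",
	       gtt->gtt_stolen_entries, gtt->gtt_total_entries - 1,
	       gtt->gtt_mappable_entries * 4);
	return 0;
}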
1570void intel_gmch_remove(struct pci_dev *pdev)
1571{
1572 if (intel_private.pcidev)
1573 pci_dev_put(intel_private.pcidev);
1574 if (intel_private.bridge_dev)
1575 pci_dev_put(intel_private.bridge_dev);
1576}
1577EXPORT_SYMBOL(intel_gmch_remove);
1578
1579MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
1580MODULE_LICENSE("GPL and additional rights");