Diffstat (limited to 'drivers/char/agp')
-rw-r--r--  drivers/char/agp/agp.h          |  15
-rw-r--r--  drivers/char/agp/ali-agp.c      |   4
-rw-r--r--  drivers/char/agp/amd-k7-agp.c   |  10
-rw-r--r--  drivers/char/agp/amd64-agp.c    |   7
-rw-r--r--  drivers/char/agp/ati-agp.c      |   7
-rw-r--r--  drivers/char/agp/backend.c      |  32
-rw-r--r--  drivers/char/agp/efficeon-agp.c |   4
-rw-r--r--  drivers/char/agp/generic.c      |  20
-rw-r--r--  drivers/char/agp/hp-agp.c       |  17
-rw-r--r--  drivers/char/agp/i460-agp.c     |  17
-rw-r--r--  drivers/char/agp/intel-agp.c    | 190
-rw-r--r--  drivers/char/agp/nvidia-agp.c   |   2
-rw-r--r--  drivers/char/agp/parisc-agp.c   |  12
-rw-r--r--  drivers/char/agp/sgi-agp.c      |   8
-rw-r--r--  drivers/char/agp/sworks-agp.c   |  10
-rw-r--r--  drivers/char/agp/uninorth-agp.c |  53
16 files changed, 293 insertions(+), 115 deletions(-)
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 178e2e9e9f09..d6f36c004d9b 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -107,7 +107,7 @@ struct agp_bridge_driver {
 	void (*agp_enable)(struct agp_bridge_data *, u32);
 	void (*cleanup)(void);
 	void (*tlb_flush)(struct agp_memory *);
-	unsigned long (*mask_memory)(struct agp_bridge_data *, struct page *, int);
+	unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
 	void (*cache_flush)(void);
 	int (*create_gatt_table)(struct agp_bridge_data *);
 	int (*free_gatt_table)(struct agp_bridge_data *);
@@ -121,6 +121,11 @@ struct agp_bridge_driver {
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 	void (*chipset_flush)(struct agp_bridge_data *);
+
+	int (*agp_map_page)(struct page *page, dma_addr_t *ret);
+	void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
+	int (*agp_map_memory)(struct agp_memory *mem);
+	void (*agp_unmap_memory)(struct agp_memory *mem);
 };
 
 struct agp_bridge_data {
@@ -134,7 +139,8 @@ struct agp_bridge_data {
 	u32 __iomem *gatt_table;
 	u32 *gatt_table_real;
 	unsigned long scratch_page;
-	unsigned long scratch_page_real;
+	struct page *scratch_page_page;
+	dma_addr_t scratch_page_dma;
 	unsigned long gart_bus_addr;
 	unsigned long gatt_bus_addr;
 	u32 mode;
@@ -291,7 +297,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge);
 void global_cache_flush(void);
 void get_agp_version(struct agp_bridge_data *bridge);
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
-				      struct page *page, int type);
+				      dma_addr_t phys, int type);
 int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
 				  int type);
 struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
@@ -312,9 +318,6 @@ void agp3_generic_cleanup(void);
 #define AGP_GENERIC_SIZES_ENTRIES 11
 extern const struct aper_size_info_16 agp3_generic_sizes[];
 
-#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
-#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
-
 extern int agp_off;
 extern int agp_try_unsupported_boot;
 
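The four new hooks above let a bridge driver hand the core bus addresses obtained through the PCI DMA API instead of raw physical addresses, and mask_memory() now consumes a dma_addr_t. A minimal sketch of a per-page hook pair, assuming a hypothetical driver that keeps its graphics pci_dev in example_gfx_pdev (both names made up for illustration; in this series only intel-agp.c actually implements the hooks, further down):

#include <linux/pci.h>
#include <linux/agp_backend.h>

/* Sketch only: per-page map/unmap hooks built on the PCI DMA API. */
static struct pci_dev *example_gfx_pdev;	/* assumed to be set at probe time */

static int example_agp_map_page(struct page *page, dma_addr_t *ret)
{
	/* Map one page for bidirectional DMA and hand back the bus address. */
	*ret = pci_map_page(example_gfx_pdev, page, 0,
			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(example_gfx_pdev, *ret))
		return -EINVAL;
	return 0;
}

static void example_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(example_gfx_pdev, dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
}

A driver would then point .agp_map_page/.agp_unmap_page in its agp_bridge_driver at functions like these, as the Intel driver does for its own implementations later in this diff.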
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 201ef3ffd484..d2ce68f27e4b 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -152,7 +152,7 @@ static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
 	pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 	pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 			(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-			  phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN ));
+			  page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
 	return page;
 }
 
@@ -180,7 +180,7 @@ static void m1541_destroy_page(struct page *page, int flags)
 		pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
 		pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
 				(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-				  phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN));
+				  page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
 	}
 	agp_generic_destroy_page(page, flags);
 }
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index ba9bde71eaaf..73dbf40c874d 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -44,7 +44,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
 #ifndef CONFIG_X86
 	SetPageReserved(virt_to_page(page_map->real));
 	global_cache_flush();
-	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
+	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
 					    PAGE_SIZE);
 	if (page_map->remapped == NULL) {
 		ClearPageReserved(virt_to_page(page_map->real));
@@ -160,7 +160,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 
 	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
 	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
-	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
 
 	/* Get the address for the gart region.
 	 * This is a bus address even on the alpha, b/c its
@@ -173,7 +173,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
 
 	/* Calculate the agp offset */
 	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-		writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
+		writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
 			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
@@ -325,7 +325,9 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_generic_mask_memory(agp_bridge,
-			mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+					       page_to_phys(mem->pages[i]),
+					       mem->type),
+		       cur_gatt+GET_GATT_OFF(addr));
 		readl(cur_gatt+GET_GATT_OFF(addr));	/* PCI Posting. */
 	}
 	amd_irongate_tlbflush(mem);
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3bf5dda90f4a..2fb2e6cc322a 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -79,7 +79,8 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		tmp = agp_bridge->driver->mask_memory(agp_bridge,
-			mem->pages[i], mask_type);
+						      page_to_phys(mem->pages[i]),
+						      mask_type);
 
 		BUG_ON(tmp & 0xffffff0000000ffcULL);
 		pte = (tmp & 0x000000ff00000000ULL) >> 28;
@@ -177,7 +178,7 @@ static const struct aper_size_info_32 amd_8151_sizes[7] =
 
 static int amd_8151_configure(void)
 {
-	unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
+	unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
 	int i;
 
 	/* Configure AGP regs in each x86-64 host bridge. */
@@ -557,7 +558,7 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
 {
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
 
-	release_mem_region(virt_to_gart(bridge->gatt_table_real),
+	release_mem_region(virt_to_phys(bridge->gatt_table_real),
 			   amd64_aperture_sizes[bridge->aperture_size_idx].size);
 	agp_remove_bridge(bridge);
 	agp_put_bridge(bridge);
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 33656e144cc5..3b2ecbe86ebe 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -302,7 +302,8 @@ static int ati_insert_memory(struct agp_memory * mem,
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = GET_GATT(addr);
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->pages[i], mem->type),
+						       page_to_phys(mem->pages[i]),
+						       mem->type),
 		       cur_gatt+GET_GATT_OFF(addr));
 	}
 	readl(GET_GATT(agp_bridge->gart_bus_addr));	/* PCI posting */
@@ -359,7 +360,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 
 	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
 	agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
-	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
 
 	/* Write out the size register */
 	current_size = A_SIZE_LVL2(agp_bridge->current_size);
@@ -389,7 +390,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 
 	/* Calculate the agp offset */
 	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-		writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
+		writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
 		       page_dir.remapped+GET_PAGE_DIR_OFF(addr));
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
 	}
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index cfa5a649dfe7..ad87753f6de4 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -149,9 +149,21 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 			return -ENOMEM;
 		}
 
-		bridge->scratch_page_real = phys_to_gart(page_to_phys(page));
-		bridge->scratch_page =
-			bridge->driver->mask_memory(bridge, page, 0);
+		bridge->scratch_page_page = page;
+		if (bridge->driver->agp_map_page) {
+			if (bridge->driver->agp_map_page(page,
+							 &bridge->scratch_page_dma)) {
+				dev_err(&bridge->dev->dev,
+					"unable to dma-map scratch page\n");
+				rc = -ENOMEM;
+				goto err_out_nounmap;
+			}
+		} else {
+			bridge->scratch_page_dma = page_to_phys(page);
+		}
+
+		bridge->scratch_page = bridge->driver->mask_memory(bridge,
+						bridge->scratch_page_dma, 0);
 	}
 
 	size_value = bridge->driver->fetch_size();
@@ -191,8 +203,14 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	return 0;
 
 err_out:
+	if (bridge->driver->needs_scratch_page &&
+	    bridge->driver->agp_unmap_page) {
+		bridge->driver->agp_unmap_page(bridge->scratch_page_page,
+					       bridge->scratch_page_dma);
+	}
+err_out_nounmap:
 	if (bridge->driver->needs_scratch_page) {
-		void *va = gart_to_virt(bridge->scratch_page_real);
+		void *va = page_address(bridge->scratch_page_page);
 
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
@@ -219,7 +237,11 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 
 	if (bridge->driver->agp_destroy_page &&
 	    bridge->driver->needs_scratch_page) {
-		void *va = gart_to_virt(bridge->scratch_page_real);
+		void *va = page_address(bridge->scratch_page_page);
+
+		if (bridge->driver->agp_unmap_page)
+			bridge->driver->agp_unmap_page(bridge->scratch_page_page,
+						       bridge->scratch_page_dma);
 
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 35d50f2861b6..793f39ea9618 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -67,7 +67,7 @@ static const struct gatt_mask efficeon_generic_masks[] =
 /* This function does the same thing as mask_memory() for this chipset... */
 static inline unsigned long efficeon_mask_memory(struct page *page)
 {
-	unsigned long addr = phys_to_gart(page_to_phys(page));
+	unsigned long addr = page_to_phys(page);
 	return addr | 0x00000001;
 }
 
@@ -226,7 +226,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
 
 		efficeon_private.l1_table[index] = page;
 
-		value = virt_to_gart((unsigned long *)page) | pati | present | index;
+		value = virt_to_phys((unsigned long *)page) | pati | present | index;
 
 		pci_write_config_dword(agp_bridge->dev,
 				       EFFICEON_ATTPAGE, value);
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 1e8b461b91f1..c50543966eb2 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -437,6 +437,12 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 		curr->bridge->driver->cache_flush();
 		curr->is_flushed = true;
 	}
+
+	if (curr->bridge->driver->agp_map_memory) {
+		ret_val = curr->bridge->driver->agp_map_memory(curr);
+		if (ret_val)
+			return ret_val;
+	}
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
@@ -478,6 +484,9 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
+	if (curr->bridge->driver->agp_unmap_memory)
+		curr->bridge->driver->agp_unmap_memory(curr);
+
 	curr->is_bound = false;
 	curr->pg_start = 0;
 	spin_lock(&curr->bridge->mapped_lock);
@@ -979,7 +988,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 	set_memory_uc((unsigned long)table, 1 << page_order);
 	bridge->gatt_table = (void *)table;
 #else
-	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 					(PAGE_SIZE * (1 << page_order)));
 	bridge->driver->cache_flush();
 #endif
@@ -992,7 +1001,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 
 		return -ENOMEM;
 	}
-	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 
 	/* AK: bogus, should encode addresses > 4GB */
 	for (i = 0; i < num_entries; i++) {
@@ -1132,7 +1141,9 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
+		writel(bridge->driver->mask_memory(bridge,
+						   page_to_phys(mem->pages[i]),
+						   mask_type),
 		       bridge->gatt_table+j);
 	}
 	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
@@ -1347,9 +1358,8 @@ void global_cache_flush(void)
 EXPORT_SYMBOL(global_cache_flush);
 
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
-				      struct page *page, int type)
+				      dma_addr_t addr, int type)
 {
-	unsigned long addr = phys_to_gart(page_to_phys(page));
 	/* memory type is ignored in the generic routine */
 	if (bridge->driver->masks)
 		return addr | bridge->driver->masks[0].mask;
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 8f3d4c184914..9047b2714653 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -107,7 +107,7 @@ static int __init hp_zx1_ioc_shared(void)
 	hp->gart_size = HP_ZX1_GART_SIZE;
 	hp->gatt_entries = hp->gart_size / hp->io_page_size;
 
-	hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+	hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
 	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
 
 	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@@ -246,7 +246,7 @@ hp_zx1_configure (void)
 	agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
 
 	if (hp->io_pdir_owner) {
-		writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+		writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
 		readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
 		writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
 		readl(hp->ioc_regs+HP_ZX1_TCNFG);
@@ -394,10 +394,8 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
 }
 
 static unsigned long
-hp_zx1_mask_memory (struct agp_bridge_data *bridge,
-		    struct page *page, int type)
+hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
 {
-	unsigned long addr = phys_to_gart(page_to_phys(page));
 	return HP_ZX1_PDIR_VALID_BIT | addr;
 }
 
@@ -478,7 +476,6 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
 {
 	acpi_handle handle, parent;
 	acpi_status status;
-	struct acpi_buffer buffer;
 	struct acpi_device_info *info;
 	u64 lba_hpa, sba_hpa, length;
 	int match;
@@ -490,13 +487,11 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
 	/* Look for an enclosing IOC scope and find its CSR space */
 	handle = obj;
 	do {
-		buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-		status = acpi_get_object_info(handle, &buffer);
+		status = acpi_get_object_info(handle, &info);
 		if (ACPI_SUCCESS(status)) {
 			/* TBD check _CID also */
-			info = buffer.pointer;
-			info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
-			match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
+			info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
+			match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
 			kfree(info);
 			if (match) {
 				status = hp_acpi_csr_space(handle, &sba_hpa, &length);
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 60cc35bb5db7..e763d3312ce7 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -61,7 +61,7 @@
 #define WR_FLUSH_GATT(index)	RD_GATT(index)
 
 static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
-				       unsigned long addr, int type);
+				       dma_addr_t addr, int type);
 
 static struct {
 	void *gatt;				/* ioremap'd GATT area */
@@ -325,7 +325,7 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
 
 	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
-		paddr = phys_to_gart(page_to_phys(mem->pages[i]));
+		paddr = page_to_phys(mem->pages[i]);
 		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
 			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
 	}
@@ -382,7 +382,7 @@ static int i460_alloc_large_page (struct lp_desc *lp)
 		return -ENOMEM;
 	}
 
-	lp->paddr = phys_to_gart(page_to_phys(lp->page));
+	lp->paddr = page_to_phys(lp->page);
 	lp->refcount = 0;
 	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
 	return 0;
@@ -546,20 +546,13 @@ static void i460_destroy_page (struct page *page, int flags)
 #endif /* I460_LARGE_IO_PAGES */
 
 static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
-				       unsigned long addr, int type)
+				       dma_addr_t addr, int type)
 {
 	/* Make sure the returned address is a valid GATT entry */
 	return bridge->driver->masks[0].mask
 		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
 }
 
-static unsigned long i460_page_mask_memory(struct agp_bridge_data *bridge,
-					   struct page *page, int type)
-{
-	unsigned long addr = phys_to_gart(page_to_phys(page));
-	return i460_mask_memory(bridge, addr, type);
-}
-
 const struct agp_bridge_driver intel_i460_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = i460_sizes,
@@ -569,7 +562,7 @@ const struct agp_bridge_driver intel_i460_driver = {
 	.fetch_size = i460_fetch_size,
 	.cleanup = i460_cleanup,
 	.tlb_flush = i460_tlb_flush,
-	.mask_memory = i460_page_mask_memory,
+	.mask_memory = i460_mask_memory,
 	.masks = i460_masks,
 	.agp_enable = agp_generic_enable,
 	.cache_flush = global_cache_flush,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8c9d50db5c3a..1540e693d91e 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -10,6 +10,16 @@
 #include <linux/agp_backend.h>
 #include "agp.h"
 
+/*
+ * If we have Intel graphics, we're not going to have anything other than
+ * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+ * on the Intel IOMMU support (CONFIG_DMAR).
+ * Only newer chipsets need to bother with this, of course.
+ */
+#ifdef CONFIG_DMAR
+#define USE_PCI_DMA_API 1
+#endif
+
 #define PCI_DEVICE_ID_INTEL_E7221_HB	0x2588
 #define PCI_DEVICE_ID_INTEL_E7221_IG	0x258a
 #define PCI_DEVICE_ID_INTEL_82946GZ_HB	0x2970
@@ -49,6 +59,7 @@
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB	    0x0040
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG	    0x0042
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB	    0x0044
+#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB	    0x0062
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG	    0x0046
 
 /* cover 915 and 945 variants */
@@ -81,7 +92,8 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
 
 extern int agp_memory_reserved;
 
@@ -170,6 +182,123 @@ static struct _intel_private {
 	int resource_valid;
 } intel_private;
 
+#ifdef USE_PCI_DMA_API
+static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+{
+	*ret = pci_map_page(intel_private.pcidev, page, 0,
+			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+		return -EINVAL;
+	return 0;
+}
+
+static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+{
+	pci_unmap_page(intel_private.pcidev, dma,
+		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+	struct sg_table st;
+
+	st.sgl = mem->sg_list;
+	st.orig_nents = st.nents = mem->page_count;
+
+	sg_free_table(&st);
+
+	mem->sg_list = NULL;
+	mem->num_sg = 0;
+}
+
+static int intel_agp_map_memory(struct agp_memory *mem)
+{
+	struct sg_table st;
+	struct scatterlist *sg;
+	int i;
+
+	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+
+	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+		return -ENOMEM;
+
+	mem->sg_list = sg = st.sgl;
+
+	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+
+	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(!mem->num_sg)) {
+		intel_agp_free_sglist(mem);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void intel_agp_unmap_memory(struct agp_memory *mem)
+{
+	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+
+	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
+	intel_agp_free_sglist(mem);
+}
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+					off_t pg_start, int mask_type)
+{
+	struct scatterlist *sg;
+	int i, j;
+
+	j = pg_start;
+
+	WARN_ON(!mem->num_sg);
+
+	if (mem->num_sg == mem->page_count) {
+		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+			writel(agp_bridge->driver->mask_memory(agp_bridge,
+					sg_dma_address(sg), mask_type),
+			       intel_private.gtt+j);
+			j++;
+		}
+	} else {
+		/* sg may merge pages, but we have to separate
+		 * per-page addr for GTT */
+		unsigned int len, m;
+
+		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+			len = sg_dma_len(sg) / PAGE_SIZE;
+			for (m = 0; m < len; m++) {
+				writel(agp_bridge->driver->mask_memory(agp_bridge,
+						sg_dma_address(sg) + m * PAGE_SIZE,
+						mask_type),
+				       intel_private.gtt+j);
+				j++;
+			}
+		}
+	}
+	readl(intel_private.gtt+j-1);
+}
+
+#else
+
+static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+					off_t pg_start, int mask_type)
+{
+	int i, j;
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+				page_to_phys(mem->pages[i]), mask_type),
+		       intel_private.gtt+j);
+	}
+
+	readl(intel_private.gtt+j-1);
+}
+
+#endif
+
 static int intel_i810_fetch_size(void)
 {
 	u32 smram_miscc;
@@ -343,8 +472,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	global_cache_flush();
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				mem->pages[i],
-				mask_type),
+				page_to_phys(mem->pages[i]), mask_type),
 		       intel_private.registers+I810_PTE_BASE+(j*4));
 	}
 	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@ -461,9 +589,8 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
 }
 
 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-					    struct page *page, int type)
+					    dma_addr_t addr, int type)
 {
-	unsigned long addr = phys_to_gart(page_to_phys(page));
 	/* Type checking must be done elsewhere */
 	return addr | bridge->driver->masks[type].mask;
 }
@@ -851,7 +978,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-				mem->pages[i], mask_type),
+				page_to_phys(mem->pages[i]), mask_type),
 		       intel_private.registers+I810_PTE_BASE+(j*4));
 	}
 	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@ -1015,6 +1142,12 @@ static int intel_i915_configure(void)
 
 	intel_i9xx_setup_flush();
 
+#ifdef USE_PCI_DMA_API
+	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+		dev_err(&intel_private.pcidev->dev,
+			"set gfx device dma mask 36bit failed!\n");
+#endif
+
 	return 0;
 }
 
@@ -1039,7 +1172,7 @@ static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
 static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 				     int type)
 {
-	int i, j, num_entries;
+	int num_entries;
 	void *temp;
 	int ret = -EINVAL;
 	int mask_type;
@@ -1063,7 +1196,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 	if ((pg_start + mem->page_count) > num_entries)
 		goto out_err;
 
-	/* The i915 can't check the GTT for entries since its read only,
+	/* The i915 can't check the GTT for entries since it's read only;
 	 * depend on the caller to make the correct offset decisions.
 	 */
 
@@ -1079,12 +1212,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->pages[i], mask_type), intel_private.gtt+j);
-	}
-
-	readl(intel_private.gtt+j-1);
+	intel_agp_insert_sg_entries(mem, pg_start, mask_type);
 	agp_bridge->driver->tlb_flush(mem);
 
  out:
@@ -1196,9 +1324,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
  * this conditional.
  */
 static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-					    struct page *page, int type)
+					    dma_addr_t addr, int type)
 {
-	dma_addr_t addr = phys_to_gart(page_to_phys(page));
 	/* Shift high bits down */
 	addr |= (addr >> 28) & 0xf0;
 
@@ -1216,6 +1343,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 	case PCI_DEVICE_ID_INTEL_G41_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
 	case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
+	case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -2003,6 +2131,12 @@ static const struct agp_bridge_driver intel_915_driver = {
 	.agp_destroy_pages = agp_generic_destroy_pages,
 	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
 	.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page = intel_agp_map_page,
+	.agp_unmap_page = intel_agp_unmap_page,
+	.agp_map_memory = intel_agp_map_memory,
+	.agp_unmap_memory = intel_agp_unmap_memory,
+#endif
 };
 
 static const struct agp_bridge_driver intel_i965_driver = {
@@ -2031,6 +2165,12 @@ static const struct agp_bridge_driver intel_i965_driver = {
 	.agp_destroy_pages = agp_generic_destroy_pages,
 	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
 	.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page = intel_agp_map_page,
+	.agp_unmap_page = intel_agp_unmap_page,
+	.agp_map_memory = intel_agp_map_memory,
+	.agp_unmap_memory = intel_agp_unmap_memory,
+#endif
 };
 
 static const struct agp_bridge_driver intel_7505_driver = {
@@ -2085,6 +2225,12 @@ static const struct agp_bridge_driver intel_g33_driver = {
 	.agp_destroy_pages = agp_generic_destroy_pages,
 	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
 	.chipset_flush = intel_i915_chipset_flush,
+#ifdef USE_PCI_DMA_API
+	.agp_map_page = intel_agp_map_page,
+	.agp_unmap_page = intel_agp_unmap_page,
+	.agp_map_memory = intel_agp_map_memory,
+	.agp_unmap_memory = intel_agp_unmap_memory,
+#endif
 };
 
 static int find_gmch(u16 device)
@@ -2195,6 +2341,8 @@ static const struct intel_driver_description {
 	    "IGDNG/D", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
 	    "IGDNG/M", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+	    "IGDNG/MA", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2308,15 +2456,6 @@ static int agp_intel_resume(struct pci_dev *pdev)
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
 	int ret_val;
 
-	pci_restore_state(pdev);
-
-	/* We should restore our graphics device's config space,
-	 * as host bridge (00:00) resumes before graphics device (02:00),
-	 * then our access to its pci space can work right.
-	 */
-	if (intel_private.pcidev)
-		pci_restore_state(intel_private.pcidev);
-
 	if (bridge->driver == &intel_generic_driver)
 		intel_configure();
 	else if (bridge->driver == &intel_850_driver)
@@ -2398,6 +2537,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
 	{ }
 };
 
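The sg-based path that intel-agp.c gains above follows the usual sg_table pattern: allocate a table with one entry per page, map it with pci_map_sg(), and remember that an IOMMU may coalesce neighbouring pages, so a consumer has to step through each mapped entry in PAGE_SIZE chunks via sg_dma_len()/sg_dma_address(). A condensed, hypothetical illustration of that pattern (function and parameter names are made up, error paths trimmed):

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>

/* Sketch only: build, map and walk a scatterlist the way the new
 * intel_agp_map_memory()/intel_agp_insert_sg_entries() pair does. */
static int example_map_and_walk(struct pci_dev *pdev, struct page **pages,
				int nr_pages)
{
	struct sg_table st;
	struct scatterlist *sg;
	unsigned int m;
	int i, nents;

	if (sg_alloc_table(&st, nr_pages, GFP_KERNEL))
		return -ENOMEM;

	for_each_sg(st.sgl, sg, nr_pages, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	nents = pci_map_sg(pdev, st.sgl, nr_pages, PCI_DMA_BIDIRECTIONAL);
	if (!nents) {
		sg_free_table(&st);
		return -ENOMEM;
	}

	/* The IOMMU may have merged pages: recover per-page bus addresses. */
	for_each_sg(st.sgl, sg, nents, i)
		for (m = 0; m < sg_dma_len(sg) / PAGE_SIZE; m++)
			pr_debug("entry %d.%u at 0x%llx\n", i, m,
				 (unsigned long long)(sg_dma_address(sg) +
						      m * PAGE_SIZE));

	pci_unmap_sg(pdev, st.sgl, nr_pages, PCI_DMA_BIDIRECTIONAL);
	sg_free_table(&st);
	return 0;
}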
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 263d71dd441c..7e36d2b4f9d4 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -225,7 +225,7 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
 	}
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
-			mem->pages[i], mask_type),
+			page_to_phys(mem->pages[i]), mask_type),
 			agp_bridge->gatt_table+nvidia_private.pg_offset+j);
 	}
 
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index e077701ae3d9..60ab75104da9 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -32,7 +32,7 @@
 #define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
 
 static unsigned long
-parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
 		       int type);
 
 static struct _parisc_agp_info {
@@ -189,20 +189,12 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 }
 
 static unsigned long
-parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
 		       int type)
 {
 	return SBA_PDIR_VALID_BIT | addr;
 }
 
-static unsigned long
-parisc_agp_page_mask_memory(struct agp_bridge_data *bridge, struct page *page,
-			    int type)
-{
-	unsigned long addr = phys_to_gart(page_to_phys(page));
-	return SBA_PDIR_VALID_BIT | addr;
-}
-
 static void
 parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index d3ea2e4226b5..0d426ae39c85 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -70,10 +70,9 @@ static void sgi_tioca_tlbflush(struct agp_memory *mem)
  * entry.
  */
 static unsigned long
-sgi_tioca_mask_memory(struct agp_bridge_data *bridge,
-		      struct page *page, int type)
+sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
+		      int type)
 {
-	unsigned long addr = phys_to_gart(page_to_phys(page));
 	return tioca_physpage_to_gart(addr);
 }
 
@@ -190,7 +189,8 @@ static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		table[j] =
-		    bridge->driver->mask_memory(bridge, mem->pages[i],
+		    bridge->driver->mask_memory(bridge,
+						page_to_phys(mem->pages[i]),
 						mem->type);
 	}
 
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index b964a2199329..13acaaf64edb 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -155,7 +155,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 	/* Create a fake scratch directory */
 	for (i = 0; i < 1024; i++) {
 		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
-		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
 	}
 
 	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
@@ -167,7 +167,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 
 	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
 	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
-	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
 
 	/* Get the address for the gart region.
 	 * This is a bus address even on the alpha, b/c its
@@ -179,7 +179,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 
 	/* Calculate the agp offset */
 	for (i = 0; i < value->num_entries / 1024; i++)
-		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
 
 	return 0;
 }
@@ -349,7 +349,9 @@ static int serverworks_insert_memory(struct agp_memory *mem,
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
 		cur_gatt = SVRWRKS_GET_GATT(addr);
-		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+				page_to_phys(mem->pages[i]), mem->type),
+		       cur_gatt+GET_GATT_OFF(addr));
 	}
 	serverworks_tlbflush(mem);
 	return 0;
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index f192c3b9ad41..20ef1bf5e726 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -7,6 +7,7 @@
 #include <linux/pagemap.h>
 #include <linux/agp_backend.h>
 #include <linux/delay.h>
+#include <linux/vmalloc.h>
 #include <asm/uninorth.h>
 #include <asm/pci-bridge.h>
 #include <asm/prom.h>
@@ -27,6 +28,8 @@
 static int uninorth_rev;
 static int is_u3;
 
+#define DEFAULT_APERTURE_SIZE 256
+#define DEFAULT_APERTURE_STRING "256"
 static char *aperture = NULL;
 
 static int uninorth_fetch_size(void)
@@ -55,7 +58,7 @@ static int uninorth_fetch_size(void)
 
 	if (!size) {
 		for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
-			if (values[i].size == 32)
+			if (values[i].size == DEFAULT_APERTURE_SIZE)
 				break;
 	}
 
@@ -135,7 +138,7 @@ static int uninorth_configure(void)
 	if (is_u3) {
 		pci_write_config_dword(agp_bridge->dev,
 				       UNI_N_CFG_GART_DUMMY_PAGE,
-				       agp_bridge->scratch_page_real >> 12);
+				       page_to_phys(agp_bridge->scratch_page_page) >> 12);
 	}
 
 	return 0;
@@ -179,8 +182,6 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
 	}
 	(void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
 	mb();
-	flush_dcache_range((unsigned long)&agp_bridge->gatt_table[pg_start],
-		(unsigned long)&agp_bridge->gatt_table[pg_start + mem->page_count]);
 
 	uninorth_tlbflush(mem);
 	return 0;
@@ -224,7 +225,6 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 			      (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
 	}
 	mb();
-	flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
 	uninorth_tlbflush(mem);
 
 	return 0;
@@ -243,7 +243,6 @@ int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	for (i = 0; i < mem->page_count; ++i)
 		gp[i] = 0;
 	mb();
-	flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
 	uninorth_tlbflush(mem);
 
 	return 0;
@@ -396,6 +395,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
 	int i;
 	void *temp;
 	struct page *page;
+	struct page **pages;
 
 	/* We can't handle 2 level gatt's */
 	if (bridge->driver->size_type == LVL2_APER_SIZE)
@@ -424,21 +424,39 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
 	if (table == NULL)
 		return -ENOMEM;
 
+	pages = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL);
+	if (pages == NULL)
+		goto enomem;
+
 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 
-	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
+	for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
+	     page++, i++) {
 		SetPageReserved(page);
+		pages[i] = page;
+	}
 
 	bridge->gatt_table_real = (u32 *) table;
-	bridge->gatt_table = (u32 *)table;
-	bridge->gatt_bus_addr = virt_to_gart(table);
+	/* Need to clear out any dirty data still sitting in caches */
+	flush_dcache_range((unsigned long)table,
+			   (unsigned long)(table_end + PAGE_SIZE));
+	bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG);
+
+	if (bridge->gatt_table == NULL)
+		goto enomem;
+
+	bridge->gatt_bus_addr = virt_to_phys(table);
 
 	for (i = 0; i < num_entries; i++)
 		bridge->gatt_table[i] = 0;
 
-	flush_dcache_range((unsigned long)table, (unsigned long)table_end);
-
 	return 0;
+
+enomem:
+	kfree(pages);
+	if (table)
+		free_pages((unsigned long)table, page_order);
+	return -ENOMEM;
 }
 
 static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
@@ -456,6 +474,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
 	 * from the table.
 	 */
 
+	vunmap(bridge->gatt_table);
 	table = (char *) bridge->gatt_table_real;
 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 
@@ -474,13 +493,11 @@ void null_cache_flush(void)
 
 /* Setup function */
 
-static const struct aper_size_info_32 uninorth_sizes[7] =
+static const struct aper_size_info_32 uninorth_sizes[] =
 {
-#if 0 /* Not sure uninorth supports that high aperture sizes */
 	{256, 65536, 6, 64},
 	{128, 32768, 5, 32},
 	{64, 16384, 4, 16},
-#endif
 	{32, 8192, 3, 8},
 	{16, 4096, 2, 4},
 	{8, 2048, 1, 2},
@@ -491,7 +508,7 @@ static const struct aper_size_info_32 uninorth_sizes[7] =
  * Not sure that u3 supports that high aperture sizes but it
  * would strange if it did not :)
  */
-static const struct aper_size_info_32 u3_sizes[8] =
+static const struct aper_size_info_32 u3_sizes[] =
 {
 	{512, 131072, 7, 128},
 	{256, 65536, 6, 64},
@@ -507,7 +524,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = (void *)uninorth_sizes,
 	.size_type = U32_APER_SIZE,
-	.num_aperture_sizes = 4,
+	.num_aperture_sizes = ARRAY_SIZE(uninorth_sizes),
 	.configure = uninorth_configure,
 	.fetch_size = uninorth_fetch_size,
 	.cleanup = uninorth_cleanup,
@@ -534,7 +551,7 @@ const struct agp_bridge_driver u3_agp_driver = {
 	.owner = THIS_MODULE,
 	.aperture_sizes = (void *)u3_sizes,
 	.size_type = U32_APER_SIZE,
-	.num_aperture_sizes = 8,
+	.num_aperture_sizes = ARRAY_SIZE(u3_sizes),
 	.configure = uninorth_configure,
 	.fetch_size = uninorth_fetch_size,
 	.cleanup = uninorth_cleanup,
@@ -717,7 +734,7 @@ module_param(aperture, charp, 0);
 MODULE_PARM_DESC(aperture,
		 "Aperture size, must be power of two between 4MB and an\n"
		 "\t\tupper limit specific to the UniNorth revision.\n"
-		 "\t\tDefault: 32M");
+		 "\t\tDefault: " DEFAULT_APERTURE_STRING "M");
 
 MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
 MODULE_LICENSE("GPL");
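The uninorth changes drop the flush_dcache_range() calls on every GATT update by mapping the GATT pages non-cacheable once with vmap(), so CPU stores to the table become visible to the bridge without explicit flushing. A stripped-down sketch of that idea, with hypothetical function names; PAGE_KERNEL_NCG is the powerpc non-cached, non-guarded protection the driver uses, and other platforms would pick their own uncached pgprot:

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Sketch only: present an array of GATT pages as one uncached mapping. */
static u32 *example_map_gatt_uncached(struct page **pages, int nr_pages)
{
	return vmap(pages, nr_pages, 0, PAGE_KERNEL_NCG);
}

static void example_unmap_gatt(u32 *gatt)
{
	vunmap(gatt);
}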