Diffstat (limited to 'drivers/char/agp/i460-agp.c')
-rw-r--r--  drivers/char/agp/i460-agp.c | 17 +++++------------
1 file changed, 5 insertions, 12 deletions
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 60cc35bb5db7..e763d3312ce7 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -61,7 +61,7 @@
 #define WR_FLUSH_GATT(index)	RD_GATT(index)
 
 static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
-				       unsigned long addr, int type);
+				       dma_addr_t addr, int type);
 
 static struct {
	void *gatt;			/* ioremap'd GATT area */
@@ -325,7 +325,7 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
 
	io_page_size = 1UL << I460_IO_PAGE_SHIFT;
	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
-		paddr = phys_to_gart(page_to_phys(mem->pages[i]));
+		paddr = page_to_phys(mem->pages[i]);
		for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
			WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
	}
@@ -382,7 +382,7 @@ static int i460_alloc_large_page (struct lp_desc *lp)
		return -ENOMEM;
	}
 
-	lp->paddr = phys_to_gart(page_to_phys(lp->page));
+	lp->paddr = page_to_phys(lp->page);
	lp->refcount = 0;
	atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
	return 0;
@@ -546,20 +546,13 @@ static void i460_destroy_page (struct page *page, int flags)
 #endif /* I460_LARGE_IO_PAGES */
 
 static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
-				       unsigned long addr, int type)
+				       dma_addr_t addr, int type)
 {
	/* Make sure the returned address is a valid GATT entry */
	return bridge->driver->masks[0].mask
		| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
 }
 
-static unsigned long i460_page_mask_memory(struct agp_bridge_data *bridge,
-					   struct page *page, int type)
-{
-	unsigned long addr = phys_to_gart(page_to_phys(page));
-	return i460_mask_memory(bridge, addr, type);
-}
-
 const struct agp_bridge_driver intel_i460_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= i460_sizes,
@@ -569,7 +562,7 @@ const struct agp_bridge_driver intel_i460_driver = {
	.fetch_size		= i460_fetch_size,
	.cleanup		= i460_cleanup,
	.tlb_flush		= i460_tlb_flush,
-	.mask_memory		= i460_page_mask_memory,
+	.mask_memory		= i460_mask_memory,
	.masks			= i460_masks,
	.agp_enable		= agp_generic_enable,
	.cache_flush		= global_cache_flush,
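
For context, a minimal standalone sketch of the masking arithmetic that the updated i460_mask_memory() performs on a dma_addr_t: align the address down to an I/O page boundary, truncate to the low 32 bits, and shift the page frame into the low bits of the GATT entry before OR-ing in the bridge's mask. The shift value, mask constant, and function name below are illustrative placeholders, not the values defined in the real i460-agp.c header.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values -- the real driver uses I460_IO_PAGE_SHIFT and
 * bridge->driver->masks[0].mask; 12 and 0x17 here are assumptions. */
#define IO_PAGE_SHIFT 12
#define BRIDGE_MASK   0x00000017UL

/* Mirrors the expression in i460_mask_memory(): mask off the sub-page
 * bits, keep the low 32 bits of the address, and shift the frame number
 * down by 12 so it lands in the GATT entry's PFN field. */
static unsigned long mask_memory_sketch(uint64_t addr)
{
	return BRIDGE_MASK
		| (((addr & ~((1ULL << IO_PAGE_SHIFT) - 1)) & 0xfffff000ULL) >> 12);
}

int main(void)
{
	printf("GATT entry for 0x12345678: %#lx\n",
	       (unsigned long)mask_memory_sketch(0x12345678ULL));
	return 0;
}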