author     Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:20:03 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:20:03 -0400
commit     96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch)
tree       d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86/agp_64.h
parent     27bd0c955648646abf2a353a8371d28c37bcd982 (diff)
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fix up the header install make rules.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
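For context (not part of this diff): the unified include/asm-x86 directory introduced by this series selects between the old i386 and x86_64 variants of each header through small wrapper headers. The following is only a sketch of such a wrapper for agp.h, assuming the usual CONFIG_X86_32 dispatch pattern; the real include/asm-x86/agp.h is not shown in this patch:

/*
 * Sketch only -- the actual wrapper header is not part of this diff.
 * It is assumed to pick the 32-bit or 64-bit variant at compile time.
 */
#ifdef CONFIG_X86_32
# include "agp_32.h"
#else
# include "agp_64.h"
#endif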
Diffstat (limited to 'include/asm-x86/agp_64.h')
-rw-r--r--   include/asm-x86/agp_64.h   34
1 file changed, 34 insertions, 0 deletions
diff --git a/include/asm-x86/agp_64.h b/include/asm-x86/agp_64.h
new file mode 100644
index 000000000000..de338666f3f9
--- /dev/null
+++ b/include/asm-x86/agp_64.h
@@ -0,0 +1,34 @@
+#ifndef AGP_H
+#define AGP_H 1
+
+#include <asm/cacheflush.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent.
+ * The GART gives the CPU a physical alias of memory. The alias is
+ * mapped uncacheable. Make sure there are no conflicting mappings
+ * with different cachability attributes for the same page.
+ */
+
+/* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
+#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
+#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
+#define flush_agp_mappings() global_flush_tlb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+   need to be called for each cacheline of the whole page so it may not be
+   worth it. Would need a page for it. */
+#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order)	\
+	((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order)	\
+	free_pages((unsigned long)(table), (order))
+
+#endif
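To show how the helpers above are meant to be used together, here is an illustrative sketch of a caller that maps a single page for the GART. setup_one_agp_page() is an invented name, not a kernel function, and the header is assumed to be reached through <asm/agp.h>, as AGP backends normally do:

/*
 * Illustrative sketch only: setup_one_agp_page() is made up for this
 * example and error handling is minimal. It demonstrates the calling
 * pattern of the macros defined in agp_64.h.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/agp.h>
#include <asm/io.h>

static dma_addr_t setup_one_agp_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return 0;

	/* Remap the kernel alias of the page uncacheable ... */
	map_page_into_agp(page);
	/* ... and flush the TLB, which the macro leaves to the caller
	 * for performance reasons. */
	flush_agp_mappings();

	/* Return an address the GART can use; on x86-64 phys_to_gart()
	 * is an identity conversion. */
	return phys_to_gart(page_to_phys(page));
}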