author     Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:13:55 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2007-10-11 05:13:55 -0400
commit     b0aeae21da297caad0d6d0608b197e6ef97faf6d (patch)
tree       2743c48c08577a5403afb4c7dcf118b2090a97bf /arch/i386/mm/highmem.c
parent     1c9541c7bc437c4d1bcad1e56770622c24e634a1 (diff)
i386: prepare shared mm/highmem.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/i386/mm/highmem.c')
-rw-r--r--  arch/i386/mm/highmem.c | 113 ----------------
1 file changed, 0 insertions, 113 deletions
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
deleted file mode 100644
index 1c3bf95f7356..000000000000
--- a/arch/i386/mm/highmem.c
+++ /dev/null
@@ -1,113 +0,0 @@
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-void *kmap(struct page *page)
-{
-        might_sleep();
-        if (!PageHighMem(page))
-                return page_address(page);
-        return kmap_high(page);
-}
-
-void kunmap(struct page *page)
-{
-        if (in_interrupt())
-                BUG();
-        if (!PageHighMem(page))
-                return;
-        kunmap_high(page);
-}
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * it needs no global lock and no global TLB invalidation (the kmap code must
- * perform one whenever the kmap pool wraps).
- *
- * However, when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-        enum fixed_addresses idx;
-        unsigned long vaddr;
-
-        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-        pagefault_disable();
-
-        if (!PageHighMem(page))
-                return page_address(page);
-
-        idx = type + KM_TYPE_NR * smp_processor_id();
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        BUG_ON(!pte_none(*(kmap_pte - idx)));
-        set_pte(kmap_pte - idx, mk_pte(page, prot));
-        arch_flush_lazy_mmu_mode();
-
-        return (void *)vaddr;
-}
-
-void *kmap_atomic(struct page *page, enum km_type type)
-{
-        return kmap_atomic_prot(page, type, kmap_prot);
-}
-
-void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-        unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
-        enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
-
-        /*
-         * Force other mappings to Oops if they try to access this pte
-         * without first remapping it. Keeping stale mappings around is
-         * also a bad idea, in case the page changes cacheability
-         * attributes or becomes a protected page in a hypervisor.
-         */
-        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN + idx))
-                kpte_clear_flush(kmap_pte - idx, vaddr);
-        else {
-#ifdef CONFIG_DEBUG_HIGHMEM
-                BUG_ON(vaddr < PAGE_OFFSET);
-                BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
-        }
-
-        arch_flush_lazy_mmu_mode();
-        pagefault_enable();
-}
-
-/* This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-        enum fixed_addresses idx;
-        unsigned long vaddr;
-
-        pagefault_disable();
-
-        idx = type + KM_TYPE_NR * smp_processor_id();
-        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-        set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
-        arch_flush_lazy_mmu_mode();
-
-        return (void *)vaddr;
-}
-
-struct page *kmap_atomic_to_page(void *ptr)
-{
-        unsigned long idx, vaddr = (unsigned long)ptr;
-        pte_t *pte;
-
-        if (vaddr < FIXADDR_START)
-                return virt_to_page(ptr);
-
-        idx = virt_to_fix(vaddr);
-        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
-        return pte_page(*pte);
-}
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
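
For reference, a minimal usage sketch of the km_type-based atomic-kmap API the deleted file implemented (not part of this commit): kmap_atomic() claims a per-CPU fixmap slot and disables pagefaults, so the mapping must be released on the same CPU without sleeping in between. The helper copy_highpage_to_buf() is hypothetical, invented purely for illustration; the kmap_atomic()/kunmap_atomic() calls and the KM_USER0 slot are the real API of this kernel era.

/*
 * Hypothetical illustration (not in the kernel tree): copy one
 * highmem page into a linear kernel buffer via the atomic kmap
 * API shown above.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_highpage_to_buf(struct page *page, void *buf)
{
        void *vaddr;

        /* Claims the KM_USER0 fixmap slot on this CPU and disables
         * pagefaults; sleeping is forbidden until kunmap_atomic(). */
        vaddr = kmap_atomic(page, KM_USER0);
        memcpy(buf, vaddr, PAGE_SIZE);
        kunmap_atomic(vaddr, KM_USER0);
}

Because each slot is per-CPU and per-km_type, nesting two mappings with the same slot on one CPU would silently reuse the same pte; nested users had to pick distinct slots (e.g. KM_USER0 and KM_USER1), which is why the enum km_type argument exists throughout this file.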