author     Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:16:47 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-11 05:16:47 -0400
commit     ad757b6aa5801b81dec609d87753604a06313c53 (patch)
tree       7bb40460e1729ad370b5ae75e65f9e6a0e824328 /arch/x86/mm/highmem_32.c
parent     96ae6ea0be1b902c28b3b463c27da42b41e2b63a (diff)
i386: move mm
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/highmem_32.c')
-rw-r--r--    arch/x86/mm/highmem_32.c    113
1 files changed, 113 insertions, 0 deletions
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
new file mode 100644
index 000000000000..1c3bf95f7356
--- /dev/null
+++ b/arch/x86/mm/highmem_32.c
@@ -0,0 +1,113 @@
#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();

        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
        return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it. Keeping stale mappings around is
         * also a bad idea, in case the page changes cacheability
         * attributes or becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
                kpte_clear_flush(kmap_pte-idx, vaddr);
        else {
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
        }

        arch_flush_lazy_mmu_mode();
        pagefault_enable();
}

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);
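
A minimal caller sketch for the sleepable pair above (the helper name copy_page_start is illustrative, not part of this commit): kmap() may block while a slot in the global kmap pool becomes free, so it is only valid in process context, and kunmap() must not run in interrupt context, per the BUG() check in the file.

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy the first len bytes of a (possibly highmem)
 * page into a kernel buffer from process context, where sleeping is OK. */
static void copy_page_start(struct page *page, void *buf, size_t len)
{
        char *vaddr = kmap(page);       /* may sleep on the kmap pool */

        memcpy(buf, vaddr, len);
        kunmap(page);                   /* not legal from interrupt context */
}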
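
A sketch of the atomic variant, assuming a hypothetical helper zero_highpage_atomic(): the km_type slot passed to kunmap_atomic() must match the one used for kmap_atomic(), pagefaults stay disabled for the whole window, and nothing between the two calls may sleep.

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: zero a page from a context that cannot sleep.
 * KM_USER0 is one of the per-CPU fixmap slots from enum km_type. */
static void zero_highpage_atomic(struct page *page)
{
        void *vaddr = kmap_atomic(page, KM_USER0);  /* disables pagefaults */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap_atomic(vaddr, KM_USER0); /* slot must match the kmap_atomic() */
}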
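
kmap_atomic_pfn() covers page frames that have no struct page behind them. A hedged sketch under that assumption (read_word_at_pfn and the slot choice are hypothetical, not taken from this commit):

#include <linux/highmem.h>
#include <linux/types.h>

/* Illustrative only: read one 32-bit word from a raw page frame.
 * offset must stay below PAGE_SIZE, since exactly one page is mapped. */
static u32 read_word_at_pfn(unsigned long pfn, unsigned long offset)
{
        void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
        u32 val = *(u32 *)(vaddr + offset);

        kunmap_atomic(vaddr, KM_USER0);
        return val;
}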