author     Ingo Molnar <mingo@elte.hu>  2009-03-14 03:16:21 -0400
committer  Ingo Molnar <mingo@elte.hu>  2009-03-14 04:44:08 -0400
commit     62395efdb0ef42e664ca81677901268c403a6286 (patch)
tree       7ce9e6ba8b3fa0c004f852b56966e96ad948d2f8 /arch/x86/mm
parent     ccd50dfd92ea2c4ba9e39531ac55db53393e783e (diff)
parent     88200bc28da38bcda1cb1bd218216e83b426d8a8 (diff)
Merge branch 'x86/asm' into tracing/syscalls
We need the wider TIF work-mask checks in entry_32.S.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/highmem_32.c  19
-rw-r--r--  arch/x86/mm/iomap_32.c    13
-rw-r--r--  arch/x86/mm/kmmio.c        2
3 files changed, 16 insertions, 18 deletions
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index d11745334a67..f256e73542d7 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -121,23 +121,30 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
-/* This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
 	pagefault_disable();
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
+	debug_kmap_atomic_prot(type);
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
 	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
+
+/* This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
+}
 EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
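After this hunk, kmap_atomic_pfn() is a thin wrapper around the new kmap_atomic_prot_pfn(), which additionally lets the caller choose the page protection. A minimal usage sketch follows (the caller, the pfn argument, and the memset() payload are hypothetical; the API and the kunmap_atomic() pairing come from the code above):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: zero one page given only its pfn. */
static void example_poke_pfn(unsigned long pfn)
{
	void *vaddr;

	/* Disables pagefaults; no sleeping until the matching unmap. */
	vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);

	memset(vaddr, 0, PAGE_SIZE);	/* touch the mapped page */

	kunmap_atomic(vaddr, KM_USER0);	/* re-enables pagefaults */
}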
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 04102d42ff42..592984e5496b 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,6 +18,7 @@
 
 #include <asm/iomap.h>
 #include <asm/pat.h>
+#include <asm/highmem.h>
 #include <linux/module.h>
 
 int is_io_mapping_possible(resource_size_t base, unsigned long size)
@@ -36,11 +37,6 @@ EXPORT_SYMBOL_GPL(is_io_mapping_possible);
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	pagefault_disable();
-
 	/*
 	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
 	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
@@ -50,12 +46,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void*) vaddr;
+	return kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
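With the mapping logic shared via kmap_atomic_prot_pfn(), iomap_atomic_prot_pfn() now keeps only the PAT fallback and delegates the rest. A caller sketch follows (the BAR pfn and the register write are hypothetical; it assumes the iounmap_atomic() counterpart that lives in this file outside the hunk):

#include <linux/io.h>
#include <asm/iomap.h>

/* Hypothetical caller: one write-combined store into a device page. */
static void example_write_wc(unsigned long bar_pfn, u32 value)
{
	void __iomem *vaddr;

	/* PAGE_KERNEL_WC degrades to UC- on non-PAT systems, as above. */
	vaddr = (void __force __iomem *)
		iomap_atomic_prot_pfn(bar_pfn, KM_USER0, PAGE_KERNEL_WC);

	writel(value, vaddr);

	iounmap_atomic((void __force *)vaddr, KM_USER0);
}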
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 6a518dd08a36..4f115e00486b 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -310,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active) {
-		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
+		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
 			smp_processor_id());
 		goto out;
 	}
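For context on the severity change (a sketch, not from the patch): pr_debug() compiles to a no-op unless DEBUG is defined before the include, or dynamic debug enables the call site, so the spurious-trap message stays out of production logs by default:

#define DEBUG			/* opt in; otherwise pr_debug() is a no-op */
#include <linux/kernel.h>

static void kmmio_log_example(int cpu)
{
	/* Emitted only because DEBUG is defined above. */
	pr_debug("kmmio: spurious debug trap on CPU %d.\n", cpu);
}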