author		Andrew Morton <akpm@linux-foundation.org>	2007-09-11 18:24:10 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-09-11 20:21:27 -0400
commit		4150d3f549fe2355625017b2a6ff72aec98bcef0
tree		2d6b7445928c8a295214737a31260d36e9009795 /arch/i386
parent		298a5df45d497e66064fda22ef0abf13766d3333
revert "highmem: catch illegal nesting"
Revert
    commit 656dad312fb41ed95ef08325e9df9bece3aacbbb
    Author: Ingo Molnar <mingo@elte.hu>
    Date:   Sat Feb 10 01:46:36 2007 -0800

        [PATCH] highmem: catch illegal nesting

        Catch illegally nested kmap_atomic()s even if the page that is mapped by
        the 'inner' instance is from lowmem.

        This avoids spuriously zapped kmap-atomic ptes and turns hard to find
        crashes into clear asserts at the bug site.
Problem is, a get_zeroed_page(GFP_KERNEL) from interrupt context will trigger
this check if non-irq code on this CPU holds a KM_USER0 mapping.  But since a
GFP_KERNEL allocation can only return a lowmem page, that get_zeroed_page()
will never alter the kmap slot anyway.
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
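A hypothetical sketch of the scenario the changelog describes, written against
the 2.6.23-era i386 kmap API.  The function name fill_page() and the dst_page
argument are illustrative only; they are not part of this commit or of the
kernel tree.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative sketch (not from this commit) of the spurious BUG_ON()
 * trigger described in the changelog above.
 */
static void fill_page(struct page *dst_page)
{
	/* Non-irq code takes the per-CPU KM_USER0 fixmap slot. */
	void *dst = kmap_atomic(dst_page, KM_USER0);

	/*
	 * Suppose an interrupt lands here on the same CPU and its handler
	 * does a get_zeroed_page(GFP_KERNEL) as in the changelog.  Zeroing
	 * also goes through kmap_atomic(page, KM_USER0), but a GFP_KERNEL
	 * allocation can only hand back a lowmem page, so that inner call
	 * merely takes the !PageHighMem() shortcut and returns
	 * page_address(page) without ever writing the fixmap pte.  With
	 * BUG_ON(!pte_none(...)) hoisted above the shortcut, the inner call
	 * oopses even though the slot is never clobbered - hence the revert.
	 */

	memset(dst, 0, PAGE_SIZE);
	kunmap_atomic(dst, KM_USER0);
}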
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/mm/highmem.c	7
1 files changed, 3 insertions, 4 deletions
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ad8d86cc683e..1c3bf95f7356 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -34,17 +34,16 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
 	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	arch_flush_lazy_mmu_mode();
 
-	return (void*) vaddr;
+	return (void *)vaddr;
 }
 
 void *kmap_atomic(struct page *page, enum km_type type)
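For reference, the new side of the hunk leaves kmap_atomic_prot() looking
roughly as follows; the local variable declarations sit above the hunk and are
filled in here as an assumption, and the comments are editorial rather than
part of the source.

void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;	/* assumed: declared above the hunk */
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/* Lowmem pages are permanently mapped: no fixmap slot is touched. */
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	/* The nesting check once again guards only real highmem mappings. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}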