author:    Nicolas Pitre <nico@cam.org>                2009-09-03 16:45:59 -0400
committer: Russell King <rmk+kernel@arm.linux.org.uk>  2009-09-04 14:20:07 -0400
commit:    7929eb9cf643ae416e5081b2a6fa558d37b9854c
tree:      c4cbaa5ccdd1c929eba802374d80191b6f90e16c  /arch/arm/mm
parent:    c47a830c08a26a7c210ae16a0ffe3f56ba86ea69
ARM: 5691/1: fix cache aliasing issues between kmap() and kmap_atomic() with highmem
Let's suppose a highmem page is kmap'd with kmap(). A pkmap entry is
used, the page is mapped to it, and the virtual cache is dirtied. Then
kunmap() is called, which does virtually nothing except decrement a
usage count.
Then, let's suppose the _same_ page gets mapped using kmap_atomic().
It is therefore mapped onto a fixmap entry instead, which has a
different virtual address and is unaware of the dirty cache data for
that page still sitting in the pkmap mapping.
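To make the aliasing scenario concrete, here is a minimal illustrative
sketch (the caller alias_example(), its memset() pattern and the use of
KM_USER0 are made up for illustration; the pre-2.6.37 kmap_atomic()
signature with an explicit km_type is assumed): the same highmem page is
written through the pkmap address and then accessed through a fixmap
address, so on a virtually indexed cache the second mapping does not see
the dirty lines left behind by the first.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical caller, for illustration only. */
static void alias_example(struct page *page)
{
	void *p, *q;

	p = kmap(page);			/* pkmap virtual address */
	memset(p, 0xaa, PAGE_SIZE);	/* dirties cache lines at the pkmap alias */
	kunmap(page);			/* only drops the use count; the mapping
					 * and the dirty cache lines remain */

	q = kmap_atomic(page, KM_USER0); /* fixmap address, different from p */
	/* reads through q can miss the data still cached under p */
	kunmap_atomic(q, KM_USER0);
}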
Fortunately, it is easy to know whether a pkmap mapping still exists for
that page and to use it directly in kmap_atomic(), thanks to kmap_high_get().
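For reference, a simplified sketch of what kmap_high_get() provides (this
is not the exact mm/highmem.c implementation, and the helper name below is
made up): it returns the page's existing pkmap virtual address with the
pkmap reference count raised, or NULL if there is no such mapping, and a
non-NULL result must later be balanced by kunmap_high().

/*
 * Simplified sketch, not the actual mm/highmem.c code: return the
 * existing pkmap mapping of a highmem page, pinned by an extra
 * reference, or NULL if the page is not currently kmap'd.
 */
static void *kmap_high_get_sketch(struct page *page)
{
	unsigned long vaddr, flags;

	spin_lock_irqsave(&kmap_lock, flags);		/* usable from any context */
	vaddr = (unsigned long)page_address(page);	/* 0 if not kmap'd */
	if (vaddr)
		pkmap_count[PKMAP_NR(vaddr)]++;		/* pin the pkmap entry */
	spin_unlock_irqrestore(&kmap_lock, flags);
	return (void *)vaddr;
}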
Testing with a printk() in the added code path shows that this
condition is met *extremely* frequently. It seems we have been quite
lucky that things have worked so well with highmem so far.
Cc: stable@kernel.org
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/highmem.c | 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a34954d9df7d..73cae57fa707 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -40,11 +40,16 @@ void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned int idx;
 	unsigned long vaddr;
+	void *kmap;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	kmap = kmap_high_get(page);
+	if (kmap)
+		return kmap;
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,6 +85,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #else
 	(void) idx;  /* to kill a warning */
 #endif
+	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* this address was obtained through kmap_high_get() */
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }