aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorSuresh Siddha <suresh.b.siddha@intel.com>2009-01-13 13:21:30 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-21 12:42:32 -0500
commit9597134218300c045cf219be3664615e97cb239c (patch)
tree0395c2d245709c0de13917b0a9e404a4b27d2817 /arch/x86
parent731f1872f4e8a0f1eabd49c3548207e79a421202 (diff)
x86: fix PTE corruption issue while mapping RAM using /dev/mem
Beschorner Daniel reported:

> hwinfo problem since 2.6.28, showing this in the oops:
> Corrupted page table at address 7fd04de3ec00

Also, PaX Team reported a regression with this commit:

> commit 9542ada803198e6eba29d3289abb39ea82047b92
> Author: Suresh Siddha <suresh.b.siddha@intel.com>
> Date:   Wed Sep 24 08:53:33 2008 -0700
>
>     x86: track memtype for RAM in page struct

This commit breaks mapping any RAM page through /dev/mem, as the
reserve_memtype() was not initializing the return attribute type and as
such corrupting the PTE entry that was setup with the return attribute
type.

Because of this bug, an application mapping this RAM page through
/dev/mem will die with a "Corrupted page table at address xxxx" message
in the kernel log, and also the kernel identity mapping which maps the
underlying RAM page gets converted to UC.

Fix this by initializing the return attribute type before calling
reserve_ram_pages_type().

Reported-by: PaX Team <pageexec@freemail.hu>
Reported-and-tested-by: Beschorner Daniel <Daniel.Beschorner@facton.com>
Tested-and-Acked-by: PaX Team <pageexec@freemail.hu>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/mm/pat.c6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 070ee4a3b225..ffc88cc00fda 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,6 +333,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
333 req_type & _PAGE_CACHE_MASK); 333 req_type & _PAGE_CACHE_MASK);
334 } 334 }
335 335
336 if (new_type)
337 *new_type = actual_type;
338
336 /* 339 /*
337 * For legacy reasons, some parts of the physical address range in the 340 * For legacy reasons, some parts of the physical address range in the
338 * legacy 1MB region is treated as non-RAM (even when listed as RAM in 341 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
@@ -356,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
356 new->end = end; 359 new->end = end;
357 new->type = actual_type; 360 new->type = actual_type;
358 361
359 if (new_type)
360 *new_type = actual_type;
361
362 spin_lock(&memtype_lock); 362 spin_lock(&memtype_lock);
363 363
364 if (cached_entry && start >= cached_start) 364 if (cached_entry && start >= cached_start)