author	Christian Borntraeger <borntraeger@de.ibm.com>	2010-04-09 07:43:01 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-04-26 10:47:51 -0400
commit	86c4129a017aaade3bc1fe3f47156ffd99e16f16 (patch)
tree	10cf090d59a4e4719b5bf91e3e3b85c7a0bae5a5
parent	351bb6c3608dca332b5a84be8302cbff8b835612 (diff)
s390: disable change bit override
commit 6af7eea2aee57b869f34eba0a94ef122fe90fbfd upstream.

Commit 6a985c6194017de2c062916ad1cd00dee0302c40 ([S390] s390: use change
recording override for kernel mapping) deactivated change bit recording for
the kernel mapping to improve performance. This works most of the time, but
there are cases (e.g. the kernel runs in home space, futex atomic
compare-and-exchange) where we modify user memory through the kernel mapping
instead of the user mapping. Instead of fixing these cases individually, this
patch simply deactivates the change bit override to avoid future problems
with other kernel code that might use the kernel mapping for user memory.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
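For context, the change recording override bit in an s390 page table entry tells the hardware that it need not update the change (dirty) bit in the page's storage key for stores made through that mapping. The short C model below is an illustration only, not kernel code: PAGE_CO_MODEL, struct page_model and store_via_mapping are invented names for this sketch. It shows why leaving the override active on the kernel mapping becomes unsafe once that mapping is used to write user memory: the store goes through, but the modification is never recorded, so dirty tracking for the user page is silently lost.

/*
 * Illustrative model only -- not kernel code.  PAGE_CO_MODEL stands in for
 * the PTE change recording override bit; struct page_model models a user
 * page together with the change bit of its storage key.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_CO_MODEL 0x1

struct page_model {
	bool change_bit;	/* models the storage key change bit */
	unsigned char data;
};

/* A store through some mapping; pte_flags models the PTE used for the access. */
static void store_via_mapping(struct page_model *page, unsigned int pte_flags,
			      unsigned char value)
{
	page->data = value;
	if (!(pte_flags & PAGE_CO_MODEL))
		page->change_bit = true;	/* hardware records the change */
	/* with the override set, the change bit is left untouched */
}

int main(void)
{
	struct page_model user_page = { .change_bit = false, .data = 0 };

	/* write through the user mapping: the change bit gets set */
	store_via_mapping(&user_page, 0, 0xaa);
	printf("user mapping write:   change_bit=%d\n", user_page.change_bit);

	user_page.change_bit = false;

	/*
	 * write through a kernel mapping that carries the override
	 * (e.g. a futex compare-and-exchange landing on a user page):
	 * the data changes, but the modification is not recorded
	 */
	store_via_mapping(&user_page, PAGE_CO_MODEL, 0xbb);
	printf("kernel mapping write: change_bit=%d\n", user_page.change_bit);
	return 0;
}

Dropping the override from the kernel mapping, as this patch does, trades a small amount of performance for not having to audit every place where the kernel mapping might be used to touch user memory.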
-rw-r--r--	arch/s390/mm/vmem.c	11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 300ab012b0fd..5f91a38d7592 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -70,12 +70,8 @@ static pte_t __ref *vmem_pte_alloc(void)
 	pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	if (MACHINE_HAS_HPAGE)
-		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
-			    PTRS_PER_PTE * sizeof(pte_t));
-	else
-		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
-			    PTRS_PER_PTE * sizeof(pte_t));
+	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
 
@@ -116,8 +112,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
 		    (address + HPAGE_SIZE <= start + size) &&
 		    (address >= HPAGE_SIZE)) {
-			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
-					_SEGMENT_ENTRY_CO;
+			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
 			pmd_val(*pm_dir) = pte_val(pte);
 			address += HPAGE_SIZE - PAGE_SIZE;
 			continue;