author      Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-03-18 08:27:36 -0400
committer   Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-03-18 08:28:13 -0400
commit      f481bfafd36e621d6cbc62d4b25f74811410aef7 (patch)
tree        781f98037ec772f2b7d480d5642135f7c1ec8b89
parent      2887fc5aa60803b9d6d38c79248805f08d8b0e28 (diff)
[S390] make page table walking more robust
Make page table walking on s390 more robust. The current code requires
that the pgd/pud/pmd/pte loop is only run for address ranges below the
end address of the last vma in the address space. This does not always
hold; the generic page table walker, for example, gives no such
guarantee. Change TASK_SIZE/TASK_SIZE_OF to reflect the current size of
the address space. This keeps the generic page table walker within the
installed page table, but it breaks the upgrade from a 3 level to a
4 level page table; another fix is required to make the upgrade work
again.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
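
For illustration, a minimal userspace sketch (not kernel code; the struct,
the walk() helper and the constants below are stand-ins for
mm->context.asce_limit and the 3-/4-level table limits) of why a fixed
TASK_SIZE can send a generic walker past the installed page table, while a
TASK_SIZE derived from the current asce_limit keeps the walk inside it:

/* Sketch only: mimics the bound check, not the real pgd/pud/pmd/pte walk. */
#include <stdio.h>

struct mm_sim {
        unsigned long asce_limit;       /* what the installed table covers */
};

/* Highest address a walker may be asked to touch under each definition. */
static unsigned long task_size_fixed(void)
{
        return 1UL << 53;               /* old: always the 4-level maximum */
}

static unsigned long task_size_dynamic(const struct mm_sim *mm)
{
        return mm->asce_limit;          /* new: limit of the current table */
}

static void walk(const struct mm_sim *mm, unsigned long end, const char *tag)
{
        if (end > mm->asce_limit)
                printf("%s: walk to %#lx overruns table (covers %#lx)\n",
                       tag, end, mm->asce_limit);
        else
                printf("%s: walk to %#lx stays inside table\n", tag, end);
}

int main(void)
{
        /* e.g. a 64-bit task that still runs on a 3-level table (4TB) */
        struct mm_sim mm = { .asce_limit = 1UL << 42 };

        walk(&mm, task_size_fixed(), "old TASK_SIZE");          /* overruns */
        walk(&mm, task_size_dynamic(&mm), "new TASK_SIZE");     /* fits */
        return 0;
}
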
 arch/s390/include/asm/processor.h | 5 ++---
 arch/s390/mm/mmap.c               | 4 ++--
 arch/s390/mm/pgtable.c            | 2 ++
 3 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 066b99502e09..db4523fe38ac 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -61,7 +61,7 @@ extern void print_cpu_info(struct cpuinfo_S390 *);
 extern int get_cpu_capability(unsigned int *);
 
 /*
- * User space process size: 2GB for 31 bit, 4TB for 64 bit.
+ * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
  */
 #ifndef __s390x__
 
@@ -70,8 +70,7 @@ extern int get_cpu_capability(unsigned int *);
 
 #else /* __s390x__ */
 
-#define TASK_SIZE_OF(tsk)       (test_tsk_thread_flag(tsk,TIF_31BIT) ? \
-                                        (1UL << 31) : (1UL << 53))
+#define TASK_SIZE_OF(tsk)       ((tsk)->mm->context.asce_limit)
 #define TASK_UNMAPPED_BASE      (test_thread_flag(TIF_31BIT) ? \
                                         (1UL << 30) : (1UL << 41))
 #define TASK_SIZE               TASK_SIZE_OF(current)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 5932a824547a..346dd0c5cbde 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -35,7 +35,7 @@
  * Leave an at least ~128 MB hole.
  */
 #define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
+#define MAX_GAP (STACK_TOP/6*5)
 
 static inline unsigned long mmap_base(void)
 {
@@ -46,7 +46,7 @@ static inline unsigned long mmap_base(void)
         else if (gap > MAX_GAP)
                 gap = MAX_GAP;
 
-        return TASK_SIZE - (gap & PAGE_MASK);
+        return STACK_TOP - (gap & PAGE_MASK);
 }
 
 static inline int mmap_is_legacy(void)
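
Why STACK_TOP replaces TASK_SIZE here: TASK_SIZE now tracks the installed
page table and can change at runtime, while the top-down mmap base should
stay fixed for the life of the process. A rough userspace approximation of
the patched mmap_base() calculation (the STACK_TOP and PAGE_MASK values and
the rlimit argument are assumed stand-ins, not the kernel definitions):

#include <stdio.h>

#define PAGE_MASK       (~0xfffUL)              /* 4K pages assumed */
#define STACK_TOP       (1UL << 42)             /* assumed 64-bit value */
#define MIN_GAP         (128 * 1024 * 1024UL)
#define MAX_GAP         (STACK_TOP / 6 * 5)

/* Mirrors the patched logic: clamp the stack gap, subtract from STACK_TOP. */
static unsigned long mmap_base(unsigned long stack_rlimit)
{
        unsigned long gap = stack_rlimit;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return STACK_TOP - (gap & PAGE_MASK);
}

int main(void)
{
        /* An 8MB stack rlimit is below MIN_GAP, so the 128MB hole wins. */
        printf("mmap base: %#lx\n", mmap_base(8UL * 1024 * 1024));
        return 0;
}
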
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0767827540b1..6b6ddc4ea02b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -117,6 +117,7 @@ repeat:
                 crst_table_init(table, entry);
                 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                 mm->pgd = (pgd_t *) table;
+                mm->task_size = mm->context.asce_limit;
                 table = NULL;
         }
         spin_unlock(&mm->page_table_lock);
@@ -154,6 +155,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
                         BUG();
                 }
                 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+                mm->task_size = mm->context.asce_limit;
                 crst_table_free(mm, (unsigned long *) pgd);
         }
         update_mm(mm, current);
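
The two one-line additions keep mm->task_size, which is normally set from
TASK_SIZE at exec time, in step with context.asce_limit whenever the crst
table is replaced. A small sketch of that invariant (the types and the
sync_task_size() helper are illustrative assumptions, not kernel API):

#include <assert.h>

struct mm_context_sim { unsigned long asce_limit; };

struct mm_sim {
        struct mm_context_sim context;
        unsigned long task_size;        /* cached copy of the address space size */
};

/* Mirror of the one-line additions after an upgrade or downgrade. */
static void sync_task_size(struct mm_sim *mm)
{
        mm->task_size = mm->context.asce_limit;
}

static void upgrade_to_4_levels(struct mm_sim *mm)
{
        mm->context.asce_limit = 1UL << 53;     /* new table covers 8PB */
        sync_task_size(mm);
}

static void downgrade_to_2gb(struct mm_sim *mm)
{
        mm->context.asce_limit = 1UL << 31;     /* compat address space */
        sync_task_size(mm);
}

int main(void)
{
        struct mm_sim mm = {
                .context   = { .asce_limit = 1UL << 42 },
                .task_size = 1UL << 42,
        };

        upgrade_to_4_levels(&mm);
        assert(mm.task_size == mm.context.asce_limit);

        downgrade_to_2gb(&mm);
        assert(mm.task_size == mm.context.asce_limit);
        return 0;
}
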