Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/mm/fault.c | 34
-rw-r--r--  arch/ia64/mm/init.c  |  2
2 files changed, 8 insertions, 28 deletions
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index f21b55549787..af7eb087dca7 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -20,32 +20,6 @@
 extern void die (char *, struct pt_regs *, long);
 
 /*
- * This routine is analogous to expand_stack() but instead grows the
- * register backing store (which grows towards higher addresses).
- * Since the register backing store is access sequentially, we
- * disallow growing the RBS by more than a page at a time. Note that
- * the VM_GROWSUP flag can be set on any VM area but that's fine
- * because the total process size is still limited by RLIMIT_STACK and
- * RLIMIT_AS.
- */
-static inline long
-expand_backing_store (struct vm_area_struct *vma, unsigned long address)
-{
-	unsigned long grow;
-
-	grow = PAGE_SIZE >> PAGE_SHIFT;
-	if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur
-	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur))
-		return -ENOMEM;
-	vma->vm_end += PAGE_SIZE;
-	vma->vm_mm->total_vm += grow;
-	if (vma->vm_flags & VM_LOCKED)
-		vma->vm_mm->locked_vm += grow;
-	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
-	return 0;
-}
-
-/*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
  */
@@ -185,7 +159,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
 		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
 			goto bad_area;
-		if (expand_backing_store(vma, address))
+		/*
+		 * Since the register backing store is accessed sequentially,
+		 * we disallow growing it by more than a page at a time.
+		 */
+		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
+			goto bad_area;
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
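
The one-page limit that expand_backing_store() enforced internally is now written as the explicit bound above, with the actual growth handed off to the generic expand_upwards(). Below is a minimal user-space sketch of that bound only; the page size, the rbs_growth_ok() name, and the sample addresses are assumptions for illustration, not taken from the kernel.

#include <assert.h>
#include <stdio.h>

#define RBS_PAGE_SIZE 16384UL	/* assumed ia64 page size, for illustration only */

/*
 * Mirrors the check added above: a faulting address is accepted only if the
 * long-word RBS slot it touches still lies within one page beyond the current
 * end of the vma, so each fault can grow the area by at most one page.
 */
static int rbs_growth_ok(unsigned long address, unsigned long vm_end)
{
	return !(address > vm_end + RBS_PAGE_SIZE - sizeof(long));
}

int main(void)
{
	unsigned long vm_end = 0x600000000000UL;	/* hypothetical vma end */

	assert(rbs_growth_ok(vm_end, vm_end));					/* first slot past vm_end */
	assert(rbs_growth_ok(vm_end + RBS_PAGE_SIZE - sizeof(long), vm_end));	/* last slot in that page */
	assert(!rbs_growth_ok(vm_end + RBS_PAGE_SIZE, vm_end));		/* would need a second page */
	printf("RBS growth limited to one page per fault\n");
	return 0;
}
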
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 98246acd4991..0063b2c50908 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -158,7 +158,7 @@ ia64_init_addr_space (void)
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
-		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
 			up_write(&current->mm->mmap_sem);
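
For context, ia64_init_addr_space() above seeds the register backing store with a single page-aligned vma at current->thread.rbs_bot; marking it VM_ACCOUNT presumably keeps the overcommit accounting done by the generic growth path consistent when the area is later unmapped. A tiny sketch of the start/end arithmetic follows; the page size and rbs_bot value are assumptions, not the kernel's.

#include <stdio.h>

#define RBS_PAGE_SIZE 16384UL			/* assumed ia64 page size */
#define RBS_PAGE_MASK (~(RBS_PAGE_SIZE - 1))

int main(void)
{
	unsigned long rbs_bot = 0x2000000000000000UL;		/* hypothetical rbs_bot */
	unsigned long vm_start = rbs_bot & RBS_PAGE_MASK;	/* round down to a page boundary */
	unsigned long vm_end = vm_start + RBS_PAGE_SIZE;	/* initial vma spans exactly one page */

	printf("initial RBS vma: [%#lx, %#lx)\n", vm_start, vm_end);
	return 0;
}
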