about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  arch/ia64/mm/fault.c  | 34
-rw-r--r--  arch/ia64/mm/init.c   |  2
-rw-r--r--  include/linux/mm.h    |  3
-rw-r--r--  mm/mmap.c             | 17
4 files changed, 24 insertions(+), 32 deletions(-)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index f21b55549787..af7eb087dca7 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -20,32 +20,6 @@
 extern void die (char *, struct pt_regs *, long);
 
 /*
- * This routine is analogous to expand_stack() but instead grows the
- * register backing store (which grows towards higher addresses).
- * Since the register backing store is access sequentially, we
- * disallow growing the RBS by more than a page at a time.  Note that
- * the VM_GROWSUP flag can be set on any VM area but that's fine
- * because the total process size is still limited by RLIMIT_STACK and
- * RLIMIT_AS.
- */
-static inline long
-expand_backing_store (struct vm_area_struct *vma, unsigned long address)
-{
-	unsigned long grow;
-
-	grow = PAGE_SIZE >> PAGE_SHIFT;
-	if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur
-	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur))
-		return -ENOMEM;
-	vma->vm_end += PAGE_SIZE;
-	vma->vm_mm->total_vm += grow;
-	if (vma->vm_flags & VM_LOCKED)
-		vma->vm_mm->locked_vm += grow;
-	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
-	return 0;
-}
-
-/*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
  */
@@ -185,7 +159,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
 		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
 			goto bad_area;
-		if (expand_backing_store(vma, address))
+		/*
+		 * Since the register backing store is accessed sequentially,
+		 * we disallow growing it by more than a page at a time.
+		 */
+		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
+			goto bad_area;
+		if (expand_upwards(vma, address))
 			goto bad_area;
 	}
 	goto good_area;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 98246acd4991..0063b2c50908 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -158,7 +158,7 @@ ia64_init_addr_space (void)
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
-		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
 			up_write(&current->mm->mmap_sem);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d4552fe0864..89398032bc4b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -896,7 +896,8 @@ void handle_ra_miss(struct address_space *mapping,
 unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
-extern int expand_stack(struct vm_area_struct * vma, unsigned long address);
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
diff --git a/mm/mmap.c b/mm/mmap.c
index c43b28457007..d931d7e49ac9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1508,11 +1508,15 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
 	return 0;
 }
 
-#ifdef CONFIG_STACK_GROWSUP
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 /*
- * vma is the first one with address > vma->vm_end.  Have to extend vma.
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
+#ifdef CONFIG_STACK_GROWSUP
+static inline
+#endif
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	int error;
 
@@ -1550,6 +1554,13 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
 	anon_vma_unlock(vma);
 	return error;
 }
+#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+	return expand_upwards(vma, address);
+}
 
 struct vm_area_struct *
 find_extend_vma(struct mm_struct *mm, unsigned long addr)