about summary refs log tree commit diff stats
path: root/arch/ia64
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-02-25 18:47:03 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-25 18:47:03 -0500
commitd414c104e26fd3b597f855cc29473a8b1527fb4c (patch)
tree021af0f439f2612f84a97ee14e20a9595def90d7 /arch/ia64
parentf6d43b93bd07cf3e430f426ee8f1330cb8d5d8c9 (diff)
parente31048af84e24c99136acaad99f9273b147f8444 (diff)
Merge tag 'please-pull-vm_unwrapped' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux
Pull ia64 update from Tony Luck: "ia64 vm patch series that was cooking in -mm tree" * tag 'please-pull-vm_unwrapped' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux: mm: use vm_unmapped_area() in hugetlbfs on ia64 architecture mm: use vm_unmapped_area() on ia64 architecture
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/sys_ia64.c  | 37
-rw-r--r--  arch/ia64/mm/hugetlbpage.c   | 20
2 files changed, 21 insertions(+), 36 deletions(-)
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index d9439ef2f661..41e33f84c185 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
25 unsigned long pgoff, unsigned long flags) 25 unsigned long pgoff, unsigned long flags)
26{ 26{
27 long map_shared = (flags & MAP_SHARED); 27 long map_shared = (flags & MAP_SHARED);
28 unsigned long start_addr, align_mask = PAGE_SIZE - 1; 28 unsigned long align_mask = 0;
29 struct mm_struct *mm = current->mm; 29 struct mm_struct *mm = current->mm;
30 struct vm_area_struct *vma; 30 struct vm_unmapped_area_info info;
31 31
32 if (len > RGN_MAP_LIMIT) 32 if (len > RGN_MAP_LIMIT)
33 return -ENOMEM; 33 return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
44 addr = 0; 44 addr = 0;
45#endif 45#endif
46 if (!addr) 46 if (!addr)
47 addr = mm->free_area_cache; 47 addr = TASK_UNMAPPED_BASE;
48 48
49 if (map_shared && (TASK_SIZE > 0xfffffffful)) 49 if (map_shared && (TASK_SIZE > 0xfffffffful))
50 /* 50 /*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
53 * tasks, we prefer to avoid exhausting the address space too quickly by 53 * tasks, we prefer to avoid exhausting the address space too quickly by
54 * limiting alignment to a single page. 54 * limiting alignment to a single page.
55 */ 55 */
56 align_mask = SHMLBA - 1; 56 align_mask = PAGE_MASK & (SHMLBA - 1);
57 57
58 full_search: 58 info.flags = 0;
59 start_addr = addr = (addr + align_mask) & ~align_mask; 59 info.length = len;
60 60 info.low_limit = addr;
61 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { 61 info.high_limit = TASK_SIZE;
62 /* At this point: (!vma || addr < vma->vm_end). */ 62 info.align_mask = align_mask;
63 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { 63 info.align_offset = 0;
64 if (start_addr != TASK_UNMAPPED_BASE) { 64 return vm_unmapped_area(&info);
65 /* Start a new search --- just in case we missed some holes. */
66 addr = TASK_UNMAPPED_BASE;
67 goto full_search;
68 }
69 return -ENOMEM;
70 }
71 if (!vma || addr + len <= vma->vm_start) {
72 /* Remember the address where we stopped this search: */
73 mm->free_area_cache = addr + len;
74 return addr;
75 }
76 addr = (vma->vm_end + align_mask) & ~align_mask;
77 }
78} 65}
79 66
80asmlinkage long 67asmlinkage long
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 5ca674b74737..76069c18ee42 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
148unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 148unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
149 unsigned long pgoff, unsigned long flags) 149 unsigned long pgoff, unsigned long flags)
150{ 150{
151 struct vm_area_struct *vmm; 151 struct vm_unmapped_area_info info;
152 152
153 if (len > RGN_MAP_LIMIT) 153 if (len > RGN_MAP_LIMIT)
154 return -ENOMEM; 154 return -ENOMEM;
@@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
165 /* This code assumes that RGN_HPAGE != 0. */ 165 /* This code assumes that RGN_HPAGE != 0. */
166 if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1))) 166 if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
167 addr = HPAGE_REGION_BASE; 167 addr = HPAGE_REGION_BASE;
168 else 168
169 addr = ALIGN(addr, HPAGE_SIZE); 169 info.flags = 0;
170 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { 170 info.length = len;
171 /* At this point: (!vmm || addr < vmm->vm_end). */ 171 info.low_limit = addr;
172 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) 172 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
173 return -ENOMEM; 173 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
174 if (!vmm || (addr + len) <= vmm->vm_start) 174 info.align_offset = 0;
175 return addr; 175 return vm_unmapped_area(&info);
176 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
177 }
178} 176}
179 177
180static int __init hugetlb_setup_sz(char *str) 178static int __init hugetlb_setup_sz(char *str)