Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/hugetlbpage-radix.c	26
-rw-r--r--	arch/powerpc/mm/mmap.c	55
2 files changed, 50 insertions, 31 deletions
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index a12e86395025..0a3d71aae175 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -48,17 +48,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct hstate *h = hstate_file(file);
+	int fixed = (flags & MAP_FIXED);
+	unsigned long high_limit;
 	struct vm_unmapped_area_info info;
 
-	if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
-		mm->context.addr_limit = TASK_SIZE;
+	high_limit = DEFAULT_MAP_WINDOW;
+	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+		high_limit = TASK_SIZE;
 
 	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (len > mm->task_size)
+	if (len > high_limit)
 		return -ENOMEM;
+	if (fixed) {
+		if (addr > high_limit - len)
+			return -ENOMEM;
+	}
 
-	if (flags & MAP_FIXED) {
+	if (unlikely(addr > mm->context.addr_limit &&
+		     mm->context.addr_limit != TASK_SIZE))
+		mm->context.addr_limit = TASK_SIZE;
+
+	if (fixed) {
 		if (prepare_hugepage_range(file, addr, len))
 			return -EINVAL;
 		return addr;
@@ -67,7 +78,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (addr) {
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
-		if (mm->task_size - len >= addr &&
+		if (high_limit - len >= addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -78,12 +89,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
-	info.high_limit = current->mm->mmap_base;
+	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
 	info.align_offset = 0;
 
-	if (addr > DEFAULT_MAP_WINDOW)
-		info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
-
 	return vm_unmapped_area(&info);
 }
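
The hunks above all encode one rule: the search window stays capped at DEFAULT_MAP_WINDOW unless the hint address (or a MAP_FIXED request) already reaches above it, in which case the full TASK_SIZE range is opened up. Below is a minimal standalone sketch of that rule; the 128TB/512TB constants and the pick_high_limit() helper are assumptions for illustration, not taken from the kernel sources.

/* Sketch only: mirrors the high_limit selection shown in the diff above.
 * The constants are assumed example values, not the kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_MAP_WINDOW	(128UL << 40)	/* assumed: 128TB window */
#define TASK_SIZE		(512UL << 40)	/* assumed: 512TB full space */

static unsigned long pick_high_limit(unsigned long addr, unsigned long len,
				     bool fixed)
{
	unsigned long high_limit = DEFAULT_MAP_WINDOW;

	/* Only a hint (or fixed mapping) that reaches beyond the default
	 * window opts the allocation into the full address space. */
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;
	return high_limit;
}

int main(void)
{
	/* Low hint: stays inside the 128TB window. */
	printf("%#lx\n", pick_high_limit(0, 1UL << 20, false));
	/* Hint above 128TB: expands to the 512TB limit. */
	printf("%#lx\n", pick_high_limit(200UL << 40, 1UL << 20, false));
	return 0;
}
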
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 5d78b193fec4..6d476a7b5611 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	int fixed = (flags & MAP_FIXED);
+	unsigned long high_limit;
 	struct vm_unmapped_area_info info;
 
+	high_limit = DEFAULT_MAP_WINDOW;
+	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+		high_limit = TASK_SIZE;
+
+	if (len > high_limit)
+		return -ENOMEM;
+	if (fixed) {
+		if (addr > high_limit - len)
+			return -ENOMEM;
+	}
+
 	if (unlikely(addr > mm->context.addr_limit &&
 		     mm->context.addr_limit != TASK_SIZE))
 		mm->context.addr_limit = TASK_SIZE;
 
-	if (len > mm->task_size - mmap_min_addr)
-		return -ENOMEM;
-
-	if (flags & MAP_FIXED)
+	if (fixed)
 		return addr;
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+		if (high_limit - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
@@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = mm->mmap_base;
+	info.high_limit = high_limit;
 	info.align_mask = 0;
 
-	if (unlikely(addr > DEFAULT_MAP_WINDOW))
-		info.high_limit = mm->context.addr_limit;
-	else
-		info.high_limit = DEFAULT_MAP_WINDOW;
-
 	return vm_unmapped_area(&info);
 }
 
@@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
+	int fixed = (flags & MAP_FIXED);
+	unsigned long high_limit;
 	struct vm_unmapped_area_info info;
 
+	high_limit = DEFAULT_MAP_WINDOW;
+	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+		high_limit = TASK_SIZE;
+
+	if (len > high_limit)
+		return -ENOMEM;
+	if (fixed) {
+		if (addr > high_limit - len)
+			return -ENOMEM;
+	}
+
 	if (unlikely(addr > mm->context.addr_limit &&
 		     mm->context.addr_limit != TASK_SIZE))
 		mm->context.addr_limit = TASK_SIZE;
 
-	/* requested length too big for entire address space */
-	if (len > mm->task_size - mmap_min_addr)
-		return -ENOMEM;
-
-	if (flags & MAP_FIXED)
+	if (fixed)
 		return addr;
 
-	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+		if (high_limit - len >= addr && addr >= mmap_min_addr &&
 		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-	info.high_limit = mm->mmap_base;
+	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
 	info.align_mask = 0;
 
-	if (addr > DEFAULT_MAP_WINDOW)
-		info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
-
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		return addr;
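
In the top-down paths the patch sets the vm_unmapped_area() ceiling to mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW), so the ceiling is simply mmap_base in the common 128TB case and rises by exactly the extra span when the 512TB space has been opted into. A small sketch of that arithmetic, again with assumed 128TB/512TB constants and a hypothetical mmap_base value:

/* Sketch only: the top-down ceiling arithmetic from the hunks above,
 * with assumed constants and an example mmap_base just under 128TB. */
#include <stdio.h>

#define DEFAULT_MAP_WINDOW	(128UL << 40)	/* assumed: 128TB window */
#define TASK_SIZE		(512UL << 40)	/* assumed: 512TB full space */

int main(void)
{
	unsigned long mmap_base = DEFAULT_MAP_WINDOW - (1UL << 30);	/* example */
	unsigned long high_limit;

	/* Default window: the extra term is zero, so ceiling == mmap_base. */
	high_limit = DEFAULT_MAP_WINDOW;
	printf("ceiling = %#lx\n", mmap_base + (high_limit - DEFAULT_MAP_WINDOW));

	/* Full space: ceiling moves up by TASK_SIZE - DEFAULT_MAP_WINDOW,
	 * preserving the same gap below the top of the window. */
	high_limit = TASK_SIZE;
	printf("ceiling = %#lx\n", mmap_base + (high_limit - DEFAULT_MAP_WINDOW));
	return 0;
}
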