Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 2
-rw-r--r--	mm/mmap.c	59
-rw-r--r--	mm/rmap.c	 6
-rw-r--r--	mm/vmalloc.c	33
4 files changed, 55 insertions, 45 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 47263ac3e4ea..1d33fec7bac6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1004,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	if (pos < size) {
 		retval = generic_file_direct_IO(READ, iocb,
 					iov, pos, nr_segs);
-		if (retval >= 0 && !is_sync_kiocb(iocb))
+		if (retval > 0 && !is_sync_kiocb(iocb))
 			retval = -EIOCBQUEUED;
 		if (retval > 0)
 			*ppos = pos + retval;
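A reading of the filemap.c change (commentary, not part of the commit): with the old ">= 0" test, an async direct-I/O read that submitted nothing was still reported as -EIOCBQUEUED; with "> 0", only a positive byte count is translated, so a zero-byte result completes synchronously. A hypothetical caller-side sketch, with the call below assumed rather than taken from the patch:

	ssize_t ret;

	ret = __generic_file_aio_read(iocb, iov, nr_segs, &iocb->ki_pos);
	if (ret == -EIOCBQUEUED)
		return ret;	/* bytes are in flight; completion arrives via aio_complete() */
	/* otherwise the read finished synchronously: ret is a byte count
	 * (possibly 0) or a negative error, and no completion is pending */
	return ret;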
diff --git a/mm/mmap.c b/mm/mmap.c
index 63df2d698414..de54acd9942f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1302,37 +1302,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	if (flags & MAP_FIXED) {
-		unsigned long ret;
+	unsigned long ret;
 
-		if (addr > TASK_SIZE - len)
-			return -ENOMEM;
-		if (addr & ~PAGE_MASK)
-			return -EINVAL;
-		if (file && is_file_hugepages(file)) {
-			/*
-			 * Check if the given range is hugepage aligned, and
-			 * can be made suitable for hugepages.
-			 */
-			ret = prepare_hugepage_range(addr, len);
-		} else {
-			/*
-			 * Ensure that a normal request is not falling in a
-			 * reserved hugepage range. For some archs like IA-64,
-			 * there is a separate region for hugepages.
-			 */
-			ret = is_hugepage_only_range(current->mm, addr, len);
-		}
-		if (ret)
-			return -EINVAL;
-		return addr;
-	}
+	if (!(flags & MAP_FIXED)) {
+		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		return file->f_op->get_unmapped_area(file, addr, len,
-						pgoff, flags);
+		get_area = current->mm->get_unmapped_area;
+		if (file && file->f_op && file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+		addr = get_area(file, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(addr))
+			return addr;
+	}
 
-	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+	if (file && is_file_hugepages(file)) {
+		/*
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
+		 */
+		ret = prepare_hugepage_range(addr, len);
+	} else {
+		/*
+		 * Ensure that a normal request is not falling in a
+		 * reserved hugepage range. For some archs like IA-64,
+		 * there is a separate region for hugepages.
+		 */
+		ret = is_hugepage_only_range(current->mm, addr, len);
+	}
+	if (ret)
+		return -EINVAL;
+	return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
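For illustration of the new flow (a sketch under assumptions, not patch text): a file's ->get_unmapped_area hook now only has to choose an address or return an error recognised by IS_ERR_VALUE(); the TASK_SIZE, page-alignment and hugepage checks are applied by the core to whatever it returns, and to MAP_FIXED addresses as well. The function name below is hypothetical:

	static unsigned long example_get_unmapped_area(struct file *file,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
	{
		/* defer placement to the mm's default policy */
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);
	}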
diff --git a/mm/rmap.c b/mm/rmap.c
index a6203b4e1278..9827409eb7c7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -626,7 +626,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte, *original_pte;
 	pte_t pteval;
 	struct page *page;
 	unsigned long address;
@@ -658,7 +658,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 	if (!pmd_present(*pmd))
 		goto out_unlock;
 
-	for (pte = pte_offset_map(pmd, address);
+	for (original_pte = pte = pte_offset_map(pmd, address);
 			address < end; pte++, address += PAGE_SIZE) {
 
 		if (!pte_present(*pte))
@@ -694,7 +694,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
 		(*mapcount)--;
 	}
 
-	pte_unmap(pte);
+	pte_unmap(original_pte);
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
 }
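The rmap.c change saves the pointer returned by pte_offset_map() so that pte_unmap() drops the mapping that was actually taken, not the loop-advanced pointer (which matters when page tables live in highmem). A minimal sketch of the pattern, illustrative rather than the patch text:

	pte_t *pte, *original_pte;

	original_pte = pte = pte_offset_map(pmd, address);
	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		/* ... per-pte work ... */
	}
	pte_unmap(original_pte);	/* pte now points past the last entry examined */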
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5c2bbf..8ff16a1eee6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-/**
- *	remove_vm_area  -  find and remove a contingous kernel virtual area
- *
- *	@addr:		base address
- *
- *	Search for the kernel VM area starting at @addr, and remove it.
- *	This function returns the found VM area, but using it is NOT safe
- *	on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
-	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
 		if (tmp->addr == addr)
 			goto found;
 	}
-	write_unlock(&vmlist_lock);
 	return NULL;
 
 found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
-	write_unlock(&vmlist_lock);
 
 	/*
 	 * Remove the guard page.
@@ -281,6 +270,24 @@ found:
 	return tmp;
 }
 
+/**
+ *	remove_vm_area  -  find and remove a contingous kernel virtual area
+ *
+ *	@addr:		base address
+ *
+ *	Search for the kernel VM area starting at @addr, and remove it.
+ *	This function returns the found VM area, but using it is NOT safe
+ *	on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+	struct vm_struct *v;
+	write_lock(&vmlist_lock);
+	v = __remove_vm_area(addr);
+	write_unlock(&vmlist_lock);
+	return v;
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
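With the split, remove_vm_area() keeps its old behaviour while __remove_vm_area() lets code that already holds vmlist_lock reuse the lookup-and-unlink step. A hypothetical caller sketch (the function name is illustrative, not from the patch):

	static struct vm_struct *remove_area_locked_example(void *addr)
	{
		struct vm_struct *area;

		write_lock(&vmlist_lock);
		/* ... other work that needs a stable vmlist ... */
		area = __remove_vm_area(addr);
		write_unlock(&vmlist_lock);
		return area;
	}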