author		Jiri Kosina <jkosina@suse.cz>	2010-12-10 09:19:18 -0500
committer	Jiri Kosina <jkosina@suse.cz>	2010-12-10 09:19:18 -0500
commit		2ade0c1d9d93b7642212657ef76f4a1e30233711 (patch)
tree		63bc720c0ffe5f4760cac4ed617b9870b050175e /mm/vmalloc.c
parent		504499f22c08a03e2e19dc88d31aa0ecd2ac815e (diff)
parent		6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c | 84
1 file changed, 67 insertions(+), 17 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9f909622a25e..eb5cc7d00c5a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,8 +31,6 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-bool vmap_lazy_unmap __read_mostly = true;
-
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -293,13 +291,13 @@ static void __insert_vmap_area(struct vmap_area *va)
 	struct rb_node *tmp;
 
 	while (*p) {
-		struct vmap_area *tmp;
+		struct vmap_area *tmp_va;
 
 		parent = *p;
-		tmp = rb_entry(parent, struct vmap_area, rb_node);
-		if (va->va_start < tmp->va_end)
+		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
+		if (va->va_start < tmp_va->va_end)
 			p = &(*p)->rb_left;
-		else if (va->va_end > tmp->va_start)
+		else if (va->va_end > tmp_va->va_start)
 			p = &(*p)->rb_right;
 		else
 			BUG();
@@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void)
 {
 	unsigned int log;
 
-	if (!vmap_lazy_unmap)
-		return 0;
-
 	log = fls(num_online_cpus());
 
 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
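
With the vmap_lazy_unmap knob gone, lazy_max_pages() is unconditional again. A quick worked example of the formula: with 4 CPUs online, fls(4) = 3 and 32MB / 4KB = 8192 pages per 32MB, so the threshold is 3 * 8192 = 24576 pages, i.e. purging of lazily freed areas kicks in after roughly 96MB of accumulated vmap address space.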
@@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 			if (va->va_end > *end)
 				*end = va->va_end;
 			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-			unmap_vmap_area(va);
 			list_add_tail(&va->purge_list, &valist);
 			va->flags |= VM_LAZY_FREEING;
 			va->flags &= ~VM_LAZY_FREE;
@@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped
+ * and flush_cache_vunmap had been called for the correct range
+ * previously.
  */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+static void free_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -623,6 +618,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 }
 
 /*
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
+ */
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+{
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
+}
+
+/*
  * Free and unmap a vmap area
  */
 static void free_unmap_vmap_area(struct vmap_area *va)
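
Taken together with the two hunks above, the split means free_vmap_area_noflush() now only queues the area for lazy freeing, while the actual PTE teardown (unmap_vmap_area()) moves out of __purge_vmap_area_lazy() and happens eagerly in the new free_unmap_vmap_area_noflush() wrapper at free time.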
@@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area_noflush(vb->va);
+	free_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
+	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+
 	spin_lock(&vb->lock);
 	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
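
The per-cpu vmap block path gets the same eager treatment: vb_free() now tears down the page table entries immediately, which is what lets the vunmap_page_range() call drop out of vm_unmap_aliases() in the next hunk.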
@@ -988,7 +995,6 @@ void vm_unmap_aliases(void)
 
 			s = vb->va->va_start + (i << PAGE_SHIFT);
 			e = vb->va->va_start + (j << PAGE_SHIFT);
-			vunmap_page_range(s, e);
 			flush = 1;
 
 			if (s < start)
@@ -1596,6 +1602,13 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 }
 EXPORT_SYMBOL(__vmalloc);
 
+static inline void *__vmalloc_node_flags(unsigned long size,
+					int node, gfp_t flags)
+{
+	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
+					node, __builtin_return_address(0));
+}
+
 /**
  * vmalloc - allocate virtually contiguous memory
  * @size: allocation size
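
__vmalloc_node_flags() simply fixes the alignment at one page and the protection at PAGE_KERNEL; the node value of -1 that vmalloc() passes below means no NUMA node preference.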
@@ -1607,12 +1620,28 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
-					-1, __builtin_return_address(0));
+	return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
 }
 EXPORT_SYMBOL(vmalloc);
 
 /**
+ * vzalloc - allocate virtually contiguous memory with zero fill
+ * @size: allocation size
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc() instead.
+ */
+void *vzalloc(unsigned long size)
+{
+	return __vmalloc_node_flags(size, -1,
+				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vzalloc);
+
+/**
  * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
  * @size: allocation size
 *
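
For callers, vzalloc() collapses the usual allocate-then-zero pattern into one call. A minimal before/after sketch (buf and size are hypothetical, not part of this patch):

	/* before: allocate, then zero by hand */
	buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

	/* after: one call that returns zeroed memory, or NULL on failure */
	buf = vzalloc(size);
	if (!buf)
		return -ENOMEM;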
@@ -1653,6 +1682,25 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
+/**
+ * vzalloc_node - allocate memory on a specific node with zero fill
+ * @size: allocation size
+ * @node: numa node
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator and map them into contiguous kernel virtual space.
+ * The memory allocated is set to zero.
+ *
+ * For tight control over page level allocator and protection flags
+ * use __vmalloc_node() instead.
+ */
+void *vzalloc_node(unsigned long size, int node)
+{
+	return __vmalloc_node_flags(size, node,
+			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+}
+EXPORT_SYMBOL(vzalloc_node);
+
 #ifndef PAGE_KERNEL_EXEC
 # define PAGE_KERNEL_EXEC PAGE_KERNEL
 #endif
@@ -2350,6 +2398,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 
 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
+	__acquires(&vmlist_lock)
 {
 	loff_t n = *pos;
 	struct vm_struct *v;
@@ -2376,6 +2425,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 }
 
 static void s_stop(struct seq_file *m, void *p)
+	__releases(&vmlist_lock)
 {
 	read_unlock(&vmlist_lock);
 }
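
The __acquires()/__releases() markers are annotations for the sparse static checker and compile away in a normal build. As a reminder of how they are defined (quoting <linux/compiler.h> of this era from memory, so treat the exact shape as illustrative):

	#ifdef __CHECKER__
	# define __acquires(x)	__attribute__((context(x, 0, 1)))
	# define __releases(x)	__attribute__((context(x, 1, 0)))
	#else
	# define __acquires(x)
	# define __releases(x)
	#endif

With the annotations in place, a sparse run (make C=1) can flag paths where s_start() returns without holding vmlist_lock or where s_stop() releases a lock it never took.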