Diffstat (limited to 'mm/vmalloc.c')
 -rw-r--r--  mm/vmalloc.c | 110
 1 file changed, 86 insertions(+), 24 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d55d905463eb..ae007462b7f6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -509,6 +509,9 @@ static unsigned long lazy_max_pages(void)
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/* for per-CPU blocks */
+static void purge_fragmented_blocks_allcpus(void);
+
 /*
  * Purges all lazily-freed vmap areas.
  *
@@ -539,6 +542,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	} else
 		spin_lock(&purge_lock);
 
+	if (sync)
+		purge_fragmented_blocks_allcpus();
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(va, &vmap_area_list, list) {
 		if (va->flags & VM_LAZY_FREE) {
@@ -667,8 +673,6 @@ static bool vmap_initialized __read_mostly = false;
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
-	struct list_head dirty;
-	unsigned int nr_dirty;
 };
 
 struct vmap_block {
@@ -678,10 +682,9 @@ struct vmap_block {
 	unsigned long free, dirty;
 	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
-	union {
-		struct list_head free_list;
-		struct rcu_head rcu_head;
-	};
+	struct list_head free_list;
+	struct rcu_head rcu_head;
+	struct list_head purge;
 };
 
 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
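Note on the hunk above (my reading; the patch itself does not explain it): once purging exists, a vmap_block's linkages can be live at overlapping times. A block can be unlinked from vbq->free with list_del_rcu() while RCU readers may still follow its free_list pointers, sit on a local purge list through the new purge member, and then be handed to call_rcu() through rcu_head, so the three can no longer share storage in a union. A minimal C11 sketch of the before/after layouts; list_head and rcu_head here are simplified stand-ins for the kernel types:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

/* old layout: assumed only one linkage is ever live at a time */
struct vmap_block_old {
	union {
		struct list_head free_list;	/* while on vbq->free */
		struct rcu_head rcu_head;	/* once handed to call_rcu() */
	};
};

/* new layout: purging makes the lifetimes overlap */
struct vmap_block_new {
	struct list_head free_list;	/* RCU readers may still follow this */
	struct rcu_head rcu_head;	/* deferred free */
	struct list_head purge;		/* local batching during a purge */
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct vmap_block_old), sizeof(struct vmap_block_new));
	return 0;
}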
@@ -757,7 +760,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	vbq = &get_cpu_var(vmap_block_queue);
 	vb->vbq = vbq;
 	spin_lock(&vbq->lock);
-	list_add(&vb->free_list, &vbq->free);
+	list_add_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
 	put_cpu_var(vmap_block_queue);
 
@@ -776,8 +779,6 @@ static void free_vmap_block(struct vmap_block *vb)
 	struct vmap_block *tmp;
 	unsigned long vb_idx;
 
-	BUG_ON(!list_empty(&vb->free_list));
-
 	vb_idx = addr_to_vb_idx(vb->va->va_start);
 	spin_lock(&vmap_block_tree_lock);
 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
@@ -788,12 +789,61 @@ static void free_vmap_block(struct vmap_block *vb)
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
+static void purge_fragmented_blocks(int cpu)
+{
+	LIST_HEAD(purge);
+	struct vmap_block *vb;
+	struct vmap_block *n_vb;
+	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+
+		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
+			continue;
+
+		spin_lock(&vb->lock);
+		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
+			vb->free = 0; /* prevent further allocs after releasing lock */
+			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
+			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
+			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
+			spin_lock(&vbq->lock);
+			list_del_rcu(&vb->free_list);
+			spin_unlock(&vbq->lock);
+			spin_unlock(&vb->lock);
+			list_add_tail(&vb->purge, &purge);
+		} else
+			spin_unlock(&vb->lock);
+	}
+	rcu_read_unlock();
+
+	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
+		list_del(&vb->purge);
+		free_vmap_block(vb);
+	}
+}
+
+static void purge_fragmented_blocks_thiscpu(void)
+{
+	purge_fragmented_blocks(smp_processor_id());
+}
+
+static void purge_fragmented_blocks_allcpus(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		purge_fragmented_blocks(cpu);
+}
+
 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
 	unsigned long addr = 0;
 	unsigned int order;
+	int purge = 0;
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
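purge_fragmented_blocks() above evaluates its predicate twice: once without vb->lock as a cheap, possibly stale filter while walking the RCU list, then again under the lock before claiming the block (free = 0 stops further allocations, dirty = VMAP_BBMAP_BITS stops a second purge). Below is a userspace sketch of that optimistic-check-then-locked-recheck pattern using pthreads (build with gcc -pthread); all names are illustrative, not kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BBMAP_BITS 1024

struct block {
	pthread_mutex_t lock;
	unsigned long free, dirty;
};

/* fully fragmented: no outstanding allocations, not already purged */
static bool purgeable(const struct block *b)
{
	return b->free + b->dirty == BBMAP_BITS && b->dirty != BBMAP_BITS;
}

static bool try_claim_for_purge(struct block *b)
{
	if (!purgeable(b))		/* unlocked pre-filter; may be stale */
		return false;

	pthread_mutex_lock(&b->lock);
	if (!purgeable(b)) {		/* recheck now that we hold the lock */
		pthread_mutex_unlock(&b->lock);
		return false;
	}
	b->free = 0;			/* no further allocations */
	b->dirty = BBMAP_BITS;		/* no second purge */
	pthread_mutex_unlock(&b->lock);
	return true;
}

int main(void)
{
	struct block b = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .free = 512, .dirty = 512,
	};

	printf("first claim:  %d\n", try_claim_for_purge(&b));	/* 1 */
	printf("second claim: %d\n", try_claim_for_purge(&b));	/* 0 */
	return 0;
}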
@@ -806,24 +856,38 @@ again:
 		int i;
 
 		spin_lock(&vb->lock);
+		if (vb->free < 1UL << order)
+			goto next;
+
 		i = bitmap_find_free_region(vb->alloc_map,
 						VMAP_BBMAP_BITS, order);
 
-		if (i >= 0) {
-			addr = vb->va->va_start + (i << PAGE_SHIFT);
-			BUG_ON(addr_to_vb_idx(addr) !=
-					addr_to_vb_idx(vb->va->va_start));
-			vb->free -= 1UL << order;
-			if (vb->free == 0) {
-				spin_lock(&vbq->lock);
-				list_del_init(&vb->free_list);
-				spin_unlock(&vbq->lock);
-			}
-			spin_unlock(&vb->lock);
-			break;
+		if (i < 0) {
+			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
+				/* fragmented and no outstanding allocations */
+				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
+				purge = 1;
+			}
+			goto next;
 		}
+		addr = vb->va->va_start + (i << PAGE_SHIFT);
+		BUG_ON(addr_to_vb_idx(addr) !=
+				addr_to_vb_idx(vb->va->va_start));
+		vb->free -= 1UL << order;
+		if (vb->free == 0) {
+			spin_lock(&vbq->lock);
+			list_del_rcu(&vb->free_list);
+			spin_unlock(&vbq->lock);
+		}
+		spin_unlock(&vb->lock);
+		break;
+next:
 		spin_unlock(&vb->lock);
 	}
+
+	if (purge)
+		purge_fragmented_blocks_thiscpu();
+
 	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
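Two idioms in the vb_alloc() rewrite above: the success path is flattened by inverting the test (if (i < 0) ... goto next) so every failure leaves through a single unlock site, and purging is deferred, with the loop only setting a flag under vb->lock and purge_fragmented_blocks_thiscpu() running once the block lock is dropped. A userspace sketch under those assumptions (illustrative names, simplified fragmentation test):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct blk {
	pthread_mutex_t lock;
	unsigned long free;	/* units still available */
};

static void purge_thiscpu(void)
{
	puts("deferred purge runs with no block lock held");
}

/* take 'want' units from the first block with room */
static bool take(struct blk *blks, int n, unsigned long want)
{
	bool found = false;
	int purge = 0;

	for (int i = 0; i < n; i++) {
		struct blk *b = &blks[i];

		pthread_mutex_lock(&b->lock);
		if (b->free < want) {
			if (b->free == 0)	/* stand-in for the fragmentation test */
				purge = 1;	/* remember; act only after unlocking */
			goto next;		/* single unlock site below */
		}
		b->free -= want;
		pthread_mutex_unlock(&b->lock);
		found = true;
		break;
next:
		pthread_mutex_unlock(&b->lock);
	}

	if (purge)
		purge_thiscpu();
	return found;
}

int main(void)
{
	struct blk blks[2] = {
		{ .lock = PTHREAD_MUTEX_INITIALIZER, .free = 0 },
		{ .lock = PTHREAD_MUTEX_INITIALIZER, .free = 8 },
	};

	printf("allocation succeeded: %d\n", take(blks, 2, 4));
	return 0;
}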
| @@ -860,11 +924,11 @@ static void vb_free(const void *addr, unsigned long size) | |||
| 860 | BUG_ON(!vb); | 924 | BUG_ON(!vb); |
| 861 | 925 | ||
| 862 | spin_lock(&vb->lock); | 926 | spin_lock(&vb->lock); |
| 863 | bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order); | 927 | BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order)); |
| 864 | 928 | ||
| 865 | vb->dirty += 1UL << order; | 929 | vb->dirty += 1UL << order; |
| 866 | if (vb->dirty == VMAP_BBMAP_BITS) { | 930 | if (vb->dirty == VMAP_BBMAP_BITS) { |
| 867 | BUG_ON(vb->free || !list_empty(&vb->free_list)); | 931 | BUG_ON(vb->free); |
| 868 | spin_unlock(&vb->lock); | 932 | spin_unlock(&vb->lock); |
| 869 | free_vmap_block(vb); | 933 | free_vmap_block(vb); |
| 870 | } else | 934 | } else |
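The vb_free() change above relies on bitmap_allocate_region() failing (it returns -EBUSY) when any bit in the requested region is already set, so wrapping it in BUG_ON() turns a double free of the same sub-range into an immediate, loud failure rather than a silently corrupted dirty count. A simplified userspace model; this bitmap helper is a stand-in for small orders, not the kernel implementation:

#include <stdio.h>

/* mark 2^order bits at pos in a 64-bit map, failing if any is set;
 * overflows for 1 << order == 64, fine for this illustration */
static int bitmap_allocate_region(unsigned long *map, unsigned pos, unsigned order)
{
	unsigned long mask = ((1UL << (1UL << order)) - 1) << pos;

	if (*map & mask)
		return -1;		/* kernel returns -EBUSY here */
	*map |= mask;
	return 0;
}

int main(void)
{
	unsigned long dirty_map = 0;

	/* first free of the range: succeeds, bits become dirty */
	if (bitmap_allocate_region(&dirty_map, 8, 2) != 0)
		printf("unexpected failure\n");
	printf("dirty_map after first free: %#lx\n", dirty_map);

	/* second free of the same range: caught -- BUG_ON() would fire */
	if (bitmap_allocate_region(&dirty_map, 8, 2) != 0)
		printf("double free detected\n");
	return 0;
}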
@@ -1033,8 +1097,6 @@ void __init vmalloc_init(void)
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
-		INIT_LIST_HEAD(&vbq->dirty);
-		vbq->nr_dirty = 0;
 	}
 
 	/* Import existing vmlist entries. */
