Diffstat (limited to 'mm')
 -rw-r--r--  mm/filemap.c |   3
 -rw-r--r--  mm/hugetlb.c |   7
 -rw-r--r--  mm/vmalloc.c | 110
 3 files changed, 92 insertions(+), 28 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index e3736923220e..698ea80f2102 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2232,6 +2232,9 @@ again:
 		if (unlikely(status))
 			break;
 
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_page(page);
+
 		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 		pagefault_enable();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e91b81b63670..2d16fa6b8c2d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1515,10 +1515,9 @@ static struct attribute_group hstate_attr_group = {
 	.attrs = hstate_attrs,
 };
 
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
-				struct kobject *parent,
-				struct kobject **hstate_kobjs,
-				struct attribute_group *hstate_attr_group)
+static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
+				    struct kobject **hstate_kobjs,
+				    struct attribute_group *hstate_attr_group)
{
 	int retval;
 	int hi = h - hstates;
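
Note: the hugetlb hunk only drops the __init annotation (and reflows the parameter list). The function has to stay resident after boot, presumably because it gains post-boot callers, e.g. when per-node hstate attributes are registered for hot-plugged nodes. A hedged userspace approximation of what __init means (the macro below mimics the kernel's section trick; nothing is actually discarded outside the kernel):

#include <stdio.h>

/* Kernel __init places code in a section that is freed once boot
 * completes; only functions provably never called later may carry it. */
#define __init __attribute__((__section__(".init.text")))

static int __init boot_time_only(void)	/* the kernel would discard this */
{
	return 1;
}

static int callable_anytime(void)	/* stays resident forever */
{
	return 2;
}

int main(void)
{
	/* In userspace both calls work; in the kernel, calling
	 * boot_time_only() after boot would jump into freed memory. */
	printf("%d %d\n", boot_time_only(), callable_anytime());
	return 0;
}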
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d55d905463eb..ae007462b7f6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -509,6 +509,9 @@ static unsigned long lazy_max_pages(void)
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/* for per-CPU blocks */
+static void purge_fragmented_blocks_allcpus(void);
+
 /*
  * Purges all lazily-freed vmap areas.
  *
@@ -539,6 +542,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	} else
 		spin_lock(&purge_lock);
 
+	if (sync)
+		purge_fragmented_blocks_allcpus();
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(va, &vmap_area_list, list) {
 		if (va->flags & VM_LAZY_FREE) {
@@ -667,8 +673,6 @@ static bool vmap_initialized __read_mostly = false;
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
-	struct list_head dirty;
-	unsigned int nr_dirty;
 };
 
 struct vmap_block {
@@ -678,10 +682,9 @@ struct vmap_block {
 	unsigned long free, dirty;
 	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
-	union {
-		struct list_head free_list;
-		struct rcu_head rcu_head;
-	};
+	struct list_head free_list;
+	struct rcu_head rcu_head;
+	struct list_head purge;
 };
 
 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
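
Note: the union is split, most plausibly because of the new purge path. A block is now unlinked with list_del_rcu(), which deliberately leaves vb->free_list.next intact for readers still traversing the list, and is then handed to call_rcu(), which writes into rcu_head immediately, before any grace period ends. If the two shared storage, call_rcu() would overwrite a pointer an in-flight reader may still follow; the new purge list head also needs linkage of its own. A minimal sketch of that aliasing hazard, with stripped-down stand-ins for the kernel structures:

#include <stdio.h>

/* Simplified stand-ins for the kernel's list_head and rcu_head. */
struct list_head { struct list_head *next, *prev; };
struct rcu_head  { struct rcu_head *next; void (*func)(struct rcu_head *); };

/* The pre-patch layout: one storage slot serving both roles. */
union linkage {
	struct list_head free_list;
	struct rcu_head rcu_head;
};

static void reclaim(struct rcu_head *unused) { (void)unused; }

int main(void)
{
	union linkage u;
	struct list_head next_node;

	/* Node sits on the free list; an RCU reader may have loaded
	 * u.free_list.next and still be walking through it. */
	u.free_list.next = &next_node;
	u.free_list.prev = &next_node;

	/* A call_rcu() equivalent immediately reuses the same bytes... */
	u.rcu_head.next = NULL;
	u.rcu_head.func = reclaim;

	/* ...so the pointer a straggling reader follows is now garbage. */
	printf("reader-visible next is now %p\n", (void *)u.free_list.next);
	return 0;
}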
@@ -757,7 +760,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	vbq = &get_cpu_var(vmap_block_queue);
 	vb->vbq = vbq;
 	spin_lock(&vbq->lock);
-	list_add(&vb->free_list, &vbq->free);
+	list_add_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
 	put_cpu_var(vmap_block_queue);
 
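
Note: new_vmap_block() now publishes the block with list_add_rcu() because vb_alloc() and the new purge_fragmented_blocks() walk vbq->free under rcu_read_lock() rather than vbq->lock, so insertion and removal must go through the RCU-aware list primitives. A hypothetical userspace analog built on liburcu (assuming the classic urcu.h flavor, linked with -lurcu; the block type and names are invented for illustration):

#include <stdio.h>
#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu() */
#include <urcu/rculist.h>	/* cds_list_add_rcu() and friends */

struct block {
	int id;
	struct cds_list_head free_list;
};

static CDS_LIST_HEAD(free_blocks);

int main(void)
{
	rcu_register_thread();

	struct block b = { .id = 1 };
	cds_list_add_rcu(&b.free_list, &free_blocks);	/* writer publishes */

	struct block *pos;
	rcu_read_lock();				/* lockless reader */
	cds_list_for_each_entry_rcu(pos, &free_blocks, free_list)
		printf("saw block %d\n", pos->id);
	rcu_read_unlock();

	cds_list_del_rcu(&b.free_list);			/* writer retracts */
	synchronize_rcu();				/* wait out readers */

	rcu_unregister_thread();
	return 0;
}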
@@ -776,8 +779,6 @@ static void free_vmap_block(struct vmap_block *vb)
 	struct vmap_block *tmp;
 	unsigned long vb_idx;
 
-	BUG_ON(!list_empty(&vb->free_list));
-
 	vb_idx = addr_to_vb_idx(vb->va->va_start);
 	spin_lock(&vmap_block_tree_lock);
 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
@@ -788,12 +789,61 @@ static void free_vmap_block(struct vmap_block *vb)
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
+static void purge_fragmented_blocks(int cpu)
+{
+	LIST_HEAD(purge);
+	struct vmap_block *vb;
+	struct vmap_block *n_vb;
+	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+
+		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
+			continue;
+
+		spin_lock(&vb->lock);
+		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
+			vb->free = 0; /* prevent further allocs after releasing lock */
+			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
+			bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
+			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
+			spin_lock(&vbq->lock);
+			list_del_rcu(&vb->free_list);
+			spin_unlock(&vbq->lock);
+			spin_unlock(&vb->lock);
+			list_add_tail(&vb->purge, &purge);
+		} else
+			spin_unlock(&vb->lock);
+	}
+	rcu_read_unlock();
+
+	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
+		list_del(&vb->purge);
+		free_vmap_block(vb);
+	}
+}
+
+static void purge_fragmented_blocks_thiscpu(void)
+{
+	purge_fragmented_blocks(smp_processor_id());
+}
+
+static void purge_fragmented_blocks_allcpus(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		purge_fragmented_blocks(cpu);
+}
+
 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
 	unsigned long addr = 0;
 	unsigned int order;
+	int purge = 0;
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
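
Note: the predicate in purge_fragmented_blocks() reads: every bit in the block is accounted either free or dirty (so no allocations are live), yet the block is not already fully dirty (fully dirty blocks are freed through vb_free() anyway). It is evaluated twice, once opportunistically outside vb->lock to skip uninteresting blocks cheaply, then again under the lock before the block is claimed. A standalone model of that test and the claim step (the VMAP_BBMAP_BITS value and struct fields here are stand-ins, not the kernel's):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VMAP_BBMAP_BITS 1024UL

struct block { unsigned long free, dirty; };

/* Purgeable: no live allocations, but not already fully dirty. */
static bool purgeable(const struct block *b)
{
	return b->free + b->dirty == VMAP_BBMAP_BITS &&
	       b->dirty != VMAP_BBMAP_BITS;
}

/* Claim step: mirrors "prevent further allocs" / "prevent re-purge". */
static void claim(struct block *b)
{
	assert(purgeable(b));
	b->free = 0;
	b->dirty = VMAP_BBMAP_BITS;
}

int main(void)
{
	struct block live = { .free = 200, .dirty = 200 };	/* 624 bits allocated */
	struct block frag = { .free = 200, .dirty = 824 };	/* nothing live */
	struct block dead = { .free = 0, .dirty = VMAP_BBMAP_BITS };

	printf("live: %d frag: %d dead: %d\n",
	       purgeable(&live), purgeable(&frag), purgeable(&dead));
	claim(&frag);
	printf("frag after claim: %d\n", purgeable(&frag));	/* 0: not purged twice */
	return 0;
}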
@@ -806,24 +856,38 @@ again:
 		int i;
 
 		spin_lock(&vb->lock);
+		if (vb->free < 1UL << order)
+			goto next;
+
 		i = bitmap_find_free_region(vb->alloc_map,
 						VMAP_BBMAP_BITS, order);
 
-		if (i >= 0) {
-			addr = vb->va->va_start + (i << PAGE_SHIFT);
-			BUG_ON(addr_to_vb_idx(addr) !=
-					addr_to_vb_idx(vb->va->va_start));
-			vb->free -= 1UL << order;
-			if (vb->free == 0) {
-				spin_lock(&vbq->lock);
-				list_del_init(&vb->free_list);
-				spin_unlock(&vbq->lock);
-			}
-			spin_unlock(&vb->lock);
-			break;
+		if (i < 0) {
+			if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
+				/* fragmented and no outstanding allocations */
+				BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
+				purge = 1;
+			}
+			goto next;
 		}
+		addr = vb->va->va_start + (i << PAGE_SHIFT);
+		BUG_ON(addr_to_vb_idx(addr) !=
+				addr_to_vb_idx(vb->va->va_start));
+		vb->free -= 1UL << order;
+		if (vb->free == 0) {
+			spin_lock(&vbq->lock);
+			list_del_rcu(&vb->free_list);
+			spin_unlock(&vbq->lock);
+		}
+		spin_unlock(&vb->lock);
+		break;
+next:
 		spin_unlock(&vb->lock);
 	}
+
+	if (purge)
+		purge_fragmented_blocks_thiscpu();
+
 	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
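
Note: two things change in vb_alloc(): a cheap vb->free counter check now short-circuits the bitmap scan, and a block whose scan fails while it holds no live allocations is flagged for purging instead of being skipped forever. A simplified, hypothetical model of both (a single 64-bit word stands in for the block bitmap, and this find_free_region() only mimics the aligned-run semantics of bitmap_find_free_region()):

#include <stdio.h>

#define NBITS 64u			/* stand-in for VMAP_BBMAP_BITS */

/* Find a 2^order-aligned run of zero bits, mark it allocated, and
 * return its index, or -1 if no run fits. */
static int find_free_region(unsigned long long *map, unsigned int order)
{
	unsigned int step = 1u << order;

	for (unsigned int i = 0; i + step <= NBITS; i += step) {
		unsigned long long mask =
			(step == 64 ? ~0ULL : (1ULL << step) - 1) << i;

		if (!(*map & mask)) {
			*map |= mask;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long long alloc_map = 0;
	unsigned int nfree = NBITS;	/* mirrors vb->free */
	unsigned int order = 2;		/* request 2^2 = 4 pages */

	/* The patch's fast path: a counter check before any scan. */
	if (nfree < (1u << order))
		return 0;

	int i = find_free_region(&alloc_map, order);
	if (i >= 0) {
		nfree -= 1u << order;
		printf("region at bit %d, %u bits still free\n", i, nfree);
	}
	return 0;
}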
@@ -860,11 +924,11 @@ static void vb_free(const void *addr, unsigned long size)
 	BUG_ON(!vb);
 
 	spin_lock(&vb->lock);
-	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
+	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
 	vb->dirty += 1UL << order;
 	if (vb->dirty == VMAP_BBMAP_BITS) {
-		BUG_ON(vb->free || !list_empty(&vb->free_list));
+		BUG_ON(vb->free);
 		spin_unlock(&vb->lock);
 		free_vmap_block(vb);
 	} else
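
Note: bitmap_allocate_region() fails (the kernel helper returns -EBUSY) if any bit in the requested region is already set, so wrapping it in BUG_ON() makes a double vb_free() of the same range crash loudly at the point of the bug instead of silently corrupting the dirty accounting. A hypothetical userspace model of that check:

#include <assert.h>
#include <stdio.h>

static unsigned long long dirty_map;	/* stand-in for vb->dirty_map */

/* Mimics bitmap_allocate_region(): mark 2^order bits at pos, failing
 * (like the kernel's -EBUSY) if any bit in the region is already set. */
static int allocate_region(unsigned int pos, unsigned int order)
{
	unsigned long long mask = ((1ULL << (1u << order)) - 1) << pos;

	if (dirty_map & mask)
		return -1;		/* range was already marked dirty */
	dirty_map |= mask;
	return 0;
}

int main(void)
{
	/* First vb_free() of a 4-page range at bit 8: succeeds. */
	assert(allocate_region(8, 2) == 0);
	printf("first free ok\n");

	/* A second free of the same range trips the assert, just as the
	 * new BUG_ON() catches a double vb_free() in the kernel. */
	assert(allocate_region(8, 2) == 0);	/* aborts here */
	return 0;
}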
@@ -1033,8 +1097,6 @@ void __init vmalloc_init(void)
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
-		INIT_LIST_HEAD(&vbq->dirty);
-		vbq->nr_dirty = 0;
 	}
 
 	/* Import existing vmlist entries. */