author     Linus Torvalds <torvalds@linux-foundation.org>   2018-07-21 16:48:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-07-21 16:48:51 -0400
commit     3928d4f5ee37cdc523894f6e549e6aae521d8980
tree       c328fd919e48fc8442db04d13c2ba1fe2c0f88e4 /mm/mmap.c
parent     191a3afa98b857faf5231981ddbab66698034273
mm: use helper functions for allocating and freeing vm_area structs
The vm_area_struct is one of the most fundamental memory management
objects, but the management of it is entirely open-coded everywhere,
ranging from allocation and freeing (using kmem_cache_[z]alloc and
kmem_cache_free) to initializing all the fields.

We want to unify this in order to end up with unified initialization
of the vmas, and the first step is to at least have basic allocation
functions.

Right now those functions are literally just wrappers around the
kmem_cache_*() calls.  This is a purely mechanical conversion:

    # new vma:
    kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL) -> vm_area_alloc()

    # copy old vma
    kmem_cache_alloc(vm_area_cachep, GFP_KERNEL) -> vm_area_dup(old)

    # free vma
    kmem_cache_free(vm_area_cachep, vma) -> vm_area_free(vma)

to the point where the old vma passed in to the vm_area_dup() function
isn't even used yet (because I've left all the old manual
initialization alone).

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
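The helpers themselves are defined outside mm/mmap.c, so this diff only shows
the call-site conversion. Going purely by the description above ("literally
just wrappers around the kmem_cache_*() calls"), a minimal sketch of what such
wrappers would look like is given below; it is illustrative only, not the
definitions from this patch, and it assumes vm_area_cachep is the existing
slab cache for struct vm_area_struct used by the removed lines:

    #include <linux/mm_types.h>
    #include <linux/slab.h>

    /* Sketch only: thin wrappers matching the conversion described above. */
    struct vm_area_struct *vm_area_alloc(void)
    {
            /* new vma: zeroed allocation, standing in for kmem_cache_zalloc() */
            return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
    }

    struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
    {
            /*
             * copy old vma: 'orig' is deliberately unused at this stage,
             * since the callers still copy and initialize the fields by hand.
             */
            return kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
    }

    void vm_area_free(struct vm_area_struct *vma)
    {
            /* free vma back to the slab cache */
            kmem_cache_free(vm_area_cachep, vma);
    }

With wrappers of that shape in place, the mm/mmap.c changes below are a
one-for-one substitution at each call site.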
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--   mm/mmap.c   22
1 file changed, 11 insertions, 11 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 5801b5f0a634..4286ad2dd1f5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,7 +182,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	if (vma->vm_file)
 		fput(vma->vm_file);
 	mpol_put(vma_policy(vma));
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 	return next;
 }
 
@@ -911,7 +911,7 @@ again:
 		anon_vma_merge(vma, next);
 		mm->map_count--;
 		mpol_put(vma_policy(next));
-		kmem_cache_free(vm_area_cachep, next);
+		vm_area_free(next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
 		 * we must remove another next too. It would clutter
@@ -1729,7 +1729,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * specific mapper. the address has already been validated, but
 	 * not unmapped, but the maps are removed from the list.
 	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc();
 	if (!vma) {
 		error = -ENOMEM;
 		goto unacct_error;
@@ -1832,7 +1832,7 @@ allow_write_and_free_vma:
 	if (vm_flags & VM_DENYWRITE)
 		allow_write_access(file);
 free_vma:
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 unacct_error:
 	if (charged)
 		vm_unacct_memory(charged);
@@ -2620,7 +2620,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 		return err;
 	}
 
-	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	new = vm_area_dup(vma);
 	if (!new)
 		return -ENOMEM;
 
@@ -2669,7 +2669,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 out_free_mpol:
 	mpol_put(vma_policy(new));
 out_free_vma:
-	kmem_cache_free(vm_area_cachep, new);
+	vm_area_free(new);
 	return err;
 }
 
@@ -2984,7 +2984,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	/*
 	 * create a vma struct for an anonymous mapping
 	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc();
 	if (!vma) {
 		vm_unacct_memory(len >> PAGE_SHIFT);
 		return -ENOMEM;
@@ -3202,7 +3202,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		}
 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
 	} else {
-		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+		new_vma = vm_area_dup(vma);
 		if (!new_vma)
 			goto out;
 		*new_vma = *vma;
@@ -3226,7 +3226,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_mempol:
 	mpol_put(vma_policy(new_vma));
 out_free_vma:
-	kmem_cache_free(vm_area_cachep, new_vma);
+	vm_area_free(new_vma);
 out:
 	return NULL;
 }
@@ -3350,7 +3350,7 @@ static struct vm_area_struct *__install_special_mapping(
 	int ret;
 	struct vm_area_struct *vma;
 
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	vma = vm_area_alloc();
 	if (unlikely(vma == NULL))
 		return ERR_PTR(-ENOMEM);
 
@@ -3376,7 +3376,7 @@ static struct vm_area_struct *__install_special_mapping(
 	return vma;
 
 out:
-	kmem_cache_free(vm_area_cachep, vma);
+	vm_area_free(vma);
 	return ERR_PTR(ret);
 }
 