| author | vishnu.ps <vishnu.ps@samsung.com> | 2014-10-09 18:26:29 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-09 22:25:52 -0400 |
| commit | cc71aba348906ff93a4ad2f600045ee2d1ecc291 (patch) | |
| tree | 79f9041d9249e88092c9fcaa128d9800e09e7a14 /mm/mmap.c | |
| parent | bf0dea23a9c094ae869a88bb694fbe966671bf6d (diff) | |
mm/mmap.c: whitespace fixes
Signed-off-by: vishnu.ps <vishnu.ps@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | mm/mmap.c | 37 |

1 file changed, 19 insertions, 18 deletions
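
The hunks below are checkpatch-style whitespace and formatting cleanups. Because this extraction collapses tabs and spaces, several hunks show identical text in the old and new columns; the change there is whitespace only (trailing whitespace or tab/space indentation). As an illustration only, the sketch below uses stand-in types, macros, and names rather than the real mm/mmap.c definitions; it shows the three recurring patterns: the '*' in pointer declarations moved next to the name, spaces added around the ternary operator, and an assignment pulled out of an if condition.

```c
/*
 * Minimal sketch of the style patterns fixed in this patch; the struct,
 * macro, and function names here are stand-ins, not mm/mmap.c code.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct node {
	struct node *next;	/* was written as "struct node * next;" */
};

static unsigned long align_len(unsigned long len)
{
	/* was: if ((len = PAGE_ALIGN(len)) == 0) -- assignment buried in the test */
	len = PAGE_ALIGN(len);
	if (len == 0)
		return 0;
	return len;
}

int main(void)
{
	struct node tail = { NULL };
	struct node head = { &tail };
	struct node *prev = &head;

	/* was: next = prev? prev->next: NULL; -- no spaces around '?' and ':' */
	struct node *next = prev ? prev->next : NULL;

	printf("aligned: %lu, next == &tail: %d\n", align_len(5000), next == &tail);
	return 0;
}
```

None of these patterns changes behaviour; splitting the PAGE_ALIGN() assignment out of the condition in do_munmap() is semantically identical to the original one-liner.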
@@ -70,7 +70,7 @@ static void unmap_region(struct mm_struct *mm,

| line | old | line | new |
|---|---|---|---|
| 70 | * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes | 70 | * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes |
| 71 | * w: (no) no w: (no) no w: (yes) yes w: (no) no | 71 | * w: (no) no w: (no) no w: (yes) yes w: (no) no |
| 72 | * x: (no) no x: (no) yes x: (no) yes x: (yes) yes | 72 | * x: (no) no x: (no) yes x: (no) yes x: (yes) yes |
| 73 | * | 73 | * |
| 74 | * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes | 74 | * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes |
| 75 | * w: (no) no w: (no) no w: (copy) copy w: (no) no | 75 | * w: (no) no w: (no) no w: (copy) copy w: (no) no |
| 76 | * x: (no) no x: (no) yes x: (no) yes x: (yes) yes | 76 | * x: (no) no x: (no) yes x: (no) yes x: (yes) yes |

@@ -741,7 +741,7 @@ again: remove_next = 1 + (end > next->vm_end);

| line | old | line | new |
|---|---|---|---|
| 741 | * split_vma inserting another: so it must be | 741 | * split_vma inserting another: so it must be |
| 742 | * mprotect case 4 shifting the boundary down. | 742 | * mprotect case 4 shifting the boundary down. |
| 743 | */ | 743 | */ |
| 744 | adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); | 744 | adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); |
| 745 | exporter = vma; | 745 | exporter = vma; |
| 746 | importer = next; | 746 | importer = next; |
| 747 | } | 747 | } |

@@ -1010,7 +1010,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,

| line | old | line | new |
|---|---|---|---|
| 1010 | struct vm_area_struct *vma_merge(struct mm_struct *mm, | 1010 | struct vm_area_struct *vma_merge(struct mm_struct *mm, |
| 1011 | struct vm_area_struct *prev, unsigned long addr, | 1011 | struct vm_area_struct *prev, unsigned long addr, |
| 1012 | unsigned long end, unsigned long vm_flags, | 1012 | unsigned long end, unsigned long vm_flags, |
| 1013 | struct anon_vma *anon_vma, struct file *file, | 1013 | struct anon_vma *anon_vma, struct file *file, |
| 1014 | pgoff_t pgoff, struct mempolicy *policy) | 1014 | pgoff_t pgoff, struct mempolicy *policy) |
| 1015 | { | 1015 | { |
| 1016 | pgoff_t pglen = (end - addr) >> PAGE_SHIFT; | 1016 | pgoff_t pglen = (end - addr) >> PAGE_SHIFT; |

@@ -1036,7 +1036,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,

| line | old | line | new |
|---|---|---|---|
| 1036 | * Can it merge with the predecessor? | 1036 | * Can it merge with the predecessor? |
| 1037 | */ | 1037 | */ |
| 1038 | if (prev && prev->vm_end == addr && | 1038 | if (prev && prev->vm_end == addr && |
| 1039 | mpol_equal(vma_policy(prev), policy) && | 1039 | mpol_equal(vma_policy(prev), policy) && |
| 1040 | can_vma_merge_after(prev, vm_flags, | 1040 | can_vma_merge_after(prev, vm_flags, |
| 1041 | anon_vma, file, pgoff)) { | 1041 | anon_vma, file, pgoff)) { |
| 1042 | /* | 1042 | /* |

@@ -1064,7 +1064,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,

| line | old | line | new |
|---|---|---|---|
| 1064 | * Can this new request be merged in front of next? | 1064 | * Can this new request be merged in front of next? |
| 1065 | */ | 1065 | */ |
| 1066 | if (next && end == next->vm_start && | 1066 | if (next && end == next->vm_start && |
| 1067 | mpol_equal(policy, vma_policy(next)) && | 1067 | mpol_equal(policy, vma_policy(next)) && |
| 1068 | can_vma_merge_before(next, vm_flags, | 1068 | can_vma_merge_before(next, vm_flags, |
| 1069 | anon_vma, file, pgoff+pglen)) { | 1069 | anon_vma, file, pgoff+pglen)) { |
| 1070 | if (prev && addr < prev->vm_end) /* case 4 */ | 1070 | if (prev && addr < prev->vm_end) /* case 4 */ |

@@ -1235,7 +1235,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,

| line | old | line | new |
|---|---|---|---|
| 1235 | unsigned long flags, unsigned long pgoff, | 1235 | unsigned long flags, unsigned long pgoff, |
| 1236 | unsigned long *populate) | 1236 | unsigned long *populate) |
| 1237 | { | 1237 | { |
| 1238 | struct mm_struct * mm = current->mm; | 1238 | struct mm_struct *mm = current->mm; |
| 1239 | vm_flags_t vm_flags; | 1239 | vm_flags_t vm_flags; |
| 1240 | | 1240 | |
| 1241 | *populate = 0; | 1241 | *populate = 0; |

@@ -1263,7 +1263,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,

| line | old | line | new |
|---|---|---|---|
| 1263 | | 1263 | |
| 1264 | /* offset overflow? */ | 1264 | /* offset overflow? */ |
| 1265 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) | 1265 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) |
| 1266 | return -EOVERFLOW; | 1266 | return -EOVERFLOW; |
| 1267 | | 1267 | |
| 1268 | /* Too many mappings? */ | 1268 | /* Too many mappings? */ |
| 1269 | if (mm->map_count > sysctl_max_map_count) | 1269 | if (mm->map_count > sysctl_max_map_count) |

@@ -1921,7 +1921,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,

| line | old | line | new |
|---|---|---|---|
| 1921 | info.align_mask = 0; | 1921 | info.align_mask = 0; |
| 1922 | return vm_unmapped_area(&info); | 1922 | return vm_unmapped_area(&info); |
| 1923 | } | 1923 | } |
| 1924 | #endif | 1924 | #endif |
| 1925 | | 1925 | |
| 1926 | /* | 1926 | /* |
| 1927 | * This mmap-allocator allocates new areas top-down from below the | 1927 | * This mmap-allocator allocates new areas top-down from below the |

@@ -2321,13 +2321,13 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)

| line | old | line | new |
|---|---|---|---|
| 2321 | } | 2321 | } |
| 2322 | | 2322 | |
| 2323 | struct vm_area_struct * | 2323 | struct vm_area_struct * |
| 2324 | find_extend_vma(struct mm_struct * mm, unsigned long addr) | 2324 | find_extend_vma(struct mm_struct *mm, unsigned long addr) |
| 2325 | { | 2325 | { |
| 2326 | struct vm_area_struct * vma; | 2326 | struct vm_area_struct *vma; |
| 2327 | unsigned long start; | 2327 | unsigned long start; |
| 2328 | | 2328 | |
| 2329 | addr &= PAGE_MASK; | 2329 | addr &= PAGE_MASK; |
| 2330 | vma = find_vma(mm,addr); | 2330 | vma = find_vma(mm, addr); |
| 2331 | if (!vma) | 2331 | if (!vma) |
| 2332 | return NULL; | 2332 | return NULL; |
| 2333 | if (vma->vm_start <= addr) | 2333 | if (vma->vm_start <= addr) |

@@ -2376,7 +2376,7 @@ static void unmap_region(struct mm_struct *mm,

| line | old | line | new |
|---|---|---|---|
| 2376 | struct vm_area_struct *vma, struct vm_area_struct *prev, | 2376 | struct vm_area_struct *vma, struct vm_area_struct *prev, |
| 2377 | unsigned long start, unsigned long end) | 2377 | unsigned long start, unsigned long end) |
| 2378 | { | 2378 | { |
| 2379 | struct vm_area_struct *next = prev? prev->vm_next: mm->mmap; | 2379 | struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; |
| 2380 | struct mmu_gather tlb; | 2380 | struct mmu_gather tlb; |
| 2381 | | 2381 | |
| 2382 | lru_add_drain(); | 2382 | lru_add_drain(); |

@@ -2423,7 +2423,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,

| line | old | line | new |
|---|---|---|---|
| 2423 | * __split_vma() bypasses sysctl_max_map_count checking. We use this on the | 2423 | * __split_vma() bypasses sysctl_max_map_count checking. We use this on the |
| 2424 | * munmap path where it doesn't make sense to fail. | 2424 | * munmap path where it doesn't make sense to fail. |
| 2425 | */ | 2425 | */ |
| 2426 | static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, | 2426 | static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2427 | unsigned long addr, int new_below) | 2427 | unsigned long addr, int new_below) |
| 2428 | { | 2428 | { |
| 2429 | struct vm_area_struct *new; | 2429 | struct vm_area_struct *new; |

@@ -2512,7 +2512,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)

| line | old | line | new |
|---|---|---|---|
| 2512 | if ((start & ~PAGE_MASK) \|\| start > TASK_SIZE \|\| len > TASK_SIZE-start) | 2512 | if ((start & ~PAGE_MASK) \|\| start > TASK_SIZE \|\| len > TASK_SIZE-start) |
| 2513 | return -EINVAL; | 2513 | return -EINVAL; |
| 2514 | | 2514 | |
| 2515 | if ((len = PAGE_ALIGN(len)) == 0) | 2515 | len = PAGE_ALIGN(len); |
| | | 2516 | if (len == 0) |
| 2516 | return -EINVAL; | 2517 | return -EINVAL; |
| 2517 | | 2518 | |
| 2518 | /* Find the first overlapping VMA */ | 2519 | /* Find the first overlapping VMA */ |

@@ -2558,7 +2559,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)

| line | old | line | new |
|---|---|---|---|
| 2558 | if (error) | 2559 | if (error) |
| 2559 | return error; | 2560 | return error; |
| 2560 | } | 2561 | } |
| 2561 | vma = prev? prev->vm_next: mm->mmap; | 2562 | vma = prev ? prev->vm_next : mm->mmap; |
| 2562 | | 2563 | |
| 2563 | /* | 2564 | /* |
| 2564 | * unlock any mlock()ed ranges before detaching vmas | 2565 | * unlock any mlock()ed ranges before detaching vmas |

@@ -2621,10 +2622,10 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)

| line | old | line | new |
|---|---|---|---|
| 2621 | */ | 2622 | */ |
| 2622 | static unsigned long do_brk(unsigned long addr, unsigned long len) | 2623 | static unsigned long do_brk(unsigned long addr, unsigned long len) |
| 2623 | { | 2624 | { |
| 2624 | struct mm_struct * mm = current->mm; | 2625 | struct mm_struct *mm = current->mm; |
| 2625 | struct vm_area_struct * vma, * prev; | 2626 | struct vm_area_struct *vma, *prev; |
| 2626 | unsigned long flags; | 2627 | unsigned long flags; |
| 2627 | struct rb_node ** rb_link, * rb_parent; | 2628 | struct rb_node **rb_link, *rb_parent; |
| 2628 | pgoff_t pgoff = addr >> PAGE_SHIFT; | 2629 | pgoff_t pgoff = addr >> PAGE_SHIFT; |
| 2629 | int error; | 2630 | int error; |
| 2630 | | 2631 | |