| author | Tobin C Harding <me@tobin.cc> | 2017-02-24 17:59:01 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-24 20:46:55 -0500 |
| commit | 166f61b9435a1b64bd46a08ec6cf1d6fc579a772 (patch) | |
| tree | 9aa1fd2bc36e5a7e4718a9838c07f0ad8624288e /mm | |
| parent | 7f2b6ce8e31ef843f5ece7fd302119d659b015f7 (diff) | |
mm: codgin-style fixes
Fix whitespace issues, extraneous braces.
Link: http://lkml.kernel.org/r/1485992240-10986-5-git-send-email-me@tobin.cc
Signed-off-by: Tobin C Harding <me@tobin.cc>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
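The patterns fixed below are standard checkpatch.pl complaints: spaces on both sides of the ternary `?` and `:`, the `*` of a pointer declaration binding to the name rather than the type, and braces dropped from single-statement branches. A minimal, compilable sketch of those rules; the code is illustrative only, not taken from mm/memory.c:

```c
#include <stddef.h>
#include <stdio.h>

/* Returns a pointer to the first nonzero element, or NULL. */
static int *first_nonzero(int *arr, size_t n)	/* "int * arr" -> "int *arr" */
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (arr[i] == 0)	/* single statement: no braces */
			continue;
		return &arr[i];
	}
	return NULL;
}

int main(void)
{
	int data[] = { 0, 0, 7, 3 };
	int *hit = first_nonzero(data, sizeof(data) / sizeof(data[0]));

	/* "cond? a: b" -> "cond ? a : b" */
	printf("%d\n", hit ? *hit : -1);
	return 0;
}
```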
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 60
1 file changed, 29 insertions, 31 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 747a2cdd2f7d..bfad9fe316c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -30,7 +30,7 @@
 
 /*
  * 05.04.94  -  Multi-page memory management added for v1.1.
  *		Idea by Alex Bligh (alex@cconcepts.co.uk)
  *
  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
  *		(Gerhard.Wichert@pdb.siemens.de)
@@ -82,9 +82,9 @@
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
-struct page *mem_map;
-
 EXPORT_SYMBOL(max_mapnr);
+
+struct page *mem_map;
 EXPORT_SYMBOL(mem_map);
 #endif
 
@@ -95,8 +95,7 @@ EXPORT_SYMBOL(mem_map);
  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  * and ZONE_HIGHMEM.
  */
-void * high_memory;
-
+void *high_memory;
 EXPORT_SYMBOL(high_memory);
 
 /*
@@ -120,10 +119,10 @@ static int __init disable_randmaps(char *s)
 __setup("norandmaps", disable_randmaps);
 
 unsigned long zero_pfn __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
-
 EXPORT_SYMBOL(zero_pfn);
 
+unsigned long highest_memmap_pfn __read_mostly;
+
 /*
  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
  */
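This hunk and the one before it apply the same checkpatch convention: EXPORT_SYMBOL() should immediately follow the symbol it exports, with nothing declared in between. A minimal sketch of the resulting layout, with invented variable names:

```c
#include <linux/module.h>

/* The export marker sits directly under its variable. */
unsigned long demo_mapnr;
EXPORT_SYMBOL(demo_mapnr);

unsigned long demo_private_pfn;	/* not exported: no EXPORT_SYMBOL() */
```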
@@ -556,7 +555,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (is_vm_hugetlb_page(vma)) {
 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next? next->vm_start: ceiling);
+				floor, next ? next->vm_start : ceiling);
 		} else {
 			/*
 			 * Optimization: gather nearby vmas into one call down
@@ -569,7 +568,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 				unlink_file_vma(vma);
 			}
 			free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next? next->vm_start: ceiling);
+				floor, next ? next->vm_start : ceiling);
 		}
 		vma = next;
 	}
@@ -1141,9 +1140,8 @@ again:
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
-		if (pte_none(ptent)) {
+		if (pte_none(ptent))
 			continue;
-		}
 
 		if (pte_present(ptent)) {
 			struct page *page;
@@ -1463,10 +1461,10 @@ EXPORT_SYMBOL_GPL(zap_vma_ptes);
 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
 			spinlock_t **ptl)
 {
-	pgd_t * pgd = pgd_offset(mm, addr);
-	pud_t * pud = pud_alloc(mm, pgd, addr);
+	pgd_t *pgd = pgd_offset(mm, addr);
+	pud_t *pud = pud_alloc(mm, pgd, addr);
 	if (pud) {
-		pmd_t * pmd = pmd_alloc(mm, pud, addr);
+		pmd_t *pmd = pmd_alloc(mm, pud, addr);
 		if (pmd) {
 			VM_BUG_ON(pmd_trans_huge(*pmd));
 			return pte_alloc_map_lock(mm, pmd, addr, ptl);
@@ -2525,7 +2523,7 @@ void unmap_mapping_range(struct address_space *mapping,
 		hlen = ULONG_MAX - hba + 1;
 	}
 
-	details.check_mapping = even_cows? NULL: mapping;
+	details.check_mapping = even_cows ? NULL : mapping;
 	details.first_index = hba;
 	details.last_index = hba + hlen - 1;
 	if (details.last_index < details.first_index)
@@ -3407,14 +3405,14 @@ static int do_numa_page(struct vm_fault *vmf)
 	int flags = 0;
 
 	/*
 	 * The "pte" at this point cannot be used safely without
 	 * validation through pte_unmap_same(). It's of NUMA type but
 	 * the pfn may be screwed if the read is non atomic.
 	 *
 	 * We can safely just do a "set_pte_at()", because the old
 	 * page table entry is not accessible, so there would be no
 	 * concurrent hardware modifications to the PTE.
 	 */
 	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
 	spin_lock(vmf->ptl);
 	if (unlikely(!pte_same(*vmf->pte, pte))) {
@@ -3750,14 +3748,14 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();
 		/*
 		 * The task may have entered a memcg OOM situation but
 		 * if the allocation error was handled gracefully (no
 		 * VM_FAULT_OOM), there is no need to kill anything.
 		 * Just clean up the OOM state peacefully.
 		 */
 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 			mem_cgroup_oom_synchronize(false);
 	}
 
 	/*
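The first and last two hunks change only whitespace, which is why their old and new text reads the same here; per the commit message, these are indentation and comment-layout fixes. One rule of that kind, block-comment alignment, sketched with hypothetical declarations:

```c
/*
* checkpatch.pl flags this comment: "Block comments should align
* the * on each line" (the continuation stars sit under the slash).
*/
int misaligned_demo;

/*
 * Preferred: every continuation '*' is indented one space so it
 * lines up under the '*' of the opening line.
 */
int aligned_demo;
```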