author		Tejun Heo <tj@kernel.org>	2014-09-02 14:46:04 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-02 14:46:04 -0400
commit		e04d320838f573d8fa989a0d7af0972f9b0142d9 (patch)
tree		134ab3b64a07aca992f4d3b81f048b3791c12df4 /mm/percpu.c
parent		a16037c8dfc2734c1a2c8e3ffd4766ed25f2a41d (diff)
percpu: indent the population block in pcpu_alloc()
The next patch will conditionalize the population block in pcpu_alloc(), which would otherwise make a rather large indentation change, obfuscating the actual logic change. This patch puts the block under "if (true)" so that the next patch can avoid indentation changes. The definitions of the local variables which are used only in the block are moved into the block.

This patch is purely cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>
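The scaffolding trick generalizes beyond this patch. Below is a minimal sketch in plain C, not kernel code; the names process() and is_populated() are hypothetical and appear nowhere in this patch:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical example, not from the kernel tree: wrapping a block in
 * "if (true)" parks its statements at their final indentation level, so
 * a follow-up change that swaps "true" for a real condition touches only
 * one line and yields a minimal, reviewable diff.
 */
static int process(int off, int size)
{
	if (true) {	/* later: if (!is_populated(off, size)) */
		/* locals used only inside the block move in with it */
		int start = off;
		int end = off + size;

		printf("populating [%d, %d)\n", start, end);
	}
	return 0;
}

int main(void)
{
	return process(4, 16);
}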
Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c | 38
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index e18aa143aab1..577d84fb3002 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -742,7 +742,6 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	struct pcpu_chunk *chunk;
 	const char *err;
 	int slot, off, new_alloc, cpu, ret;
-	int page_start, page_end, rs, re;
 	unsigned long flags;
 	void __percpu *ptr;
 
@@ -847,27 +846,32 @@ area_found:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	/* populate if not all pages are already there */
-	mutex_lock(&pcpu_alloc_mutex);
-	page_start = PFN_DOWN(off);
-	page_end = PFN_UP(off + size);
+	if (true) {
+		int page_start, page_end, rs, re;
 
-	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
-		WARN_ON(chunk->immutable);
+		mutex_lock(&pcpu_alloc_mutex);
 
-		ret = pcpu_populate_chunk(chunk, rs, re);
+		page_start = PFN_DOWN(off);
+		page_end = PFN_UP(off + size);
 
-		spin_lock_irqsave(&pcpu_lock, flags);
-		if (ret) {
-			mutex_unlock(&pcpu_alloc_mutex);
-			pcpu_free_area(chunk, off);
-			err = "failed to populate";
-			goto fail_unlock;
+		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
+			WARN_ON(chunk->immutable);
+
+			ret = pcpu_populate_chunk(chunk, rs, re);
+
+			spin_lock_irqsave(&pcpu_lock, flags);
+			if (ret) {
+				mutex_unlock(&pcpu_alloc_mutex);
+				pcpu_free_area(chunk, off);
+				err = "failed to populate";
+				goto fail_unlock;
+			}
+			bitmap_set(chunk->populated, rs, re - rs);
+			spin_unlock_irqrestore(&pcpu_lock, flags);
 		}
-		bitmap_set(chunk->populated, rs, re - rs);
-		spin_unlock_irqrestore(&pcpu_lock, flags);
-	}
 
-	mutex_unlock(&pcpu_alloc_mutex);
+		mutex_unlock(&pcpu_alloc_mutex);
+	}
 
 	/* clear the areas and return address relative to base address */
 	for_each_possible_cpu(cpu)