author     Ingo Molnar <mingo@elte.hu>    2010-07-01 02:49:28 -0400
committer  Ingo Molnar <mingo@elte.hu>    2010-07-01 03:31:25 -0400
commit     0a54cec0c25cc49e3b68b14c205f1f6cff13f5e1 (patch)
tree       eb4e63ee9ae1fcaf9aa53a1668e55c09516052d9 /mm
parent     ec8c27e04f89a7575ca2c4facb99152e03d6a99c (diff)
parent     980019d74e4b2428362b36a0506519d6d9460800 (diff)
Merge branch 'linus' into core/rcu

Conflicts:
	fs/fs-writeback.c

Merge reason: Resolve the conflict.

Note, I picked the version from Linus's tree, which effectively reverts
the fs-writeback.c bits of:

  b97181f: fs: remove all rcu head initializations, except on_stack initializations

The upstream changes to this file reworked this code heavily, and the
first attempt to resolve the conflict resulted in a non-booting kernel,
so it's safer to re-try this portion of the commit cleanly.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')

-rw-r--r--  mm/memcontrol.c      |  4
-rw-r--r--  mm/mempolicy.c       |  9
-rw-r--r--  mm/page-writeback.c  |  5
-rw-r--r--  mm/percpu.c          | 36

4 files changed, 40 insertions, 14 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c6ece0a5759..20a8193a7af 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1370,7 +1370,7 @@ static void memcg_wakeup_oom(struct mem_cgroup *mem)
 
 static void memcg_oom_recover(struct mem_cgroup *mem)
 {
-    if (mem->oom_kill_disable && atomic_read(&mem->oom_lock))
+    if (atomic_read(&mem->oom_lock))
         memcg_wakeup_oom(mem);
 }
 
@@ -3781,6 +3781,8 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
         return -EINVAL;
     }
     mem->oom_kill_disable = val;
+    if (!val)
+        memcg_oom_recover(mem);
     cgroup_unlock();
     return 0;
 }
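The second hunk is the functional part of this change: when userspace re-enables the kernel OOM killer (writes val == 0), any tasks already parked on the memcg's OOM waitqueue must be woken, otherwise they would sleep forever waiting for a userspace handler that no longer exists. A minimal user-space sketch of that disable-flag-plus-waitqueue pattern follows; struct group, wake_all() and oom_control_write() are hypothetical stand-ins for the kernel's waitqueue machinery, not the real memcg code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's waitqueue machinery. */
struct waitq { int dummy; };

static void wake_all(struct waitq *wq)
{
    (void)wq;
    printf("waking all OOM waiters\n"); /* the kernel would wake_up_all() here */
}

struct group {
    bool oom_kill_disable;  /* set: userspace handles OOM instead of the kernel */
    atomic_int oom_lock;    /* non-zero while an OOM situation is in flight */
    struct waitq oom_waitq; /* tasks blocked waiting for the OOM to be handled */
};

/* Mirrors the fixed logic: flipping the flag back to "kernel handles OOM"
 * must recover waiters that went to sleep under the old policy. */
static void oom_control_write(struct group *g, bool val)
{
    g->oom_kill_disable = val;
    if (!val && atomic_load(&g->oom_lock))
        wake_all(&g->oom_waitq);
}

int main(void)
{
    struct group g = { .oom_kill_disable = true };
    atomic_store(&g.oom_lock, 1); /* pretend an OOM is pending */
    oom_control_write(&g, false); /* re-enable the kernel OOM killer */
    return 0;
}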
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5d6fb339de0..5bc0a96beb5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2094,7 +2094,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
         NODEMASK_SCRATCH(scratch);
 
         if (!scratch)
-            return;
+            goto put_mpol;
         /* contextualize the tmpfs mount point mempolicy */
         new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
         if (IS_ERR(new))
@@ -2103,19 +2103,20 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
         task_lock(current);
         ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
         task_unlock(current);
-        mpol_put(mpol); /* drop our ref on sb mpol */
         if (ret)
-            goto put_free;
+            goto put_new;
 
         /* Create pseudo-vma that contains just the policy */
         memset(&pvma, 0, sizeof(struct vm_area_struct));
         pvma.vm_end = TASK_SIZE; /* policy covers entire file */
         mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
 
-put_free:
+put_new:
         mpol_put(new); /* drop initial ref */
 free_scratch:
         NODEMASK_SCRATCH_FREE(scratch);
+put_mpol:
+        mpol_put(mpol); /* drop our incoming ref on sb mpol */
     }
 }
 
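The mempolicy change is a reference-count fix: the old code returned early when the nodemask scratch allocation failed and never dropped the caller's reference on mpol. The rework funnels every exit through a goto cleanup ladder, where later labels undo earlier acquisitions so each path releases exactly what it took. A standalone sketch of the pattern, with hypothetical names (struct obj, init_from) rather than the real mempolicy types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for a mempolicy. */
struct obj { int refcount; };

static void put(struct obj *o)
{
    if (--o->refcount == 0)
        printf("object freed\n");
}

/* Cleanup ladder: every exit path, including the earliest failure,
 * drops the incoming reference exactly once. */
static void init_from(struct obj *incoming)
{
    char *scratch = malloc(64);
    struct obj *new_obj;

    if (!scratch)
        goto put_incoming;  /* earliest failure still drops the ref */

    new_obj = malloc(sizeof(*new_obj));
    if (!new_obj)
        goto free_scratch;
    new_obj->refcount = 1;

    /* ... install new_obj somewhere that takes its own reference ... */

    put(new_obj);           /* drop initial ref; falls through to cleanup */
free_scratch:
    free(scratch);
put_incoming:
    put(incoming);          /* drop our incoming ref on every path */
}

int main(void)
{
    struct obj o = { .refcount = 1 };
    init_from(&o);
    return 0;
}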
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bbd396ac954..54f28bd493d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -597,7 +597,7 @@ static void balance_dirty_pages(struct address_space *mapping,
         (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
                    + global_page_state(NR_UNSTABLE_NFS))
                       > background_thresh)))
-        bdi_start_writeback(bdi, NULL, 0);
+        bdi_start_background_writeback(bdi);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -705,9 +705,8 @@ void laptop_mode_timer_fn(unsigned long data)
      * We want to write everything out, not just down to the dirty
      * threshold
      */
-
     if (bdi_has_dirty_io(&q->backing_dev_info))
-        bdi_start_writeback(&q->backing_dev_info, NULL, nr_pages);
+        bdi_start_writeback(&q->backing_dev_info, nr_pages);
 }
 
 /*
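Both hunks track an upstream writeback API change: bdi_start_writeback() lost an argument, and the "kick background writeback" case, previously spelled as the magic call bdi_start_writeback(bdi, NULL, 0), gained a purpose-named entry point. A sketch of why such a split helps, with hypothetical types and a stubbed queue (the real bdi_start_background_writeback() lives in fs/fs-writeback.c and looks different):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; not the actual kernel writeback API. */
struct backing_dev { const char *name; };

struct wb_work {
    long nr_pages;       /* how much to write */
    bool for_background; /* write down toward the background threshold */
};

static void queue_wb_work(struct backing_dev *bdi, struct wb_work work)
{
    printf("%s: nr_pages=%ld background=%d\n",
           bdi->name, work.nr_pages, (int)work.for_background);
}

/* General entry point: flush a specific number of pages. */
static void start_writeback(struct backing_dev *bdi, long nr_pages)
{
    queue_wb_work(bdi, (struct wb_work){ .nr_pages = nr_pages });
}

/* Purpose-named wrapper replacing the old "magic argument" call:
 * the name documents intent at every call site, and callers can no
 * longer pass an inconsistent argument combination. */
static void start_background_writeback(struct backing_dev *bdi)
{
    queue_wb_work(bdi, (struct wb_work){ .for_background = true });
}

int main(void)
{
    struct backing_dev bdi = { "bdi0" };
    start_writeback(&bdi, 1024);      /* explicit amount */
    start_background_writeback(&bdi); /* intent-revealing call */
    return 0;
}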
diff --git a/mm/percpu.c b/mm/percpu.c
index 39f7dfd5958..6470e771023 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -229,8 +229,8 @@ static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
     return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 }
 
-static unsigned long __maybe_unused pcpu_chunk_addr(struct pcpu_chunk *chunk,
-                                                    unsigned int cpu, int page_idx)
+static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
+                                     unsigned int cpu, int page_idx)
 {
     return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
         (page_idx << PAGE_SHIFT);
@@ -978,7 +978,32 @@ bool is_kernel_percpu_address(unsigned long addr)
  */
 phys_addr_t per_cpu_ptr_to_phys(void *addr)
 {
-    if (pcpu_addr_in_first_chunk(addr)) {
+    void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
+    bool in_first_chunk = false;
+    unsigned long first_start, first_end;
+    unsigned int cpu;
+
+    /*
+     * The following test on first_start/end isn't strictly
+     * necessary but will speed up lookups of addresses which
+     * aren't in the first chunk.
+     */
+    first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
+    first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
+                                pcpu_unit_pages);
+    if ((unsigned long)addr >= first_start &&
+        (unsigned long)addr < first_end) {
+        for_each_possible_cpu(cpu) {
+            void *start = per_cpu_ptr(base, cpu);
+
+            if (addr >= start && addr < start + pcpu_unit_size) {
+                in_first_chunk = true;
+                break;
+            }
+        }
+    }
+
+    if (in_first_chunk) {
         if ((unsigned long)addr < VMALLOC_START ||
             (unsigned long)addr >= VMALLOC_END)
             return __pa(addr);
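The per_cpu_ptr_to_phys() rework replaces the pcpu_addr_in_first_chunk() helper with an open-coded lookup: a cheap [first_start, first_end) bounds test rejects most addresses with two comparisons, and only addresses inside that range pay for the exact per-unit scan. The same fast-reject-then-exact-scan shape in standalone form, with hypothetical unit_start/range bounds instead of the percpu allocator's state:

#include <stdbool.h>
#include <stdint.h>

#define NUNITS    8
#define UNIT_SIZE 4096u

/* Hypothetical layout: NUNITS fixed-size units that may be scattered,
 * but all fall inside one enclosing [range_lo, range_hi) span. */
static uintptr_t unit_start[NUNITS];
static uintptr_t range_lo, range_hi;

/* Cheap bounding-range test first; only addresses inside the span
 * pay for the exact membership scan. */
static bool addr_in_units(uintptr_t addr)
{
    if (addr < range_lo || addr >= range_hi)
        return false;            /* fast reject: two comparisons, no scan */

    for (int i = 0; i < NUNITS; i++)
        if (addr >= unit_start[i] && addr < unit_start[i] + UNIT_SIZE)
            return true;         /* exact membership confirmed */
    return false;
}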
@@ -1086,7 +1111,7 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
     static int group_map[NR_CPUS] __initdata;
     static int group_cnt[NR_CPUS] __initdata;
     const size_t static_size = __per_cpu_end - __per_cpu_start;
-    int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
+    int nr_groups = 1, nr_units = 0;
     size_t size_sum, min_unit_size, alloc_size;
     int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
     int last_allocs, group, unit;
@@ -1096,7 +1121,7 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 
     /* this function may be called multiple times */
     memset(group_map, 0, sizeof(group_map));
-    memset(group_cnt, 0, sizeof(group_map));
+    memset(group_cnt, 0, sizeof(group_cnt));
 
     /*
      * Determine min_unit_size, alloc_size and max_upa such that
@@ -1130,7 +1155,6 @@ struct pcpu_alloc_info * __init pcpu_build_alloc_info(
         }
         group_map[cpu] = group;
         group_cnt[group]++;
-        group_cnt_max = max(group_cnt_max, group_cnt[group]);
     }
 
     /*
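Two small cleanups ride along in pcpu_build_alloc_info(): the write-only group_cnt_max accumulator is deleted outright, and the group_cnt memset stops borrowing sizeof(group_map). The old call only worked because the two arrays happen to have identical size; making each memset name the array it clears keeps the code correct if one array is ever resized. A minimal illustration, with a hypothetical reset_groups() and an arbitrary NR:

#include <string.h>

#define NR 16

static int group_map[NR];
static int group_cnt[NR];

static void reset_groups(void)
{
    /* Self-referential sizeof: each memset names the array it clears.
     * The fixed bug cleared group_cnt with sizeof(group_map), which
     * was only correct by coincidence of the arrays' equal sizes. */
    memset(group_map, 0, sizeof(group_map));
    memset(group_cnt, 0, sizeof(group_cnt));
}

int main(void)
{
    reset_groups();
    return 0;
}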