path: root/mm/memory.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 12:04:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-22 12:04:48 -0400
commit     95211279c5ad00a317c98221d7e4365e02f20836 (patch)
tree       2ddc8625378d2915b8c96392f3cf6663b705ed55 /mm/memory.c
parent     5375871d432ae9fc581014ac117b96aaee3cd0c7 (diff)
parent     12724850e8064f64b6223d26d78c0597c742c65a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge first batch of patches from Andrew Morton:
 "A few misc things and all the MM queue"

* emailed from Andrew Morton <akpm@linux-foundation.org>: (92 commits)
  memcg: avoid THP split in task migration
  thp: add HPAGE_PMD_* definitions for !CONFIG_TRANSPARENT_HUGEPAGE
  memcg: clean up existing move charge code
  mm/memcontrol.c: remove unnecessary 'break' in mem_cgroup_read()
  mm/memcontrol.c: remove redundant BUG_ON() in mem_cgroup_usage_unregister_event()
  mm/memcontrol.c: s/stealed/stolen/
  memcg: fix performance of mem_cgroup_begin_update_page_stat()
  memcg: remove PCG_FILE_MAPPED
  memcg: use new logic for page stat accounting
  memcg: remove PCG_MOVE_LOCK flag from page_cgroup
  memcg: simplify move_account() check
  memcg: remove EXPORT_SYMBOL(mem_cgroup_update_page_stat)
  memcg: kill dead prev_priority stubs
  memcg: remove PCG_CACHE page_cgroup flag
  memcg: let css_get_next() rely upon rcu_read_lock()
  cgroup: revert ss_id_lock to spinlock
  idr: make idr_get_next() good for rcu_read_lock()
  memcg: remove unnecessary thp check in page stat accounting
  memcg: remove redundant returns
  memcg: enum lru_list lru
  ...
Diffstat (limited to 'mm/memory.c')
 mm/memory.c | 53 +++++++++++++++++++----------------------------------
 1 file changed, 19 insertions(+), 34 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8438c157e4d9..3416b6e018d6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -125,17 +125,17 @@ core_initcall(init_zero_pfn);
 
 #if defined(SPLIT_RSS_COUNTING)
 
-static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
+void sync_mm_rss(struct mm_struct *mm)
 {
         int i;
 
         for (i = 0; i < NR_MM_COUNTERS; i++) {
-                if (task->rss_stat.count[i]) {
-                        add_mm_counter(mm, i, task->rss_stat.count[i]);
-                        task->rss_stat.count[i] = 0;
+                if (current->rss_stat.count[i]) {
+                        add_mm_counter(mm, i, current->rss_stat.count[i]);
+                        current->rss_stat.count[i] = 0;
                 }
         }
-        task->rss_stat.events = 0;
+        current->rss_stat.events = 0;
 }
 
 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
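For context on the hunk above: under SPLIT_RSS_COUNTING the hot paths batch RSS updates in a small per-task cache (current->rss_stat) and only fold them into the shared mm-wide counters now and then, which is why the renamed sync_mm_rss() can drop the task argument and always flush current. A minimal sketch of that fast path follows; the real add_mm_counter_fast() sits just below this hunk and is untouched by the patch, so the body here is illustrative rather than quoted from the diff.

/* Illustrative sketch of the SPLIT_RSS_COUNTING fast path: batch the
 * update in current's private cache when it owns the mm, otherwise
 * fall back to the shared atomic counter in mm->rss_stat. */
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
        struct task_struct *task = current;

        if (likely(task->mm == mm))
                task->rss_stat.count[member] += val;    /* cheap, no atomics */
        else
                add_mm_counter(mm, member, val);        /* shared atomic counter */
}

Since only current ever writes its own rss_stat cache, a flush of those deltas into the mm is only meaningful when done by current itself, which is the design point behind the single-argument sync_mm_rss(mm).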
@@ -157,30 +157,7 @@ static void check_sync_rss_stat(struct task_struct *task)
         if (unlikely(task != current))
                 return;
         if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
-                __sync_task_rss_stat(task, task->mm);
-}
-
-unsigned long get_mm_counter(struct mm_struct *mm, int member)
-{
-        long val = 0;
-
-        /*
-         * Don't use task->mm here...for avoiding to use task_get_mm()..
-         * The caller must guarantee task->mm is not invalid.
-         */
-        val = atomic_long_read(&mm->rss_stat.count[member]);
-        /*
-         * counter is updated in asynchronous manner and may go to minus.
-         * But it's never be expected number for users.
-         */
-        if (val < 0)
-                return 0;
-        return (unsigned long)val;
-}
-
-void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
-{
-        __sync_task_rss_stat(task, mm);
+                sync_mm_rss(task->mm);
 }
 #else /* SPLIT_RSS_COUNTING */
 
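This hunk only shows get_mm_counter() and the two-argument sync_mm_rss() wrapper leaving mm/memory.c; the reader-side helper presumably survives elsewhere in the series as a static inline, but its new home is not visible in this file's diff. A sketch reconstructed from the removed body, with the header placement and the #ifdef guard as assumptions:

/* Sketch of the replacement reader, based on the removed body above.
 * Where it lives and whether the clamp is guarded by SPLIT_RSS_COUNTING
 * are assumptions, not shown by this diff. */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
        long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
        /*
         * The shared counter is updated asynchronously from the per-task
         * caches and may transiently go negative; never report that.
         */
        if (val < 0)
                val = 0;
#endif
        return (unsigned long)val;
}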
@@ -661,7 +638,7 @@ static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
         int i;
 
         if (current->mm == mm)
-                sync_mm_rss(current, mm);
+                sync_mm_rss(mm);
         for (i = 0; i < NR_MM_COUNTERS; i++)
                 if (rss[i])
                         add_mm_counter(mm, i, rss[i]);
@@ -1247,16 +1224,24 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
         do {
                 next = pmd_addr_end(addr, end);
                 if (pmd_trans_huge(*pmd)) {
-                        if (next-addr != HPAGE_PMD_SIZE) {
+                        if (next - addr != HPAGE_PMD_SIZE) {
                                 VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
                                 split_huge_page_pmd(vma->vm_mm, pmd);
                         } else if (zap_huge_pmd(tlb, vma, pmd, addr))
-                                continue;
+                                goto next;
                         /* fall through */
                 }
-                if (pmd_none_or_clear_bad(pmd))
-                        continue;
+                /*
+                 * Here there can be other concurrent MADV_DONTNEED or
+                 * trans huge page faults running, and if the pmd is
+                 * none or trans huge it can change under us. This is
+                 * because MADV_DONTNEED holds the mmap_sem in read
+                 * mode.
+                 */
+                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+                        goto next;
                 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
+next:
                 cond_resched();
         } while (pmd++, addr = next, addr != end);
 
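pmd_none_or_trans_huge_or_clear_bad() is introduced elsewhere in this patch series, so only its call site appears here. The comment added above carries the reasoning: with mmap_sem held only for read, a concurrent THP fault or another MADV_DONTNEED can switch the pmd between none and trans-huge under us, so the helper must sample the pmd once and must not treat a huge pmd as bad. A hedged sketch of what such a helper has to do (the upstream definition may differ in detail):

/* Sketch only: read the pmd a single time so a concurrent THP fault or
 * MADV_DONTNEED cannot change it between our checks, and treat both
 * pmd_none and pmd_trans_huge as "nothing for zap_pte_range to do". */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
        pmd_t pmdval = *pmd;

        barrier();      /* keep the compiler from re-reading *pmd below */
        if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
                return 1;       /* skip: empty, or a huge pmd someone else owns */
        if (unlikely(pmd_bad(pmdval))) {
                pmd_clear_bad(pmd);     /* genuinely corrupt: warn and clear */
                return 1;
        }
        return 0;       /* stable page-table pmd: safe to walk the ptes */
}

The key property is that a huge pmd is reported as "skip" before pmd_bad() ever sees it, so zap_pmd_range() neither walks a huge pmd as a page table nor clears it out from under the concurrent THP code.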