about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c          |  2
-rw-r--r--  kernel/exit.c          |  5
-rw-r--r--  kernel/fork.c          | 31
-rw-r--r--  kernel/futex.c         |  6
-rw-r--r--  kernel/kexec.c         |  4
-rw-r--r--  kernel/power/swsusp.c  | 25
-rw-r--r--  kernel/sched.c         |  2
-rw-r--r--  kernel/timer.c         |  9
8 files changed, 48 insertions(+), 36 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index b756f527497e..2e3f4a47e7d0 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -553,7 +553,7 @@ void acct_update_integrals(struct task_struct *tsk)
553 if (delta == 0) 553 if (delta == 0)
554 return; 554 return;
555 tsk->acct_stimexpd = tsk->stime; 555 tsk->acct_stimexpd = tsk->stime;
556 tsk->acct_rss_mem1 += delta * get_mm_counter(tsk->mm, rss); 556 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
557 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; 557 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
558 } 558 }
559} 559}
diff --git a/kernel/exit.c b/kernel/exit.c
index 3b25b182d2be..79f52b85d6ed 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -839,7 +839,10 @@ fastcall NORET_TYPE void do_exit(long code)
839 preempt_count()); 839 preempt_count());
840 840
841 acct_update_integrals(tsk); 841 acct_update_integrals(tsk);
842 update_mem_hiwater(tsk); 842 if (tsk->mm) {
843 update_hiwater_rss(tsk->mm);
844 update_hiwater_vm(tsk->mm);
845 }
843 group_dead = atomic_dec_and_test(&tsk->signal->live); 846 group_dead = atomic_dec_and_test(&tsk->signal->live);
844 if (group_dead) { 847 if (group_dead) {
845 del_timer_sync(&tsk->signal->real_timer); 848 del_timer_sync(&tsk->signal->real_timer);
diff --git a/kernel/fork.c b/kernel/fork.c
index 280bd44ac441..8a069612eac3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -182,37 +182,37 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
182} 182}
183 183
184#ifdef CONFIG_MMU 184#ifdef CONFIG_MMU
185static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm) 185static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
186{ 186{
187 struct vm_area_struct * mpnt, *tmp, **pprev; 187 struct vm_area_struct *mpnt, *tmp, **pprev;
188 struct rb_node **rb_link, *rb_parent; 188 struct rb_node **rb_link, *rb_parent;
189 int retval; 189 int retval;
190 unsigned long charge; 190 unsigned long charge;
191 struct mempolicy *pol; 191 struct mempolicy *pol;
192 192
193 down_write(&oldmm->mmap_sem); 193 down_write(&oldmm->mmap_sem);
194 flush_cache_mm(current->mm); 194 flush_cache_mm(oldmm);
195 down_write(&mm->mmap_sem);
196
195 mm->locked_vm = 0; 197 mm->locked_vm = 0;
196 mm->mmap = NULL; 198 mm->mmap = NULL;
197 mm->mmap_cache = NULL; 199 mm->mmap_cache = NULL;
198 mm->free_area_cache = oldmm->mmap_base; 200 mm->free_area_cache = oldmm->mmap_base;
199 mm->cached_hole_size = ~0UL; 201 mm->cached_hole_size = ~0UL;
200 mm->map_count = 0; 202 mm->map_count = 0;
201 set_mm_counter(mm, rss, 0);
202 set_mm_counter(mm, anon_rss, 0);
203 cpus_clear(mm->cpu_vm_mask); 203 cpus_clear(mm->cpu_vm_mask);
204 mm->mm_rb = RB_ROOT; 204 mm->mm_rb = RB_ROOT;
205 rb_link = &mm->mm_rb.rb_node; 205 rb_link = &mm->mm_rb.rb_node;
206 rb_parent = NULL; 206 rb_parent = NULL;
207 pprev = &mm->mmap; 207 pprev = &mm->mmap;
208 208
209 for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) { 209 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
210 struct file *file; 210 struct file *file;
211 211
212 if (mpnt->vm_flags & VM_DONTCOPY) { 212 if (mpnt->vm_flags & VM_DONTCOPY) {
213 long pages = vma_pages(mpnt); 213 long pages = vma_pages(mpnt);
214 mm->total_vm -= pages; 214 mm->total_vm -= pages;
215 __vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file, 215 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
216 -pages); 216 -pages);
217 continue; 217 continue;
218 } 218 }
@@ -253,12 +253,8 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
253 } 253 }
254 254
255 /* 255 /*
256 * Link in the new vma and copy the page table entries: 256 * Link in the new vma and copy the page table entries.
257 * link in first so that swapoff can see swap entries.
258 * Note that, exceptionally, here the vma is inserted
259 * without holding mm->mmap_sem.
260 */ 257 */
261 spin_lock(&mm->page_table_lock);
262 *pprev = tmp; 258 *pprev = tmp;
263 pprev = &tmp->vm_next; 259 pprev = &tmp->vm_next;
264 260
@@ -267,8 +263,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
267 rb_parent = &tmp->vm_rb; 263 rb_parent = &tmp->vm_rb;
268 264
269 mm->map_count++; 265 mm->map_count++;
270 retval = copy_page_range(mm, current->mm, tmp); 266 retval = copy_page_range(mm, oldmm, tmp);
271 spin_unlock(&mm->page_table_lock);
272 267
273 if (tmp->vm_ops && tmp->vm_ops->open) 268 if (tmp->vm_ops && tmp->vm_ops->open)
274 tmp->vm_ops->open(tmp); 269 tmp->vm_ops->open(tmp);
@@ -277,9 +272,9 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
277 goto out; 272 goto out;
278 } 273 }
279 retval = 0; 274 retval = 0;
280
281out: 275out:
282 flush_tlb_mm(current->mm); 276 up_write(&mm->mmap_sem);
277 flush_tlb_mm(oldmm);
283 up_write(&oldmm->mmap_sem); 278 up_write(&oldmm->mmap_sem);
284 return retval; 279 return retval;
285fail_nomem_policy: 280fail_nomem_policy:
@@ -323,6 +318,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
323 INIT_LIST_HEAD(&mm->mmlist); 318 INIT_LIST_HEAD(&mm->mmlist);
324 mm->core_waiters = 0; 319 mm->core_waiters = 0;
325 mm->nr_ptes = 0; 320 mm->nr_ptes = 0;
321 set_mm_counter(mm, file_rss, 0);
322 set_mm_counter(mm, anon_rss, 0);
326 spin_lock_init(&mm->page_table_lock); 323 spin_lock_init(&mm->page_table_lock);
327 rwlock_init(&mm->ioctx_list_lock); 324 rwlock_init(&mm->ioctx_list_lock);
328 mm->ioctx_list = NULL; 325 mm->ioctx_list = NULL;
@@ -499,7 +496,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
499 if (retval) 496 if (retval)
500 goto free_pt; 497 goto free_pt;
501 498
502 mm->hiwater_rss = get_mm_counter(mm,rss); 499 mm->hiwater_rss = get_mm_rss(mm);
503 mm->hiwater_vm = mm->total_vm; 500 mm->hiwater_vm = mm->total_vm;
504 501
505good_mm: 502good_mm:
diff --git a/kernel/futex.c b/kernel/futex.c
index ca05fe6a70b2..3b4d5ad44cc6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -205,15 +205,13 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
205 /* 205 /*
206 * Do a quick atomic lookup first - this is the fastpath. 206 * Do a quick atomic lookup first - this is the fastpath.
207 */ 207 */
208 spin_lock(&current->mm->page_table_lock); 208 page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
209 page = follow_page(mm, uaddr, 0);
210 if (likely(page != NULL)) { 209 if (likely(page != NULL)) {
211 key->shared.pgoff = 210 key->shared.pgoff =
212 page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 211 page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
213 spin_unlock(&current->mm->page_table_lock); 212 put_page(page);
214 return 0; 213 return 0;
215 } 214 }
216 spin_unlock(&current->mm->page_table_lock);
217 215
218 /* 216 /*
219 * Do it the general way. 217 * Do it the general way.
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 36c5d9cd4cc1..2c95848fbce8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -334,7 +334,7 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
334 if (pages) { 334 if (pages) {
335 unsigned int count, i; 335 unsigned int count, i;
336 pages->mapping = NULL; 336 pages->mapping = NULL;
337 pages->private = order; 337 set_page_private(pages, order);
338 count = 1 << order; 338 count = 1 << order;
339 for (i = 0; i < count; i++) 339 for (i = 0; i < count; i++)
340 SetPageReserved(pages + i); 340 SetPageReserved(pages + i);
@@ -347,7 +347,7 @@ static void kimage_free_pages(struct page *page)
347{ 347{
348 unsigned int order, count, i; 348 unsigned int order, count, i;
349 349
350 order = page->private; 350 order = page_private(page);
351 count = 1 << order; 351 count = 1 << order;
352 for (i = 0; i < count; i++) 352 for (i = 0; i < count; i++)
353 ClearPageReserved(page + i); 353 ClearPageReserved(page + i);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 10bc5ec496d7..016504ccfccf 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -578,15 +578,23 @@ static int save_highmem_zone(struct zone *zone)
578 continue; 578 continue;
579 page = pfn_to_page(pfn); 579 page = pfn_to_page(pfn);
580 /* 580 /*
581 * This condition results from rvmalloc() sans vmalloc_32() 581 * PageReserved results from rvmalloc() sans vmalloc_32()
582 * and architectural memory reservations. This should be 582 * and architectural memory reservations.
583 * corrected eventually when the cases giving rise to this 583 *
584 * are better understood. 584 * rvmalloc should not cause this, because all implementations
585 * appear to always be using vmalloc_32 on architectures with
586 * highmem. This is a good thing, because we would like to save
587 * rvmalloc pages.
588 *
589 * It appears to be triggered by pages which do not point to
590 * valid memory (see arch/i386/mm/init.c:one_highpage_init(),
591 * which sets PageReserved if the page does not point to valid
592 * RAM.
593 *
594 * XXX: must remove usage of PageReserved!
585 */ 595 */
586 if (PageReserved(page)) { 596 if (PageReserved(page))
587 printk("highmem reserved page?!\n");
588 continue; 597 continue;
589 }
590 BUG_ON(PageNosave(page)); 598 BUG_ON(PageNosave(page));
591 if (PageNosaveFree(page)) 599 if (PageNosaveFree(page))
592 continue; 600 continue;
@@ -672,10 +680,9 @@ static int saveable(struct zone * zone, unsigned long * zone_pfn)
672 return 0; 680 return 0;
673 681
674 page = pfn_to_page(pfn); 682 page = pfn_to_page(pfn);
675 BUG_ON(PageReserved(page) && PageNosave(page));
676 if (PageNosave(page)) 683 if (PageNosave(page))
677 return 0; 684 return 0;
678 if (PageReserved(page) && pfn_is_nosave(pfn)) { 685 if (pfn_is_nosave(pfn)) {
679 pr_debug("[nosave pfn 0x%lx]", pfn); 686 pr_debug("[nosave pfn 0x%lx]", pfn);
680 return 0; 687 return 0;
681 } 688 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 1e5cafdf4e27..4f26c544d02c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2511,8 +2511,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
2511 cpustat->idle = cputime64_add(cpustat->idle, tmp); 2511 cpustat->idle = cputime64_add(cpustat->idle, tmp);
2512 /* Account for system time used */ 2512 /* Account for system time used */
2513 acct_update_integrals(p); 2513 acct_update_integrals(p);
2514 /* Update rss highwater mark */
2515 update_mem_hiwater(p);
2516} 2514}
2517 2515
2518/* 2516/*
diff --git a/kernel/timer.c b/kernel/timer.c
index 3ba10fa35b60..6a2e5f8dc725 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -752,6 +752,15 @@ static void second_overflow(void)
752 else 752 else
753 time_adj += (time_adj >> 2) + (time_adj >> 5); 753 time_adj += (time_adj >> 2) + (time_adj >> 5);
754#endif 754#endif
755#if HZ == 250
756 /* Compensate for (HZ==250) != (1 << SHIFT_HZ).
757 * Add 1.5625% and 0.78125% to get 255.85938; => only 0.05% error (p. 14)
758 */
759 if (time_adj < 0)
760 time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
761 else
762 time_adj += (time_adj >> 6) + (time_adj >> 7);
763#endif
755#if HZ == 1000 764#if HZ == 1000
756 /* Compensate for (HZ==1000) != (1 << SHIFT_HZ). 765 /* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
757 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14) 766 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)