path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c              |  2
-rw-r--r--  kernel/audit.c             |  6
-rw-r--r--  kernel/auditsc.c           |  2
-rw-r--r--  kernel/exit.c              |  5
-rw-r--r--  kernel/fork.c              | 31
-rw-r--r--  kernel/futex.c             |  6
-rw-r--r--  kernel/kexec.c             | 11
-rw-r--r--  kernel/posix-cpu-timers.c  | 74
-rw-r--r--  kernel/power/swsusp.c      | 27
-rw-r--r--  kernel/sched.c             |  3
-rw-r--r--  kernel/signal.c            |  2
-rw-r--r--  kernel/time.c              |  1
-rw-r--r--  kernel/timer.c             |  9
13 files changed, 98 insertions(+), 81 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index b756f527497e..2e3f4a47e7d0 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -553,7 +553,7 @@ void acct_update_integrals(struct task_struct *tsk)
 		if (delta == 0)
 			return;
 		tsk->acct_stimexpd = tsk->stime;
-		tsk->acct_rss_mem1 += delta * get_mm_counter(tsk->mm, rss);
+		tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
 		tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
 	}
 }
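
This hunk rides on the RSS counter rework visible in the fork.c hunks below: the single per-mm rss counter is split into file_rss and anon_rss, and callers switch to a get_mm_rss() helper that sums the two. A minimal sketch of the accessors this implies, assuming the split counter fields; the real definitions are macros in include/linux/sched.h and may differ in detail:

	/* Sketch only, not verbatim kernel source: per-mm RSS after the
	 * file/anon split. get_mm_rss() is what acct_update_integrals()
	 * now calls instead of reading the old unified rss counter. */
	#define get_mm_counter(mm, member)	((unsigned long)(mm)->_##member)
	#define get_mm_rss(mm)	\
		(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))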
diff --git a/kernel/audit.c b/kernel/audit.c
index aefa73a8a586..0c56320d38dc 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -133,7 +133,7 @@ struct audit_buffer {
 	struct list_head     list;
 	struct sk_buff       *skb;	/* formatted skb ready to send */
 	struct audit_context *ctx;	/* NULL or associated context */
-	int		     gfp_mask;
+	gfp_t		     gfp_mask;
 };
 
 static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
@@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx,
  * will be written at syscall exit. If there is no associated task, tsk
  * should be NULL. */
 
-struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask,
+struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 				     int type)
 {
 	struct audit_buffer *ab = NULL;
@@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab)
 /* Log an audit record. This is a convenience function that calls
  * audit_log_start, audit_log_vformat, and audit_log_end. It may be
  * called in any context. */
-void audit_log(struct audit_context *ctx, int gfp_mask, int type,
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
 	       const char *fmt, ...)
 {
 	struct audit_buffer *ab;
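
The int-to-gfp_t changes in this file (and in auditsc.c, kexec.c, and swsusp.c below) are purely type-level: gfp_t is a typedef over unsigned int carrying a sparse annotation, so passing a plain integer where allocation flags are expected becomes a static-checker warning while the compiled code is unchanged. Roughly, per include/linux/gfp.h of this era (the exact annotation is an assumption; it started as __nocast and later became __bitwise):

	/* Under sparse, the annotation makes gfp_t a distinct type;
	 * an ordinary compiler just sees unsigned int. */
	typedef unsigned int __nocast gfp_t;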
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 88696f639aab..d8a68509e729 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab)
 	up_read(&mm->mmap_sem);
 }
 
-static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask)
+static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask)
 {
 	int i;
 	struct audit_buffer *ab;
diff --git a/kernel/exit.c b/kernel/exit.c
index 3b25b182d2be..79f52b85d6ed 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -839,7 +839,10 @@ fastcall NORET_TYPE void do_exit(long code)
 			preempt_count());
 
 	acct_update_integrals(tsk);
-	update_mem_hiwater(tsk);
+	if (tsk->mm) {
+		update_hiwater_rss(tsk->mm);
+		update_hiwater_vm(tsk->mm);
+	}
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		del_timer_sync(&tsk->signal->real_timer);
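
update_mem_hiwater(tsk) handled both high-water marks and tolerated a missing mm internally; its replacements are per-mm, so the caller now checks tsk->mm itself (kernel threads exit without one). A sketch of what the two helpers do, assuming hiwater_rss/hiwater_vm fields in struct mm_struct; the actual helpers live in include/linux/mm.h and may be spelled differently:

	/* Sketch: the marks only ever ratchet upwards. */
	#define update_hiwater_rss(mm) do {				\
		if ((mm)->hiwater_rss < get_mm_rss(mm))			\
			(mm)->hiwater_rss = get_mm_rss(mm);		\
	} while (0)
	#define update_hiwater_vm(mm) do {				\
		if ((mm)->hiwater_vm < (mm)->total_vm)			\
			(mm)->hiwater_vm = (mm)->total_vm;		\
	} while (0)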
diff --git a/kernel/fork.c b/kernel/fork.c
index 280bd44ac441..8a069612eac3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -182,37 +182,37 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 }
 
 #ifdef CONFIG_MMU
-static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
+static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	struct vm_area_struct * mpnt, *tmp, **pprev;
+	struct vm_area_struct *mpnt, *tmp, **pprev;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
 	struct mempolicy *pol;
 
 	down_write(&oldmm->mmap_sem);
-	flush_cache_mm(current->mm);
+	flush_cache_mm(oldmm);
+	down_write(&mm->mmap_sem);
+
 	mm->locked_vm = 0;
 	mm->mmap = NULL;
 	mm->mmap_cache = NULL;
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	set_mm_counter(mm, rss, 0);
-	set_mm_counter(mm, anon_rss, 0);
 	cpus_clear(mm->cpu_vm_mask);
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
 	pprev = &mm->mmap;
 
-	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
+	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 		struct file *file;
 
 		if (mpnt->vm_flags & VM_DONTCOPY) {
 			long pages = vma_pages(mpnt);
 			mm->total_vm -= pages;
-			__vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
+			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
 								-pages);
 			continue;
 		}
@@ -253,12 +253,8 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 		}
 
 		/*
-		 * Link in the new vma and copy the page table entries:
-		 * link in first so that swapoff can see swap entries.
-		 * Note that, exceptionally, here the vma is inserted
-		 * without holding mm->mmap_sem.
+		 * Link in the new vma and copy the page table entries.
 		 */
-		spin_lock(&mm->page_table_lock);
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
 
@@ -267,8 +263,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 		rb_parent = &tmp->vm_rb;
 
 		mm->map_count++;
-		retval = copy_page_range(mm, current->mm, tmp);
-		spin_unlock(&mm->page_table_lock);
+		retval = copy_page_range(mm, oldmm, tmp);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
@@ -277,9 +272,9 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 			goto out;
 	}
 	retval = 0;
-
 out:
-	flush_tlb_mm(current->mm);
+	up_write(&mm->mmap_sem);
+	flush_tlb_mm(oldmm);
 	up_write(&oldmm->mmap_sem);
 	return retval;
 fail_nomem_policy:
@@ -323,6 +318,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->core_waiters = 0;
 	mm->nr_ptes = 0;
+	set_mm_counter(mm, file_rss, 0);
+	set_mm_counter(mm, anon_rss, 0);
 	spin_lock_init(&mm->page_table_lock);
 	rwlock_init(&mm->ioctx_list_lock);
 	mm->ioctx_list = NULL;
@@ -499,7 +496,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	if (retval)
 		goto free_pt;
 
-	mm->hiwater_rss = get_mm_counter(mm,rss);
+	mm->hiwater_rss = get_mm_rss(mm);
 	mm->hiwater_vm = mm->total_vm;
 
 good_mm:
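
The dup_mmap() rework drops the old trick of linking the new vma without mmap_sem and relying on page_table_lock around copy_page_range(). Instead the child's mmap_sem is write-locked for the whole copy, nested inside the parent's. The nesting cannot deadlock because the child mm is freshly allocated and invisible to every other task; the order is always parent first, child second:

	/* Locking pattern introduced above (illustrative sketch): */
	down_write(&oldmm->mmap_sem);	/* parent mm, shared with others */
	down_write(&mm->mmap_sem);	/* child mm: brand new, unshared  */
	/* ... duplicate vmas, copy_page_range() ... */
	up_write(&mm->mmap_sem);
	up_write(&oldmm->mmap_sem);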
diff --git a/kernel/futex.c b/kernel/futex.c
index ca05fe6a70b2..3b4d5ad44cc6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -205,15 +205,13 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
 	/*
 	 * Do a quick atomic lookup first - this is the fastpath.
 	 */
-	spin_lock(&current->mm->page_table_lock);
-	page = follow_page(mm, uaddr, 0);
+	page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
 	if (likely(page != NULL)) {
 		key->shared.pgoff =
 			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		spin_unlock(&current->mm->page_table_lock);
+		put_page(page);
 		return 0;
 	}
-	spin_unlock(&current->mm->page_table_lock);
 
 	/*
 	 * Do it the general way.
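
Rather than holding page_table_lock across the whole fastpath, get_futex_key() now has follow_page() take a reference on the page (FOLL_GET) and mark it accessed (FOLL_TOUCH), reads the pgoff, and drops the reference with put_page(). The flag bits come from include/linux/mm.h; the values below are quoted from that era as an illustration of the interface, not a stable ABI:

	#define FOLL_WRITE	0x01	/* check pte is writable */
	#define FOLL_TOUCH	0x02	/* mark page accessed */
	#define FOLL_GET	0x04	/* do get_page on page */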
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cdd4dcd8fb63..2c95848fbce8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p)
 static int kimage_is_destination_range(struct kimage *image,
 				       unsigned long start, unsigned long end);
 static struct page *kimage_alloc_page(struct kimage *image,
-				       unsigned int gfp_mask,
+				       gfp_t gfp_mask,
 				       unsigned long dest);
 
 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
@@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image,
 	return 0;
 }
 
-static struct page *kimage_alloc_pages(unsigned int gfp_mask,
-					unsigned int order)
+static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *pages;
 
@@ -335,7 +334,7 @@ static struct page *kimage_alloc_pages(unsigned int gfp_mask,
 	if (pages) {
 		unsigned int count, i;
 		pages->mapping = NULL;
-		pages->private = order;
+		set_page_private(pages, order);
 		count = 1 << order;
 		for (i = 0; i < count; i++)
 			SetPageReserved(pages + i);
@@ -348,7 +347,7 @@ static void kimage_free_pages(struct page *page)
 {
 	unsigned int order, count, i;
 
-	order = page->private;
+	order = page_private(page);
 	count = 1 << order;
 	for (i = 0; i < count; i++)
 		ClearPageReserved(page + i);
@@ -654,7 +653,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
 }
 
 static struct page *kimage_alloc_page(struct kimage *image,
-					unsigned int gfp_mask,
+					gfp_t gfp_mask,
 					unsigned long destination)
 {
 	/*
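
Direct stores to page->private give way to accessor macros, so the field can later change representation without touching every user. At this point the accessors are thin wrappers, roughly as defined in include/linux/mm.h:

	#define page_private(page)		((page)->private)
	#define set_page_private(page, v)	((page)->private = (v))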
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index d30b304a3384..bf374fceb39c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
 				  union cpu_time_count now)
 {
 	int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
 		for (i = 0; incr < delta - incr; i++)
 			incr = incr << 1;
 		for (; i >= 0; incr >>= 1, i--) {
-			if (delta <= incr)
+			if (delta < incr)
 				continue;
 			timer->it.cpu.expires.sched += incr;
 			timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
 		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
 			incr = cputime_add(incr, incr);
 		for (; i >= 0; incr = cputime_halve(incr), i--) {
-			if (cputime_le(delta, incr))
+			if (cputime_lt(delta, incr))
 				continue;
 			timer->it.cpu.expires.cpu =
 				cputime_add(timer->it.cpu.expires.cpu, incr);
@@ -380,14 +380,9 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 int posix_cpu_timer_del(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
+	int ret = 0;
 
-	if (timer->it.cpu.firing)
-		return TIMER_RETRY;
-
-	if (unlikely(p == NULL))
-		return 0;
-
-	if (!list_empty(&timer->it.cpu.entry)) {
+	if (likely(p != NULL)) {
 		read_lock(&tasklist_lock);
 		if (unlikely(p->signal == NULL)) {
 			/*
@@ -396,18 +391,20 @@ int posix_cpu_timer_del(struct k_itimer *timer)
 			 */
 			BUG_ON(!list_empty(&timer->it.cpu.entry));
 		} else {
-			/*
-			 * Take us off the task's timer list.
-			 */
 			spin_lock(&p->sighand->siglock);
-			list_del(&timer->it.cpu.entry);
+			if (timer->it.cpu.firing)
+				ret = TIMER_RETRY;
+			else
+				list_del(&timer->it.cpu.entry);
 			spin_unlock(&p->sighand->siglock);
 		}
 		read_unlock(&tasklist_lock);
+
+		if (!ret)
+			put_task_struct(p);
 	}
-	put_task_struct(p);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -424,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
 	cputime_t ptime = cputime_add(utime, stime);
 
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, ptime)) {
 			timer->expires.cpu = cputime_zero;
@@ -437,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, utime)) {
 			timer->expires.cpu = cputime_zero;
@@ -450,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (timer->expires.sched < sched_time) {
 			timer->expires.sched = 0;
@@ -495,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
 	struct task_struct *t = p;
 	unsigned int nthreads = atomic_read(&p->signal->live);
 
+	if (!nthreads)
+		return;
+
 	switch (clock_idx) {
 	default:
 		BUG();
@@ -503,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
 				   nthreads);
 		do {
-			if (!unlikely(t->exit_state)) {
+			if (!unlikely(t->flags & PF_EXITING)) {
 				ticks = cputime_add(prof_ticks(t), left);
 				if (cputime_eq(t->it_prof_expires,
 					       cputime_zero) ||
@@ -518,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
 				   nthreads);
 		do {
-			if (!unlikely(t->exit_state)) {
+			if (!unlikely(t->flags & PF_EXITING)) {
 				ticks = cputime_add(virt_ticks(t), left);
 				if (cputime_eq(t->it_virt_expires,
 					       cputime_zero) ||
@@ -533,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		nsleft = expires.sched - val.sched;
 		do_div(nsleft, nthreads);
 		do {
-			if (!unlikely(t->exit_state)) {
+			if (!unlikely(t->flags & PF_EXITING)) {
 				ns = t->sched_time + nsleft;
 				if (t->it_sched_expires == 0 ||
 				    t->it_sched_expires > ns) {
@@ -572,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	struct cpu_timer_list *next;
 	unsigned long i;
 
+	if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+		return;
+
 	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
 		p->cpu_timers : p->signal->cpu_timers);
 	head += CPUCLOCK_WHICH(timer->it_clock);
@@ -582,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	listpos = head;
 	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 		list_for_each_entry(next, head, entry) {
-			if (next->expires.sched > nt->expires.sched) {
-				listpos = &next->entry;
+			if (next->expires.sched > nt->expires.sched)
 				break;
-			}
+			listpos = &next->entry;
 		}
 	} else {
 		list_for_each_entry(next, head, entry) {
-			if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
-				listpos = &next->entry;
+			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
 				break;
-			}
+			listpos = &next->entry;
 		}
 	}
 	list_add(&nt->entry, listpos);
@@ -736,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 * Disarm any old timer after extracting its expiry time.
 	 */
 	BUG_ON(!irqs_disabled());
+
+	ret = 0;
 	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
-	list_del_init(&timer->it.cpu.entry);
+	if (unlikely(timer->it.cpu.firing)) {
+		timer->it.cpu.firing = -1;
+		ret = TIMER_RETRY;
+	} else
+		list_del_init(&timer->it.cpu.entry);
 	spin_unlock(&p->sighand->siglock);
 
 	/*
@@ -786,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		}
 	}
 
-	if (unlikely(timer->it.cpu.firing)) {
+	if (unlikely(ret)) {
 		/*
 		 * We are colliding with the timer actually firing.
 		 * Punt after filling in the timer's old value, and
@@ -794,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
 		read_unlock(&tasklist_lock);
-		timer->it.cpu.firing = -1;
-		ret = TIMER_RETRY;
 		goto out;
 	}
 
@@ -1169,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long sched_left, sched;
 	const unsigned int nthreads = atomic_read(&sig->live);
 
+	if (!nthreads)
+		return;
+
 	prof_left = cputime_sub(prof_expires, utime);
 	prof_left = cputime_sub(prof_left, stime);
 	prof_left = cputime_div(prof_left, nthreads);
@@ -1205,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
 
 			do {
 				t = next_thread(t);
-			} while (unlikely(t->exit_state));
+			} while (unlikely(t->flags & PF_EXITING));
 		} while (t != tsk);
 	}
 }
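
The common thread in this file: it.cpu.firing is now tested under p->sighand->siglock, and posix_cpu_timer_del()/posix_cpu_timer_set() report a collision with the firing path as TIMER_RETRY instead of deleting a timer out from under it. The generic layer is expected to back off and retry; a simplified sketch of that consumer loop, modeled on kernel/posix-timers.c rather than copied from it:

	/* Sketch: generic deletion path backing off on TIMER_RETRY. */
retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);	/* let the firing CPU finish */
		goto retry_delete;
	}
	/* ... timer is now safely off the lists; free it ... */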
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 2d5c45676442..016504ccfccf 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -578,15 +578,23 @@ static int save_highmem_zone(struct zone *zone)
 			continue;
 		page = pfn_to_page(pfn);
 		/*
-		 * This condition results from rvmalloc() sans vmalloc_32()
-		 * and architectural memory reservations. This should be
-		 * corrected eventually when the cases giving rise to this
-		 * are better understood.
+		 * PageReserved results from rvmalloc() sans vmalloc_32()
+		 * and architectural memory reservations.
+		 *
+		 * rvmalloc should not cause this, because all implementations
+		 * appear to always be using vmalloc_32 on architectures with
+		 * highmem. This is a good thing, because we would like to save
+		 * rvmalloc pages.
+		 *
+		 * It appears to be triggered by pages which do not point to
+		 * valid memory (see arch/i386/mm/init.c:one_highpage_init(),
+		 * which sets PageReserved if the page does not point to valid
+		 * RAM.
+		 *
+		 * XXX: must remove usage of PageReserved!
 		 */
-		if (PageReserved(page)) {
-			printk("highmem reserved page?!\n");
+		if (PageReserved(page))
 			continue;
-		}
 		BUG_ON(PageNosave(page));
 		if (PageNosaveFree(page))
 			continue;
@@ -672,10 +680,9 @@ static int saveable(struct zone * zone, unsigned long * zone_pfn)
 		return 0;
 
 	page = pfn_to_page(pfn);
-	BUG_ON(PageReserved(page) && PageNosave(page));
 	if (PageNosave(page))
 		return 0;
-	if (PageReserved(page) && pfn_is_nosave(pfn)) {
+	if (pfn_is_nosave(pfn)) {
 		pr_debug("[nosave pfn 0x%lx]", pfn);
 		return 0;
 	}
@@ -1095,7 +1102,7 @@ static inline void eat_page(void *page)
 	*eaten_memory = c;
 }
 
-unsigned long get_usable_page(unsigned gfp_mask)
+unsigned long get_usable_page(gfp_t gfp_mask)
 {
 	unsigned long m;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 1f31a528fdba..4f26c544d02c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2511,8 +2511,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 		cpustat->idle = cputime64_add(cpustat->idle, tmp);
 	/* Account for system time used */
 	acct_update_integrals(p);
-	/* Update rss highwater mark */
-	update_mem_hiwater(p);
 }
 
 /*
@@ -3879,6 +3877,7 @@ EXPORT_SYMBOL(cpu_present_map);
 
 #ifndef CONFIG_SMP
 cpumask_t cpu_online_map = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cpu_online_map);
 cpumask_t cpu_possible_map = CPU_MASK_ALL;
 #endif
 
diff --git a/kernel/signal.c b/kernel/signal.c
index f2b96b08fb44..6904bbbfe116 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -406,6 +406,8 @@ void __exit_signal(struct task_struct *tsk)
 
 void exit_signal(struct task_struct *tsk)
 {
+	atomic_dec(&tsk->signal->live);
+
 	write_lock_irq(&tasklist_lock);
 	__exit_signal(tsk);
 	write_unlock_irq(&tasklist_lock);
diff --git a/kernel/time.c b/kernel/time.c
index 40c2410ac99a..a3c2100470e1 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -532,6 +532,7 @@ int do_settimeofday (struct timespec *tv)
 	clock_was_set();
 	return 0;
 }
+EXPORT_SYMBOL(do_settimeofday);
 
 void do_gettimeofday (struct timeval *tv)
 {
diff --git a/kernel/timer.c b/kernel/timer.c
index 3ba10fa35b60..6a2e5f8dc725 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -752,6 +752,15 @@ static void second_overflow(void)
 	else
 		time_adj += (time_adj >> 2) + (time_adj >> 5);
 #endif
+#if HZ == 250
+	/* Compensate for (HZ==250) != (1 << SHIFT_HZ).
+	 * Add 1.5625% and 0.78125% to get 255.85938; => only 0.05% error (p. 14)
+	 */
+	if (time_adj < 0)
+		time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
+	else
+		time_adj += (time_adj >> 6) + (time_adj >> 7);
+#endif
 #if HZ == 1000
 	/* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
 	 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
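
The new HZ == 250 branch mirrors the existing HZ == 100 and HZ == 1000 cases: with SHIFT_HZ == 8 the NTP code scales as if HZ were 256, so the adjustment must be inflated by roughly 256/250. Adding 1/64 (1.5625%) and 1/128 (0.78125%) of the value yields 250 * (1 + 1/64 + 1/128) = 255.85938, within the quoted 0.05% of 256. A small standalone check of that arithmetic (illustration only):

	#include <stdio.h>

	int main(void)
	{
		double hz = 250.0;
		/* the shifts above: x >> 6 is x/64, x >> 7 is x/128 */
		double approx = hz + hz / 64 + hz / 128;

		printf("approx = %.5f (target 256)\n", approx); /* 255.85938 */
		printf("error  = %.3f%%\n", (256.0 - approx) / 256.0 * 100.0);
		return 0;
	}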