path: root/kernel
author	Ingo Molnar <mingo@kernel.org>	2012-09-04 08:31:00 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-09-04 08:31:00 -0400
commit	59f979455d7209171ab10a72c8df5c2512976cb4
tree	c4c7fc48bd79bf8acbe848a1b979fa9e8ab4ac6a /kernel
parent	b9bb50db9126c4ccad78af2dfb77277ca17c9b64
parent	9450d57eab5cad36774c297da123062744472588
Merge branch 'sched/urgent' into sched/core
Merge in the current fixes branch, we are going to apply dependent patches.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c           | 19
-rw-r--r--	kernel/fork.c                 |  4
-rw-r--r--	kernel/sched/core.c           | 34
-rw-r--r--	kernel/sched/fair.c           |  9
-rw-r--r--	kernel/sched/rt.c             |  1
-rw-r--r--	kernel/sched/sched.h          |  1
-rw-r--r--	kernel/task_work.c            |  1
-rw-r--r--	kernel/time/tick-sched.c      |  1
-rw-r--r--	kernel/time/timekeeping.c     | 39
-rw-r--r--	kernel/timer.c                |  9
-rw-r--r--	kernel/trace/trace_syscalls.c |  4
11 files changed, 69 insertions(+), 53 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 3a5ca582ba1e..ed206fd88cca 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
 		spin_unlock(&hash_lock);
 		spin_unlock(&entry->lock);
 		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
 		goto out;
 	}
 
@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
 
 	fsnotify_duplicate_mark(&new->mark, entry);
 	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
-		free_chunk(new);
+		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
 
@@ -293,7 +292,7 @@ static void untag_chunk(struct node *p)
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
 	fsnotify_destroy_mark(entry);
-	fsnotify_put_mark(entry);
+	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 	goto out;
 
 Fallback:
@@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 
 	entry = &chunk->mark;
 	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
-		free_chunk(chunk);
+		fsnotify_put_mark(entry);
 		return -ENOSPC;
 	}
 
@@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
 	insert_hash(chunk);
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	fsnotify_put_mark(entry);	/* drop initial reference */
 	return 0;
 }
 
@@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	fsnotify_duplicate_mark(chunk_entry, old_entry);
 	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
-		free_chunk(chunk);
+		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
 	}
@@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_unlock(&chunk_entry->lock);
 	spin_unlock(&old_entry->lock);
 	fsnotify_destroy_mark(old_entry);
+	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
-	fsnotify_put_mark(old_entry); /* and kill it */
 	return 0;
 }
 
@@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
 	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
 
 	evict_chunk(chunk);
-	fsnotify_put_mark(entry);
+
+	/*
+	 * We are guaranteed to have at least one reference to the mark from
+	 * either the inode or the caller of fsnotify_destroy_mark().
+	 */
+	BUG_ON(atomic_read(&entry->refcnt) < 1);
 }
 
 static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
diff --git a/kernel/fork.c b/kernel/fork.c
index 3bd2280d79f6..2c8857e12855 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -455,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (retval)
 			goto out;
 
-		if (file && uprobe_mmap(tmp))
-			goto out;
+		if (file)
+			uprobe_mmap(tmp);
 	}
 	/* a new mm has just been created */
 	arch_dup_mmap(oldmm, mm);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78d9c965433a..ae66229238a0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4784,27 +4784,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-	struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-	rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-	rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-	rq->calc_load_active = 0;
+	long delta = calc_load_fold_active(rq);
+	if (delta)
+		atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
@@ -4832,9 +4822,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5098,8 +5085,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		BUG_ON(rq->nr_running != 1); /* the migration thread */
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-		migrate_nr_uninterruptible(rq);
-		calc_global_load_remove(rq);
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01d3eda6b7f9..1ca4fe423528 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 	return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
@@ -3630,7 +3630,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
@@ -3777,7 +3776,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
@@ -4927,6 +4925,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
 	update_sysctl();
+
+	/* Ensure any throttled groups are reachable by pick_next_task */
+	unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 944cb68420e9..e0b7ba9c040f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -691,6 +691,7 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
+		rt_rq->rt_throttled = 0;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 804c2e5e7872..09871698e80c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1157,7 +1157,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 91d4e1742a0c..d320d44903bd 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -75,6 +75,7 @@ void task_work_run(void)
 			p = q->next;
 			q->func(q);
 			q = p;
+			cond_resched();
 		}
 	}
 }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 024540f97f74..3a9e5d5c1091 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -573,6 +573,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	tick_do_update_jiffies64(now);
 	update_cpu_load_nohz();
 
+	calc_load_exit_idle();
 	touch_softlockup_watchdog();
 	/*
 	 * Cancel the scheduled timer and restore the tick
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e16af197a2bc..34e5eac81424 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -115,6 +115,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
 {
 	tk->xtime_sec += ts->tv_sec;
 	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+	tk_normalize_xtime(tk);
 }
 
 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
@@ -276,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	tk->xtime_nsec += cycle_delta * tk->mult;
 
 	/* If arch requires, add in gettimeoffset() */
-	tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
+	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
 
 	tk_normalize_xtime(tk);
 
@@ -427,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)
 	struct timespec ts_delta, xt;
 	unsigned long flags;
 
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+	if (!timespec_valid_strict(tv))
 		return -EINVAL;
 
 	write_seqlock_irqsave(&tk->lock, flags);
@@ -463,6 +464,8 @@ int timekeeping_inject_offset(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
+	struct timespec tmp;
+	int ret = 0;
 
 	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
 		return -EINVAL;
@@ -471,10 +474,17 @@ int timekeeping_inject_offset(struct timespec *ts)
 
 	timekeeping_forward_now(tk);
 
+	/* Make sure the proposed value is valid */
+	tmp = timespec_add(tk_xtime(tk), *ts);
+	if (!timespec_valid_strict(&tmp)) {
+		ret = -EINVAL;
+		goto error;
+	}
 
 	tk_xtime_add(tk, ts);
 	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
 
+error: /* even if we error out, we forwarded the time, so call update */
 	timekeeping_update(tk, true);
 
 	write_sequnlock_irqrestore(&tk->lock, flags);
@@ -482,7 +492,7 @@ int timekeeping_inject_offset(struct timespec *ts)
 	/* signal hrtimers about time change */
 	clock_was_set();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(timekeeping_inject_offset);
 
@@ -649,7 +659,20 @@ void __init timekeeping_init(void)
 	struct timespec now, boot, tmp;
 
 	read_persistent_clock(&now);
+	if (!timespec_valid_strict(&now)) {
+		pr_warn("WARNING: Persistent clock returned invalid value!\n"
+			"         Check your CMOS/BIOS settings.\n");
+		now.tv_sec = 0;
+		now.tv_nsec = 0;
+	}
+
 	read_boot_clock(&boot);
+	if (!timespec_valid_strict(&boot)) {
+		pr_warn("WARNING: Boot clock returned invalid value!\n"
+			"         Check your CMOS/BIOS settings.\n");
+		boot.tv_sec = 0;
+		boot.tv_nsec = 0;
+	}
 
 	seqlock_init(&tk->lock);
 
@@ -690,7 +713,7 @@ static struct timespec timekeeping_suspend_time;
 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 							struct timespec *delta)
 {
-	if (!timespec_valid(delta)) {
+	if (!timespec_valid_strict(delta)) {
 		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
 					"sleep delta value!\n");
 		return;
@@ -1129,6 +1152,10 @@ static void update_wall_time(void)
 	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
 
+	/* Check if there's really nothing to do */
+	if (offset < tk->cycle_interval)
+		goto out;
+
 	/*
 	 * With NO_HZ we may have to accumulate many cycle_intervals
 	 * (think "ticks") worth of time at once. To do this efficiently,
@@ -1161,9 +1188,9 @@ static void update_wall_time(void)
 	 * the vsyscall implementations are converted to use xtime_nsec
 	 * (shifted nanoseconds), this can be killed.
 	 */
-	remainder = tk->xtime_nsec & ((1 << tk->shift) - 1);
+	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
 	tk->xtime_nsec -= remainder;
-	tk->xtime_nsec += 1 << tk->shift;
+	tk->xtime_nsec += 1ULL << tk->shift;
 	tk->ntp_error += remainder << tk->ntp_error_shift;
 
 	/*
diff --git a/kernel/timer.c b/kernel/timer.c
index a61c09374eba..8c5e7b908c68 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1407,13 +1407,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
 
 #endif
 
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
- * should be moved into arch/i386 instead?
- */
-
 /**
  * sys_getpid - return the thread group id of the current process
  *
@@ -1469,8 +1462,6 @@ SYSCALL_DEFINE0(getegid)
 	return from_kgid_munged(current_user_ns(), current_egid());
 }
 
-#endif
-
 static void process_timeout(unsigned long __data)
 {
 	wake_up_process((struct task_struct *)__data);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 60e4d7875672..6b245f64c8dd 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
+	if (syscall_nr < 0)
+		return;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
 		return;
 
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int size;
 
 	syscall_nr = syscall_get_nr(current, regs);
+	if (syscall_nr < 0)
+		return;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
 		return;
 