author    Ingo Molnar <mingo@elte.hu>    2010-07-21 15:45:02 -0400
committer Ingo Molnar <mingo@elte.hu>    2010-07-21 15:45:08 -0400
commit    dca45ad8af54963c005393a484ad117b8ba6150f (patch)
tree      7c9a6966283a6bb12b54e5680a67d203be292930 /kernel
parent    68c38fc3cb4e5a60f502ee9c45f3dfe70e5165ad (diff)
parent    cd5b8f8755a89a57fc8c408d284b8b613f090345 (diff)
Merge branch 'linus' into sched/core
Merge reason: Move from the -rc3 to the almost-rc6 base.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/early_res.c                                               6
-rw-r--r--  kernel/futex.c                                                  17
-rw-r--r--  kernel/irq/manage.c                                              3
-rw-r--r--  kernel/kexec.c                                                   7
-rw-r--r--  kernel/module.c                                                 23
-rw-r--r--  kernel/power/Kconfig                                             9
-rw-r--r--  kernel/power/Makefile                                            2
-rw-r--r--  kernel/power/nvs.c (renamed from kernel/power/hibernate_nvs.c)  24
-rw-r--r--  kernel/power/suspend.c                                           6
-rw-r--r--  kernel/sched.c                                                  22
-rw-r--r--  kernel/sched_fair.c                                              2
-rw-r--r--  kernel/time/tick-sched.c                                        21
-rw-r--r--  kernel/trace/trace_event_perf.c                                  4
13 files changed, 87 insertions(+), 59 deletions(-)
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 31aa9332ef3f..7bfae887f211 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -7,6 +7,8 @@
 #include <linux/bootmem.h>
 #include <linux/mm.h>
 #include <linux/early_res.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
 
 /*
  * Early reserved memory areas.
@@ -319,6 +321,8 @@ void __init free_early(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	kmemleak_free_part(__va(start), end - start);
+
 	i = find_overlapped_early(start, end);
 	r = &early_res[i];
 	if (i >= max_early_res || r->end != end || r->start != start)
@@ -333,6 +337,8 @@ void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	kmemleak_free_part(__va(start), end - start);
+
 	if (start == end)
 		return;
 
diff --git a/kernel/futex.c b/kernel/futex.c
index e7a35f1039e7..6a3a5fa1526d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -429,20 +429,11 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
 	struct task_struct *p;
-	const struct cred *cred = current_cred(), *pcred;
 
 	rcu_read_lock();
 	p = find_task_by_vpid(pid);
-	if (!p) {
-		p = ERR_PTR(-ESRCH);
-	} else {
-		pcred = __task_cred(p);
-		if (cred->euid != pcred->euid &&
-		    cred->euid != pcred->uid)
-			p = ERR_PTR(-ESRCH);
-		else
-			get_task_struct(p);
-	}
+	if (p)
+		get_task_struct(p);
 
 	rcu_read_unlock();
 
@@ -564,8 +555,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 	if (!pid)
 		return -ESRCH;
 	p = futex_find_get_task(pid);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	if (!p)
+		return -ESRCH;
 
 	/*
 	 * We need to look at the task state flags to figure out,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3164ba7ce151..e1497481fe8a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -456,6 +456,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;
+
+		if (chip != desc->chip)
+			irq_chip_set_defaults(desc->chip);
 	}
 
 	return ret;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 474a84715eac..131b1703936f 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1089,9 +1089,10 @@ void crash_kexec(struct pt_regs *regs)
 
 size_t crash_get_memory_size(void)
 {
-	size_t size;
+	size_t size = 0;
 	mutex_lock(&kexec_mutex);
-	size = crashk_res.end - crashk_res.start + 1;
+	if (crashk_res.end != crashk_res.start)
+		size = crashk_res.end - crashk_res.start + 1;
 	mutex_unlock(&kexec_mutex);
 	return size;
 }
@@ -1134,7 +1135,7 @@ int crash_shrink_memory(unsigned long new_size)
 
 	free_reserved_phys_range(end, crashk_res.end);
 
-	if (start == end)
+	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
 	crashk_res.end = end - 1;
 
diff --git a/kernel/module.c b/kernel/module.c
index 8c6b42840dd1..5d2d28197c82 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2062,6 +2062,12 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
 #endif
 }
 
+static void dynamic_debug_remove(struct _ddebug *debug)
+{
+	if (debug)
+		ddebug_remove_module(debug->modname);
+}
+
 static void *module_alloc_update_bounds(unsigned long size)
 {
 	void *ret = module_alloc(size);
@@ -2124,6 +2130,8 @@ static noinline struct module *load_module(void __user *umod,
 	void *ptr = NULL; /* Stops spurious gcc warning */
 	unsigned long symoffs, stroffs, *strmap;
 	void __percpu *percpu;
+	struct _ddebug *debug = NULL;
+	unsigned int num_debug = 0;
 
 	mm_segment_t old_fs;
 
@@ -2476,15 +2484,9 @@ static noinline struct module *load_module(void __user *umod,
 	kfree(strmap);
 	strmap = NULL;
 
-	if (!mod->taints) {
-		struct _ddebug *debug;
-		unsigned int num_debug;
-
+	if (!mod->taints)
 		debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
 				     sizeof(*debug), &num_debug);
-		if (debug)
-			dynamic_debug_setup(debug, num_debug);
-	}
 
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
@@ -2526,10 +2528,13 @@ static noinline struct module *load_module(void __user *umod,
 		goto unlock;
 	}
 
+	if (debug)
+		dynamic_debug_setup(debug, num_debug);
+
 	/* Find duplicate symbols */
 	err = verify_export_symbols(mod);
 	if (err < 0)
-		goto unlock;
+		goto ddebug;
 
 	list_add_rcu(&mod->list, &modules);
 	mutex_unlock(&module_mutex);
@@ -2557,6 +2562,8 @@ static noinline struct module *load_module(void __user *umod,
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+ ddebug:
+	dynamic_debug_remove(debug);
  unlock:
 	mutex_unlock(&module_mutex);
 	synchronize_sched();
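Note on the module.c hunks above: dynamic_debug_setup() is delayed until the module is about to be published, and a dedicated "ddebug:" label is added so that a later failure in verify_export_symbols() also unwinds the dynamic-debug registration. A minimal user-space sketch of that goto-based unwind pattern follows; every name in it is illustrative, not a kernel API.

/* Sketch only: same unwind shape as the hunk, with made-up helpers. */
#include <stdio.h>

static void setup_debug(void)  { puts("debug registered"); }   /* ~ dynamic_debug_setup()   */
static void remove_debug(void) { puts("debug removed");    }   /* ~ dynamic_debug_remove()  */
static int verify_symbols(int fail) { return fail ? -1 : 0; }  /* ~ verify_export_symbols() */

static int load(int fail_verify)
{
	int err;

	setup_debug();

	err = verify_symbols(fail_verify);
	if (err)
		goto ddebug;		/* tear down only what was set up */

	puts("module added");
	return 0;

ddebug:
	remove_debug();
	return err;
}

int main(void)
{
	load(0);	/* prints: debug registered / module added  */
	load(1);	/* prints: debug registered / debug removed */
	return 0;
}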
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 5c36ea9d55d2..ca6066a6952e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -99,9 +99,13 @@ config PM_SLEEP_ADVANCED_DEBUG
 	depends on PM_ADVANCED_DEBUG
 	default n
 
+config SUSPEND_NVS
+	bool
+
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM && ARCH_SUSPEND_POSSIBLE
+	select SUSPEND_NVS if HAS_IOMEM
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -130,13 +134,10 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
-config HIBERNATION_NVS
-	bool
-
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
-	select HIBERNATION_NVS if HAS_IOMEM
+	select SUSPEND_NVS if HAS_IOMEM
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 524e058dcf06..f9063c6b185d 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -10,6 +10,6 @@ obj-$(CONFIG_SUSPEND) += suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o snapshot.o swap.o user.o \
 				   block_io.o
-obj-$(CONFIG_HIBERNATION_NVS)	+= hibernate_nvs.o
+obj-$(CONFIG_SUSPEND_NVS)	+= nvs.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/nvs.c
index fdcad9ed5a7b..1836db60bbb6 100644
--- a/kernel/power/hibernate_nvs.c
+++ b/kernel/power/nvs.c
@@ -15,7 +15,7 @@
 
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
- * hibernation and to restore the contents of this memory during the subsequent
+ * suspend and to restore the contents of this memory during the subsequent
  * resume. The code below implements a mechanism allowing us to do that.
  */
 
@@ -30,7 +30,7 @@ struct nvs_page {
 static LIST_HEAD(nvs_list);
 
 /**
- * hibernate_nvs_register - register platform NVS memory region to save
+ * suspend_nvs_register - register platform NVS memory region to save
  * @start - physical address of the region
  * @size - size of the region
  *
@@ -38,7 +38,7 @@ static LIST_HEAD(nvs_list);
  * things so that the data from page-aligned addresses in this region will
  * be copied into separate RAM pages.
  */
-int hibernate_nvs_register(unsigned long start, unsigned long size)
+int suspend_nvs_register(unsigned long start, unsigned long size)
 {
 	struct nvs_page *entry, *next;
 
@@ -68,9 +68,9 @@ int hibernate_nvs_register(unsigned long start, unsigned long size)
 }
 
 /**
- * hibernate_nvs_free - free data pages allocated for saving NVS regions
+ * suspend_nvs_free - free data pages allocated for saving NVS regions
  */
-void hibernate_nvs_free(void)
+void suspend_nvs_free(void)
 {
 	struct nvs_page *entry;
 
@@ -86,16 +86,16 @@ void hibernate_nvs_free(void)
 }
 
 /**
- * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ * suspend_nvs_alloc - allocate memory necessary for saving NVS regions
  */
-int hibernate_nvs_alloc(void)
+int suspend_nvs_alloc(void)
 {
 	struct nvs_page *entry;
 
 	list_for_each_entry(entry, &nvs_list, node) {
 		entry->data = (void *)__get_free_page(GFP_KERNEL);
 		if (!entry->data) {
-			hibernate_nvs_free();
+			suspend_nvs_free();
 			return -ENOMEM;
 		}
 	}
@@ -103,9 +103,9 @@ int hibernate_nvs_alloc(void)
 }
 
 /**
- * hibernate_nvs_save - save NVS memory regions
+ * suspend_nvs_save - save NVS memory regions
  */
-void hibernate_nvs_save(void)
+void suspend_nvs_save(void)
 {
 	struct nvs_page *entry;
 
@@ -119,12 +119,12 @@ void hibernate_nvs_save(void)
 }
 
 /**
- * hibernate_nvs_restore - restore NVS memory regions
+ * suspend_nvs_restore - restore NVS memory regions
  *
  * This function is going to be called with interrupts disabled, so it
  * cannot iounmap the virtual addresses used to access the NVS region.
  */
-void hibernate_nvs_restore(void)
+void suspend_nvs_restore(void)
 {
 	struct nvs_page *entry;
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 56e7dbb8b996..f37cb7dd4402 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -16,6 +16,12 @@
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
 
 #include "power.h"
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 7b443ee27be4..16f3f77f71be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1277,6 +1277,12 @@ static void sched_avg_update(struct rq *rq)
1277 s64 period = sched_avg_period(); 1277 s64 period = sched_avg_period();
1278 1278
1279 while ((s64)(rq->clock - rq->age_stamp) > period) { 1279 while ((s64)(rq->clock - rq->age_stamp) > period) {
1280 /*
1281 * Inline assembly required to prevent the compiler
1282 * optimising this loop into a divmod call.
1283 * See __iter_div_u64_rem() for another example of this.
1284 */
1285 asm("" : "+rm" (rq->age_stamp));
1280 rq->age_stamp += period; 1286 rq->age_stamp += period;
1281 rq->rt_avg /= 2; 1287 rq->rt_avg /= 2;
1282 } 1288 }
@@ -1680,9 +1686,6 @@ static void update_shares(struct sched_domain *sd)
1680 1686
1681static void update_h_load(long cpu) 1687static void update_h_load(long cpu)
1682{ 1688{
1683 if (root_task_group_empty())
1684 return;
1685
1686 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); 1689 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
1687} 1690}
1688 1691
@@ -2564,7 +2567,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
2564 if (p->sched_class->task_fork) 2567 if (p->sched_class->task_fork)
2565 p->sched_class->task_fork(p); 2568 p->sched_class->task_fork(p);
2566 2569
2570 /*
2571 * The child is not yet in the pid-hash so no cgroup attach races,
2572 * and the cgroup is pinned to this child due to cgroup_fork()
2573 * is ran before sched_fork().
2574 *
2575 * Silence PROVE_RCU.
2576 */
2577 rcu_read_lock();
2567 set_task_cpu(p, cpu); 2578 set_task_cpu(p, cpu);
2579 rcu_read_unlock();
2568 2580
2569#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 2581#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2570 if (likely(sched_info_on())) 2582 if (likely(sched_info_on()))
@@ -2934,9 +2946,9 @@ unsigned long nr_iowait(void)
2934 return sum; 2946 return sum;
2935} 2947}
2936 2948
2937unsigned long nr_iowait_cpu(void) 2949unsigned long nr_iowait_cpu(int cpu)
2938{ 2950{
2939 struct rq *this = this_rq(); 2951 struct rq *this = cpu_rq(cpu);
2940 return atomic_read(&this->nr_iowait); 2952 return atomic_read(&this->nr_iowait);
2941} 2953}
2942 2954
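Note on the sched_avg_update() hunk above: the new comment refers to a GCC trick in which an empty asm statement with a "+rm" constraint makes the loop variable opaque to the optimizer, so the repeated-addition loop is not folded into a 64-bit divmod library call (the same idea __iter_div_u64_rem() uses). A stand-alone sketch, assuming GCC-style extended asm and purely illustrative names, not the kernel code itself:

#include <stdint.h>
#include <stdio.h>

/* Advance 'stamp' in whole periods, like the rq->age_stamp loop above. */
static uint64_t advance(uint64_t stamp, uint64_t now, uint64_t period)
{
	while ((int64_t)(now - stamp) > (int64_t)period) {
		/* Empty asm: 'stamp' is both input and output, so the
		 * compiler cannot prove the loop computes a remainder
		 * and must keep one addition per iteration. */
		asm("" : "+rm" (stamp));
		stamp += period;
	}
	return stamp;
}

int main(void)
{
	/* 9 whole periods of 1024 fit below 10000, so this prints 9216. */
	printf("%llu\n", (unsigned long long)advance(0, 10000, 1024));
	return 0;
}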
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c9ac09760953..806d1b227a21 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5f171f04ab00..17525cac6cfe 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
154 * Updates the per cpu time idle statistics counters 154 * Updates the per cpu time idle statistics counters
155 */ 155 */
156static void 156static void
157update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time) 157update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
158{ 158{
159 ktime_t delta; 159 ktime_t delta;
160 160
161 if (ts->idle_active) { 161 if (ts->idle_active) {
162 delta = ktime_sub(now, ts->idle_entrytime); 162 delta = ktime_sub(now, ts->idle_entrytime);
163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); 163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
164 if (nr_iowait_cpu() > 0) 164 if (nr_iowait_cpu(cpu) > 0)
165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); 165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
166 ts->idle_entrytime = now; 166 ts->idle_entrytime = now;
167 } 167 }
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
175{ 175{
176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
177 177
178 update_ts_time_stats(ts, now, NULL); 178 update_ts_time_stats(cpu, ts, now, NULL);
179 ts->idle_active = 0; 179 ts->idle_active = 0;
180 180
181 sched_clock_idle_wakeup_event(0); 181 sched_clock_idle_wakeup_event(0);
182} 182}
183 183
184static ktime_t tick_nohz_start_idle(struct tick_sched *ts) 184static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
185{ 185{
186 ktime_t now; 186 ktime_t now;
187 187
188 now = ktime_get(); 188 now = ktime_get();
189 189
190 update_ts_time_stats(ts, now, NULL); 190 update_ts_time_stats(cpu, ts, now, NULL);
191 191
192 ts->idle_entrytime = now; 192 ts->idle_entrytime = now;
193 ts->idle_active = 1; 193 ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
216 if (!tick_nohz_enabled) 216 if (!tick_nohz_enabled)
217 return -1; 217 return -1;
218 218
219 update_ts_time_stats(ts, ktime_get(), last_update_time); 219 update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
220 220
221 return ktime_to_us(ts->idle_sleeptime); 221 return ktime_to_us(ts->idle_sleeptime);
222} 222}
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
242 if (!tick_nohz_enabled) 242 if (!tick_nohz_enabled)
243 return -1; 243 return -1;
244 244
245 update_ts_time_stats(ts, ktime_get(), last_update_time); 245 update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
246 246
247 return ktime_to_us(ts->iowait_sleeptime); 247 return ktime_to_us(ts->iowait_sleeptime);
248} 248}
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
284 */ 284 */
285 ts->inidle = 1; 285 ts->inidle = 1;
286 286
287 now = tick_nohz_start_idle(ts); 287 now = tick_nohz_start_idle(cpu, ts);
288 288
289 /* 289 /*
290 * If this cpu is offline and it is the one which updates 290 * If this cpu is offline and it is the one which updates
@@ -315,9 +315,6 @@ void tick_nohz_stop_sched_tick(int inidle)
315 goto end; 315 goto end;
316 } 316 }
317 317
318 if (nohz_ratelimit(cpu))
319 goto end;
320
321 ts->idle_calls++; 318 ts->idle_calls++;
322 /* Read jiffies and the time when jiffies were updated last */ 319 /* Read jiffies and the time when jiffies were updated last */
323 do { 320 do {
@@ -328,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle)
328 } while (read_seqretry(&xtime_lock, seq)); 325 } while (read_seqretry(&xtime_lock, seq));
329 326
330 if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || 327 if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
331 arch_needs_cpu(cpu)) { 328 arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) {
332 next_jiffies = last_jiffies + 1; 329 next_jiffies = last_jiffies + 1;
333 delta_jiffies = 1; 330 delta_jiffies = 1;
334 } else { 331 } else {
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index e6f65887842c..8a2b73f7c068 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -96,7 +96,9 @@ int perf_trace_init(struct perf_event *p_event)
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tp_event, &ftrace_events, list) {
 		if (tp_event->event.type == event_id &&
-		    tp_event->class && tp_event->class->perf_probe &&
+		    tp_event->class &&
+		    (tp_event->class->perf_probe ||
+		     tp_event->class->reg) &&
 		    try_module_get(tp_event->mod)) {
 			ret = perf_trace_event_init(tp_event, p_event);
 			break;