aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/jump_label.c
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2011-11-27 10:59:09 -0500
committerIngo Molnar <mingo@elte.hu>2011-12-06 02:34:02 -0500
commitb202952075f62603bea9bfb6ebc6b0420db11949 (patch)
tree9c8e0538b455e68b5c371caba5b1585ed0ef9d8a /kernel/jump_label.c
parentb79387ef185af2323594920923cecba5753c3817 (diff)
perf, core: Rate limit perf_sched_events jump_label patching
jump_label patching is a very expensive operation that involves pausing all cpus. The patching of the perf_sched_events jump_label is easily controllable from userspace by an unprivileged user. When the user runs a loop like this: "while true; do perf stat -e cycles true; done" ... the performance of my test application that just increments a counter for one second drops by 4%. This is on a 16 cpu box with my test application using only one of them. An impact on a real server doing real work will be worse. Performance of KVM PMU drops nearly 50% due to jump_label for "perf record" since the KVM PMU implementation creates and destroys perf events frequently. This patch introduces a way to rate limit jump_label patching and uses it to fix the above problem. I believe that as jump_label use spreads the problem will become more common and thus solving it in generic code is appropriate. Also, fixing it in the perf code would result in moving jump_label accounting logic to perf code with all the ifdefs in case of a JUMP_LABEL=n kernel. With this patch all details are nicely hidden inside jump_label code. Signed-off-by: Gleb Natapov <gleb@redhat.com> Acked-by: Jason Baron <jbaron@redhat.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/r/20111127155909.GO2557@redhat.com Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/jump_label.c')
-rw-r--r--kernel/jump_label.c35
1 files changed, 33 insertions, 2 deletions
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 66ff7109f697..51a175ab0a03 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -72,15 +72,46 @@ void jump_label_inc(struct jump_label_key *key)
72 jump_label_unlock(); 72 jump_label_unlock();
73} 73}
74 74
/*
 * NOTE(review): side-by-side diff rendering — left column is the old code,
 * right column the new.  Comments describe the new version.
 *
 * __jump_label_dec(): drop one reference on @key and, when the count
 * reaches zero (atomic_dec_and_mutex_lock succeeds), disable the key.
 * If @rate_limit is non-zero the expensive patching is NOT done here:
 * the reference is re-taken (atomic_inc) and @work is scheduled to run
 * after @rate_limit jiffies, batching rapid inc/dec cycles so the
 * stop-all-cpus patching happens at most once per interval.  With
 * rate_limit == 0 the jump sites are patched to DISABLE immediately.
 */
75void jump_label_dec(struct jump_label_key *key) 75static void __jump_label_dec(struct jump_label_key *key,
76 unsigned long rate_limit, struct delayed_work *work)
76{ 77{
77 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) 78 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
78 return; 79 return;
79 80
80 jump_label_update(key, JUMP_LABEL_DISABLE); 81 if (rate_limit) {
/* Defer: keep the key enabled and let the delayed work disable it. */
82 atomic_inc(&key->enabled);
83 schedule_delayed_work(work, rate_limit);
84 } else
85 jump_label_update(key, JUMP_LABEL_DISABLE);
86
81 jump_label_unlock(); 87 jump_label_unlock();
82} 88}
83 89
/*
 * Delayed-work callback: performs the disable that was deferred by the
 * rate-limited path.  Recovers the owning jump_label_key_deferred from
 * the embedded delayed_work and calls __jump_label_dec() with
 * rate_limit == 0 so the patching happens now.
 */
90static void jump_label_update_timeout(struct work_struct *work)
91{
92 struct jump_label_key_deferred *key =
93 container_of(work, struct jump_label_key_deferred, work.work);
94 __jump_label_dec(&key->key, 0, NULL);
95}
96
/*
 * Unrated decrement: keeps the original jump_label_dec() semantics —
 * on the final reference the jump sites are patched immediately.
 */
97void jump_label_dec(struct jump_label_key *key)
98{
99 __jump_label_dec(key, 0, NULL);
100}
101
/*
 * Rate-limited decrement: when the count hits zero, defer the actual
 * patching by key->timeout jiffies via the key's delayed work instead
 * of stopping all cpus right away.
 */
102void jump_label_dec_deferred(struct jump_label_key_deferred *key)
103{
104 __jump_label_dec(&key->key, key->timeout, &key->work);
105}
106
107
/*
 * jump_label_rate_limit(): set the deferral interval @rl (in jiffies)
 * and initialize the delayed work that jump_label_dec_deferred() will
 * schedule.  Must be called before using the deferred key — TODO(review):
 * confirm callers invoke this during setup.
 */
108void jump_label_rate_limit(struct jump_label_key_deferred *key,
109 unsigned long rl)
110{
111 key->timeout = rl;
112 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
113}
114
84static int addr_conflict(struct jump_entry *entry, void *start, void *end) 115static int addr_conflict(struct jump_entry *entry, void *start, void *end)
85{ 116{
86 if (entry->code <= (unsigned long)end && 117 if (entry->code <= (unsigned long)end &&