author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2012-02-27 04:47:00 -0500
committer Ingo Molnar <mingo@elte.hu>                2012-03-12 15:43:16 -0400
commit    3ccf3e8306156a28213adc720aba807e9a901ad5 (patch)
tree      5b9db344b702299ea7eb53fbff3d0d74707d40ec
parent    554cecaf733623b327eef9652b65965eb1081b81 (diff)
printk/sched: Introduce special printk_sched() for those awkward moments
There are a few awkward printk()s inside the scheduler guts that people prefer to keep, but they are rather deadlock prone. Fudge around that by storing the text in a per-cpu buffer and polling it from the existing printk_tick() handler.

This will drop output when it comes in more often than once a tick; however, only the affinity message could possibly go that fast, and for that a single line should suffice to notify the admin that he's done something silly.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-wua3lmkt3dg8nfts66o6brne@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
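The mechanism is a plain deferred-logging pattern: the caller formats its message into a per-cpu buffer and sets a pending bit with interrupts disabled, and the next printk_tick() flushes the buffer from a safe context. Below is a minimal userspace sketch of that pattern, not the kernel code: the names (deferred_printf, deferred_flush, deferred_buf) are invented for illustration, and the per-cpu buffer is collapsed to a single static buffer.

#include <stdarg.h>
#include <stdio.h>

#define DEFERRED_BUF_SIZE 512

static char deferred_buf[DEFERRED_BUF_SIZE];	/* stands in for the per-cpu printk_sched_buf */
static int deferred_pending;			/* stands in for PRINTK_PENDING_SCHED */

/* Called from a context where printing directly would be unsafe. */
static int deferred_printf(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	/* A later call before the flush overwrites the earlier message. */
	r = vsnprintf(deferred_buf, sizeof(deferred_buf), fmt, args);
	va_end(args);

	deferred_pending = 1;
	return r;
}

/* Called periodically from a safe context, like printk_tick(). */
static void deferred_flush(void)
{
	if (deferred_pending) {
		deferred_pending = 0;
		printf("[deferred] %s", deferred_buf);
	}
}

int main(void)
{
	deferred_printf("message %d\n", 1);
	deferred_printf("message %d\n", 2);	/* overwrites message 1 */
	deferred_flush();			/* prints only message 2 */
	return 0;
}

As in the patch, anything logged more than once between flushes is silently dropped; for the two call sites converted below, one surviving line per tick is enough.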
-rw-r--r--  include/linux/printk.h  | 10
-rw-r--r--  kernel/printk.c         | 40
-rw-r--r--  kernel/sched/core.c     |  2
-rw-r--r--  kernel/sched/rt.c       |  8
4 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f0e22f75143f..1f77a4174ee0 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,6 +101,11 @@ asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
 /*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
+/*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites.  Instead use
  * printk_ratelimited() or plain old __ratelimit().
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
 {
 	return 0;
 }
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+	return 0;
+}
 static inline int printk_ratelimit(void)
 {
 	return 0;
diff --git a/kernel/printk.c b/kernel/printk.c
index 13c0a1143f49..7ca7ba591e21 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1208,13 +1208,47 @@ int is_console_locked(void)
 	return console_locked;
 }
 
+/*
+ * Delayed printk facility, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE		512
+
+#define PRINTK_PENDING_WAKEUP	0x01
+#define PRINTK_PENDING_SCHED	0x02
+
 static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+int printk_sched(const char *fmt, ...)
+{
+	unsigned long flags;
+	va_list args;
+	char *buf;
+	int r;
+
+	local_irq_save(flags);
+	buf = __get_cpu_var(printk_sched_buf);
+
+	va_start(args, fmt);
+	r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+	va_end(args);
+
+	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+	local_irq_restore(flags);
+
+	return r;
+}
 
 void printk_tick(void)
 {
 	if (__this_cpu_read(printk_pending)) {
-		__this_cpu_write(printk_pending, 0);
-		wake_up_interruptible(&log_wait);
+		int pending = __this_cpu_xchg(printk_pending, 0);
+		if (pending & PRINTK_PENDING_SCHED) {
+			char *buf = __get_cpu_var(printk_sched_buf);
+			printk(KERN_WARNING "[sched_delayed] %s", buf);
+		}
+		if (pending & PRINTK_PENDING_WAKEUP)
+			wake_up_interruptible(&log_wait);
 	}
 }
 
@@ -1228,7 +1262,7 @@ int printk_needs_cpu(int cpu)
 void wake_up_klogd(void)
 {
 	if (waitqueue_active(&log_wait))
-		this_cpu_write(printk_pending, 1);
+		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
 }
 
 /**
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b1ccce819ce2..8781cec7c3e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1284,7 +1284,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	 * leave kernel.
 	 */
 	if (p->mm && printk_ratelimit()) {
-		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+		printk_sched("process %d (%s) no longer affine to cpu%d\n",
 				task_pid_nr(p), p->comm, cpu);
 	}
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7f7e7cdcb472..b60dad720173 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -864,8 +864,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		 * but accrue some time due to boosting.
 		 */
 		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
 			rt_rq->rt_throttled = 1;
-			printk_once(KERN_WARNING "sched: RT throttling activated\n");
+
+			if (!once) {
+				once = true;
+				printk_sched("sched: RT throttling activated\n");
+			}
 		} else {
 			/*
 			 * In case we did anyway, make it go away,