Diffstat (limited to 'kernel')
 -rw-r--r--   kernel/printk.c      | 40
 -rw-r--r--   kernel/sched/core.c  |  2
 -rw-r--r--   kernel/sched/rt.c    |  8
 3 files changed, 45 insertions(+), 5 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index 13c0a1143f49..7ca7ba591e21 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1208,13 +1208,47 @@ int is_console_locked(void)
 	return console_locked;
 }
 
+/*
+ * Delayed printk facility, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE		512
+
+#define PRINTK_PENDING_WAKEUP	0x01
+#define PRINTK_PENDING_SCHED	0x02
+
 static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+int printk_sched(const char *fmt, ...)
+{
+	unsigned long flags;
+	va_list args;
+	char *buf;
+	int r;
+
+	local_irq_save(flags);
+	buf = __get_cpu_var(printk_sched_buf);
+
+	va_start(args, fmt);
+	r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+	va_end(args);
+
+	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+	local_irq_restore(flags);
+
+	return r;
+}
 
 void printk_tick(void)
 {
 	if (__this_cpu_read(printk_pending)) {
-		__this_cpu_write(printk_pending, 0);
-		wake_up_interruptible(&log_wait);
+		int pending = __this_cpu_xchg(printk_pending, 0);
+		if (pending & PRINTK_PENDING_SCHED) {
+			char *buf = __get_cpu_var(printk_sched_buf);
+			printk(KERN_WARNING "[sched_delayed] %s", buf);
+		}
+		if (pending & PRINTK_PENDING_WAKEUP)
+			wake_up_interruptible(&log_wait);
 	}
 }
 
@@ -1228,7 +1262,7 @@ int printk_needs_cpu(int cpu)
 void wake_up_klogd(void)
 {
 	if (waitqueue_active(&log_wait))
-		this_cpu_write(printk_pending, 1);
+		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
 }
 
 /**
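
The printk.c hunks above add a defer-then-flush path: printk_sched() only formats into a per-CPU buffer and sets a pending bit, and printk_tick() later emits the buffered message (and wakes klogd) from timer-tick context, where going through the console path is safe. As a rough illustration only, here is a minimal user-space sketch of the same pattern, with hypothetical names (deferred_printf, flush_tick) and none of the kernel's per-CPU or IRQ handling:

#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE	512
#define PENDING_SCHED	0x02

/* One buffer and one pending word; the kernel patch keeps these per CPU
 * and guards them with local_irq_save()/restore(). Names here are
 * illustrative, not kernel API. */
static char deferred_buf[BUF_SIZE];
static int pending;

/* Format now, print later -- the shape of printk_sched(). */
static int deferred_printf(const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vsnprintf(deferred_buf, sizeof(deferred_buf), fmt, args);
	va_end(args);

	pending |= PENDING_SCHED;
	return r;
}

/* Flush from a context where printing is safe -- the shape of printk_tick(). */
static void flush_tick(void)
{
	int p = pending;

	pending = 0;
	if (p & PENDING_SCHED)
		printf("[sched_delayed] %s", deferred_buf);
}

int main(void)
{
	deferred_printf("process %d (%s) no longer affine to cpu%d\n", 42, "demo", 1);
	flush_tick();	/* output appears here, not at the call site */
	return 0;
}
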
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b1ccce819ce2..8781cec7c3e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1284,7 +1284,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	 * leave kernel.
 	 */
 	if (p->mm && printk_ratelimit()) {
-		printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+		printk_sched("process %d (%s) no longer affine to cpu%d\n",
 				task_pid_nr(p), p->comm, cpu);
 	}
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7f7e7cdcb472..b60dad720173 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -864,8 +864,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 	 * but accrue some time due to boosting.
 	 */
 	if (likely(rt_b->rt_runtime)) {
+		static bool once = false;
+
 		rt_rq->rt_throttled = 1;
-		printk_once(KERN_WARNING "sched: RT throttling activated\n");
+
+		if (!once) {
+			once = true;
+			printk_sched("sched: RT throttling activated\n");
+		}
 	} else {
 		/*
 		 * In case we did anyway, make it go away,
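
The rt.c hunk replaces printk_once() with an open-coded static flag: printk_once() expands to a direct printk() call, and there is no deferred equivalent at this point, so the patch keeps its own one-shot boolean and calls printk_sched() instead. A rough stand-alone sketch of that one-shot pattern, with illustrative names and printf() standing in for printk_sched():

#include <stdbool.h>
#include <stdio.h>

/* One-shot guard open-coded the way the rt.c hunk does it; the name and the
 * printf() call are placeholders, not kernel API. */
static void warn_rt_throttling_once(void)
{
	static bool once = false;

	if (!once) {
		once = true;
		printf("sched: RT throttling activated\n");	/* stands in for printk_sched() */
	}
}

int main(void)
{
	warn_rt_throttling_once();
	warn_rt_throttling_once();	/* prints nothing the second time */
	return 0;
}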