author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-15 11:22:16 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-15 11:22:16 -0400
commit     b5869ce7f68b233ceb81465a7644be0d9a5f3dbb (patch)
tree       e3611e7f038a4a4fa813532ae57a9a626fa1434d /fs
parent     df3d80f5a5c74168be42788364d13cf6c83c7b9c (diff)
parent     9c63d9c021f375a2708ad79043d6f4dd1291a085 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched: (140 commits)
  sched: sync wakeups preempt too
  sched: affine sync wakeups
  sched: guest CPU accounting: maintain guest state in KVM
  sched: guest CPU accounting: maintain stats in account_system_time()
  sched: guest CPU accounting: add guest-CPU /proc/<pid>/stat fields
  sched: guest CPU accounting: add guest-CPU /proc/stat field
  sched: domain sysctl fixes: add terminator comment
  sched: domain sysctl fixes: do not crash on allocation failure
  sched: domain sysctl fixes: unregister the sysctl table before domains
  sched: domain sysctl fixes: use for_each_online_cpu()
  sched: domain sysctl fixes: use kcalloc()
  Make scheduler debug file operations const
  sched: enable wake-idle on CONFIG_SCHED_MC=y
  sched: reintroduce topology.h tunings
  sched: allow the immediate migration of cache-cold tasks
  sched: debug, improve migration statistics
  sched: debug: increase width of debug line
  sched: activate task_hot() only on fair-scheduled tasks
  sched: reintroduce cache-hot affinity
  sched: speed up context-switches a bit
  ...
Diffstat (limited to 'fs')
-rw-r--r--   fs/pipe.c             9
-rw-r--r--   fs/proc/array.c      17
-rw-r--r--   fs/proc/base.c        2
-rw-r--r--   fs/proc/proc_misc.c  15
4 files changed, 31 insertions, 12 deletions
diff --git a/fs/pipe.c b/fs/pipe.c
index 6b3d91a691bf..e66ec48e95d8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -45,8 +45,7 @@ void pipe_wait(struct pipe_inode_info *pipe)
          * Pipes are system-local resources, so sleeping on them
          * is considered a noninteractive wait:
          */
-        prepare_to_wait(&pipe->wait, &wait,
-                        TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
+        prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
         if (pipe->inode)
                 mutex_unlock(&pipe->inode->i_mutex);
         schedule();
@@ -383,7 +382,7 @@ redo:
 
         /* Signal writers asynchronously that there is more room. */
         if (do_wakeup) {
-                wake_up_interruptible(&pipe->wait);
+                wake_up_interruptible_sync(&pipe->wait);
                 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
         }
         if (ret > 0)
@@ -556,7 +555,7 @@ redo2:
 out:
         mutex_unlock(&inode->i_mutex);
         if (do_wakeup) {
-                wake_up_interruptible(&pipe->wait);
+                wake_up_interruptible_sync(&pipe->wait);
                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
         }
         if (ret > 0)
@@ -650,7 +649,7 @@ pipe_release(struct inode *inode, int decr, int decw)
         if (!pipe->readers && !pipe->writers) {
                 free_pipe_info(inode);
         } else {
-                wake_up_interruptible_sync(&pipe->wait);
+                wake_up_interruptible_sync(&pipe->wait);
                 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
         }
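
The fs/pipe.c hunks above switch the pipe wakeup paths from wake_up_interruptible() to wake_up_interruptible_sync() and drop the removed TASK_NONINTERACTIVE flag, in line with the sync-wakeup commits in this merge ("sched: sync wakeups preempt too", "sched: affine sync wakeups"). A "sync" wakeup hints to the scheduler that the waker is about to block itself, so the woken task can be kept on the same CPU rather than triggering an immediate preemption or migration. The classic workload this affects is a pipe ping-pong between two processes. The sketch below is a hypothetical userspace ping-pong (not part of this patch, names and iteration count chosen for illustration) that can be run under time(1) or a context-switch counter to observe the wakeup behaviour.

/* pipe_pingpong.c - hypothetical userspace ping-pong over two pipes;
 * not part of this patch, just the kind of workload whose wakeup path
 * the wake_up_interruptible_sync() change is aimed at.
 * Build: cc -O2 -o pipe_pingpong pipe_pingpong.c
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

#define ITERS 100000

int main(void)
{
        int ab[2], ba[2];               /* parent->child and child->parent pipes */
        char byte = 'x';

        if (pipe(ab) < 0 || pipe(ba) < 0) {
                perror("pipe");
                return 1;
        }

        pid_t pid = fork();
        if (pid < 0) {
                perror("fork");
                return 1;
        }

        if (pid == 0) {                 /* child: echo every byte back */
                for (int i = 0; i < ITERS; i++) {
                        if (read(ab[0], &byte, 1) != 1)
                                _exit(1);
                        if (write(ba[1], &byte, 1) != 1)
                                _exit(1);
                }
                _exit(0);
        }

        for (int i = 0; i < ITERS; i++) {       /* parent: send, wait for echo */
                if (write(ab[1], &byte, 1) != 1 ||
                    read(ba[0], &byte, 1) != 1) {
                        perror("ping-pong");
                        return 1;
                }
        }
        wait(NULL);
        printf("%d round trips completed\n", ITERS);
        return 0;
}

Each round trip forces two blocking reads and two wakeups, so the cost of the wakeup path dominates; that is the case the sync-wakeup hint targets.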
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ee4814dd98f9..27b59f5f3bd1 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -370,6 +370,11 @@ static cputime_t task_stime(struct task_struct *p)
 }
 #endif
 
+static cputime_t task_gtime(struct task_struct *p)
+{
+        return p->gtime;
+}
+
 static int do_task_stat(struct task_struct *task, char *buffer, int whole)
 {
         unsigned long vsize, eip, esp, wchan = ~0UL;
@@ -385,6 +390,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
         unsigned long cmin_flt = 0, cmaj_flt = 0;
         unsigned long min_flt = 0, maj_flt = 0;
         cputime_t cutime, cstime, utime, stime;
+        cputime_t cgtime, gtime;
         unsigned long rsslim = 0;
         char tcomm[sizeof(task->comm)];
         unsigned long flags;
@@ -403,6 +409,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
         sigemptyset(&sigign);
         sigemptyset(&sigcatch);
         cutime = cstime = utime = stime = cputime_zero;
+        cgtime = gtime = cputime_zero;
 
         rcu_read_lock();
         if (lock_task_sighand(task, &flags)) {
@@ -420,6 +427,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
                 cmaj_flt = sig->cmaj_flt;
                 cutime = sig->cutime;
                 cstime = sig->cstime;
+                cgtime = sig->cgtime;
                 rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;
 
                 /* add up live thread stats at the group level */
@@ -430,6 +438,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
                                 maj_flt += t->maj_flt;
                                 utime = cputime_add(utime, task_utime(t));
                                 stime = cputime_add(stime, task_stime(t));
+                                gtime = cputime_add(gtime, task_gtime(t));
                                 t = next_thread(t);
                         } while (t != task);
 
@@ -437,6 +446,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
                         maj_flt += sig->maj_flt;
                         utime = cputime_add(utime, sig->utime);
                         stime = cputime_add(stime, sig->stime);
+                        gtime += cputime_add(gtime, sig->gtime);
                 }
 
                 sid = signal_session(sig);
@@ -454,6 +464,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
                 maj_flt = task->maj_flt;
                 utime = task_utime(task);
                 stime = task_stime(task);
+                gtime = task_gtime(task);
         }
 
         /* scale priority and nice values from timeslices to -20..20 */
@@ -471,7 +482,7 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
 
         res = sprintf(buffer, "%d (%s) %c %d %d %d %d %d %u %lu \
 %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
-%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu\n",
+%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
                 task->pid,
                 tcomm,
                 state,
@@ -516,7 +527,9 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole)
                 task_cpu(task),
                 task->rt_priority,
                 task->policy,
-                (unsigned long long)delayacct_blkio_ticks(task));
+                (unsigned long long)delayacct_blkio_ticks(task),
+                cputime_to_clock_t(gtime),
+                cputime_to_clock_t(cgtime));
         if (mm)
                 mmput(mm);
         return res;
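
The fs/proc/array.c change ("sched: guest CPU accounting: add guest-CPU /proc/<pid>/stat fields") appends two values, cputime_to_clock_t(gtime) and cputime_to_clock_t(cgtime), to the end of each task's /proc/<pid>/stat line, i.e. the guest time of the task and the guest time of its reaped children in clock ticks. As a rough, hypothetical illustration (not part of the patch), the reader below takes the last two whitespace-separated fields of /proc/<pid>/stat as those values; that positional assumption only holds for kernels of this vintage, since later kernels append further fields.

/* guest_stat.c - hypothetical reader for the two guest-time fields this
 * patch appends to /proc/<pid>/stat (not part of the patch itself).
 * Assumes a kernel where they are the last two fields on the line.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char path[64], buf[4096];
        const char *pid = (argc > 1) ? argv[1] : "self";

        snprintf(path, sizeof(path), "/proc/%s/stat", pid);

        FILE *f = fopen(path, "r");
        if (!f || !fgets(buf, sizeof(buf), f)) {
                perror(path);
                return 1;
        }
        fclose(f);

        /* Walk to the last two whitespace-separated tokens. */
        char *last = NULL, *prev = NULL;
        for (char *tok = strtok(buf, " \n"); tok; tok = strtok(NULL, " \n")) {
                prev = last;
                last = tok;
        }
        if (!prev)
                return 1;

        printf("guest_time=%s cguest_time=%s (in ticks, %ld per second)\n",
               prev, last, sysconf(_SC_CLK_TCK));
        return 0;
}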
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 19489b0d5554..e5d0953d4db1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -304,7 +304,7 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
         return sprintf(buffer, "%llu %llu %lu\n",
                         task->sched_info.cpu_time,
                         task->sched_info.run_delay,
-                        task->sched_info.pcnt);
+                        task->sched_info.pcount);
 }
 #endif
 
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index bee251cb87c8..b872a01ad3af 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -443,6 +443,7 @@ static int show_stat(struct seq_file *p, void *v)
         int i;
         unsigned long jif;
         cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
+        cputime64_t guest;
         u64 sum = 0;
         struct timespec boottime;
         unsigned int *per_irq_sum;
@@ -453,6 +454,7 @@ static int show_stat(struct seq_file *p, void *v)
 
         user = nice = system = idle = iowait =
                 irq = softirq = steal = cputime64_zero;
+        guest = cputime64_zero;
         getboottime(&boottime);
         jif = boottime.tv_sec;
 
@@ -467,6 +469,7 @@ static int show_stat(struct seq_file *p, void *v)
                 irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
                 softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
                 steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
+                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
                 for (j = 0; j < NR_IRQS; j++) {
                         unsigned int temp = kstat_cpu(i).irqs[j];
                         sum += temp;
@@ -474,7 +477,7 @@ static int show_stat(struct seq_file *p, void *v)
                 }
         }
 
-        seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+        seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
                 (unsigned long long)cputime64_to_clock_t(user),
                 (unsigned long long)cputime64_to_clock_t(nice),
                 (unsigned long long)cputime64_to_clock_t(system),
@@ -482,7 +485,8 @@ static int show_stat(struct seq_file *p, void *v)
                 (unsigned long long)cputime64_to_clock_t(iowait),
                 (unsigned long long)cputime64_to_clock_t(irq),
                 (unsigned long long)cputime64_to_clock_t(softirq),
-                (unsigned long long)cputime64_to_clock_t(steal));
+                (unsigned long long)cputime64_to_clock_t(steal),
+                (unsigned long long)cputime64_to_clock_t(guest));
         for_each_online_cpu(i) {
 
                 /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
@@ -494,7 +498,9 @@ static int show_stat(struct seq_file *p, void *v)
                 irq = kstat_cpu(i).cpustat.irq;
                 softirq = kstat_cpu(i).cpustat.softirq;
                 steal = kstat_cpu(i).cpustat.steal;
-                seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
+                guest = kstat_cpu(i).cpustat.guest;
+                seq_printf(p,
+                        "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
                         i,
                         (unsigned long long)cputime64_to_clock_t(user),
                         (unsigned long long)cputime64_to_clock_t(nice),
@@ -503,7 +509,8 @@ static int show_stat(struct seq_file *p, void *v)
                         (unsigned long long)cputime64_to_clock_t(iowait),
                         (unsigned long long)cputime64_to_clock_t(irq),
                         (unsigned long long)cputime64_to_clock_t(softirq),
-                        (unsigned long long)cputime64_to_clock_t(steal));
+                        (unsigned long long)cputime64_to_clock_t(steal),
+                        (unsigned long long)cputime64_to_clock_t(guest));
         }
         seq_printf(p, "intr %llu", (unsigned long long)sum);
 
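
With the fs/proc/proc_misc.c change ("sched: guest CPU accounting: add guest-CPU /proc/stat field"), the aggregate "cpu" line and each per-CPU "cpuN" line of /proc/stat gain a ninth column: time spent running guest code, accumulated from kstat_cpu(i).cpustat.guest and reported in ticks like the other columns. A hypothetical reader for the new column (not part of the patch, and assuming exactly the nine-column layout introduced here; later kernels add more columns) might look like the sketch below.

/* stat_guest.c - hypothetical parser for the ninth (guest) column that
 * this patch adds to the "cpu" lines of /proc/stat; not part of the patch.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/stat", "r");
        char line[512];

        if (!f) {
                perror("/proc/stat");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                char cpu[16];
                unsigned long long user, nice, system, idle,
                                   iowait, irq, softirq, steal, guest;

                /* Only the leading "cpu"/"cpuN" lines carry the counters. */
                if (strncmp(line, "cpu", 3) != 0)
                        break;
                if (sscanf(line,
                           "%15s %llu %llu %llu %llu %llu %llu %llu %llu %llu",
                           cpu, &user, &nice, &system, &idle, &iowait,
                           &irq, &softirq, &steal, &guest) == 10)
                        printf("%s: guest=%llu ticks\n", cpu, guest);
        }
        fclose(f);
        return 0;
}

On a kernel without this patch the sscanf() matches only nine fields and nothing is printed, which makes the sketch a simple probe for whether the guest column is present.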