 Documentation/cgroups/cpusets.txt                 |  2 +-
 Documentation/rt-mutex-design.txt                 |  2 +-
 Documentation/scheduler/sched-domains.txt         |  4 ++--
 Documentation/spinlocks.txt                       |  2 +-
 Documentation/virtual/uml/UserModeLinux-HOWTO.txt |  4 ++--
 arch/avr32/kernel/process.c                       |  2 +-
 arch/cris/include/arch-v10/arch/bitops.h          |  2 +-
 arch/ia64/kernel/head.S                           |  2 +-
 arch/mips/kernel/mips-mt-fpaff.c                  |  4 ++--
 arch/mips/kernel/scall32-o32.S                    |  5 +++--
 arch/powerpc/include/asm/mmu_context.h            |  2 +-
 arch/tile/include/asm/processor.h                 |  2 +-
 arch/tile/kernel/stack.c                          |  2 +-
 arch/um/kernel/sysrq.c                            |  2 +-
 include/linux/completion.h                        |  2 +-
 include/linux/perf_event.h                        |  2 +-
 include/linux/spinlock_up.h                       |  2 +-
 include/uapi/asm-generic/unistd.h                 |  2 +-
 kernel/cpuset.c                                   |  4 ++--
 kernel/time.c                                     |  2 +-
 kernel/workqueue_internal.h                       |  2 +-
 21 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 12e01d432bfe..7740038d82bc 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -373,7 +373,7 @@ can become very uneven.
 1.7 What is sched_load_balance ?
 --------------------------------
 
-The kernel scheduler (kernel/sched.c) automatically load balances
+The kernel scheduler (kernel/sched/core.c) automatically load balances
 tasks. If one CPU is underutilized, kernel code running on that
 CPU will look for tasks on other more overloaded CPUs and move those
 tasks to itself, within the constraints of such placement mechanisms
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
index 33ed8007a845..a5bcd7f5c33f 100644
--- a/Documentation/rt-mutex-design.txt
+++ b/Documentation/rt-mutex-design.txt
@@ -384,7 +384,7 @@ priority back.
 __rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the
 result does not equal the task's current priority, then rt_mutex_setprio
 is called to adjust the priority of the task to the new priority.
-Note that rt_mutex_setprio is defined in kernel/sched.c to implement the
+Note that rt_mutex_setprio is defined in kernel/sched/core.c to implement the
 actual change in priority.
 
 It is interesting to note that __rt_mutex_adjust_prio can either increase
diff --git a/Documentation/scheduler/sched-domains.txt b/Documentation/scheduler/sched-domains.txt
index 443f0c76bab4..4af80b1c05aa 100644
--- a/Documentation/scheduler/sched-domains.txt
+++ b/Documentation/scheduler/sched-domains.txt
@@ -25,7 +25,7 @@ is treated as one entity. The load of a group is defined as the sum of the
 load of each of its member CPUs, and only when the load of a group becomes
 out of balance are tasks moved between groups.
 
-In kernel/sched.c, trigger_load_balance() is run periodically on each CPU
+In kernel/sched/core.c, trigger_load_balance() is run periodically on each CPU
 through scheduler_tick(). It raises a softirq after the next regularly scheduled
 rebalancing event for the current runqueue has arrived. The actual load
 balancing workhorse, run_rebalance_domains()->rebalance_domains(), is then run
@@ -62,7 +62,7 @@ struct sched_domain fields, SD_FLAG_*, SD_*_INIT to get an idea of
 the specifics and what to tune.
 
 Architectures may retain the regular override the default SD_*_INIT flags
-while using the generic domain builder in kernel/sched.c if they wish to
+while using the generic domain builder in kernel/sched/core.c if they wish to
 retain the traditional SMT->SMP->NUMA topology (or some subset of that). This
 can be done by #define'ing ARCH_HASH_SCHED_TUNE.
 
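[Editor's note: for orientation, the trigger_load_balance() path named in the first
hunk above is very small. A simplified sketch of the kernel/sched/core.c flow of this
era, condensed and not a verbatim copy of the kernel source:

	/* called from scheduler_tick() on every CPU */
	void trigger_load_balance(struct rq *rq, int cpu)
	{
		/* fire only once the runqueue's next rebalance time is due */
		if (time_after_eq(jiffies, rq->next_balance))
			raise_softirq(SCHED_SOFTIRQ);
	}

The handler registered for SCHED_SOFTIRQ is run_rebalance_domains(), which then walks
the CPU's sched domains via rebalance_domains(), as the documentation text says.]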
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 9dbe885ecd8d..97eaf5727178 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -137,7 +137,7 @@ don't block on each other (and thus there is no dead-lock wrt interrupts.
 But when you do the write-lock, you have to use the irq-safe version.
 
 For an example of being clever with rw-locks, see the "waitqueue_lock"
-handling in kernel/sched.c - nothing ever _changes_ a wait-queue from
+handling in kernel/sched/core.c - nothing ever _changes_ a wait-queue from
 within an interrupt, they only read the queue in order to know whom to
 wake up. So read-locks are safe (which is good: they are very common
 indeed), while write-locks need to protect themselves against interrupts.
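[Editor's note: spelled out, the pattern that paragraph describes looks like the
following minimal sketch; my_queue_lock and the queue operations are illustrative,
not the scheduler's actual lock:

	static DEFINE_RWLOCK(my_queue_lock);

	/* reader: safe even from an interrupt handler */
	read_lock(&my_queue_lock);
	/* ... scan the queue to decide whom to wake up ... */
	read_unlock(&my_queue_lock);

	/* writer: process context, so it must keep interrupts out */
	unsigned long flags;

	write_lock_irqsave(&my_queue_lock, flags);
	/* ... add or remove a waiter ... */
	write_unlock_irqrestore(&my_queue_lock, flags);

Readers never deadlock against each other in interrupt context, which is why only
the write side needs the irq-safe variant.]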
diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
index a5f8436753e7..f4099ca6b483 100644
--- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
@@ -3127,7 +3127,7 @@
   at process_kern.c:156
 #3 0x1006a052 in switch_to (prev=0x50072000, next=0x507e8000, last=0x50072000)
   at process_kern.c:161
-#4 0x10001d12 in schedule () at sched.c:777
+#4 0x10001d12 in schedule () at core.c:777
 #5 0x1006a744 in __down (sem=0x507d241c) at semaphore.c:71
 #6 0x1006aa10 in __down_failed () at semaphore.c:157
 #7 0x1006c5d8 in segv_handler (sc=0x5006e940) at trap_user.c:174
@@ -3191,7 +3191,7 @@
   at process_kern.c:161
 161       _switch_to(prev, next);
 (gdb)
-#4 0x10001d12 in schedule () at sched.c:777
+#4 0x10001d12 in schedule () at core.c:777
 777       switch_to(prev, next, prev);
 (gdb)
 #5 0x1006a744 in __down (sem=0x507d241c) at semaphore.c:71
diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c
index e7b61494c312..c2731003edef 100644
--- a/arch/avr32/kernel/process.c
+++ b/arch/avr32/kernel/process.c
@@ -341,7 +341,7 @@ unsigned long get_wchan(struct task_struct *p)
 	 * is actually quite ugly. It might be possible to
 	 * determine the frame size automatically at build
 	 * time by doing this:
-	 *   - compile sched.c
+	 *   - compile sched/core.c
 	 *   - disassemble the resulting sched.o
 	 *   - look for 'sub sp,??' shortly after '<schedule>:'
 	 */
diff --git a/arch/cris/include/arch-v10/arch/bitops.h b/arch/cris/include/arch-v10/arch/bitops.h
index be85f6de25d3..03d9cfd92c8a 100644
--- a/arch/cris/include/arch-v10/arch/bitops.h
+++ b/arch/cris/include/arch-v10/arch/bitops.h
@@ -17,7 +17,7 @@ static inline unsigned long cris_swapnwbrlz(unsigned long w)
    in another register:
    !  __asm__ ("swapnwbr %2\n\tlz %2,%0"
    !  : "=r,r" (res), "=r,X" (dummy) : "1,0" (w));
-   confuses gcc (sched.c, gcc from cris-dist-1.14). */
+   confuses gcc (core.c, gcc from cris-dist-1.14). */
 
 	unsigned long res;
 	__asm__ ("swapnwbr %0 \n\t"
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 9be4e497f3d3..991ca336b8a2 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1035,7 +1035,7 @@ END(ia64_delay_loop)
  * Return a CPU-local timestamp in nano-seconds. This timestamp is
  * NOT synchronized across CPUs its return value must never be
  * compared against the values returned on another CPU. The usage in
- * kernel/sched.c ensures that.
+ * kernel/sched/core.c ensures that.
  *
  * The return-value of sched_clock() is NOT supposed to wrap-around.
  * If it did, it would cause some scheduling hiccups (at the worst).
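[Editor's note: since the timestamp is CPU-local, a correct caller keeps itself on
one CPU between the two reads. A minimal usage sketch; do_work() is a hypothetical
stand-in:

	u64 start, delta;

	get_cpu();			/* disables preemption: we stay on this CPU */
	start = sched_clock();
	do_work();
	delta = sched_clock() - start;	/* both reads taken on the same CPU */
	put_cpu();
]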
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index fd814e08c945..cb098628aee8 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -27,12 +27,12 @@ unsigned long mt_fpemul_threshold;
  * FPU affinity with the user's requested processor affinity.
  * This code is 98% identical with the sys_sched_setaffinity()
  * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
+ * updated when kernel/sched/core.c changes.
  */
 
 /*
  * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
  * cloned here.
  */
 static inline struct task_struct *find_process_by_pid(pid_t pid)
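[Editor's note: the cloned helper is short; the kernel/sched/core.c original that
this file mirrors is essentially the following, shown for context and not part of
this patch:

	static struct task_struct *find_process_by_pid(pid_t pid)
	{
		return pid ? find_task_by_vpid(pid) : current;
	}

A pid of 0 conventionally means the calling task, hence the fallback to current.]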
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9b36424b03c5..e9127ec612ef 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -476,8 +476,9 @@ einval: li v0, -ENOSYS
 	/*
 	 * For FPU affinity scheduling on MIPS MT processors, we need to
 	 * intercept sys_sched_xxxaffinity() calls until we get a proper hook
-	 * in kernel/sched.c. Considered only temporary we only support these
-	 * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
+	 * in kernel/sched/core.c. Considered only temporary we only support
+	 * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
+	 * atm.
 	 */
 	sys	mipsmt_sys_sched_setaffinity	3
 	sys	mipsmt_sys_sched_getaffinity	3
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index a73668a5f30d..b467530e2485 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,7 +38,7 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm);
 
 /*
  * switch_mm is the entry point called from the architecture independent
- * code in kernel/sched.c
+ * code in kernel/sched/core.c
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 2b70dfb1442e..b3f104953da2 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -225,7 +225,7 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
 
 /*
  * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched.c, so don't work too hard.
+ * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
  */
 #define thread_saved_pc(t)   ((t)->thread.pc)
 
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index ed258b8ae320..af8dfc9665f6 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -442,7 +442,7 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
 		    regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
 
-/* This is called only from kernel/sched.c, with esp == NULL */
+/* This is called only from kernel/sched/core.c, with esp == NULL */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	struct KBacktraceIterator kbt;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 7d101a2a1541..0dc4d1c6f98a 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -39,7 +39,7 @@ void show_trace(struct task_struct *task, unsigned long * stack)
 static const int kstack_depth_to_print = 24;
 
 /* This recently started being used in arch-independent code too, as in
- * kernel/sched.c.*/
+ * kernel/sched/core.c.*/
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	unsigned long *stack;
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 33f0280fd533..3cd574d5b19e 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -5,7 +5,7 @@
  * (C) Copyright 2001 Linus Torvalds
  *
  * Atomic wait-for-completion handler data structures.
- * See kernel/sched.c for details.
+ * See kernel/sched/core.c for details.
  */
 
 #include <linux/wait.h>
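[Editor's note: as a reminder of what these data structures are for, typical
completion usage follows this minimal sketch; setup_done and its surrounding
context are illustrative:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(setup_done);

	/* waiter: sleeps until the event has been signalled */
	wait_for_completion(&setup_done);

	/* signaller: runs in another thread, or in an interrupt handler */
	complete(&setup_done);
]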
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f463a46424e2..5ec99e5a50d2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -803,7 +803,7 @@ static inline void perf_restore_debug_store(void) { }
 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
 /*
- * This has to have a higher priority than migration_notifier in sched.c.
+ * This has to have a higher priority than migration_notifier in sched/core.c.
  */
 #define perf_cpu_notifier(fn)					\
 do {								\
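[Editor's note: the macro takes a standard CPU-hotplug notifier callback, and since
it expands to a do { } while (0) block it must be invoked from function scope. A
hedged sketch of a caller; my_perf_cpu_notify and its cases are illustrative of the
usual shape, not an API defined by this header:

	static int my_perf_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
	{
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			/* set up per-CPU state before the CPU comes online */
			break;
		case CPU_DOWN_PREPARE:
			/* migrate or tear down per-CPU state */
			break;
		}
		return NOTIFY_OK;
	}

	static int __init my_notifier_init(void)
	{
		perf_cpu_notifier(my_perf_cpu_notify);
		return 0;
	}
]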
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index e2369c167dbd..8b3ac0d718eb 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -67,7 +67,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
-/* for sched.c and kernel_lock.c: */
+/* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
 # define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
 # define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 0cc74c4403e4..a20a9b4d3871 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -361,7 +361,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
 #define __NR_ptrace 117
 __SYSCALL(__NR_ptrace, sys_ptrace)
 
-/* kernel/sched.c */
+/* kernel/sched/core.c */
 #define __NR_sched_setparam 118
 __SYSCALL(__NR_sched_setparam, sys_sched_setparam)
 #define __NR_sched_setscheduler 119
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 64b3f791bbe5..902d13fc2b13 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -540,7 +540,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
  * This function builds a partial partition of the systems CPUs
  * A 'partial partition' is a set of non-overlapping subsets whose
  * union is a subset of that set.
- * The output of this function needs to be passed to kernel/sched.c
+ * The output of this function needs to be passed to kernel/sched/core.c
  * partition_sched_domains() routine, which will rebuild the scheduler's
  * load balancing domains (sched domains) as specified by that partial
  * partition.
@@ -569,7 +569,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
  *	   is a subset of one of these domains, while there are as
  *	   many such domains as possible, each as small as possible.
  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
- *	   the kernel/sched.c routine partition_sched_domains() in a
+ *	   the kernel/sched/core.c routine partition_sched_domains() in a
  *	   convenient format, that can be easily compared to the prior
  *	   value to determine what partition elements (sched domains)
  *	   were changed (added or removed.)
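[Editor's note: for reference, the consumer of that output is the scheduler routine
named above; in kernels of this period its prototype, declared in
include/linux/sched.h, is approximately:

	void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				     struct sched_domain_attr *dattr_new);

doms_new is the 'doms' array described above, one cpumask per resulting sched domain.]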
diff --git a/kernel/time.c b/kernel/time.c
index d3617dbd3dca..7c7964c33ae7 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -11,7 +11,7 @@
  * Modification history kernel/time.c
  *
  * 1993-09-02	Philip Gladstone
- *	Created file with time related functions from sched.c and adjtimex()
+ *	Created file with time related functions from sched/core.c and adjtimex()
  * 1993-10-08	Torsten Duwe
  *	adjtime interface update and CMOS clock write code
  * 1995-08-13	Torsten Duwe
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index ad83c96b2ece..7e2204db0b1a 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -64,7 +64,7 @@ static inline struct worker *current_wq_worker(void)
 
 /*
  * Scheduler hooks for concurrency managed workqueue. Only to be used from
- * sched.c and workqueue.c.
+ * sched/core.c and workqueue.c.
  */
 void wq_worker_waking_up(struct task_struct *task, int cpu);
 struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
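[Editor's note: to see why these hooks sit so close to the scheduler, this is roughly
how the __schedule() path in kernel/sched/core.c of this era invokes them when a
worker blocks; condensed for illustration, not part of this patch:

	if (prev->flags & PF_WQ_WORKER) {
		struct task_struct *to_wakeup;

		/* notify the workqueue that this worker is going to sleep ... */
		to_wakeup = wq_worker_sleeping(prev, cpu);
		/* ... and wake a replacement if pool concurrency has dropped */
		if (to_wakeup)
			try_to_wake_up_local(to_wakeup);
	}
]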