-rw-r--r-- | arch/alpha/kernel/process.c | 2
-rw-r--r-- | arch/ia64/kernel/mca.c | 10
-rw-r--r-- | arch/ia64/kernel/smpboot.c | 2
-rw-r--r-- | arch/mips/kernel/entry.S | 2
-rw-r--r-- | arch/mips/kernel/mips-mt.c | 6
-rw-r--r-- | arch/um/kernel/tt/process_kern.c | 2
-rw-r--r-- | drivers/char/tty_io.c | 2
-rw-r--r-- | fs/eventpoll.c | 4
-rw-r--r-- | include/asm-ia64/thread_info.h | 2
-rw-r--r-- | include/asm-m32r/system.h | 2
-rw-r--r-- | include/asm-sh/system.h | 2
-rw-r--r-- | include/linux/sched.h | 55
-rw-r--r-- | kernel/capability.c | 8
-rw-r--r-- | kernel/exit.c | 35
-rw-r--r-- | kernel/fork.c | 18
-rw-r--r-- | kernel/hrtimer.c | 2
-rw-r--r-- | kernel/pid.c | 6
-rw-r--r-- | kernel/ptrace.c | 6
-rw-r--r-- | kernel/rtmutex-debug.c | 5
-rw-r--r-- | kernel/rtmutex-tester.c | 4
-rw-r--r-- | kernel/rtmutex.c | 11
-rw-r--r-- | kernel/sched.c | 192
-rw-r--r-- | kernel/timer.c | 2
-rw-r--r-- | kernel/workqueue.c | 2
-rw-r--r-- | mm/oom_kill.c | 8
25 files changed, 203 insertions, 187 deletions
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 01c8c8b23337..41ebf51a107a 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -474,7 +474,7 @@ out:
  */
 
 unsigned long
-thread_saved_pc(task_t *t)
+thread_saved_pc(struct task_struct *t)
 {
 	unsigned long base = (unsigned long)task_stack_page(t);
 	unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index eb8e8dc5ac8e..2fbe4536fe18 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
  */
 
 static void
-ia64_mca_modify_comm(const task_t *previous_current)
+ia64_mca_modify_comm(const struct task_struct *previous_current)
 {
 	char *p, comm[sizeof(current->comm)];
 	if (previous_current->pid)
@@ -709,7 +709,7 @@ ia64_mca_modify_comm(const task_t *previous_current)
  * that we can do backtrace on the MCA/INIT handler code itself.
  */
 
-static task_t *
+static struct task_struct *
 ia64_mca_modify_original_stack(struct pt_regs *regs,
 		const struct switch_stack *sw,
 		struct ia64_sal_os_state *sos,
@@ -719,7 +719,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	ia64_va va;
 	extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */
 	const pal_min_state_area_t *ms = sos->pal_min_state;
-	task_t *previous_current;
+	struct task_struct *previous_current;
 	struct pt_regs *old_regs;
 	struct switch_stack *old_sw;
 	unsigned size = sizeof(struct pt_regs) +
@@ -1023,7 +1023,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
 		&sos->proc_state_param;
 	int recover, cpu = smp_processor_id();
-	task_t *previous_current;
+	struct task_struct *previous_current;
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
 
@@ -1352,7 +1352,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
 {
 	static atomic_t slaves;
 	static atomic_t monarchs;
-	task_t *previous_current;
+	struct task_struct *previous_current;
 	int cpu = smp_processor_id();
 	struct ia64_mca_notify_die nd =
 		{ .sos = sos, .monarch_cpu = &monarch_cpu };
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e1960979be29..6203ed4ec8cf 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -124,7 +124,7 @@ extern void __devinit calibrate_delay (void);
 extern void start_ap (void);
 extern unsigned long ia64_iobase;
 
-task_t *task_for_booting_cpu;
+struct task_struct *task_for_booting_cpu;
 
 /*
  * State for each CPU
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index ecfd637d702a..01e7fa86aa43 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -65,7 +65,7 @@ need_resched:
 #endif
 
 FEXPORT(ret_from_fork)
-	jal	schedule_tail		# a0 = task_t *prev
+	jal	schedule_tail		# a0 = struct task_struct *prev
 
 FEXPORT(syscall_exit)
 	local_irq_disable		# make sure need_resched and
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 02237a685ec7..4dcc39f42951 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -47,7 +47,7 @@ unsigned long mt_fpemul_threshold = 0;
  * used in sys_sched_set/getaffinity() in kernel/sched.c, so
  * cloned here.
  */
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline struct task_struct *find_process_by_pid(pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
@@ -62,7 +62,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	cpumask_t new_mask;
 	cpumask_t effective_mask;
 	int retval;
-	task_t *p;
+	struct task_struct *p;
 
 	if (len < sizeof(new_mask))
 		return -EINVAL;
@@ -127,7 +127,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	unsigned int real_len;
 	cpumask_t mask;
 	int retval;
-	task_t *p;
+	struct task_struct *p;
 
 	real_len = sizeof(mask);
 	if (len < real_len)
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index a9c1443fc548..8368c2dbe635 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -119,7 +119,7 @@ void suspend_new_thread(int fd)
 		panic("read failed in suspend_new_thread, err = %d", -err);
 }
 
-void schedule_tail(task_t *prev);
+void schedule_tail(struct task_struct *prev);
 
 static void new_thread_handler(int sig)
 {
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6fb77952562d..bfdb90242a90 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2336,7 +2336,7 @@ static int fionbio(struct file *file, int __user *p)
 
 static int tiocsctty(struct tty_struct *tty, int arg)
 {
-	task_t *p;
+	struct task_struct *p;
 
 	if (current->signal->leader &&
 	    (current->signal->session == tty->session))
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 9c677bbd0b08..19ffb043abbc 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -120,7 +120,7 @@ struct epoll_filefd {
  */
 struct wake_task_node {
 	struct list_head llink;
-	task_t *task;
+	struct task_struct *task;
 	wait_queue_head_t *wq;
 };
 
@@ -413,7 +413,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
 {
 	int wake_nests = 0;
 	unsigned long flags;
-	task_t *this_task = current;
+	struct task_struct *this_task = current;
 	struct list_head *lsthead = &psw->wake_task_list, *lnk;
 	struct wake_task_node *tncur;
 	struct wake_task_node tnode;
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 8bc9869e5765..8adcde0934ca 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -68,7 +68,7 @@ struct thread_info {
 #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
-#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
+#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
 #define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
 
 #endif /* !__ASSEMBLY */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 66c4742f09e7..311cebf44eff 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -18,7 +18,7 @@
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.
  *
- * `next' and `prev' should be task_t, but it isn't always defined
+ * `next' and `prev' should be struct task_struct, but it isn't always defined
  */
 
 #define switch_to(prev, next, last) do { \
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index b752e5cbb830..ce2e60664a86 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -12,7 +12,7 @@
  */
 
 #define switch_to(prev, next, last) do { \
-	task_t *__last; \
+	struct task_struct *__last; \
 	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
 	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
 	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8ebddba4448d..c2797f04d931 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-typedef struct task_struct task_t;
+struct task_struct;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
@@ -383,7 +383,7 @@ struct signal_struct {
 	wait_queue_head_t wait_chldexit; /* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	task_t *curr_target;
+	struct task_struct *curr_target;
 
 	/* shared signal handling: */
 	struct sigpending shared_pending;
@@ -699,7 +699,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
 #else
 static inline void prefetch_stack(struct task_struct *t) { }
 #endif
@@ -1031,9 +1031,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -1042,7 +1042,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1060,27 +1061,27 @@ static inline void idle_task_exit(void) {}
 extern void sched_idle_next(void);
 
 #ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
 #else
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p) do { } while (0)
 #endif
 
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
@@ -1137,8 +1138,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1243,17 +1244,17 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
 #else
 #define wait_task_inactive(p) do { } while (0)
 #endif
@@ -1279,13 +1280,13 @@ extern void wait_task_inactive(task_t * p);
 /* de_thread depends on thread_group_leader not being a pid based check */
 #define thread_group_leader(p) (p == p->group_leader)
 
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
 {
 	return list_entry(rcu_dereference(p->thread_group.next),
 			task_t, thread_group);
 }
-
-static inline int thread_group_empty(task_t *p)
+			struct task_struct, thread_group);
+}
+
+static inline int thread_group_empty(struct task_struct *p)
 {
 	return list_empty(&p->thread_group);
 }
diff --git a/kernel/capability.c b/kernel/capability.c
index 1a4d8a40d3f9..c7685ad00a97 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -46,7 +46,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 	int ret = 0;
 	pid_t pid;
 	__u32 version;
-	task_t *target;
+	struct task_struct *target;
 	struct __user_cap_data_struct data;
 
 	if (get_user(version, &header->version))
@@ -96,7 +96,7 @@ static inline int cap_set_pg(int pgrp, kernel_cap_t *effective,
 			     kernel_cap_t *inheritable,
 			     kernel_cap_t *permitted)
 {
-	task_t *g, *target;
+	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
 
@@ -128,7 +128,7 @@ static inline int cap_set_all(kernel_cap_t *effective,
 			      kernel_cap_t *inheritable,
 			      kernel_cap_t *permitted)
 {
-	task_t *g, *target;
+	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
 
@@ -172,7 +172,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 {
 	kernel_cap_t inheritable, permitted, effective;
 	__u32 version;
-	task_t *target;
+	struct task_struct *target;
 	int ret;
 	pid_t pid;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index c595db14cf25..6664c084783d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -134,8 +134,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 
 void release_task(struct task_struct * p)
 {
+	struct task_struct *leader;
 	int zap_leader;
-	task_t *leader;
 repeat:
 	atomic_dec(&p->user->processes);
 	write_lock_irq(&tasklist_lock);
@@ -209,7 +209,7 @@ out:
  *
 * "I ask you, have you ever known what it is to be an orphan?"
  */
-static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
 {
 	struct task_struct *p;
 	int ret = 1;
@@ -582,7 +582,8 @@ static void exit_mm(struct task_struct * tsk)
 	mmput(mm);
 }
 
-static inline void choose_new_parent(task_t *p, task_t *reaper)
+static inline void
+choose_new_parent(struct task_struct *p, struct task_struct *reaper)
 {
 	/*
 	 * Make sure we're not reparenting to ourselves and that
@@ -592,7 +593,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper)
 	p->real_parent = reaper;
 }
 
-static void reparent_thread(task_t *p, task_t *father, int traced)
+static void
+reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -656,8 +658,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
  * group, and if no such member exists, give it to
 * the global child reaper process (ie "init")
  */
-static void forget_original_parent(struct task_struct * father,
-					   struct list_head *to_release)
+static void
+forget_original_parent(struct task_struct *father, struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
 	struct list_head *_p, *_n;
@@ -680,7 +682,7 @@ static void forget_original_parent(struct task_struct * father,
 	 */
 	list_for_each_safe(_p, _n, &father->children) {
 		int ptrace;
-		p = list_entry(_p,struct task_struct,sibling);
+		p = list_entry(_p, struct task_struct, sibling);
 
 		ptrace = p->ptrace;
 
@@ -709,7 +711,7 @@ static void forget_original_parent(struct task_struct * father,
 			list_add(&p->ptrace_list, to_release);
 	}
 	list_for_each_safe(_p, _n, &father->ptrace_children) {
-		p = list_entry(_p,struct task_struct,ptrace_list);
+		p = list_entry(_p, struct task_struct, ptrace_list);
 		choose_new_parent(p, reaper);
 		reparent_thread(p, father, 1);
 	}
@@ -829,7 +831,7 @@ static void exit_notify(struct task_struct *tsk)
 
 	list_for_each_safe(_p, _n, &ptrace_dead) {
 		list_del_init(_p);
-		t = list_entry(_p,struct task_struct,ptrace_list);
+		t = list_entry(_p, struct task_struct, ptrace_list);
 		release_task(t);
 	}
 
@@ -1010,7 +1012,7 @@ asmlinkage void sys_exit_group(int error_code)
 	do_group_exit((error_code & 0xff) << 8);
 }
 
-static int eligible_child(pid_t pid, int options, task_t *p)
+static int eligible_child(pid_t pid, int options, struct task_struct *p)
 {
 	if (pid > 0) {
 		if (p->pid != pid)
@@ -1051,12 +1053,13 @@ static int eligible_child(pid_t pid, int options, task_t *p)
 	return 1;
 }
 
-static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
 			       int why, int status,
 			       struct siginfo __user *infop,
 			       struct rusage __user *rusagep)
 {
 	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
+
 	put_task_struct(p);
 	if (!retval)
 		retval = put_user(SIGCHLD, &infop->si_signo);
@@ -1081,7 +1084,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
  * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
  */
-static int wait_task_zombie(task_t *p, int noreap,
+static int wait_task_zombie(struct task_struct *p, int noreap,
 			    struct siginfo __user *infop,
 			    int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1243,8 +1246,8 @@ static int wait_task_zombie(task_t *p, int noreap,
  * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
  */
-static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
-			     struct siginfo __user *infop,
+static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
+			     int noreap, struct siginfo __user *infop,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
@@ -1358,7 +1361,7 @@ bail_ref:
  * the lock and this task is uninteresting. If we return nonzero, we have
 * released the lock and the system call should return.
  */
-static int wait_task_continued(task_t *p, int noreap,
+static int wait_task_continued(struct task_struct *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
 {
@@ -1444,7 +1447,7 @@ repeat:
 		int ret;
 
 		list_for_each(_p,&tsk->children) {
-			p = list_entry(_p,struct task_struct,sibling);
+			p = list_entry(_p, struct task_struct, sibling);
 
 			ret = eligible_child(pid, options, p);
 			if (!ret)
diff --git a/kernel/fork.c b/kernel/fork.c
index 54953d8a6f17..56e4e07e45f7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -933,13 +933,13 @@ static inline void rt_mutex_init_task(struct task_struct *p)
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
  */
-static task_t *copy_process(unsigned long clone_flags,
+static struct task_struct *copy_process(unsigned long clone_flags,
 					unsigned long stack_start,
 					struct pt_regs *regs,
 					unsigned long stack_size,
 					int __user *parent_tidptr,
 					int __user *child_tidptr,
 					int pid)
 {
 	int retval;
 	struct task_struct *p = NULL;
@@ -1294,9 +1294,9 @@ struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-task_t * __devinit fork_idle(int cpu)
+struct task_struct * __devinit fork_idle(int cpu)
 {
-	task_t *task;
+	struct task_struct *task;
 	struct pt_regs regs;
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 617304ce67db..d17766d40dab 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -669,7 +669,7 @@ static int hrtimer_wakeup(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
+void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
diff --git a/kernel/pid.c b/kernel/pid.c
index eeb836b65ca4..93e212f20671 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -218,7 +218,7 @@ struct pid * fastcall find_pid(int nr)
 	return NULL;
 }
 
-int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
 {
 	struct pid_link *link;
 	struct pid *pid;
@@ -233,7 +233,7 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
 	return 0;
 }
 
-void fastcall detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(struct task_struct *task, enum pid_type type)
 {
 	struct pid_link *link;
 	struct pid *pid;
@@ -267,7 +267,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
 /*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
-task_t *find_task_by_pid_type(int type, int nr)
+struct task_struct *find_task_by_pid_type(int type, int nr)
 {
 	return pid_task(find_pid(nr), type);
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 335c5b932e14..9a111f70145c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -28,7 +28,7 @@
  *
 * Must be called with the tasklist lock write-held.
  */
-void __ptrace_link(task_t *child, task_t *new_parent)
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
 	BUG_ON(!list_empty(&child->ptrace_list));
 	if (child->parent == new_parent)
@@ -46,7 +46,7 @@ void __ptrace_link(task_t *child, task_t *new_parent)
  * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
  */
-void ptrace_untrace(task_t *child)
+void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
 	if (child->state == TASK_TRACED) {
@@ -65,7 +65,7 @@ void ptrace_untrace(task_t *child)
  *
 * Must be called with the tasklist lock write-held.
  */
-void __ptrace_unlink(task_t *child)
+void __ptrace_unlink(struct task_struct *child)
 {
 	BUG_ON(!child->ptrace);
 
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 353a853bc390..0c1faa950af7 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -96,7 +96,7 @@ void deadlock_trace_off(void)
 	rt_trace_on = 0;
 }
 
-static void printk_task(task_t *p)
+static void printk_task(struct task_struct *p)
 {
 	if (p)
 		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
@@ -231,7 +231,8 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
 	lock->name = name;
 }
 
-void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
+void
+rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
 {
 }
 
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index e82c2f848249..494dac872a13 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -33,7 +33,7 @@ struct test_thread_data {
 };
 
 static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
-static task_t *threads[MAX_RT_TEST_THREADS];
+static struct task_struct *threads[MAX_RT_TEST_THREADS];
 static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
 
 enum test_opcodes {
@@ -361,8 +361,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
 static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
 {
 	struct test_thread_data *td;
+	struct task_struct *tsk;
 	char *curr = buf;
-	task_t *tsk;
 	int i;
 
 	td = container_of(dev, struct test_thread_data, sysdev);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 91b699aa658b..d2ef13b485e7 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -157,7 +157,7 @@ int max_lock_depth = 1024;
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
  */
-static int rt_mutex_adjust_prio_chain(task_t *task,
+static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex_waiter *orig_waiter,
@@ -282,6 +282,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
 	put_task_struct(task);
+
 	return ret;
 }
@@ -403,10 +404,10 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
 				   int detect_deadlock)
 {
+	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
-	task_t *owner = rt_mutex_owner(lock);
-	int boost = 0, res;
 	unsigned long flags;
+	int boost = 0, res;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	__rt_mutex_adjust_prio(current);
@@ -527,9 +528,9 @@ static void remove_waiter(struct rt_mutex *lock,
 			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
-	int boost = 0;
-	task_t *owner = rt_mutex_owner(lock);
+	struct task_struct *owner = rt_mutex_owner(lock);
 	unsigned long flags;
+	int boost = 0;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	plist_del(&waiter->list_entry, &lock->wait_list);
diff --git a/kernel/sched.c b/kernel/sched.c
index b0326141f841..021b31219516 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -179,7 +179,7 @@ static unsigned int static_prio_timeslice(int static_prio)
 	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
 
-static inline unsigned int task_timeslice(task_t *p)
+static inline unsigned int task_timeslice(struct task_struct *p)
 {
 	return static_prio_timeslice(p->static_prio);
 }
@@ -227,7 +227,7 @@ struct runqueue {
 
 	unsigned long expired_timestamp;
 	unsigned long long timestamp_last_tick;
-	task_t *curr, *idle;
+	struct task_struct *curr, *idle;
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int best_expired_prio;
@@ -240,7 +240,7 @@ struct runqueue {
 	int active_balance;
 	int push_cpu;
 
-	task_t *migration_thread;
+	struct task_struct *migration_thread;
 	struct list_head migration_queue;
 #endif
 
@@ -291,16 +291,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
 #endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
 {
 	return rq->curr == p;
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 {
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
@@ -317,7 +317,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(runqueue_t *rq, struct task_struct *p)
 {
 #ifdef CONFIG_SMP
 	return p->oncpu;
@@ -326,7 +326,7 @@ static inline int task_running(runqueue_t *rq, task_t *p)
 #endif
 }
 
-static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
+static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -343,7 +343,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)
 #endif
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -364,7 +364,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called interrupts disabled.
  */
-static inline runqueue_t *__task_rq_lock(task_t *p)
+static inline runqueue_t *__task_rq_lock(struct task_struct *p)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
@@ -384,7 +384,7 @@ repeat_lock_task:
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
  */
-static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
@@ -541,7 +541,7 @@ static inline runqueue_t *this_rq_lock(void)
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
  */
-static inline void sched_info_dequeued(task_t *t)
+static inline void sched_info_dequeued(struct task_struct *t)
 {
 	t->sched_info.last_queued = 0;
 }
@@ -551,7 +551,7 @@ static inline void sched_info_dequeued(task_t *t)
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
  */
-static void sched_info_arrive(task_t *t)
+static void sched_info_arrive(struct task_struct *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
@@ -585,7 +585,7 @@ static void sched_info_arrive(task_t *t)
 * the timestamp if it is already not set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
  */
-static inline void sched_info_queued(task_t *t)
+static inline void sched_info_queued(struct task_struct *t)
 {
 	if (!t->sched_info.last_queued)
 		t->sched_info.last_queued = jiffies;
@@ -595,7 +595,7 @@ static inline void sched_info_queued(task_t *t)
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
  */
-static inline void sched_info_depart(task_t *t)
+static inline void sched_info_depart(struct task_struct *t)
 {
 	struct runqueue *rq = task_rq(t);
 	unsigned long diff = jiffies - t->sched_info.last_arrival;
@@ -611,7 +611,8 @@ static inline void sched_info_depart(task_t *t)
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
  */
-static inline void sched_info_switch(task_t *prev, task_t *next)
+static inline void
+sched_info_switch(struct task_struct *prev, struct task_struct *next)
 {
 	struct runqueue *rq = task_rq(prev);
 
@@ -683,7 +684,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
 * Both properties are important to certain workloads.
  */
 
-static inline int __normal_prio(task_t *p)
+static inline int __normal_prio(struct task_struct *p)
 {
 	int bonus, prio;
 
@@ -719,7 +720,7 @@ static inline int __normal_prio(task_t *p)
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
 	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static void set_load_weight(task_t *p)
+static void set_load_weight(struct task_struct *p)
 {
 	if (has_rt_policy(p)) {
 #ifdef CONFIG_SMP
@@ -737,23 +738,25 @@ static void set_load_weight(task_t *p)
 		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
 }
 
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load += p->load_weight;
 }
 
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+static inline void
+dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)
 {
 	rq->raw_weighted_load -= p->load_weight;
 }
 
-static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running++;
 	inc_raw_weighted_load(rq, p);
 }
 
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq)
 {
 	rq->nr_running--;
 	dec_raw_weighted_load(rq, p);
@@ -766,7 +769,7 @@ static inline void dec_nr_running(task_t *p, runqueue_t *rq)
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
  */
-static inline int normal_prio(task_t *p)
+static inline int normal_prio(struct task_struct *p)
 {
 	int prio;
 
@@ -784,7 +787,7 @@ static inline int normal_prio(task_t *p)
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
  */
-static int effective_prio(task_t *p)
+static int effective_prio(struct task_struct *p)
 {
 	p->normal_prio = normal_prio(p);
 	/*
@@ -800,7 +803,7 @@ static int effective_prio(task_t *p)
 /*
 * __activate_task - move a task to the runqueue.
  */
-static void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(struct task_struct *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
@@ -813,7 +816,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
 /*
 * __activate_idle_task - move idle task to the _front_ of runqueue.
  */
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq)
 {
 	enqueue_task_head(p, rq->active);
 	inc_nr_running(p, rq);
@@ -823,7 +826,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
 * Recalculate p->normal_prio and p->prio after having slept,
 * updating the sleep-average too:
  */
-static int recalc_task_prio(task_t *p, unsigned long long now)
+static int recalc_task_prio(struct task_struct *p, unsigned long long now)
 {
 	/* Caller must always ensure 'now >= p->timestamp' */
 	unsigned long sleep_time = now - p->timestamp;
@@ -895,7 +898,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 * Update all the scheduling statistics stuff. (sleep average
 * calculation, priority modifiers, etc.)
  */
-static void activate_task(task_t *p, runqueue_t *rq, int local)
+static void activate_task(struct task_struct *p, runqueue_t *rq, int local)
 {
 	unsigned long long now;
 
@@ -962,7 +965,7 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)
 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
 #endif
 
-static void resched_task(task_t *p)
+static void resched_task(struct task_struct *p)
 {
 	int cpu;
 
@@ -983,7 +986,7 @@ static void resched_task(task_t *p)
 	smp_send_reschedule(cpu);
 }
 #else
-static inline void resched_task(task_t *p)
+static inline void resched_task(struct task_struct *p)
 {
 	assert_spin_locked(&task_rq(p)->lock);
 	set_tsk_need_resched(p);
@@ -994,7 +997,7 @@ static inline void resched_task(task_t *p)
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
  */
-inline int task_curr(const task_t *p)
+inline int task_curr(const struct task_struct *p)
 {
 	return cpu_curr(task_cpu(p)) == p;
 }
@@ -1009,7 +1012,7 @@ unsigned long weighted_cpuload(const int cpu)
 typedef struct {
 	struct list_head list;
 
-	task_t *task;
+	struct task_struct *task;
 	int dest_cpu;
 
 	struct completion done;
@@ -1019,7 +1022,8 @@ typedef struct {
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
  */
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int
+migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)
 {
 	runqueue_t *rq = task_rq(p);
 
@@ -1049,7 +1053,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
 * smp_call_function() if an IPI is sent by the same process we are
1050 | * waiting to become inactive. | 1054 | * waiting to become inactive. |
1051 | */ | 1055 | */ |
1052 | void wait_task_inactive(task_t *p) | 1056 | void wait_task_inactive(struct task_struct *p) |
1053 | { | 1057 | { |
1054 | unsigned long flags; | 1058 | unsigned long flags; |
1055 | runqueue_t *rq; | 1059 | runqueue_t *rq; |
@@ -1083,7 +1087,7 @@ repeat: | |||
1083 | * to another CPU then no harm is done and the purpose has been | 1087 | * to another CPU then no harm is done and the purpose has been |
1084 | * achieved as well. | 1088 | * achieved as well. |
1085 | */ | 1089 | */ |
1086 | void kick_process(task_t *p) | 1090 | void kick_process(struct task_struct *p) |
1087 | { | 1091 | { |
1088 | int cpu; | 1092 | int cpu; |
1089 | 1093 | ||
@@ -1286,7 +1290,7 @@ nextlevel: | |||
1286 | * Returns the CPU we should wake onto. | 1290 | * Returns the CPU we should wake onto. |
1287 | */ | 1291 | */ |
1288 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) | 1292 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) |
1289 | static int wake_idle(int cpu, task_t *p) | 1293 | static int wake_idle(int cpu, struct task_struct *p) |
1290 | { | 1294 | { |
1291 | cpumask_t tmp; | 1295 | cpumask_t tmp; |
1292 | struct sched_domain *sd; | 1296 | struct sched_domain *sd; |
@@ -1309,7 +1313,7 @@ static int wake_idle(int cpu, task_t *p) | |||
1309 | return cpu; | 1313 | return cpu; |
1310 | } | 1314 | } |
1311 | #else | 1315 | #else |
1312 | static inline int wake_idle(int cpu, task_t *p) | 1316 | static inline int wake_idle(int cpu, struct task_struct *p) |
1313 | { | 1317 | { |
1314 | return cpu; | 1318 | return cpu; |
1315 | } | 1319 | } |
@@ -1329,7 +1333,7 @@ static inline int wake_idle(int cpu, task_t *p) | |||
1329 | * | 1333 | * |
1330 | * returns failure only if the task is already active. | 1334 | * returns failure only if the task is already active. |
1331 | */ | 1335 | */ |
1332 | static int try_to_wake_up(task_t *p, unsigned int state, int sync) | 1336 | static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) |
1333 | { | 1337 | { |
1334 | int cpu, this_cpu, success = 0; | 1338 | int cpu, this_cpu, success = 0; |
1335 | unsigned long flags; | 1339 | unsigned long flags; |
@@ -1487,14 +1491,14 @@ out: | |||
1487 | return success; | 1491 | return success; |
1488 | } | 1492 | } |
1489 | 1493 | ||
1490 | int fastcall wake_up_process(task_t *p) | 1494 | int fastcall wake_up_process(struct task_struct *p) |
1491 | { | 1495 | { |
1492 | return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | | 1496 | return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED | |
1493 | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); | 1497 | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0); |
1494 | } | 1498 | } |
1495 | EXPORT_SYMBOL(wake_up_process); | 1499 | EXPORT_SYMBOL(wake_up_process); |
1496 | 1500 | ||
1497 | int fastcall wake_up_state(task_t *p, unsigned int state) | 1501 | int fastcall wake_up_state(struct task_struct *p, unsigned int state) |
1498 | { | 1502 | { |
1499 | return try_to_wake_up(p, state, 0); | 1503 | return try_to_wake_up(p, state, 0); |
1500 | } | 1504 | } |
@@ -1503,7 +1507,7 @@ int fastcall wake_up_state(task_t *p, unsigned int state) | |||
1503 | * Perform scheduler related setup for a newly forked process p. | 1507 | * Perform scheduler related setup for a newly forked process p. |
1504 | * p is forked by current. | 1508 | * p is forked by current. |
1505 | */ | 1509 | */ |
1506 | void fastcall sched_fork(task_t *p, int clone_flags) | 1510 | void fastcall sched_fork(struct task_struct *p, int clone_flags) |
1507 | { | 1511 | { |
1508 | int cpu = get_cpu(); | 1512 | int cpu = get_cpu(); |
1509 | 1513 | ||
@@ -1571,7 +1575,7 @@ void fastcall sched_fork(task_t *p, int clone_flags) | |||
1571 | * that must be done for every newly created context, then puts the task | 1575 | * that must be done for every newly created context, then puts the task |
1572 | * on the runqueue and wakes it. | 1576 | * on the runqueue and wakes it. |
1573 | */ | 1577 | */ |
1574 | void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) | 1578 | void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) |
1575 | { | 1579 | { |
1576 | unsigned long flags; | 1580 | unsigned long flags; |
1577 | int this_cpu, cpu; | 1581 | int this_cpu, cpu; |
@@ -1655,7 +1659,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) | |||
1655 | * artificially, because any timeslice recovered here | 1659 | * artificially, because any timeslice recovered here |
1656 | * was given away by the parent in the first place.) | 1660 | * was given away by the parent in the first place.) |
1657 | */ | 1661 | */ |
1658 | void fastcall sched_exit(task_t *p) | 1662 | void fastcall sched_exit(struct task_struct *p) |
1659 | { | 1663 | { |
1660 | unsigned long flags; | 1664 | unsigned long flags; |
1661 | runqueue_t *rq; | 1665 | runqueue_t *rq; |
@@ -1689,7 +1693,7 @@ void fastcall sched_exit(task_t *p) | |||
1689 | * prepare_task_switch sets up locking and calls architecture specific | 1693 | * prepare_task_switch sets up locking and calls architecture specific |
1690 | * hooks. | 1694 | * hooks. |
1691 | */ | 1695 | */ |
1692 | static inline void prepare_task_switch(runqueue_t *rq, task_t *next) | 1696 | static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next) |
1693 | { | 1697 | { |
1694 | prepare_lock_switch(rq, next); | 1698 | prepare_lock_switch(rq, next); |
1695 | prepare_arch_switch(next); | 1699 | prepare_arch_switch(next); |
@@ -1710,7 +1714,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next) | |||
1710 | * with the lock held can cause deadlocks; see schedule() for | 1714 | * with the lock held can cause deadlocks; see schedule() for |
1711 | * details.) | 1715 | * details.) |
1712 | */ | 1716 | */ |
1713 | static inline void finish_task_switch(runqueue_t *rq, task_t *prev) | 1717 | static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev) |
1714 | __releases(rq->lock) | 1718 | __releases(rq->lock) |
1715 | { | 1719 | { |
1716 | struct mm_struct *mm = rq->prev_mm; | 1720 | struct mm_struct *mm = rq->prev_mm; |
@@ -1748,7 +1752,7 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev) | |||
1748 | * schedule_tail - first thing a freshly forked thread must call. | 1752 | * schedule_tail - first thing a freshly forked thread must call. |
1749 | * @prev: the thread we just switched away from. | 1753 | * @prev: the thread we just switched away from. |
1750 | */ | 1754 | */ |
1751 | asmlinkage void schedule_tail(task_t *prev) | 1755 | asmlinkage void schedule_tail(struct task_struct *prev) |
1752 | __releases(rq->lock) | 1756 | __releases(rq->lock) |
1753 | { | 1757 | { |
1754 | runqueue_t *rq = this_rq(); | 1758 | runqueue_t *rq = this_rq(); |
@@ -1765,8 +1769,9 @@ asmlinkage void schedule_tail(task_t *prev) | |||
1765 | * context_switch - switch to the new MM and the new | 1769 | * context_switch - switch to the new MM and the new |
1766 | * thread's register state. | 1770 | * thread's register state. |
1767 | */ | 1771 | */ |
1768 | static inline | 1772 | static inline struct task_struct * |
1769 | task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) | 1773 | context_switch(runqueue_t *rq, struct task_struct *prev, |
1774 | struct task_struct *next) | ||
1770 | { | 1775 | { |
1771 | struct mm_struct *mm = next->mm; | 1776 | struct mm_struct *mm = next->mm; |
1772 | struct mm_struct *oldmm = prev->active_mm; | 1777 | struct mm_struct *oldmm = prev->active_mm; |
@@ -1937,7 +1942,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) | |||
1937 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | 1942 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then |
1938 | * the cpu_allowed mask is restored. | 1943 | * the cpu_allowed mask is restored. |
1939 | */ | 1944 | */ |
1940 | static void sched_migrate_task(task_t *p, int dest_cpu) | 1945 | static void sched_migrate_task(struct task_struct *p, int dest_cpu) |
1941 | { | 1946 | { |
1942 | migration_req_t req; | 1947 | migration_req_t req; |
1943 | runqueue_t *rq; | 1948 | runqueue_t *rq; |
@@ -1952,11 +1957,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu) | |||
1952 | if (migrate_task(p, dest_cpu, &req)) { | 1957 | if (migrate_task(p, dest_cpu, &req)) { |
1953 | /* Need to wait for migration thread (might exit: take ref). */ | 1958 | /* Need to wait for migration thread (might exit: take ref). */ |
1954 | struct task_struct *mt = rq->migration_thread; | 1959 | struct task_struct *mt = rq->migration_thread; |
1960 | |||
1955 | get_task_struct(mt); | 1961 | get_task_struct(mt); |
1956 | task_rq_unlock(rq, &flags); | 1962 | task_rq_unlock(rq, &flags); |
1957 | wake_up_process(mt); | 1963 | wake_up_process(mt); |
1958 | put_task_struct(mt); | 1964 | put_task_struct(mt); |
1959 | wait_for_completion(&req.done); | 1965 | wait_for_completion(&req.done); |
1966 | |||
1960 | return; | 1967 | return; |
1961 | } | 1968 | } |
1962 | out: | 1969 | out: |
@@ -1980,9 +1987,9 @@ void sched_exec(void) | |||
1980 | * pull_task - move a task from a remote runqueue to the local runqueue. | 1987 | * pull_task - move a task from a remote runqueue to the local runqueue. |
1981 | * Both runqueues must be locked. | 1988 | * Both runqueues must be locked. |
1982 | */ | 1989 | */ |
1983 | static | 1990 | static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, |
1984 | void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, | 1991 | struct task_struct *p, runqueue_t *this_rq, |
1985 | runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) | 1992 | prio_array_t *this_array, int this_cpu) |
1986 | { | 1993 | { |
1987 | dequeue_task(p, src_array); | 1994 | dequeue_task(p, src_array); |
1988 | dec_nr_running(p, src_rq); | 1995 | dec_nr_running(p, src_rq); |
@@ -2003,7 +2010,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, | |||
2003 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? | 2010 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? |
2004 | */ | 2011 | */ |
2005 | static | 2012 | static |
2006 | int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, | 2013 | int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu, |
2007 | struct sched_domain *sd, enum idle_type idle, | 2014 | struct sched_domain *sd, enum idle_type idle, |
2008 | int *all_pinned) | 2015 | int *all_pinned) |
2009 | { | 2016 | { |
@@ -2052,8 +2059,8 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest, | |||
2052 | best_prio_seen, skip_for_load; | 2059 | best_prio_seen, skip_for_load; |
2053 | prio_array_t *array, *dst_array; | 2060 | prio_array_t *array, *dst_array; |
2054 | struct list_head *head, *curr; | 2061 | struct list_head *head, *curr; |
2062 | struct task_struct *tmp; | ||
2055 | long rem_load_move; | 2063 | long rem_load_move; |
2056 | task_t *tmp; | ||
2057 | 2064 | ||
2058 | if (max_nr_move == 0 || max_load_move == 0) | 2065 | if (max_nr_move == 0 || max_load_move == 0) |
2059 | goto out; | 2066 | goto out; |
@@ -2105,7 +2112,7 @@ skip_bitmap: | |||
2105 | head = array->queue + idx; | 2112 | head = array->queue + idx; |
2106 | curr = head->prev; | 2113 | curr = head->prev; |
2107 | skip_queue: | 2114 | skip_queue: |
2108 | tmp = list_entry(curr, task_t, run_list); | 2115 | tmp = list_entry(curr, struct task_struct, run_list); |
2109 | 2116 | ||
2110 | curr = curr->prev; | 2117 | curr = curr->prev; |
2111 | 2118 | ||
@@ -2819,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat); | |||
2819 | * Bank in p->sched_time the ns elapsed since the last tick or switch. | 2826 | * Bank in p->sched_time the ns elapsed since the last tick or switch. |
2820 | */ | 2827 | */ |
2821 | static inline void | 2828 | static inline void |
2822 | update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) | 2829 | update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now) |
2823 | { | 2830 | { |
2824 | p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); | 2831 | p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick); |
2825 | } | 2832 | } |
@@ -2828,7 +2835,7 @@ update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) | |||
2828 | * Return current->sched_time plus any more ns on the sched_clock | 2835 | * Return current->sched_time plus any more ns on the sched_clock |
2829 | * that have not yet been banked. | 2836 | * that have not yet been banked. |
2830 | */ | 2837 | */ |
2831 | unsigned long long current_sched_time(const task_t *p) | 2838 | unsigned long long current_sched_time(const struct task_struct *p) |
2832 | { | 2839 | { |
2833 | unsigned long long ns; | 2840 | unsigned long long ns; |
2834 | unsigned long flags; | 2841 | unsigned long flags; |
@@ -2945,9 +2952,9 @@ void account_steal_time(struct task_struct *p, cputime_t steal) | |||
2945 | void scheduler_tick(void) | 2952 | void scheduler_tick(void) |
2946 | { | 2953 | { |
2947 | unsigned long long now = sched_clock(); | 2954 | unsigned long long now = sched_clock(); |
2955 | struct task_struct *p = current; | ||
2948 | int cpu = smp_processor_id(); | 2956 | int cpu = smp_processor_id(); |
2949 | runqueue_t *rq = this_rq(); | 2957 | runqueue_t *rq = this_rq(); |
2950 | task_t *p = current; | ||
2951 | 2958 | ||
2952 | update_cpu_clock(p, rq, now); | 2959 | update_cpu_clock(p, rq, now); |
2953 | 2960 | ||
@@ -3079,7 +3086,8 @@ static void wake_sleeping_dependent(int this_cpu) | |||
3079 | * utilize, if another task runs on a sibling. This models the | 3086 | * utilize, if another task runs on a sibling. This models the |
3080 | * slowdown effect of other tasks running on siblings: | 3087 | * slowdown effect of other tasks running on siblings: |
3081 | */ | 3088 | */ |
3082 | static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) | 3089 | static inline unsigned long |
3090 | smt_slice(struct task_struct *p, struct sched_domain *sd) | ||
3083 | { | 3091 | { |
3084 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; | 3092 | return p->time_slice * (100 - sd->per_cpu_gain) / 100; |
3085 | } | 3093 | } |
@@ -3090,7 +3098,8 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) | |||
3090 | * acquire their lock. As we only trylock the normal locking order does not | 3098 | * acquire their lock. As we only trylock the normal locking order does not |
3091 | * need to be obeyed. | 3099 | * need to be obeyed. |
3092 | */ | 3100 | */ |
3093 | static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) | 3101 | static int |
3102 | dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) | ||
3094 | { | 3103 | { |
3095 | struct sched_domain *tmp, *sd = NULL; | 3104 | struct sched_domain *tmp, *sd = NULL; |
3096 | int ret = 0, i; | 3105 | int ret = 0, i; |
@@ -3110,8 +3119,8 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) | |||
3110 | return 0; | 3119 | return 0; |
3111 | 3120 | ||
3112 | for_each_cpu_mask(i, sd->span) { | 3121 | for_each_cpu_mask(i, sd->span) { |
3122 | struct task_struct *smt_curr; | ||
3113 | runqueue_t *smt_rq; | 3123 | runqueue_t *smt_rq; |
3114 | task_t *smt_curr; | ||
3115 | 3124 | ||
3116 | if (i == this_cpu) | 3125 | if (i == this_cpu) |
3117 | continue; | 3126 | continue; |
@@ -3157,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu) | |||
3157 | { | 3166 | { |
3158 | } | 3167 | } |
3159 | static inline int | 3168 | static inline int |
3160 | dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) | 3169 | dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p) |
3161 | { | 3170 | { |
3162 | return 0; | 3171 | return 0; |
3163 | } | 3172 | } |
@@ -3211,11 +3220,11 @@ static inline int interactive_sleep(enum sleep_type sleep_type) | |||
3211 | */ | 3220 | */ |
3212 | asmlinkage void __sched schedule(void) | 3221 | asmlinkage void __sched schedule(void) |
3213 | { | 3222 | { |
3223 | struct task_struct *prev, *next; | ||
3214 | struct list_head *queue; | 3224 | struct list_head *queue; |
3215 | unsigned long long now; | 3225 | unsigned long long now; |
3216 | unsigned long run_time; | 3226 | unsigned long run_time; |
3217 | int cpu, idx, new_prio; | 3227 | int cpu, idx, new_prio; |
3218 | task_t *prev, *next; | ||
3219 | prio_array_t *array; | 3228 | prio_array_t *array; |
3220 | long *switch_count; | 3229 | long *switch_count; |
3221 | runqueue_t *rq; | 3230 | runqueue_t *rq; |
@@ -3308,7 +3317,7 @@ need_resched_nonpreemptible: | |||
3308 | 3317 | ||
3309 | idx = sched_find_first_bit(array->bitmap); | 3318 | idx = sched_find_first_bit(array->bitmap); |
3310 | queue = array->queue + idx; | 3319 | queue = array->queue + idx; |
3311 | next = list_entry(queue->next, task_t, run_list); | 3320 | next = list_entry(queue->next, struct task_struct, run_list); |
3312 | 3321 | ||
3313 | if (!rt_task(next) && interactive_sleep(next->sleep_type)) { | 3322 | if (!rt_task(next) && interactive_sleep(next->sleep_type)) { |
3314 | unsigned long long delta = now - next->timestamp; | 3323 | unsigned long long delta = now - next->timestamp; |
@@ -3776,7 +3785,7 @@ EXPORT_SYMBOL(sleep_on_timeout); | |||
3776 | * | 3785 | * |
3777 | * Used by the rt_mutex code to implement priority inheritance logic. | 3786 | * Used by the rt_mutex code to implement priority inheritance logic. |
3778 | */ | 3787 | */ |
3779 | void rt_mutex_setprio(task_t *p, int prio) | 3788 | void rt_mutex_setprio(struct task_struct *p, int prio) |
3780 | { | 3789 | { |
3781 | unsigned long flags; | 3790 | unsigned long flags; |
3782 | prio_array_t *array; | 3791 | prio_array_t *array; |
@@ -3817,7 +3826,7 @@ void rt_mutex_setprio(task_t *p, int prio) | |||
3817 | 3826 | ||
3818 | #endif | 3827 | #endif |
3819 | 3828 | ||
3820 | void set_user_nice(task_t *p, long nice) | 3829 | void set_user_nice(struct task_struct *p, long nice) |
3821 | { | 3830 | { |
3822 | int old_prio, delta; | 3831 | int old_prio, delta; |
3823 | unsigned long flags; | 3832 | unsigned long flags; |
@@ -3873,7 +3882,7 @@ EXPORT_SYMBOL(set_user_nice); | |||
3873 | * @p: task | 3882 | * @p: task |
3874 | * @nice: nice value | 3883 | * @nice: nice value |
3875 | */ | 3884 | */ |
3876 | int can_nice(const task_t *p, const int nice) | 3885 | int can_nice(const struct task_struct *p, const int nice) |
3877 | { | 3886 | { |
3878 | /* convert nice value [19,-20] to rlimit style value [1,40] */ | 3887 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
3879 | int nice_rlim = 20 - nice; | 3888 | int nice_rlim = 20 - nice; |
@@ -3932,7 +3941,7 @@ asmlinkage long sys_nice(int increment) | |||
3932 | * RT tasks are offset by -200. Normal tasks are centered | 3941 | * RT tasks are offset by -200. Normal tasks are centered |
3933 | * around 0, value goes from -16 to +15. | 3942 | * around 0, value goes from -16 to +15. |
3934 | */ | 3943 | */ |
3935 | int task_prio(const task_t *p) | 3944 | int task_prio(const struct task_struct *p) |
3936 | { | 3945 | { |
3937 | return p->prio - MAX_RT_PRIO; | 3946 | return p->prio - MAX_RT_PRIO; |
3938 | } | 3947 | } |
@@ -3941,7 +3950,7 @@ int task_prio(const task_t *p) | |||
3941 | * task_nice - return the nice value of a given task. | 3950 | * task_nice - return the nice value of a given task. |
3942 | * @p: the task in question. | 3951 | * @p: the task in question. |
3943 | */ | 3952 | */ |
3944 | int task_nice(const task_t *p) | 3953 | int task_nice(const struct task_struct *p) |
3945 | { | 3954 | { |
3946 | return TASK_NICE(p); | 3955 | return TASK_NICE(p); |
3947 | } | 3956 | } |
@@ -3960,7 +3969,7 @@ int idle_cpu(int cpu) | |||
3960 | * idle_task - return the idle task for a given cpu. | 3969 | * idle_task - return the idle task for a given cpu. |
3961 | * @cpu: the processor in question. | 3970 | * @cpu: the processor in question. |
3962 | */ | 3971 | */ |
3963 | task_t *idle_task(int cpu) | 3972 | struct task_struct *idle_task(int cpu) |
3964 | { | 3973 | { |
3965 | return cpu_rq(cpu)->idle; | 3974 | return cpu_rq(cpu)->idle; |
3966 | } | 3975 | } |
@@ -3969,7 +3978,7 @@ task_t *idle_task(int cpu) | |||
3969 | * find_process_by_pid - find a process with a matching PID value. | 3978 | * find_process_by_pid - find a process with a matching PID value. |
3970 | * @pid: the pid in question. | 3979 | * @pid: the pid in question. |
3971 | */ | 3980 | */ |
3972 | static inline task_t *find_process_by_pid(pid_t pid) | 3981 | static inline struct task_struct *find_process_by_pid(pid_t pid) |
3973 | { | 3982 | { |
3974 | return pid ? find_task_by_pid(pid) : current; | 3983 | return pid ? find_task_by_pid(pid) : current; |
3975 | } | 3984 | } |
@@ -4103,9 +4112,9 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); | |||
4103 | static int | 4112 | static int |
4104 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | 4113 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
4105 | { | 4114 | { |
4106 | int retval; | ||
4107 | struct sched_param lparam; | 4115 | struct sched_param lparam; |
4108 | struct task_struct *p; | 4116 | struct task_struct *p; |
4117 | int retval; | ||
4109 | 4118 | ||
4110 | if (!param || pid < 0) | 4119 | if (!param || pid < 0) |
4111 | return -EINVAL; | 4120 | return -EINVAL; |
@@ -4121,6 +4130,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
4121 | read_unlock_irq(&tasklist_lock); | 4130 | read_unlock_irq(&tasklist_lock); |
4122 | retval = sched_setscheduler(p, policy, &lparam); | 4131 | retval = sched_setscheduler(p, policy, &lparam); |
4123 | put_task_struct(p); | 4132 | put_task_struct(p); |
4133 | |||
4124 | return retval; | 4134 | return retval; |
4125 | } | 4135 | } |
4126 | 4136 | ||
@@ -4156,8 +4166,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) | |||
4156 | */ | 4166 | */ |
4157 | asmlinkage long sys_sched_getscheduler(pid_t pid) | 4167 | asmlinkage long sys_sched_getscheduler(pid_t pid) |
4158 | { | 4168 | { |
4169 | struct task_struct *p; | ||
4159 | int retval = -EINVAL; | 4170 | int retval = -EINVAL; |
4160 | task_t *p; | ||
4161 | 4171 | ||
4162 | if (pid < 0) | 4172 | if (pid < 0) |
4163 | goto out_nounlock; | 4173 | goto out_nounlock; |
@@ -4184,8 +4194,8 @@ out_nounlock: | |||
4184 | asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) | 4194 | asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) |
4185 | { | 4195 | { |
4186 | struct sched_param lp; | 4196 | struct sched_param lp; |
4197 | struct task_struct *p; | ||
4187 | int retval = -EINVAL; | 4198 | int retval = -EINVAL; |
4188 | task_t *p; | ||
4189 | 4199 | ||
4190 | if (!param || pid < 0) | 4200 | if (!param || pid < 0) |
4191 | goto out_nounlock; | 4201 | goto out_nounlock; |
@@ -4218,9 +4228,9 @@ out_unlock: | |||
4218 | 4228 | ||
4219 | long sched_setaffinity(pid_t pid, cpumask_t new_mask) | 4229 | long sched_setaffinity(pid_t pid, cpumask_t new_mask) |
4220 | { | 4230 | { |
4221 | task_t *p; | ||
4222 | int retval; | ||
4223 | cpumask_t cpus_allowed; | 4231 | cpumask_t cpus_allowed; |
4232 | struct task_struct *p; | ||
4233 | int retval; | ||
4224 | 4234 | ||
4225 | lock_cpu_hotplug(); | 4235 | lock_cpu_hotplug(); |
4226 | read_lock(&tasklist_lock); | 4236 | read_lock(&tasklist_lock); |
@@ -4306,8 +4316,8 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; | |||
4306 | 4316 | ||
4307 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 4317 | long sched_getaffinity(pid_t pid, cpumask_t *mask) |
4308 | { | 4318 | { |
4319 | struct task_struct *p; | ||
4309 | int retval; | 4320 | int retval; |
4310 | task_t *p; | ||
4311 | 4321 | ||
4312 | lock_cpu_hotplug(); | 4322 | lock_cpu_hotplug(); |
4313 | read_lock(&tasklist_lock); | 4323 | read_lock(&tasklist_lock); |
@@ -4592,9 +4602,9 @@ asmlinkage long sys_sched_get_priority_min(int policy) | |||
4592 | asmlinkage | 4602 | asmlinkage |
4593 | long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) | 4603 | long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) |
4594 | { | 4604 | { |
4605 | struct task_struct *p; | ||
4595 | int retval = -EINVAL; | 4606 | int retval = -EINVAL; |
4596 | struct timespec t; | 4607 | struct timespec t; |
4597 | task_t *p; | ||
4598 | 4608 | ||
4599 | if (pid < 0) | 4609 | if (pid < 0) |
4600 | goto out_nounlock; | 4610 | goto out_nounlock; |
@@ -4641,12 +4651,13 @@ static inline struct task_struct *younger_sibling(struct task_struct *p) | |||
4641 | return list_entry(p->sibling.next,struct task_struct,sibling); | 4651 | return list_entry(p->sibling.next,struct task_struct,sibling); |
4642 | } | 4652 | } |
4643 | 4653 | ||
4644 | static void show_task(task_t *p) | 4654 | static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; |
4655 | |||
4656 | static void show_task(struct task_struct *p) | ||
4645 | { | 4657 | { |
4646 | task_t *relative; | 4658 | struct task_struct *relative; |
4647 | unsigned state; | ||
4648 | unsigned long free = 0; | 4659 | unsigned long free = 0; |
4649 | static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; | 4660 | unsigned state; |
4650 | 4661 | ||
4651 | printk("%-13.13s ", p->comm); | 4662 | printk("%-13.13s ", p->comm); |
4652 | state = p->state ? __ffs(p->state) + 1 : 0; | 4663 | state = p->state ? __ffs(p->state) + 1 : 0; |
@@ -4697,7 +4708,7 @@ static void show_task(task_t *p) | |||
4697 | 4708 | ||
4698 | void show_state(void) | 4709 | void show_state(void) |
4699 | { | 4710 | { |
4700 | task_t *g, *p; | 4711 | struct task_struct *g, *p; |
4701 | 4712 | ||
4702 | #if (BITS_PER_LONG == 32) | 4713 | #if (BITS_PER_LONG == 32) |
4703 | printk("\n" | 4714 | printk("\n" |
@@ -4730,7 +4741,7 @@ void show_state(void) | |||
4730 | * NOTE: this function does not set the idle thread's NEED_RESCHED | 4741 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
4731 | * flag, to make booting more robust. | 4742 | * flag, to make booting more robust. |
4732 | */ | 4743 | */ |
4733 | void __devinit init_idle(task_t *idle, int cpu) | 4744 | void __devinit init_idle(struct task_struct *idle, int cpu) |
4734 | { | 4745 | { |
4735 | runqueue_t *rq = cpu_rq(cpu); | 4746 | runqueue_t *rq = cpu_rq(cpu); |
4736 | unsigned long flags; | 4747 | unsigned long flags; |
@@ -4793,7 +4804,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | |||
4793 | * task must not exit() & deallocate itself prematurely. The | 4804 | * task must not exit() & deallocate itself prematurely. The |
4794 | * call is not atomic; no spinlocks may be held. | 4805 | * call is not atomic; no spinlocks may be held. |
4795 | */ | 4806 | */ |
4796 | int set_cpus_allowed(task_t *p, cpumask_t new_mask) | 4807 | int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) |
4797 | { | 4808 | { |
4798 | unsigned long flags; | 4809 | unsigned long flags; |
4799 | migration_req_t req; | 4810 | migration_req_t req; |
@@ -5061,7 +5072,7 @@ void idle_task_exit(void) | |||
5061 | mmdrop(mm); | 5072 | mmdrop(mm); |
5062 | } | 5073 | } |
5063 | 5074 | ||
5064 | static void migrate_dead(unsigned int dead_cpu, task_t *p) | 5075 | static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) |
5065 | { | 5076 | { |
5066 | struct runqueue *rq = cpu_rq(dead_cpu); | 5077 | struct runqueue *rq = cpu_rq(dead_cpu); |
5067 | 5078 | ||
@@ -5096,9 +5107,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu) | |||
5096 | struct list_head *list = &rq->arrays[arr].queue[i]; | 5107 | struct list_head *list = &rq->arrays[arr].queue[i]; |
5097 | 5108 | ||
5098 | while (!list_empty(list)) | 5109 | while (!list_empty(list)) |
5099 | migrate_dead(dead_cpu, | 5110 | migrate_dead(dead_cpu, list_entry(list->next, |
5100 | list_entry(list->next, task_t, | 5111 | struct task_struct, run_list)); |
5101 | run_list)); | ||
5102 | } | 5112 | } |
5103 | } | 5113 | } |
5104 | } | 5114 | } |
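The list_entry() calls above (in move_tasks(), schedule() and migrate_dead_tasks()) recover the task that embeds a given run_list node. A minimal userspace sketch of that pattern follows, using hypothetical names and a hand-rolled macro in place of the kernel's list_entry()/container_of(); it is illustrative only, not code from this commit.

/* list_entry_sketch.c - illustrative only */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

struct task_struct {
	int prio;
	struct list_head run_list;	/* node embedded inside the task */
	const char *comm;
};

/* same idea as the kernel's list_entry(ptr, type, member) */
#define list_entry_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct task_struct t = { .prio = 120, .comm = "demo" };
	struct list_head *node = &t.run_list;	/* what a run queue hands back */
	struct task_struct *p;

	/* map the embedded node back to its enclosing task */
	p = list_entry_sketch(node, struct task_struct, run_list);
	printf("%s: prio %d\n", p->comm, p->prio);
	return 0;
}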
@@ -6801,7 +6811,7 @@ void normalize_rt_tasks(void) | |||
6801 | * | 6811 | * |
6802 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! | 6812 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
6803 | */ | 6813 | */ |
6804 | task_t *curr_task(int cpu) | 6814 | struct task_struct *curr_task(int cpu) |
6805 | { | 6815 | { |
6806 | return cpu_curr(cpu); | 6816 | return cpu_curr(cpu); |
6807 | } | 6817 | } |
@@ -6821,7 +6831,7 @@ task_t *curr_task(int cpu) | |||
6821 | * | 6831 | * |
6822 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! | 6832 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
6823 | */ | 6833 | */ |
6824 | void set_curr_task(int cpu, task_t *p) | 6834 | void set_curr_task(int cpu, struct task_struct *p) |
6825 | { | 6835 | { |
6826 | cpu_curr(cpu) = p; | 6836 | cpu_curr(cpu) = p; |
6827 | } | 6837 | } |
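Everything in the kernel/sched.c hunks above is the same mechanical substitution: the task_t typedef is spelled out as the underlying struct task_struct. Below is a minimal sketch of the resulting shape, with hypothetical names and values (not code from the commit); one commonly cited benefit of the bare struct tag is that it can be forward-declared in any header, whereas a typedef cannot be harmlessly repeated in pre-C11 C.

/* struct_tag_sketch.c - illustrative only */
#include <stdio.h>

struct task_struct;				/* forward declaration is all a header needs */

static int task_prio_sketch(const struct task_struct *p);	/* pointer-only use */

struct task_struct {				/* full definition, needed just here */
	int prio;
	char comm[16];
};

/* old style would have been: typedef struct task_struct task_t;
 * with the prototype: static int task_prio_sketch(const task_t *p); */

static int task_prio_sketch(const struct task_struct *p)
{
	return p->prio - 100;			/* stand-in for "p->prio - MAX_RT_PRIO" */
}

int main(void)
{
	struct task_struct t = { .prio = 120, .comm = "demo" };

	printf("%s: prio %d\n", t.comm, task_prio_sketch(&t));
	return 0;
}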
diff --git a/kernel/timer.c b/kernel/timer.c
index b761898d04c8..396a3c024c2c 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1368,7 +1368,7 @@ asmlinkage long sys_getegid(void) | |||
1368 | 1368 | ||
1369 | static void process_timeout(unsigned long __data) | 1369 | static void process_timeout(unsigned long __data) |
1370 | { | 1370 | { |
1371 | wake_up_process((task_t *)__data); | 1371 | wake_up_process((struct task_struct *)__data); |
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | /** | 1374 | /** |
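The kernel/timer.c hunk only respells the cast in process_timeout(): the timer's unsigned long data word is a task pointer that was stored when the timer was armed, and expiry casts it back and wakes the task. A self-contained userspace sketch of that round trip follows (hypothetical names; uintptr_t would be the strictly portable userspace type, unsigned long is kept only to mirror the kernel signature).

/* timeout_cast_sketch.c - illustrative only */
#include <stdio.h>

struct task_struct {
	const char *comm;
};

static void wake_up_process_sketch(struct task_struct *p)
{
	printf("waking %s\n", p->comm);
}

/* same shape as process_timeout(unsigned long __data) in the hunk above */
static void process_timeout_sketch(unsigned long data)
{
	wake_up_process_sketch((struct task_struct *)data);
}

int main(void)
{
	struct task_struct t = { .comm = "sleeper" };

	/* arming side: the pointer is stored in the integer cookie ... */
	unsigned long cookie = (unsigned long)&t;

	/* ... expiry side: the cookie is cast back, as the new code spells it */
	process_timeout_sketch(cookie);
	return 0;
}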
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 59f0b42bd89e..90d2c6001659 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -51,7 +51,7 @@ struct cpu_workqueue_struct { | |||
51 | wait_queue_head_t work_done; | 51 | wait_queue_head_t work_done; |
52 | 52 | ||
53 | struct workqueue_struct *wq; | 53 | struct workqueue_struct *wq; |
54 | task_t *thread; | 54 | struct task_struct *thread; |
55 | 55 | ||
56 | int run_depth; /* Detect run_workqueue() recursion depth */ | 56 | int run_depth; /* Detect run_workqueue() recursion depth */ |
57 | } ____cacheline_aligned; | 57 | } ____cacheline_aligned; |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d46ed0f1dc06..b9af136e5cfa 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -225,7 +225,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints) | |||
225 | * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that | 225 | * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that |
226 | * we select a process with CAP_SYS_RAW_IO set). | 226 | * we select a process with CAP_SYS_RAW_IO set). |
227 | */ | 227 | */ |
228 | static void __oom_kill_task(task_t *p, const char *message) | 228 | static void __oom_kill_task(struct task_struct *p, const char *message) |
229 | { | 229 | { |
230 | if (p->pid == 1) { | 230 | if (p->pid == 1) { |
231 | WARN_ON(1); | 231 | WARN_ON(1); |
@@ -255,10 +255,10 @@ static void __oom_kill_task(task_t *p, const char *message) | |||
255 | force_sig(SIGKILL, p); | 255 | force_sig(SIGKILL, p); |
256 | } | 256 | } |
257 | 257 | ||
258 | static int oom_kill_task(task_t *p, const char *message) | 258 | static int oom_kill_task(struct task_struct *p, const char *message) |
259 | { | 259 | { |
260 | struct mm_struct *mm; | 260 | struct mm_struct *mm; |
261 | task_t * g, * q; | 261 | struct task_struct *g, *q; |
262 | 262 | ||
263 | mm = p->mm; | 263 | mm = p->mm; |
264 | 264 | ||
@@ -316,7 +316,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points, | |||
316 | */ | 316 | */ |
317 | void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) | 317 | void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) |
318 | { | 318 | { |
319 | task_t *p; | 319 | struct task_struct *p; |
320 | unsigned long points = 0; | 320 | unsigned long points = 0; |
321 | 321 | ||
322 | if (printk_ratelimit()) { | 322 | if (printk_ratelimit()) { |