Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c              | 83
-rw-r--r--  kernel/fork.c             |  5
-rw-r--r--  kernel/posix-cpu-timers.c |  2
-rw-r--r--  kernel/power/main.c       | 21
-rw-r--r--  kernel/ptrace.c           |  3
-rw-r--r--  kernel/workqueue.c        | 12
6 files changed, 76 insertions(+), 50 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
 #include <asm/semaphore.h>
 
 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
-
-int current_in_cpu_hotplug(void)
-{
-	return (current->flags & PF_HOTPLUG_CPU);
-}
-
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
 
+static int __lock_cpu_hotplug(int interruptible)
+{
+	int ret = 0;
+
+	if (lock_cpu_hotplug_owner != current) {
+		if (interruptible)
+			ret = down_interruptible(&cpucontrol);
+		else
+			down(&cpucontrol);
+	}
+
+	/*
+	 * Set only if we succeed in locking
+	 */
+	if (!ret) {
+		lock_cpu_hotplug_depth++;
+		lock_cpu_hotplug_owner = current;
+	}
+
+	return ret;
+}
+
+void lock_cpu_hotplug(void)
+{
+	__lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+
+void unlock_cpu_hotplug(void)
+{
+	if (--lock_cpu_hotplug_depth == 0) {
+		lock_cpu_hotplug_owner = NULL;
+		up(&cpucontrol);
+	}
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+	return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug. Callers can check if cpucontrol
-	 * is already acquired by current thread, and if so not cause
-	 * a dead lock by not acquiring the lock
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
 	unlock_cpu_hotplug();
 	return err;
 }
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 	int ret;
 	void *hcpu = (void *)(long)cpu;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 
 	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug.
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
 	if (ret != 0)
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
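Note on the cpu.c change: the owner/depth bookkeeping makes lock_cpu_hotplug() recursive within a task, replacing the PF_HOTPLUG_CPU flag that callers previously had to test themselves. A path that already holds cpucontrol — such as a CPU hotplug notifier invoked from cpu_up()/cpu_down() — can now take the lock again without deadlocking. A minimal sketch of a caller that works from both contexts; the example_* names are illustrative, not part of this patch:

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Safe whether or not the caller already holds the hotplug lock:
 * if current is already the owner, the lock just gains a level of
 * depth instead of blocking on cpucontrol. */
static void example_update_percpu_state(unsigned int cpu)
{
	lock_cpu_hotplug();
	/* ... touch state that must not race with cpu_up()/cpu_down() ... */
	unlock_cpu_hotplug();
}

/* Invoked from the notifier chain while cpu_up() holds cpucontrol. */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE)
		example_update_percpu_state(cpu);
	return NOTIFY_OK;
}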
diff --git a/kernel/fork.c b/kernel/fork.c
index 1c1cf8dc396b..fb8572a42297 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (unlikely(p->ptrace & PT_PTRACED))
 		__ptrace_link(p, current->parent);
 
-	cpuset_fork(p);
-
 	attach_pid(p, PIDTYPE_PID, p->pid);
 	attach_pid(p, PIDTYPE_TGID, p->tgid);
 	if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags,
 		__get_cpu_var(process_counts)++;
 	}
 
-	proc_fork_connector(p);
 	if (!current->signal->tty && p->signal->tty)
 		p->signal->tty = NULL;
 
 	nr_threads++;
 	total_forks++;
 	write_unlock_irq(&tasklist_lock);
+	proc_fork_connector(p);
+	cpuset_fork(p);
 	retval = 0;
 
 fork_out:
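Note on the fork.c change: this is an ordering fix. The likely motivation is that proc_fork_connector() and cpuset_fork() can sleep (the connector may allocate and send a netlink message; cpuset_fork() may take a sleeping semaphore), which is illegal under the irq-disabling write lock. The resulting shape of the tail of copy_process(), abridged:

	write_lock_irq(&tasklist_lock);
	/* ... attach_pid() and list linkage: atomic context only ... */
	nr_threads++;
	total_forks++;
	write_unlock_irq(&tasklist_lock);

	/* safe to sleep from here on */
	proc_fork_connector(p);		/* may allocate/send a netlink event */
	cpuset_fork(p);			/* may take a sleeping semaphore */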
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c39e1b..cae4f5728997 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
 	union cpu_time_count ret;
 	ret.sched = 0;		/* high half always zero when .cpu used */
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
 	} else {
 		ret.cpu = timespec_to_cputime(tp);
 	}
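Note on the posix-cpu-timers.c change: a 32-bit overflow fix. On 32-bit architectures tv_sec is a 32-bit long, so tv_sec * NSEC_PER_SEC is evaluated in 32 bits and wraps for values much above two seconds (2^31 ns is about 2.1 s); the cast promotes the multiplication to 64 bits before it happens. In isolation, assuming a 32-bit long as on i386:

#include <stdio.h>

int main(void)
{
	long sec = 5;	/* assume 32-bit long */

	/* 32-bit multiply: 5 * 1e9 overflows before the widening assignment */
	unsigned long long bad = sec * 1000000000L;

	/* cast first, multiply in 64 bits: correct 5000000000 */
	unsigned long long good = (unsigned long long)sec * 1000000000L;

	printf("bad=%llu good=%llu\n", bad, good);
	return 0;
}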
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6ee2cad530e8..d253f3ae2fa5 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,7 +24,7 @@
 
 DECLARE_MUTEX(pm_sem);
 
-struct pm_ops * pm_ops = NULL;
+struct pm_ops *pm_ops;
 suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
 
 /**
@@ -151,6 +151,18 @@ static char *pm_states[PM_SUSPEND_MAX] = {
 #endif
 };
 
+static inline int valid_state(suspend_state_t state)
+{
+	/* Suspend-to-disk does not really need low-level support.
+	 * It can work with reboot if needed. */
+	if (state == PM_SUSPEND_DISK)
+		return 1;
+
+	if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+		return 0;
+	return 1;
+}
+
 
 /**
  *	enter_state - Do common work of entering low-power state.
@@ -167,7 +179,7 @@ static int enter_state(suspend_state_t state)
 {
 	int error;
 
-	if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+	if (!valid_state(state))
 		return -ENODEV;
 	if (down_trylock(&pm_sem))
 		return -EBUSY;
@@ -238,9 +250,8 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
 	char * s = buf;
 
 	for (i = 0; i < PM_SUSPEND_MAX; i++) {
-		if (pm_states[i] && pm_ops && (!pm_ops->valid
-			||(pm_ops->valid && pm_ops->valid(i))))
-			s += sprintf(s,"%s ",pm_states[i]);
+		if (pm_states[i] && valid_state(i))
+			s += sprintf(s,"%s ", pm_states[i]);
 	}
 	s += sprintf(s,"\n");
 	return (s - buf);
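Note on the power/main.c change: valid_state() centralizes a check that was previously duplicated (and, in state_show(), rather tangled) at each call site, and it hard-codes the policy that suspend-to-disk never needs a ->valid veto because it can fall back to reboot. A sketch of how a platform plugs into it — the example_* names are illustrative; only the ->valid hook and pm_set_ops() registration are from the era's API:

#include <linux/pm.h>

/* Imaginary platform that only implements suspend-to-RAM. */
static int example_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static struct pm_ops example_pm_ops = {
	.valid	= example_pm_valid,
	/* .prepare, .enter, .finish omitted for brevity */
};

/* After pm_set_ops(&example_pm_ops): valid_state(PM_SUSPEND_STANDBY)
 * is 0, valid_state(PM_SUSPEND_MEM) is 1, and PM_SUSPEND_DISK is
 * reported valid regardless of the callback. */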
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 17ee7e5a3451..656476eedb1b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -241,7 +241,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 		if (write) {
 			copy_to_user_page(vma, page, addr,
 					  maddr + offset, buf, bytes);
-			set_page_dirty_lock(page);
+			if (!PageCompound(page))
+				set_page_dirty_lock(page);
 		} else {
 			copy_from_user_page(vma, page, addr,
 					    buf, maddr + offset, bytes);
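Note on the ptrace.c change: pages that access_process_vm() writes through can come back from get_user_pages() as hugetlb-backed compound pages, and the guard skips set_page_dirty_lock() for those — presumably because this era's VM does not track per-page dirty state for compound pages, so dirtying them here is unsafe. The pattern in isolation, with example_finish_write() as an illustrative name for the cleanup step:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Finish a write to a page obtained via get_user_pages(): only
 * ordinary pages are marked dirty; compound (hugetlb) pages are
 * skipped, matching the hunk above. */
static void example_finish_write(struct page *page)
{
	if (!PageCompound(page))
		set_page_dirty_lock(page);
	page_cache_release(page);	/* drop the get_user_pages() reference */
}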
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d7fad2..2bd5aee1c736 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = 0;
+			cpu = any_online_cpu(cpu_online_map);
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = 0;
+		cpu = any_online_cpu(cpu_online_map);
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 
 	if (is_single_threaded(wq)) {
-		/* Always use cpu 0's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+		/* Always use first cpu's area. */
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
 	} else {
 		int cpu;
 
@@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, 0);
+		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 		if (!p)
 			destroy = 1;
 		else
@@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, 0);
+		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
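Note on the workqueue.c change: all five hunks make the same substitution. A single-threaded workqueue's per-CPU area previously lived on a hard-coded CPU 0; it now lives on any_online_cpu(cpu_online_map), the first online CPU, so the code keeps working on systems where CPU 0 can be absent or offlined. Callers are unaffected; an era-appropriate usage sketch with illustrative example_* names:

#include <linux/workqueue.h>
#include <linux/init.h>

static void example_work_fn(void *data)
{
	/* runs in the queue's single worker thread; its per-CPU
	 * bookkeeping now sits on the first online CPU rather than
	 * assuming CPU 0 is online */
}

static DECLARE_WORK(example_work, example_work_fn, NULL);
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}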