author    Dave Jones <davej@redhat.com>    2005-11-29 15:48:34 -0500
committer Dave Jones <davej@redhat.com>    2005-11-29 15:48:34 -0500
commit    be37bdbce7ceaacf4f20c6cc759efbe75ebd1196
tree      f1a42fb5a4a17eea41d47d2c002fa303009f5523 /kernel
parent    019a61b99338d0ac05de25317b85da88e7ec4b35
parent    d70aa5e4b54aa7e704c886838715ac8a45d5750f
Merge ../linus
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c               83
-rw-r--r--  kernel/fork.c               7
-rw-r--r--  kernel/futex.c             15
-rw-r--r--  kernel/irq/manage.c        15
-rw-r--r--  kernel/posix-cpu-timers.c   2
-rw-r--r--  kernel/printk.c             2
-rw-r--r--  kernel/workqueue.c         12
7 files changed, 75 insertions, 61 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34..e882c6babf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
 #include <asm/semaphore.h>
 
 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
 
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
 {
-	return (current->flags & PF_HOTPLUG_CPU);
+	int ret = 0;
+
+	if (lock_cpu_hotplug_owner != current) {
+		if (interruptible)
+			ret = down_interruptible(&cpucontrol);
+		else
+			down(&cpucontrol);
+	}
+
+	/*
+	 * Set only if we succeed in locking
+	 */
+	if (!ret) {
+		lock_cpu_hotplug_depth++;
+		lock_cpu_hotplug_owner = current;
+	}
+
+	return ret;
 }
 
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+void lock_cpu_hotplug(void)
+{
+	__lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
 
+void unlock_cpu_hotplug(void)
+{
+	if (--lock_cpu_hotplug_depth == 0) {
+		lock_cpu_hotplug_owner = NULL;
+		up(&cpucontrol);
+	}
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+	return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug. Callers can check if cpucontrol
-	 * is already acquired by current thread, and if so not cause
-	 * a dead lock by not acquiring the lock
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
 	unlock_cpu_hotplug();
 	return err;
 }
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 	int ret;
 	void *hcpu = (void *)(long)cpu;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 
 	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug.
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
 	if (ret != 0)
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
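
The cpu.c rework above replaces the PF_HOTPLUG_CPU flag convention with an owner/depth pair, so a task already holding the hotplug lock can take it again instead of checking current_in_cpu_hotplug(). A minimal sketch of the nesting this enables, assuming a notifier invoked from cpu_down() while the lock is held; the notifier itself is hypothetical, not part of this patch:

static int example_cpu_notify(struct notifier_block *nb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_DOWN_PREPARE) {
		/*
		 * cpu_down() already owns cpucontrol, so this call only
		 * bumps lock_cpu_hotplug_depth instead of sleeping on
		 * the semaphore (which would self-deadlock).
		 */
		lock_cpu_hotplug();
		/* ... inspect per-cpu state safely ... */
		unlock_cpu_hotplug();	/* depth back to 1, lock kept */
	}
	return NOTIFY_OK;
}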
diff --git a/kernel/fork.c b/kernel/fork.c
index e0d0b77343..fb8572a422 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -263,7 +263,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		rb_parent = &tmp->vm_rb;
 
 		mm->map_count++;
-		retval = copy_page_range(mm, oldmm, tmp);
+		retval = copy_page_range(mm, oldmm, mpnt);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
@@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (unlikely(p->ptrace & PT_PTRACED))
 		__ptrace_link(p, current->parent);
 
-	cpuset_fork(p);
-
 	attach_pid(p, PIDTYPE_PID, p->pid);
 	attach_pid(p, PIDTYPE_TGID, p->tgid);
 	if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags,
 		__get_cpu_var(process_counts)++;
 	}
 
-	proc_fork_connector(p);
 	if (!current->signal->tty && p->signal->tty)
 		p->signal->tty = NULL;
 
 	nr_threads++;
 	total_forks++;
 	write_unlock_irq(&tasklist_lock);
+	proc_fork_connector(p);
+	cpuset_fork(p);
 	retval = 0;
 
 fork_out:
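
Two independent fork.c fixes: copy_page_range() now receives the parent-side vma mpnt rather than the partially set-up child vma tmp, and proc_fork_connector()/cpuset_fork() move out from under the tasklist_lock write section. The reordering only makes sense if those helpers can sleep, which is the assumption behind this sketch of the resulting shape:

	write_lock_irq(&tasklist_lock);
	/* ... attach pids, bump nr_threads and total_forks ... */
	write_unlock_irq(&tasklist_lock);

	/*
	 * Sleeping while holding a spinlock is forbidden, so anything
	 * that may block runs only after the write section ends.
	 */
	proc_fork_connector(p);		/* may allocate and block */
	cpuset_fork(p);			/* may take a semaphore */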
diff --git a/kernel/futex.c b/kernel/futex.c
index aca8d10704..5872e3507f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -201,21 +201,6 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
 	 * from swap. But that's a lot of code to duplicate here
 	 * for a rare case, so we simply fetch the page.
 	 */
-
-	/*
-	 * Do a quick atomic lookup first - this is the fastpath.
-	 */
-	page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
-	if (likely(page != NULL)) {
-		key->shared.pgoff =
-			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		put_page(page);
-		return 0;
-	}
-
-	/*
-	 * Do it the general way.
-	 */
 	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
 	if (err >= 0) {
 		key->shared.pgoff =
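
The futex change deletes the follow_page() fastpath from get_futex_key(), leaving get_user_pages() as the single way to resolve the page behind a shared futex. The surviving pattern, sketched with the 2.6-era get_user_pages() signature and assuming mmap_sem is held for read as in the surrounding function:

	struct page *page;
	int err;

	/* one page at uaddr, no write access, no force */
	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
	if (err >= 0) {
		key->shared.pgoff =
			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		put_page(page);	/* balance the reference taken above */
	}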
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3bd7226d15..81c49a4d67 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -36,6 +36,9 @@ void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	while (desc->status & IRQ_INPROGRESS)
 		cpu_relax();
 }
@@ -60,6 +63,9 @@ void disable_irq_nosync(unsigned int irq)
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	if (!desc->depth++) {
 		desc->status |= IRQ_DISABLED;
@@ -86,6 +92,9 @@ void disable_irq(unsigned int irq)
 {
 	irq_desc_t *desc = irq_desc + irq;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	disable_irq_nosync(irq);
 	if (desc->action)
 		synchronize_irq(irq);
@@ -108,6 +117,9 @@ void enable_irq(unsigned int irq)
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	switch (desc->depth) {
 	case 0:
@@ -163,6 +175,9 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	unsigned long flags;
 	int shared = 0;
 
+	if (irq >= NR_IRQS)
+		return -EINVAL;
+
 	if (desc->handler == &no_irq_type)
 		return -ENOSYS;
 	/*
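
All five irq/manage.c hunks add the same bounds check: an irq number at or beyond NR_IRQS now returns early (or -EINVAL from setup_irq()) instead of indexing past the end of irq_desc[]. The guard, pulled out as a sketch; the helper name is invented for illustration:

static inline int irq_is_valid(unsigned int irq)
{
	return irq < NR_IRQS;	/* irq_desc[] has exactly NR_IRQS slots */
}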
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c39e..cae4f57289 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
 	union cpu_time_count ret;
 	ret.sched = 0;		/* high half always zero when .cpu used */
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
 	} else {
 		ret.cpu = timespec_to_cputime(tp);
 	}
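
The posix-cpu-timers fix is a 32-bit overflow: there, tv_sec is a 32-bit long, so tp->tv_sec * NSEC_PER_SEC was multiplied in 32 bits and wrapped for anything past about 2.1 seconds (2^31 ns ~= 2.147 s). The cast widens the operand before the multiply. A worked sketch:

	long sec = 5;
	/* 32-bit multiply: 5 * 1000000000 overflows long and wraps */
	unsigned long long bad = sec * NSEC_PER_SEC;
	/* widened first: yields 5000000000ULL as intended */
	unsigned long long good = (unsigned long long)sec * NSEC_PER_SEC;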
diff --git a/kernel/printk.c b/kernel/printk.c
index ac8a08f362..5287be83e3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -956,7 +956,7 @@ int unregister_console(struct console *console)
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
-	} else {
+	} else if (console_drivers) {
 		for (a=console_drivers->next, b=console_drivers ;
 		     a; b=a, a=b->next) {
 			if (a == console) {
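
The printk.c guard covers the empty-list case: with no registered consoles, the old else branch evaluated console_drivers->next in the loop initializer and dereferenced NULL. With else if (console_drivers), unregistering a console that is not on the list simply falls through without oopsing, e.g. in a driver error path (the console below is hypothetical):

	if (setup_failed)
		unregister_console(&mydrv_console);	/* safe on an empty list */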
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d7fa..2bd5aee1c7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = 0;
+			cpu = any_online_cpu(cpu_online_map);
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = 0;
+		cpu = any_online_cpu(cpu_online_map);
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 
 	if (is_single_threaded(wq)) {
-		/* Always use cpu 0's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+		/* Always use first cpu's area. */
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
 	} else {
 		int cpu;
 
@@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, 0);
+		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 		if (!p)
 			destroy = 1;
 		else
@@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, 0);
+		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
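
Every single-threaded workqueue.c site swaps the hardcoded CPU 0 for any_online_cpu(cpu_online_map), since not every architecture guarantees that CPU 0 is online. All five sites must agree on the CPU so they address the same per-cpu area, which suggests a common helper; the name below is invented as a sketch:

static inline int singlethread_cpu(void)
{
	/* lowest-numbered cpu currently set in the online mask */
	return any_online_cpu(cpu_online_map);
}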