about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c         7
-rw-r--r--  kernel/irq/manage.c   8
-rw-r--r--  kernel/module.c       2
-rw-r--r--  kernel/power/smp.c    4
-rw-r--r--  kernel/sched.c        4
-rw-r--r--  kernel/stop_machine.c 4
-rw-r--r--  kernel/sys_ni.c       1
7 files changed, 21 insertions(+), 9 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index f42a17f88699..a28d11e10877 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -194,6 +194,7 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
194 mm->mmap = NULL; 194 mm->mmap = NULL;
195 mm->mmap_cache = NULL; 195 mm->mmap_cache = NULL;
196 mm->free_area_cache = oldmm->mmap_base; 196 mm->free_area_cache = oldmm->mmap_base;
197 mm->cached_hole_size = ~0UL;
197 mm->map_count = 0; 198 mm->map_count = 0;
198 set_mm_counter(mm, rss, 0); 199 set_mm_counter(mm, rss, 0);
199 set_mm_counter(mm, anon_rss, 0); 200 set_mm_counter(mm, anon_rss, 0);
@@ -249,8 +250,9 @@ static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
249 250
250 /* 251 /*
251 * Link in the new vma and copy the page table entries: 252 * Link in the new vma and copy the page table entries:
252 * link in first so that swapoff can see swap entries, 253 * link in first so that swapoff can see swap entries.
253 * and try_to_unmap_one's find_vma find the new vma. 254 * Note that, exceptionally, here the vma is inserted
255 * without holding mm->mmap_sem.
254 */ 256 */
255 spin_lock(&mm->page_table_lock); 257 spin_lock(&mm->page_table_lock);
256 *pprev = tmp; 258 *pprev = tmp;
@@ -322,6 +324,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
322 mm->ioctx_list = NULL; 324 mm->ioctx_list = NULL;
323 mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm); 325 mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
324 mm->free_area_cache = TASK_UNMAPPED_BASE; 326 mm->free_area_cache = TASK_UNMAPPED_BASE;
327 mm->cached_hole_size = ~0UL;
325 328
326 if (likely(!mm_alloc_pgd(mm))) { 329 if (likely(!mm_alloc_pgd(mm))) {
327 mm->def_flags = 0; 330 mm->def_flags = 0;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5202e4c4a5b6..ac6700985705 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -6,6 +6,7 @@
6 * This file contains driver APIs to the irq subsystem. 6 * This file contains driver APIs to the irq subsystem.
7 */ 7 */
8 8
9#include <linux/config.h>
9#include <linux/irq.h> 10#include <linux/irq.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/random.h> 12#include <linux/random.h>
@@ -255,6 +256,13 @@ void free_irq(unsigned int irq, void *dev_id)
255 256
256 /* Found it - now remove it from the list of entries */ 257 /* Found it - now remove it from the list of entries */
257 *pp = action->next; 258 *pp = action->next;
259
260 /* Currently used only by UML, might disappear one day.*/
261#ifdef CONFIG_IRQ_RELEASE_METHOD
262 if (desc->handler->release)
263 desc->handler->release(irq, dev_id);
264#endif
265
258 if (!desc->action) { 266 if (!desc->action) {
259 desc->status |= IRQ_DISABLED; 267 desc->status |= IRQ_DISABLED;
260 if (desc->handler->shutdown) 268 if (desc->handler->shutdown)
diff --git a/kernel/module.c b/kernel/module.c
index 83b3d376708c..a566745dde62 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod)
379 for (i = 0; i < NR_CPUS; i++) 379 for (i = 0; i < NR_CPUS; i++)
380 local_set(&mod->ref[i].count, 0); 380 local_set(&mod->ref[i].count, 0);
381 /* Hold reference count during initialization. */ 381 /* Hold reference count during initialization. */
382 local_set(&mod->ref[_smp_processor_id()].count, 1); 382 local_set(&mod->ref[raw_smp_processor_id()].count, 1);
383 /* Backwards compatibility macros put refcount during init. */ 383 /* Backwards compatibility macros put refcount during init. */
384 mod->waiter = current; 384 mod->waiter = current;
385} 385}
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index cba3584b80fe..457c2302ed42 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
@@ -48,11 +48,11 @@ void disable_nonboot_cpus(void)
48{ 48{
49 oldmask = current->cpus_allowed; 49 oldmask = current->cpus_allowed;
50 set_cpus_allowed(current, cpumask_of_cpu(0)); 50 set_cpus_allowed(current, cpumask_of_cpu(0));
51 printk("Freezing CPUs (at %d)", _smp_processor_id()); 51 printk("Freezing CPUs (at %d)", raw_smp_processor_id());
52 current->state = TASK_INTERRUPTIBLE; 52 current->state = TASK_INTERRUPTIBLE;
53 schedule_timeout(HZ); 53 schedule_timeout(HZ);
54 printk("..."); 54 printk("...");
55 BUG_ON(_smp_processor_id() != 0); 55 BUG_ON(raw_smp_processor_id() != 0);
56 56
57 /* FIXME: for this to work, all the CPUs must be running 57 /* FIXME: for this to work, all the CPUs must be running
58 * "idle" thread (or we deadlock). Is that guaranteed? */ 58 * "idle" thread (or we deadlock). Is that guaranteed? */
diff --git a/kernel/sched.c b/kernel/sched.c
index f12a0c8a7d98..deca041fc364 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield);
3814 */ 3814 */
3815void __sched io_schedule(void) 3815void __sched io_schedule(void)
3816{ 3816{
3817 struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id()); 3817 struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
3818 3818
3819 atomic_inc(&rq->nr_iowait); 3819 atomic_inc(&rq->nr_iowait);
3820 schedule(); 3820 schedule();
@@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
3825 3825
3826long __sched io_schedule_timeout(long timeout) 3826long __sched io_schedule_timeout(long timeout)
3827{ 3827{
3828 struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id()); 3828 struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
3829 long ret; 3829 long ret;
3830 3830
3831 atomic_inc(&rq->nr_iowait); 3831 atomic_inc(&rq->nr_iowait);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 6116b25aa7cf..84a9d18aa8da 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -100,7 +100,7 @@ static int stop_machine(void)
100 stopmachine_state = STOPMACHINE_WAIT; 100 stopmachine_state = STOPMACHINE_WAIT;
101 101
102 for_each_online_cpu(i) { 102 for_each_online_cpu(i) {
103 if (i == _smp_processor_id()) 103 if (i == raw_smp_processor_id())
104 continue; 104 continue;
105 ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL); 105 ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
106 if (ret < 0) 106 if (ret < 0)
@@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
182 182
183 /* If they don't care which CPU fn runs on, bind to any online one. */ 183 /* If they don't care which CPU fn runs on, bind to any online one. */
184 if (cpu == NR_CPUS) 184 if (cpu == NR_CPUS)
185 cpu = _smp_processor_id(); 185 cpu = raw_smp_processor_id();
186 186
187 p = kthread_create(do_stop, &smdata, "kstopmachine"); 187 p = kthread_create(do_stop, &smdata, "kstopmachine");
188 if (!IS_ERR(p)) { 188 if (!IS_ERR(p)) {
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 0dda70ed1f98..6f15bea7d1a8 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -77,6 +77,7 @@ cond_syscall(sys_request_key);
77cond_syscall(sys_keyctl); 77cond_syscall(sys_keyctl);
78cond_syscall(compat_sys_keyctl); 78cond_syscall(compat_sys_keyctl);
79cond_syscall(compat_sys_socketcall); 79cond_syscall(compat_sys_socketcall);
80cond_syscall(sys_set_zone_reclaim);
80 81
81/* arch-specific weak syscall entries */ 82/* arch-specific weak syscall entries */
82cond_syscall(sys_pciconfig_read); 83cond_syscall(sys_pciconfig_read);