about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r-- kernel/kexec.c | 10
-rw-r--r-- kernel/sched.c |  9
2 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7843548cf2..cdd4dcd8fb 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -241,7 +241,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 
 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 				unsigned long nr_segments,
-				struct kexec_segment *segments)
+				struct kexec_segment __user *segments)
 {
 	int result;
 	struct kimage *image;
@@ -650,7 +650,7 @@ static kimage_entry_t *kimage_dst_used(struct kimage *image,
 		}
 	}
 
-	return 0;
+	return NULL;
 }
 
 static struct page *kimage_alloc_page(struct kimage *image,
@@ -696,7 +696,7 @@ static struct page *kimage_alloc_page(struct kimage *image,
 	/* Allocate a page, if we run out of memory give up */
 	page = kimage_alloc_pages(gfp_mask, 0);
 	if (!page)
-		return 0;
+		return NULL;
 	/* If the page cannot be used file it away */
 	if (page_to_pfn(page) >
 		(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
@@ -754,7 +754,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
 	int result;
-	unsigned char *buf;
+	unsigned char __user *buf;
 
 	result = 0;
 	buf = segment->buf;
@@ -818,7 +818,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
 	int result;
-	unsigned char *buf;
+	unsigned char __user *buf;
 
 	result = 0;
 	buf = segment->buf;
diff --git a/kernel/sched.c b/kernel/sched.c
index e2b0d3e4dd..5f2182d422 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4166,6 +4166,14 @@ void show_state(void)
 	read_unlock(&tasklist_lock);
 }
 
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: cpu the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
 void __devinit init_idle(task_t *idle, int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
@@ -4183,7 +4191,6 @@ void __devinit init_idle(task_t *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	idle->oncpu = 1;
 #endif
-	set_tsk_need_resched(idle);
 	spin_unlock_irqrestore(&rq->lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */