author    Maneesh Soni <maneesh@in.ibm.com>          2005-06-25 17:58:28 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-25 19:24:55 -0400
commit    72414d3f1d22fc3e311b162fca95c430048d38ce (patch)
tree      46850947c1602357dd3c51d8d6ebaa5805507f9f
parent    4f339ecb30c759f94a29992d4635d9194132b6cf (diff)
[PATCH] kexec code cleanup
o The following patch provides purely cosmetic changes, correcting these
  CodingStyle issues in the kexec-related files:
  o braces around one-line "if" statement and "for" loop bodies,
  o lines more than 80 columns wide,
  o no space after the "while", "for" and "switch" keywords.
o Changes:
  o take-2: removed the extra tab before "case" keywords.
  o take-3: put operators at the end of the line and a space before "*/".

Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  arch/i386/kernel/crash.c            |  23
-rw-r--r--  arch/i386/kernel/machine_kexec.c    |  16
-rw-r--r--  arch/ppc/kernel/machine_kexec.c     |  30
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c   |   9
-rw-r--r--  arch/s390/kernel/machine_kexec.c    |   4
-rw-r--r--  arch/x86_64/kernel/machine_kexec.c  |  49
-rw-r--r--  drivers/char/mem.c                  |   2
-rw-r--r--  include/linux/kexec.h               |  13
-rw-r--r--  include/linux/syscalls.h            |   6
-rw-r--r--  kernel/kexec.c                      | 302
10 files changed, 243 insertions, 211 deletions
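
As a quick illustration of the CodingStyle rules this patch enforces, here is a
small before/after sketch. It is illustrative only: struct kimage is the real
kexec type, but process_segment() and segments_done() are made-up helpers, not
code taken from the patch.

	#include <linux/errno.h>	/* for -EINVAL */

	struct kimage;
	/* made-up helpers so the sketch stands alone */
	static void process_segment(int i);
	static int segments_done(void);

	/* before: the style the patch removes */
	static int style_before(struct kimage *image)
	{
		int i;
		if (!image) {			/* braces around a one-line "if" body */
			return -EINVAL;
		}
		for(i = 0; i < 4; i++) {	/* no space after "for", braced one-line body */
			process_segment(i);
		}
		while(!segments_done());	/* no space after "while" */
		return 0;
	}

	/* after: what the cleanup turns such code into */
	static int style_after(struct kimage *image)
	{
		int i;

		if (!image)
			return -EINVAL;

		for (i = 0; i < 4; i++)
			process_segment(i);

		while (!segments_done());

		return 0;
	}

The same mechanical transformation accounts for nearly all of the hunks below;
the rest re-wrap declarations and calls that ran past 80 columns and move
"out:" labels to column 0.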
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 8bdb4b6af0ff..e5fab12f7926 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -31,10 +31,11 @@ note_buf_t crash_notes[NR_CPUS];
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf,
-	char *name, unsigned type, void *data, size_t data_len)
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+				size_t data_len)
 {
 	struct elf_note note;
+
 	note.n_namesz = strlen(name) + 1;
 	note.n_descsz = data_len;
 	note.n_type = type;
@@ -44,26 +45,28 @@ static u32 *append_elf_note(u32 *buf,
 	buf += (note.n_namesz + 3)/4;
 	memcpy(buf, data, note.n_descsz);
 	buf += (note.n_descsz + 3)/4;
+
 	return buf;
 }
 
 static void final_note(u32 *buf)
 {
 	struct elf_note note;
+
 	note.n_namesz = 0;
 	note.n_descsz = 0;
 	note.n_type = 0;
 	memcpy(buf, &note, sizeof(note));
 }
 
-
 static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 {
 	struct elf_prstatus prstatus;
 	u32 *buf;
-	if ((cpu < 0) || (cpu >= NR_CPUS)) {
+
+	if ((cpu < 0) || (cpu >= NR_CPUS))
 		return;
-	}
+
 	/* Using ELF notes here is opportunistic.
 	 * I need a well defined structure format
 	 * for the data I pass, and I need tags
@@ -75,9 +78,8 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 	memset(&prstatus, 0, sizeof(prstatus));
 	prstatus.pr_pid = current->pid;
 	elf_core_copy_regs(&prstatus.pr_reg, regs);
-	buf = append_elf_note(buf, "CORE", NT_PRSTATUS,
-				&prstatus, sizeof(prstatus));
-
+	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+				sizeof(prstatus));
 	final_note(buf);
 }
 
@@ -119,8 +121,8 @@ static void crash_save_self(struct pt_regs *saved_regs)
 {
 	struct pt_regs regs;
 	int cpu;
-	cpu = smp_processor_id();
 
+	cpu = smp_processor_id();
 	if (saved_regs)
 		crash_setup_regs(&regs, saved_regs);
 	else
@@ -153,6 +155,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 	/* Assume hlt works */
 	__asm__("hlt");
 	for(;;);
+
 	return 1;
 }
 
@@ -169,8 +172,8 @@ static void smp_send_nmi_allbutself(void)
 static void nmi_shootdown_cpus(void)
 {
 	unsigned long msecs;
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
 
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
 	/* Would it be better to replace the trap vector here? */
 	set_nmi_callback(crash_nmi_callback);
 	/* Ensure the new callback function is set before sending
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c
index 671880415d1c..52ed18d8b511 100644
--- a/arch/i386/kernel/machine_kexec.c
+++ b/arch/i386/kernel/machine_kexec.c
@@ -80,7 +80,8 @@ static void identity_map_page(unsigned long address)
 	/* Identity map the page table entry */
 	pgtable_level1[level1_index] = address | L0_ATTR;
 	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
-	set_64bit(&pgtable_level3[level3_index], __pa(pgtable_level2) | L2_ATTR);
+	set_64bit(&pgtable_level3[level3_index],
+		  __pa(pgtable_level2) | L2_ATTR);
 
 	/* Flush the tlb so the new mapping takes effect.
 	 * Global tlb entries are not flushed but that is not an issue.
@@ -139,8 +140,10 @@ static void load_segments(void)
 }
 
 typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
-	unsigned long indirection_page, unsigned long reboot_code_buffer,
-	unsigned long start_address, unsigned int has_pae) ATTRIB_NORET;
+					unsigned long indirection_page,
+					unsigned long reboot_code_buffer,
+					unsigned long start_address,
+					unsigned int has_pae) ATTRIB_NORET;
 
 const extern unsigned char relocate_new_kernel[];
 extern void relocate_new_kernel_end(void);
@@ -180,20 +183,23 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list;
 	unsigned long reboot_code_buffer;
+
 	relocate_new_kernel_t rnk;
 
 	/* Interrupts aren't acceptable while we reboot */
 	local_irq_disable();
 
 	/* Compute some offsets */
-	reboot_code_buffer = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+	reboot_code_buffer = page_to_pfn(image->control_code_page)
+								<< PAGE_SHIFT;
 	page_list = image->head;
 
 	/* Set up an identity mapping for the reboot_code_buffer */
 	identity_map_page(reboot_code_buffer);
 
 	/* copy it out */
-	memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
+	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+						relocate_new_kernel_size);
 
 	/* The segment registers are funny things, they are
 	 * automatically loaded from a table, in memory wherever you
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
index b82535357d6d..84d65a87191e 100644
--- a/arch/ppc/kernel/machine_kexec.c
+++ b/arch/ppc/kernel/machine_kexec.c
@@ -21,24 +21,23 @@
 #include <asm/machdep.h>
 
 typedef NORET_TYPE void (*relocate_new_kernel_t)(
-	unsigned long indirection_page, unsigned long reboot_code_buffer,
-	unsigned long start_address) ATTRIB_NORET;
+					unsigned long indirection_page,
+					unsigned long reboot_code_buffer,
+					unsigned long start_address) ATTRIB_NORET;
 
 const extern unsigned char relocate_new_kernel[];
 const extern unsigned int relocate_new_kernel_size;
 
 void machine_shutdown(void)
 {
-	if (ppc_md.machine_shutdown) {
+	if (ppc_md.machine_shutdown)
 		ppc_md.machine_shutdown();
-	}
 }
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	if (ppc_md.machine_crash_shutdown) {
+	if (ppc_md.machine_crash_shutdown)
 		ppc_md.machine_crash_shutdown();
-	}
 }
 
44/* 43/*
@@ -48,9 +47,8 @@ void machine_crash_shutdown(struct pt_regs *regs)
  */
 int machine_kexec_prepare(struct kimage *image)
 {
-	if (ppc_md.machine_kexec_prepare) {
+	if (ppc_md.machine_kexec_prepare)
 		return ppc_md.machine_kexec_prepare(image);
-	}
 	/*
 	 * Fail if platform doesn't provide its own machine_kexec_prepare
 	 * implementation.
@@ -60,9 +58,8 @@ int machine_kexec_prepare(struct kimage *image)
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-	if (ppc_md.machine_kexec_cleanup) {
+	if (ppc_md.machine_kexec_cleanup)
 		ppc_md.machine_kexec_cleanup(image);
-	}
 }
 
 /*
@@ -71,9 +68,9 @@ void machine_kexec_cleanup(struct kimage *image)
  */
 NORET_TYPE void machine_kexec(struct kimage *image)
 {
-	if (ppc_md.machine_kexec) {
+	if (ppc_md.machine_kexec)
 		ppc_md.machine_kexec(image);
-	} else {
+	else {
 		/*
 		 * Fall back to normal restart if platform doesn't provide
 		 * its own kexec function, and user insist to kexec...
@@ -83,7 +80,6 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	for(;;);
 }
 
-
 /*
  * This is a generic machine_kexec function suitable at least for
  * non-OpenFirmware embedded platforms.
@@ -104,15 +100,15 @@ void machine_kexec_simple(struct kimage *image)
 
 	/* we need both effective and real address here */
 	reboot_code_buffer =
 		(unsigned long)page_address(image->control_code_page);
 	reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer);
 
 	/* copy our kernel relocation code to the control code page */
-	memcpy((void *)reboot_code_buffer,
-		relocate_new_kernel, relocate_new_kernel_size);
+	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+						relocate_new_kernel_size);
 
 	flush_icache_range(reboot_code_buffer,
 		reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
 	printk(KERN_INFO "Bye!\n");
 
 	/* now call it */
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index 06b25b59c8a8..fdb2fc649d72 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -58,7 +58,7 @@ int machine_kexec_prepare(struct kimage *image)
 	 * handle the virtual mode, we must make sure no destination
 	 * overlaps kernel static data or bss.
 	 */
-	for(i = 0; i < image->nr_segments; i++)
+	for (i = 0; i < image->nr_segments; i++)
 		if (image->segment[i].mem < __pa(_end))
 			return -ETXTBSY;
 
@@ -76,7 +76,7 @@ int machine_kexec_prepare(struct kimage *image)
 	low = __pa(htab_address);
 	high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
 
-	for(i = 0; i < image->nr_segments; i++) {
+	for (i = 0; i < image->nr_segments; i++) {
 		begin = image->segment[i].mem;
 		end = begin + image->segment[i].memsz;
 
@@ -98,7 +98,7 @@ int machine_kexec_prepare(struct kimage *image)
 	low = *basep;
 	high = low + (*sizep);
 
-	for(i = 0; i < image->nr_segments; i++) {
+	for (i = 0; i < image->nr_segments; i++) {
 		begin = image->segment[i].mem;
 		end = begin + image->segment[i].memsz;
 
@@ -274,7 +274,8 @@ union thread_union kexec_stack
 
 /* Our assembly helper, in kexec_stub.S */
 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
-	void *image, void *control, void (*clear_all)(void)) ATTRIB_NORET;
+					void *image, void *control,
+					void (*clear_all)(void)) ATTRIB_NORET;
 
 /* too late to fail here */
 void machine_kexec(struct kimage *image)
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 7a94db76df46..2721c3a32b84 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -67,7 +67,7 @@ machine_kexec(struct kimage *image)
 	ctl_clear_bit(0,28);
 
 	on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
-	for(;;);
+	for (;;);
 }
 
 static void
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
 	for_each_online_cpu(cpu) {
 		if (cpu == smp_processor_id())
 			continue;
-		while(!smp_cpu_not_running(cpu))
+		while (!smp_cpu_not_running(cpu))
 			cpu_relax();
 	}
 
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 200b5993f8d9..60d1eff41567 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -32,29 +32,31 @@
 #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 
-static void init_level2_page(
-	u64 *level2p, unsigned long addr)
+static void init_level2_page(u64 *level2p, unsigned long addr)
 {
 	unsigned long end_addr;
+
 	addr &= PAGE_MASK;
 	end_addr = addr + LEVEL2_SIZE;
-	while(addr < end_addr) {
+	while (addr < end_addr) {
 		*(level2p++) = addr | L1_ATTR;
 		addr += LEVEL1_SIZE;
 	}
 }
 
-static int init_level3_page(struct kimage *image,
-	u64 *level3p, unsigned long addr, unsigned long last_addr)
+static int init_level3_page(struct kimage *image, u64 *level3p,
+				unsigned long addr, unsigned long last_addr)
 {
 	unsigned long end_addr;
 	int result;
+
 	result = 0;
 	addr &= PAGE_MASK;
 	end_addr = addr + LEVEL3_SIZE;
-	while((addr < last_addr) && (addr < end_addr)) {
+	while ((addr < last_addr) && (addr < end_addr)) {
 		struct page *page;
 		u64 *level2p;
+
 		page = kimage_alloc_control_pages(image, 0);
 		if (!page) {
 			result = -ENOMEM;
@@ -66,7 +68,7 @@ static int init_level3_page(struct kimage *image,
 		addr += LEVEL2_SIZE;
 	}
 	/* clear the unused entries */
-	while(addr < end_addr) {
+	while (addr < end_addr) {
 		*(level3p++) = 0;
 		addr += LEVEL2_SIZE;
 	}
@@ -75,17 +77,19 @@ out:
 }
 
 
-static int init_level4_page(struct kimage *image,
-	u64 *level4p, unsigned long addr, unsigned long last_addr)
+static int init_level4_page(struct kimage *image, u64 *level4p,
+				unsigned long addr, unsigned long last_addr)
 {
 	unsigned long end_addr;
 	int result;
+
 	result = 0;
 	addr &= PAGE_MASK;
 	end_addr = addr + LEVEL4_SIZE;
-	while((addr < last_addr) && (addr < end_addr)) {
+	while ((addr < last_addr) && (addr < end_addr)) {
 		struct page *page;
 		u64 *level3p;
+
 		page = kimage_alloc_control_pages(image, 0);
 		if (!page) {
 			result = -ENOMEM;
@@ -100,11 +104,11 @@ static int init_level4_page(struct kimage *image,
 		addr += LEVEL3_SIZE;
 	}
 	/* clear the unused entries */
-	while(addr < end_addr) {
+	while (addr < end_addr) {
 		*(level4p++) = 0;
 		addr += LEVEL3_SIZE;
 	}
- out:
+out:
 	return result;
 }
 
@@ -113,7 +117,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
 	u64 *level4p;
 	level4p = (u64 *)__va(start_pgtable);
-	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
 }
 
 static void set_idt(void *newidt, u16 limit)
@@ -159,9 +163,10 @@ static void load_segments(void)
 #undef __STR
 }
 
-typedef NORET_TYPE void (*relocate_new_kernel_t)(
-	unsigned long indirection_page, unsigned long control_code_buffer,
-	unsigned long start_address, unsigned long pgtable) ATTRIB_NORET;
+typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
+					unsigned long control_code_buffer,
+					unsigned long start_address,
+					unsigned long pgtable) ATTRIB_NORET;
 
 const extern unsigned char relocate_new_kernel[];
 const extern unsigned long relocate_new_kernel_size;
@@ -172,17 +177,17 @@ int machine_kexec_prepare(struct kimage *image)
 	int result;
 
 	/* Calculate the offsets */
 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
 	control_code_buffer = start_pgtable + 4096UL;
 
 	/* Setup the identity mapped 64bit page table */
 	result = init_pgtable(image, start_pgtable);
-	if (result) {
+	if (result)
 		return result;
-	}
 
 	/* Place the code in the reboot code buffer */
-	memcpy(__va(control_code_buffer), relocate_new_kernel, relocate_new_kernel_size);
+	memcpy(__va(control_code_buffer), relocate_new_kernel,
+						relocate_new_kernel_size);
 
 	return 0;
 }
@@ -207,8 +212,8 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	local_irq_disable();
 
 	/* Calculate the offsets */
-	page_list = image->head;
-	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
-	control_code_buffer = start_pgtable + 4096UL;
+	page_list = image->head;
+	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+	control_code_buffer = start_pgtable + 4096UL;
 
 	/* Set the low half of the page table to my identity mapped
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index b64108dd765b..42187381506b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -287,7 +287,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
 	size_t read = 0, csize;
 	int rc = 0;
 
-	while(count) {
+	while (count) {
 		pfn = *ppos / PAGE_SIZE;
 		if (pfn > saved_max_pfn)
 			return read;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 7383173a3a9c..c8468472aec0 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -91,14 +91,17 @@ extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
 extern int machine_kexec_prepare(struct kimage *image);
 extern void machine_kexec_cleanup(struct kimage *image);
 extern asmlinkage long sys_kexec_load(unsigned long entry,
-	unsigned long nr_segments, struct kexec_segment __user *segments,
-	unsigned long flags);
+					unsigned long nr_segments,
+					struct kexec_segment __user *segments,
+					unsigned long flags);
 #ifdef CONFIG_COMPAT
 extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
-	unsigned long nr_segments, struct compat_kexec_segment __user *segments,
-	unsigned long flags);
+				unsigned long nr_segments,
+				struct compat_kexec_segment __user *segments,
+				unsigned long flags);
 #endif
-extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order);
+extern struct page *kimage_alloc_control_pages(struct kimage *image,
+						unsigned int order);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
 extern struct kimage *kexec_image;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7ba8f8f747aa..52830b6d94e5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -159,9 +159,9 @@ asmlinkage long sys_shutdown(int, int);
 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
 				void __user *arg);
 asmlinkage long sys_restart_syscall(void);
-asmlinkage long sys_kexec_load(unsigned long entry,
-	unsigned long nr_segments, struct kexec_segment __user *segments,
+asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+				struct kexec_segment __user *segments,
 				unsigned long flags);
 
 asmlinkage long sys_exit(int error_code);
 asmlinkage void sys_exit_group(int error_code);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 277f22afe74b..7843548cf2d9 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -87,12 +87,15 @@ int kexec_should_crash(struct task_struct *p)
  */
 #define KIMAGE_NO_DEST (-1UL)
 
-static int kimage_is_destination_range(
-	struct kimage *image, unsigned long start, unsigned long end);
-static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest);
+static int kimage_is_destination_range(struct kimage *image,
+				       unsigned long start, unsigned long end);
+static struct page *kimage_alloc_page(struct kimage *image,
+				       unsigned int gfp_mask,
+				       unsigned long dest);
 
 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
-	unsigned long nr_segments, struct kexec_segment __user *segments)
+			   unsigned long nr_segments,
+			   struct kexec_segment __user *segments)
 {
 	size_t segment_bytes;
 	struct kimage *image;
@@ -102,9 +105,9 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	/* Allocate a controlling structure */
 	result = -ENOMEM;
 	image = kmalloc(sizeof(*image), GFP_KERNEL);
-	if (!image) {
+	if (!image)
 		goto out;
-	}
+
 	memset(image, 0, sizeof(*image));
 	image->head = 0;
 	image->entry = &image->head;
@@ -145,6 +148,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	result = -EADDRNOTAVAIL;
 	for (i = 0; i < nr_segments; i++) {
 		unsigned long mstart, mend;
+
 		mstart = image->segment[i].mem;
 		mend = mstart + image->segment[i].memsz;
 		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
@@ -159,12 +163,13 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	 * easy explanation as one segment stops on another.
 	 */
 	result = -EINVAL;
-	for(i = 0; i < nr_segments; i++) {
+	for (i = 0; i < nr_segments; i++) {
 		unsigned long mstart, mend;
 		unsigned long j;
+
 		mstart = image->segment[i].mem;
 		mend = mstart + image->segment[i].memsz;
-		for(j = 0; j < i; j++) {
+		for (j = 0; j < i; j++) {
 			unsigned long pstart, pend;
 			pstart = image->segment[j].mem;
 			pend = pstart + image->segment[j].memsz;
@@ -180,25 +185,25 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	 * later on.
 	 */
 	result = -EINVAL;
-	for(i = 0; i < nr_segments; i++) {
+	for (i = 0; i < nr_segments; i++) {
 		if (image->segment[i].bufsz > image->segment[i].memsz)
 			goto out;
 	}
 
-
 	result = 0;
- out:
-	if (result == 0) {
+out:
+	if (result == 0)
 		*rimage = image;
-	} else {
+	else
 		kfree(image);
-	}
+
 	return result;
 
 }
 
 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
-	unsigned long nr_segments, struct kexec_segment __user *segments)
+				unsigned long nr_segments,
+				struct kexec_segment __user *segments)
 {
 	int result;
 	struct kimage *image;
@@ -206,9 +211,9 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 	/* Allocate and initialize a controlling structure */
 	image = NULL;
 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
-	if (result) {
+	if (result)
 		goto out;
-	}
+
 	*rimage = image;
 
 	/*
@@ -218,7 +223,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 	 */
 	result = -ENOMEM;
 	image->control_code_page = kimage_alloc_control_pages(image,
-					get_order(KEXEC_CONTROL_CODE_SIZE));
+					   get_order(KEXEC_CONTROL_CODE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 		goto out;
@@ -226,16 +231,17 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 
 	result = 0;
 out:
-	if (result == 0) {
+	if (result == 0)
 		*rimage = image;
-	} else {
+	else
 		kfree(image);
-	}
+
 	return result;
 }
 
 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
-	unsigned long nr_segments, struct kexec_segment *segments)
+				unsigned long nr_segments,
+				struct kexec_segment *segments)
 {
 	int result;
 	struct kimage *image;
@@ -250,9 +256,8 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 
 	/* Allocate and initialize a controlling structure */
 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
-	if (result) {
+	if (result)
 		goto out;
-	}
 
 	/* Enable the special crash kernel control page
 	 * allocation policy.
@@ -272,6 +277,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 	result = -EADDRNOTAVAIL;
 	for (i = 0; i < nr_segments; i++) {
 		unsigned long mstart, mend;
+
 		mstart = image->segment[i].mem;
 		mend = mstart + image->segment[i].memsz - 1;
 		/* Ensure we are within the crash kernel limits */
@@ -279,7 +285,6 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 			goto out;
 	}
 
-
 	/*
 	 * Find a location for the control code buffer, and add
 	 * the vector of segments so that it's pages will also be
@@ -287,80 +292,84 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 	 */
 	result = -ENOMEM;
 	image->control_code_page = kimage_alloc_control_pages(image,
 					   get_order(KEXEC_CONTROL_CODE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 		goto out;
 	}
 
 	result = 0;
- out:
-	if (result == 0) {
+out:
+	if (result == 0)
 		*rimage = image;
-	} else {
+	else
 		kfree(image);
-	}
+
 	return result;
 }
 
-static int kimage_is_destination_range(
-	struct kimage *image, unsigned long start, unsigned long end)
+static int kimage_is_destination_range(struct kimage *image,
+					unsigned long start,
+					unsigned long end)
 {
 	unsigned long i;
 
 	for (i = 0; i < image->nr_segments; i++) {
 		unsigned long mstart, mend;
+
 		mstart = image->segment[i].mem;
 		mend = mstart + image->segment[i].memsz;
-		if ((end > mstart) && (start < mend)) {
+		if ((end > mstart) && (start < mend))
 			return 1;
-		}
 	}
+
 	return 0;
 }
 
-static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order)
+static struct page *kimage_alloc_pages(unsigned int gfp_mask,
+					unsigned int order)
 {
 	struct page *pages;
+
 	pages = alloc_pages(gfp_mask, order);
 	if (pages) {
 		unsigned int count, i;
 		pages->mapping = NULL;
 		pages->private = order;
 		count = 1 << order;
-		for(i = 0; i < count; i++) {
+		for (i = 0; i < count; i++)
 			SetPageReserved(pages + i);
-		}
 	}
+
 	return pages;
 }
 
 static void kimage_free_pages(struct page *page)
 {
 	unsigned int order, count, i;
+
 	order = page->private;
 	count = 1 << order;
-	for(i = 0; i < count; i++) {
+	for (i = 0; i < count; i++)
 		ClearPageReserved(page + i);
-	}
 	__free_pages(page, order);
 }
 
 static void kimage_free_page_list(struct list_head *list)
 {
 	struct list_head *pos, *next;
+
 	list_for_each_safe(pos, next, list) {
 		struct page *page;
 
 		page = list_entry(pos, struct page, lru);
 		list_del(&page->lru);
-
 		kimage_free_pages(page);
 	}
 }
 
-static struct page *kimage_alloc_normal_control_pages(
-	struct kimage *image, unsigned int order)
+static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
+							unsigned int order)
 {
 	/* Control pages are special, they are the intermediaries
 	 * that are needed while we copy the rest of the pages
@@ -387,6 +396,7 @@ static struct page *kimage_alloc_normal_control_pages(
 	 */
 	do {
 		unsigned long pfn, epfn, addr, eaddr;
+
 		pages = kimage_alloc_pages(GFP_KERNEL, order);
 		if (!pages)
 			break;
@@ -395,12 +405,12 @@ static struct page *kimage_alloc_normal_control_pages(
 		addr = pfn << PAGE_SHIFT;
 		eaddr = epfn << PAGE_SHIFT;
 		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
-			kimage_is_destination_range(image, addr, eaddr))
-		{
+			kimage_is_destination_range(image, addr, eaddr)) {
 			list_add(&pages->lru, &extra_pages);
 			pages = NULL;
 		}
-	} while(!pages);
+	} while (!pages);
+
 	if (pages) {
 		/* Remember the allocated page... */
 		list_add(&pages->lru, &image->control_pages);
@@ -420,12 +430,12 @@ static struct page *kimage_alloc_normal_control_pages(
 	 * For now it is simpler to just free the pages.
 	 */
 	kimage_free_page_list(&extra_pages);
-	return pages;
 
+	return pages;
 }
 
-static struct page *kimage_alloc_crash_control_pages(
-	struct kimage *image, unsigned int order)
+static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+							unsigned int order)
 {
 	/* Control pages are special, they are the intermediaries
 	 * that are needed while we copy the rest of the pages
@@ -450,21 +460,22 @@ static struct page *kimage_alloc_crash_control_pages(
 	 */
 	unsigned long hole_start, hole_end, size;
 	struct page *pages;
+
 	pages = NULL;
 	size = (1 << order) << PAGE_SHIFT;
 	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 	hole_end = hole_start + size - 1;
-	while(hole_end <= crashk_res.end) {
+	while (hole_end <= crashk_res.end) {
 		unsigned long i;
-		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) {
+
+		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
 			break;
-		}
-		if (hole_end > crashk_res.end) {
+		if (hole_end > crashk_res.end)
 			break;
-		}
 		/* See if I overlap any of the segments */
-		for(i = 0; i < image->nr_segments; i++) {
+		for (i = 0; i < image->nr_segments; i++) {
 			unsigned long mstart, mend;
+
 			mstart = image->segment[i].mem;
 			mend = mstart + image->segment[i].memsz - 1;
 			if ((hole_end >= mstart) && (hole_start <= mend)) {
@@ -480,18 +491,19 @@ static struct page *kimage_alloc_crash_control_pages(
 				break;
 		}
 	}
-	if (pages) {
+	if (pages)
 		image->control_page = hole_end;
-	}
+
 	return pages;
 }
 
 
-struct page *kimage_alloc_control_pages(
-	struct kimage *image, unsigned int order)
+struct page *kimage_alloc_control_pages(struct kimage *image,
+					unsigned int order)
 {
 	struct page *pages = NULL;
-	switch(image->type) {
+
+	switch (image->type) {
 	case KEXEC_TYPE_DEFAULT:
 		pages = kimage_alloc_normal_control_pages(image, order);
 		break;
@@ -499,43 +511,46 @@ struct page *kimage_alloc_control_pages(
 		pages = kimage_alloc_crash_control_pages(image, order);
 		break;
 	}
+
 	return pages;
 }
 
 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 {
-	if (*image->entry != 0) {
+	if (*image->entry != 0)
 		image->entry++;
-	}
+
 	if (image->entry == image->last_entry) {
 		kimage_entry_t *ind_page;
 		struct page *page;
+
 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
-		if (!page) {
+		if (!page)
 			return -ENOMEM;
-		}
+
 		ind_page = page_address(page);
 		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
 		image->entry = ind_page;
-		image->last_entry =
-			ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+		image->last_entry = ind_page +
+				((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 	}
 	*image->entry = entry;
 	image->entry++;
 	*image->entry = 0;
+
 	return 0;
 }
 
-static int kimage_set_destination(
-	struct kimage *image, unsigned long destination)
+static int kimage_set_destination(struct kimage *image,
+				   unsigned long destination)
 {
 	int result;
 
 	destination &= PAGE_MASK;
 	result = kimage_add_entry(image, destination | IND_DESTINATION);
-	if (result == 0) {
+	if (result == 0)
 		image->destination = destination;
-	}
+
 	return result;
 }
 
@@ -546,9 +561,9 @@ static int kimage_add_page(struct kimage *image, unsigned long page)
 
 	page &= PAGE_MASK;
 	result = kimage_add_entry(image, page | IND_SOURCE);
-	if (result == 0) {
+	if (result == 0)
 		image->destination += PAGE_SIZE;
-	}
+
 	return result;
 }
 
@@ -564,10 +579,11 @@ static void kimage_free_extra_pages(struct kimage *image)
 }
 static int kimage_terminate(struct kimage *image)
 {
-	if (*image->entry != 0) {
+	if (*image->entry != 0)
 		image->entry++;
-	}
+
 	*image->entry = IND_DONE;
+
 	return 0;
 }
 
@@ -591,26 +607,24 @@ static void kimage_free(struct kimage *image)
 
 	if (!image)
 		return;
+
 	kimage_free_extra_pages(image);
 	for_each_kimage_entry(image, ptr, entry) {
 		if (entry & IND_INDIRECTION) {
 			/* Free the previous indirection page */
-			if (ind & IND_INDIRECTION) {
+			if (ind & IND_INDIRECTION)
 				kimage_free_entry(ind);
-			}
 			/* Save this indirection page until we are
 			 * done with it.
 			 */
 			ind = entry;
 		}
-		else if (entry & IND_SOURCE) {
+		else if (entry & IND_SOURCE)
 			kimage_free_entry(entry);
-		}
 	}
 	/* Free the final indirection page */
-	if (ind & IND_INDIRECTION) {
+	if (ind & IND_INDIRECTION)
 		kimage_free_entry(ind);
-	}
 
 	/* Handle any machine specific cleanup */
 	machine_kexec_cleanup(image);
@@ -620,26 +634,28 @@ static void kimage_free(struct kimage *image)
 	kfree(image);
 }
 
-static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page)
+static kimage_entry_t *kimage_dst_used(struct kimage *image,
+					unsigned long page)
 {
 	kimage_entry_t *ptr, entry;
 	unsigned long destination = 0;
 
 	for_each_kimage_entry(image, ptr, entry) {
-		if (entry & IND_DESTINATION) {
+		if (entry & IND_DESTINATION)
 			destination = entry & PAGE_MASK;
-		}
 		else if (entry & IND_SOURCE) {
-			if (page == destination) {
+			if (page == destination)
 				return ptr;
-			}
 			destination += PAGE_SIZE;
 		}
 	}
+
 	return 0;
 }
 
-static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination)
+static struct page *kimage_alloc_page(struct kimage *image,
+					unsigned int gfp_mask,
+					unsigned long destination)
 {
 	/*
 	 * Here we implement safeguards to ensure that a source page
@@ -679,11 +695,11 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas
 
 		/* Allocate a page, if we run out of memory give up */
 		page = kimage_alloc_pages(gfp_mask, 0);
-		if (!page) {
+		if (!page)
 			return 0;
-		}
 		/* If the page cannot be used file it away */
-		if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+		if (page_to_pfn(page) >
+				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 			list_add(&page->lru, &image->unuseable_pages);
 			continue;
 		}
@@ -694,7 +710,8 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas
 			break;
 
 		/* If the page is not a destination page use it */
-		if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE))
+		if (!kimage_is_destination_range(image, addr,
+						  addr + PAGE_SIZE))
 			break;
 
 		/*
@@ -727,11 +744,12 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas
 			list_add(&page->lru, &image->dest_pages);
 		}
 	}
+
 	return page;
 }
 
 static int kimage_load_normal_segment(struct kimage *image,
 					struct kexec_segment *segment)
 {
 	unsigned long maddr;
 	unsigned long ubytes, mbytes;
@@ -745,34 +763,36 @@ static int kimage_load_normal_segment(struct kimage *image,
 	maddr = segment->mem;
 
 	result = kimage_set_destination(image, maddr);
-	if (result < 0) {
+	if (result < 0)
 		goto out;
-	}
-	while(mbytes) {
+
+	while (mbytes) {
 		struct page *page;
 		char *ptr;
 		size_t uchunk, mchunk;
+
 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 		if (page == 0) {
 			result = -ENOMEM;
 			goto out;
 		}
-		result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT);
-		if (result < 0) {
+		result = kimage_add_page(image, page_to_pfn(page)
+								<< PAGE_SHIFT);
+		if (result < 0)
 			goto out;
-		}
+
 		ptr = kmap(page);
 		/* Start with a clear page */
 		memset(ptr, 0, PAGE_SIZE);
 		ptr += maddr & ~PAGE_MASK;
 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
-		if (mchunk > mbytes) {
+		if (mchunk > mbytes)
 			mchunk = mbytes;
-		}
+
 		uchunk = mchunk;
-		if (uchunk > ubytes) {
+		if (uchunk > ubytes)
 			uchunk = ubytes;
-		}
+
 		result = copy_from_user(ptr, buf, uchunk);
 		kunmap(page);
 		if (result) {
@@ -784,12 +804,12 @@ static int kimage_load_normal_segment(struct kimage *image,
 		buf += mchunk;
 		mbytes -= mchunk;
 	}
- out:
+out:
 	return result;
 }
 
 static int kimage_load_crash_segment(struct kimage *image,
 					struct kexec_segment *segment)
 {
 	/* For crash dumps kernels we simply copy the data from
 	 * user space to it's destination.
@@ -805,10 +825,11 @@ static int kimage_load_crash_segment(struct kimage *image,
 	ubytes = segment->bufsz;
 	mbytes = segment->memsz;
 	maddr = segment->mem;
-	while(mbytes) {
+	while (mbytes) {
 		struct page *page;
 		char *ptr;
 		size_t uchunk, mchunk;
+
 		page = pfn_to_page(maddr >> PAGE_SHIFT);
 		if (page == 0) {
 			result = -ENOMEM;
@@ -817,9 +838,9 @@ static int kimage_load_crash_segment(struct kimage *image,
 		ptr = kmap(page);
 		ptr += maddr & ~PAGE_MASK;
 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
-		if (mchunk > mbytes) {
+		if (mchunk > mbytes)
 			mchunk = mbytes;
-		}
+
 		uchunk = mchunk;
 		if (uchunk > ubytes) {
 			uchunk = ubytes;
@@ -837,15 +858,16 @@ static int kimage_load_crash_segment(struct kimage *image,
 		buf += mchunk;
 		mbytes -= mchunk;
 	}
- out:
+out:
 	return result;
 }
 
 static int kimage_load_segment(struct kimage *image,
 				struct kexec_segment *segment)
 {
 	int result = -ENOMEM;
-	switch(image->type) {
+
+	switch (image->type) {
 	case KEXEC_TYPE_DEFAULT:
 		result = kimage_load_normal_segment(image, segment);
 		break;
@@ -853,6 +875,7 @@ static int kimage_load_segment(struct kimage *image,
 		result = kimage_load_crash_segment(image, segment);
 		break;
 	}
+
 	return result;
 }
 
@@ -885,9 +908,9 @@ static struct kimage *kexec_crash_image = NULL;
  */
 static int kexec_lock = 0;
 
-asmlinkage long sys_kexec_load(unsigned long entry,
-	unsigned long nr_segments, struct kexec_segment __user *segments,
+asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+				struct kexec_segment __user *segments,
 				unsigned long flags)
 {
 	struct kimage **dest_image, *image;
 	int locked;
@@ -907,9 +930,7 @@ asmlinkage long sys_kexec_load(unsigned long entry,
 	/* Verify we are on the appropriate architecture */
 	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
-	{
 		return -EINVAL;
-	}
 
 	/* Put an artificial cap on the number
 	 * of segments passed to kexec_load.
@@ -929,58 +950,59 @@ asmlinkage long sys_kexec_load(unsigned long entry,
 	 * KISS: always take the mutex.
 	 */
 	locked = xchg(&kexec_lock, 1);
-	if (locked) {
+	if (locked)
 		return -EBUSY;
-	}
+
 	dest_image = &kexec_image;
-	if (flags & KEXEC_ON_CRASH) {
+	if (flags & KEXEC_ON_CRASH)
 		dest_image = &kexec_crash_image;
-	}
 	if (nr_segments > 0) {
 		unsigned long i;
+
 		/* Loading another kernel to reboot into */
-		if ((flags & KEXEC_ON_CRASH) == 0) {
-			result = kimage_normal_alloc(&image, entry, nr_segments, segments);
-		}
+		if ((flags & KEXEC_ON_CRASH) == 0)
+			result = kimage_normal_alloc(&image, entry,
+							nr_segments, segments);
 		/* Loading another kernel to switch to if this one crashes */
 		else if (flags & KEXEC_ON_CRASH) {
 			/* Free any current crash dump kernel before
 			 * we corrupt it.
 			 */
 			kimage_free(xchg(&kexec_crash_image, NULL));
-			result = kimage_crash_alloc(&image, entry, nr_segments, segments);
+			result = kimage_crash_alloc(&image, entry,
+							nr_segments, segments);
 		}
-		if (result) {
+		if (result)
 			goto out;
-		}
+
 		result = machine_kexec_prepare(image);
-		if (result) {
+		if (result)
 			goto out;
-		}
-		for(i = 0; i < nr_segments; i++) {
+
+		for (i = 0; i < nr_segments; i++) {
 			result = kimage_load_segment(image, &image->segment[i]);
-			if (result) {
+			if (result)
 				goto out;
-			}
 		}
 		result = kimage_terminate(image);
-		if (result) {
+		if (result)
 			goto out;
-		}
 	}
 	/* Install the new kernel, and Uninstall the old */
 	image = xchg(dest_image, image);
 
- out:
+out:
 	xchg(&kexec_lock, 0); /* Release the mutex */
 	kimage_free(image);
+
 	return result;
 }
 
 #ifdef CONFIG_COMPAT
 asmlinkage long compat_sys_kexec_load(unsigned long entry,
-	unsigned long nr_segments, struct compat_kexec_segment __user *segments,
-	unsigned long flags)
+				unsigned long nr_segments,
+				struct compat_kexec_segment __user *segments,
+				unsigned long flags)
 {
 	struct compat_kexec_segment in;
 	struct kexec_segment out, __user *ksegments;
@@ -989,20 +1011,17 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 	/* Don't allow clients that don't understand the native
 	 * architecture to do anything.
 	 */
-	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) {
+	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
 		return -EINVAL;
-	}
 
-	if (nr_segments > KEXEC_SEGMENT_MAX) {
+	if (nr_segments > KEXEC_SEGMENT_MAX)
 		return -EINVAL;
-	}
 
 	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
 	for (i=0; i < nr_segments; i++) {
 		result = copy_from_user(&in, &segments[i], sizeof(in));
-		if (result) {
+		if (result)
 			return -EFAULT;
-		}
 
 		out.buf = compat_ptr(in.buf);
 		out.bufsz = in.bufsz;
@@ -1010,9 +1029,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
 		out.memsz = in.memsz;
 
 		result = copy_to_user(&ksegments[i], &out, sizeof(out));
-		if (result) {
+		if (result)
 			return -EFAULT;
-		}
 	}
 
 	return sys_kexec_load(entry, nr_segments, ksegments, flags);