diff options
author | Maneesh Soni <maneesh@in.ibm.com> | 2005-06-25 17:58:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-25 19:24:55 -0400 |
commit | 72414d3f1d22fc3e311b162fca95c430048d38ce (patch) | |
tree | 46850947c1602357dd3c51d8d6ebaa5805507f9f /arch | |
parent | 4f339ecb30c759f94a29992d4635d9194132b6cf (diff) |
[PATCH] kexec code cleanup
o Following patch provides purely cosmetic changes and corrects CodingStyle
guideline-related issues like those below in kexec-related files
o braces for one line "if" statements, "for" loops,
o more than 80 column wide lines,
o No space after "while", "for" and "switch" key words
o Changes:
o take-2: Removed the extra tab before "case" key words.
o take-3: Put operator at the end of line and space before "*/"
Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/i386/kernel/crash.c | 23 | ||||
-rw-r--r-- | arch/i386/kernel/machine_kexec.c | 16 | ||||
-rw-r--r-- | arch/ppc/kernel/machine_kexec.c | 30 | ||||
-rw-r--r-- | arch/ppc64/kernel/machine_kexec.c | 9 | ||||
-rw-r--r-- | arch/s390/kernel/machine_kexec.c | 4 | ||||
-rw-r--r-- | arch/x86_64/kernel/machine_kexec.c | 49 |
6 files changed, 71 insertions, 60 deletions
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index 8bdb4b6af0f..e5fab12f792 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c | |||
@@ -31,10 +31,11 @@ note_buf_t crash_notes[NR_CPUS]; | |||
31 | /* This keeps a track of which one is crashing cpu. */ | 31 | /* This keeps a track of which one is crashing cpu. */ |
32 | static int crashing_cpu; | 32 | static int crashing_cpu; |
33 | 33 | ||
34 | static u32 *append_elf_note(u32 *buf, | 34 | static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, |
35 | char *name, unsigned type, void *data, size_t data_len) | 35 | size_t data_len) |
36 | { | 36 | { |
37 | struct elf_note note; | 37 | struct elf_note note; |
38 | |||
38 | note.n_namesz = strlen(name) + 1; | 39 | note.n_namesz = strlen(name) + 1; |
39 | note.n_descsz = data_len; | 40 | note.n_descsz = data_len; |
40 | note.n_type = type; | 41 | note.n_type = type; |
@@ -44,26 +45,28 @@ static u32 *append_elf_note(u32 *buf, | |||
44 | buf += (note.n_namesz + 3)/4; | 45 | buf += (note.n_namesz + 3)/4; |
45 | memcpy(buf, data, note.n_descsz); | 46 | memcpy(buf, data, note.n_descsz); |
46 | buf += (note.n_descsz + 3)/4; | 47 | buf += (note.n_descsz + 3)/4; |
48 | |||
47 | return buf; | 49 | return buf; |
48 | } | 50 | } |
49 | 51 | ||
50 | static void final_note(u32 *buf) | 52 | static void final_note(u32 *buf) |
51 | { | 53 | { |
52 | struct elf_note note; | 54 | struct elf_note note; |
55 | |||
53 | note.n_namesz = 0; | 56 | note.n_namesz = 0; |
54 | note.n_descsz = 0; | 57 | note.n_descsz = 0; |
55 | note.n_type = 0; | 58 | note.n_type = 0; |
56 | memcpy(buf, &note, sizeof(note)); | 59 | memcpy(buf, &note, sizeof(note)); |
57 | } | 60 | } |
58 | 61 | ||
59 | |||
60 | static void crash_save_this_cpu(struct pt_regs *regs, int cpu) | 62 | static void crash_save_this_cpu(struct pt_regs *regs, int cpu) |
61 | { | 63 | { |
62 | struct elf_prstatus prstatus; | 64 | struct elf_prstatus prstatus; |
63 | u32 *buf; | 65 | u32 *buf; |
64 | if ((cpu < 0) || (cpu >= NR_CPUS)) { | 66 | |
67 | if ((cpu < 0) || (cpu >= NR_CPUS)) | ||
65 | return; | 68 | return; |
66 | } | 69 | |
67 | /* Using ELF notes here is opportunistic. | 70 | /* Using ELF notes here is opportunistic. |
68 | * I need a well defined structure format | 71 | * I need a well defined structure format |
69 | * for the data I pass, and I need tags | 72 | * for the data I pass, and I need tags |
@@ -75,9 +78,8 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu) | |||
75 | memset(&prstatus, 0, sizeof(prstatus)); | 78 | memset(&prstatus, 0, sizeof(prstatus)); |
76 | prstatus.pr_pid = current->pid; | 79 | prstatus.pr_pid = current->pid; |
77 | elf_core_copy_regs(&prstatus.pr_reg, regs); | 80 | elf_core_copy_regs(&prstatus.pr_reg, regs); |
78 | buf = append_elf_note(buf, "CORE", NT_PRSTATUS, | 81 | buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, |
79 | &prstatus, sizeof(prstatus)); | 82 | sizeof(prstatus)); |
80 | |||
81 | final_note(buf); | 83 | final_note(buf); |
82 | } | 84 | } |
83 | 85 | ||
@@ -119,8 +121,8 @@ static void crash_save_self(struct pt_regs *saved_regs) | |||
119 | { | 121 | { |
120 | struct pt_regs regs; | 122 | struct pt_regs regs; |
121 | int cpu; | 123 | int cpu; |
122 | cpu = smp_processor_id(); | ||
123 | 124 | ||
125 | cpu = smp_processor_id(); | ||
124 | if (saved_regs) | 126 | if (saved_regs) |
125 | crash_setup_regs(&regs, saved_regs); | 127 | crash_setup_regs(&regs, saved_regs); |
126 | else | 128 | else |
@@ -153,6 +155,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) | |||
153 | /* Assume hlt works */ | 155 | /* Assume hlt works */ |
154 | __asm__("hlt"); | 156 | __asm__("hlt"); |
155 | for(;;); | 157 | for(;;); |
158 | |||
156 | return 1; | 159 | return 1; |
157 | } | 160 | } |
158 | 161 | ||
@@ -169,8 +172,8 @@ static void smp_send_nmi_allbutself(void) | |||
169 | static void nmi_shootdown_cpus(void) | 172 | static void nmi_shootdown_cpus(void) |
170 | { | 173 | { |
171 | unsigned long msecs; | 174 | unsigned long msecs; |
172 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | ||
173 | 175 | ||
176 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | ||
174 | /* Would it be better to replace the trap vector here? */ | 177 | /* Would it be better to replace the trap vector here? */ |
175 | set_nmi_callback(crash_nmi_callback); | 178 | set_nmi_callback(crash_nmi_callback); |
176 | /* Ensure the new callback function is set before sending | 179 | /* Ensure the new callback function is set before sending |
diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c index 671880415d1..52ed18d8b51 100644 --- a/arch/i386/kernel/machine_kexec.c +++ b/arch/i386/kernel/machine_kexec.c | |||
@@ -80,7 +80,8 @@ static void identity_map_page(unsigned long address) | |||
80 | /* Identity map the page table entry */ | 80 | /* Identity map the page table entry */ |
81 | pgtable_level1[level1_index] = address | L0_ATTR; | 81 | pgtable_level1[level1_index] = address | L0_ATTR; |
82 | pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; | 82 | pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR; |
83 | set_64bit(&pgtable_level3[level3_index], __pa(pgtable_level2) | L2_ATTR); | 83 | set_64bit(&pgtable_level3[level3_index], |
84 | __pa(pgtable_level2) | L2_ATTR); | ||
84 | 85 | ||
85 | /* Flush the tlb so the new mapping takes effect. | 86 | /* Flush the tlb so the new mapping takes effect. |
86 | * Global tlb entries are not flushed but that is not an issue. | 87 | * Global tlb entries are not flushed but that is not an issue. |
@@ -139,8 +140,10 @@ static void load_segments(void) | |||
139 | } | 140 | } |
140 | 141 | ||
141 | typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)( | 142 | typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)( |
142 | unsigned long indirection_page, unsigned long reboot_code_buffer, | 143 | unsigned long indirection_page, |
143 | unsigned long start_address, unsigned int has_pae) ATTRIB_NORET; | 144 | unsigned long reboot_code_buffer, |
145 | unsigned long start_address, | ||
146 | unsigned int has_pae) ATTRIB_NORET; | ||
144 | 147 | ||
145 | const extern unsigned char relocate_new_kernel[]; | 148 | const extern unsigned char relocate_new_kernel[]; |
146 | extern void relocate_new_kernel_end(void); | 149 | extern void relocate_new_kernel_end(void); |
@@ -180,20 +183,23 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
180 | { | 183 | { |
181 | unsigned long page_list; | 184 | unsigned long page_list; |
182 | unsigned long reboot_code_buffer; | 185 | unsigned long reboot_code_buffer; |
186 | |||
183 | relocate_new_kernel_t rnk; | 187 | relocate_new_kernel_t rnk; |
184 | 188 | ||
185 | /* Interrupts aren't acceptable while we reboot */ | 189 | /* Interrupts aren't acceptable while we reboot */ |
186 | local_irq_disable(); | 190 | local_irq_disable(); |
187 | 191 | ||
188 | /* Compute some offsets */ | 192 | /* Compute some offsets */ |
189 | reboot_code_buffer = page_to_pfn(image->control_code_page) << PAGE_SHIFT; | 193 | reboot_code_buffer = page_to_pfn(image->control_code_page) |
194 | << PAGE_SHIFT; | ||
190 | page_list = image->head; | 195 | page_list = image->head; |
191 | 196 | ||
192 | /* Set up an identity mapping for the reboot_code_buffer */ | 197 | /* Set up an identity mapping for the reboot_code_buffer */ |
193 | identity_map_page(reboot_code_buffer); | 198 | identity_map_page(reboot_code_buffer); |
194 | 199 | ||
195 | /* copy it out */ | 200 | /* copy it out */ |
196 | memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); | 201 | memcpy((void *)reboot_code_buffer, relocate_new_kernel, |
202 | relocate_new_kernel_size); | ||
197 | 203 | ||
198 | /* The segment registers are funny things, they are | 204 | /* The segment registers are funny things, they are |
199 | * automatically loaded from a table, in memory wherever you | 205 | * automatically loaded from a table, in memory wherever you |
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c index b82535357d6..84d65a87191 100644 --- a/arch/ppc/kernel/machine_kexec.c +++ b/arch/ppc/kernel/machine_kexec.c | |||
@@ -21,24 +21,23 @@ | |||
21 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
22 | 22 | ||
23 | typedef NORET_TYPE void (*relocate_new_kernel_t)( | 23 | typedef NORET_TYPE void (*relocate_new_kernel_t)( |
24 | unsigned long indirection_page, unsigned long reboot_code_buffer, | 24 | unsigned long indirection_page, |
25 | unsigned long start_address) ATTRIB_NORET; | 25 | unsigned long reboot_code_buffer, |
26 | unsigned long start_address) ATTRIB_NORET; | ||
26 | 27 | ||
27 | const extern unsigned char relocate_new_kernel[]; | 28 | const extern unsigned char relocate_new_kernel[]; |
28 | const extern unsigned int relocate_new_kernel_size; | 29 | const extern unsigned int relocate_new_kernel_size; |
29 | 30 | ||
30 | void machine_shutdown(void) | 31 | void machine_shutdown(void) |
31 | { | 32 | { |
32 | if (ppc_md.machine_shutdown) { | 33 | if (ppc_md.machine_shutdown) |
33 | ppc_md.machine_shutdown(); | 34 | ppc_md.machine_shutdown(); |
34 | } | ||
35 | } | 35 | } |
36 | 36 | ||
37 | void machine_crash_shutdown(struct pt_regs *regs) | 37 | void machine_crash_shutdown(struct pt_regs *regs) |
38 | { | 38 | { |
39 | if (ppc_md.machine_crash_shutdown) { | 39 | if (ppc_md.machine_crash_shutdown) |
40 | ppc_md.machine_crash_shutdown(); | 40 | ppc_md.machine_crash_shutdown(); |
41 | } | ||
42 | } | 41 | } |
43 | 42 | ||
44 | /* | 43 | /* |
@@ -48,9 +47,8 @@ void machine_crash_shutdown(struct pt_regs *regs) | |||
48 | */ | 47 | */ |
49 | int machine_kexec_prepare(struct kimage *image) | 48 | int machine_kexec_prepare(struct kimage *image) |
50 | { | 49 | { |
51 | if (ppc_md.machine_kexec_prepare) { | 50 | if (ppc_md.machine_kexec_prepare) |
52 | return ppc_md.machine_kexec_prepare(image); | 51 | return ppc_md.machine_kexec_prepare(image); |
53 | } | ||
54 | /* | 52 | /* |
55 | * Fail if platform doesn't provide its own machine_kexec_prepare | 53 | * Fail if platform doesn't provide its own machine_kexec_prepare |
56 | * implementation. | 54 | * implementation. |
@@ -60,9 +58,8 @@ int machine_kexec_prepare(struct kimage *image) | |||
60 | 58 | ||
61 | void machine_kexec_cleanup(struct kimage *image) | 59 | void machine_kexec_cleanup(struct kimage *image) |
62 | { | 60 | { |
63 | if (ppc_md.machine_kexec_cleanup) { | 61 | if (ppc_md.machine_kexec_cleanup) |
64 | ppc_md.machine_kexec_cleanup(image); | 62 | ppc_md.machine_kexec_cleanup(image); |
65 | } | ||
66 | } | 63 | } |
67 | 64 | ||
68 | /* | 65 | /* |
@@ -71,9 +68,9 @@ void machine_kexec_cleanup(struct kimage *image) | |||
71 | */ | 68 | */ |
72 | NORET_TYPE void machine_kexec(struct kimage *image) | 69 | NORET_TYPE void machine_kexec(struct kimage *image) |
73 | { | 70 | { |
74 | if (ppc_md.machine_kexec) { | 71 | if (ppc_md.machine_kexec) |
75 | ppc_md.machine_kexec(image); | 72 | ppc_md.machine_kexec(image); |
76 | } else { | 73 | else { |
77 | /* | 74 | /* |
78 | * Fall back to normal restart if platform doesn't provide | 75 | * Fall back to normal restart if platform doesn't provide |
79 | * its own kexec function, and user insist to kexec... | 76 | * its own kexec function, and user insist to kexec... |
@@ -83,7 +80,6 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
83 | for(;;); | 80 | for(;;); |
84 | } | 81 | } |
85 | 82 | ||
86 | |||
87 | /* | 83 | /* |
88 | * This is a generic machine_kexec function suitable at least for | 84 | * This is a generic machine_kexec function suitable at least for |
89 | * non-OpenFirmware embedded platforms. | 85 | * non-OpenFirmware embedded platforms. |
@@ -104,15 +100,15 @@ void machine_kexec_simple(struct kimage *image) | |||
104 | 100 | ||
105 | /* we need both effective and real address here */ | 101 | /* we need both effective and real address here */ |
106 | reboot_code_buffer = | 102 | reboot_code_buffer = |
107 | (unsigned long)page_address(image->control_code_page); | 103 | (unsigned long)page_address(image->control_code_page); |
108 | reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); | 104 | reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); |
109 | 105 | ||
110 | /* copy our kernel relocation code to the control code page */ | 106 | /* copy our kernel relocation code to the control code page */ |
111 | memcpy((void *)reboot_code_buffer, | 107 | memcpy((void *)reboot_code_buffer, relocate_new_kernel, |
112 | relocate_new_kernel, relocate_new_kernel_size); | 108 | relocate_new_kernel_size); |
113 | 109 | ||
114 | flush_icache_range(reboot_code_buffer, | 110 | flush_icache_range(reboot_code_buffer, |
115 | reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); | 111 | reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); |
116 | printk(KERN_INFO "Bye!\n"); | 112 | printk(KERN_INFO "Bye!\n"); |
117 | 113 | ||
118 | /* now call it */ | 114 | /* now call it */ |
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c index 06b25b59c8a..fdb2fc649d7 100644 --- a/arch/ppc64/kernel/machine_kexec.c +++ b/arch/ppc64/kernel/machine_kexec.c | |||
@@ -58,7 +58,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
58 | * handle the virtual mode, we must make sure no destination | 58 | * handle the virtual mode, we must make sure no destination |
59 | * overlaps kernel static data or bss. | 59 | * overlaps kernel static data or bss. |
60 | */ | 60 | */ |
61 | for(i = 0; i < image->nr_segments; i++) | 61 | for (i = 0; i < image->nr_segments; i++) |
62 | if (image->segment[i].mem < __pa(_end)) | 62 | if (image->segment[i].mem < __pa(_end)) |
63 | return -ETXTBSY; | 63 | return -ETXTBSY; |
64 | 64 | ||
@@ -76,7 +76,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
76 | low = __pa(htab_address); | 76 | low = __pa(htab_address); |
77 | high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; | 77 | high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; |
78 | 78 | ||
79 | for(i = 0; i < image->nr_segments; i++) { | 79 | for (i = 0; i < image->nr_segments; i++) { |
80 | begin = image->segment[i].mem; | 80 | begin = image->segment[i].mem; |
81 | end = begin + image->segment[i].memsz; | 81 | end = begin + image->segment[i].memsz; |
82 | 82 | ||
@@ -98,7 +98,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
98 | low = *basep; | 98 | low = *basep; |
99 | high = low + (*sizep); | 99 | high = low + (*sizep); |
100 | 100 | ||
101 | for(i = 0; i < image->nr_segments; i++) { | 101 | for (i = 0; i < image->nr_segments; i++) { |
102 | begin = image->segment[i].mem; | 102 | begin = image->segment[i].mem; |
103 | end = begin + image->segment[i].memsz; | 103 | end = begin + image->segment[i].memsz; |
104 | 104 | ||
@@ -274,7 +274,8 @@ union thread_union kexec_stack | |||
274 | 274 | ||
275 | /* Our assembly helper, in kexec_stub.S */ | 275 | /* Our assembly helper, in kexec_stub.S */ |
276 | extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, | 276 | extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, |
277 | void *image, void *control, void (*clear_all)(void)) ATTRIB_NORET; | 277 | void *image, void *control, |
278 | void (*clear_all)(void)) ATTRIB_NORET; | ||
278 | 279 | ||
279 | /* too late to fail here */ | 280 | /* too late to fail here */ |
280 | void machine_kexec(struct kimage *image) | 281 | void machine_kexec(struct kimage *image) |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 7a94db76df4..2721c3a32b8 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -67,7 +67,7 @@ machine_kexec(struct kimage *image) | |||
67 | ctl_clear_bit(0,28); | 67 | ctl_clear_bit(0,28); |
68 | 68 | ||
69 | on_each_cpu(kexec_halt_all_cpus, image, 0, 0); | 69 | on_each_cpu(kexec_halt_all_cpus, image, 0, 0); |
70 | for(;;); | 70 | for (;;); |
71 | } | 71 | } |
72 | 72 | ||
73 | static void | 73 | static void |
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image) | |||
85 | for_each_online_cpu(cpu) { | 85 | for_each_online_cpu(cpu) { |
86 | if (cpu == smp_processor_id()) | 86 | if (cpu == smp_processor_id()) |
87 | continue; | 87 | continue; |
88 | while(!smp_cpu_not_running(cpu)) | 88 | while (!smp_cpu_not_running(cpu)) |
89 | cpu_relax(); | 89 | cpu_relax(); |
90 | } | 90 | } |
91 | 91 | ||
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c index 200b5993f8d..60d1eff4156 100644 --- a/arch/x86_64/kernel/machine_kexec.c +++ b/arch/x86_64/kernel/machine_kexec.c | |||
@@ -32,29 +32,31 @@ | |||
32 | #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 32 | #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) |
33 | #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 33 | #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) |
34 | 34 | ||
35 | static void init_level2_page( | 35 | static void init_level2_page(u64 *level2p, unsigned long addr) |
36 | u64 *level2p, unsigned long addr) | ||
37 | { | 36 | { |
38 | unsigned long end_addr; | 37 | unsigned long end_addr; |
38 | |||
39 | addr &= PAGE_MASK; | 39 | addr &= PAGE_MASK; |
40 | end_addr = addr + LEVEL2_SIZE; | 40 | end_addr = addr + LEVEL2_SIZE; |
41 | while(addr < end_addr) { | 41 | while (addr < end_addr) { |
42 | *(level2p++) = addr | L1_ATTR; | 42 | *(level2p++) = addr | L1_ATTR; |
43 | addr += LEVEL1_SIZE; | 43 | addr += LEVEL1_SIZE; |
44 | } | 44 | } |
45 | } | 45 | } |
46 | 46 | ||
47 | static int init_level3_page(struct kimage *image, | 47 | static int init_level3_page(struct kimage *image, u64 *level3p, |
48 | u64 *level3p, unsigned long addr, unsigned long last_addr) | 48 | unsigned long addr, unsigned long last_addr) |
49 | { | 49 | { |
50 | unsigned long end_addr; | 50 | unsigned long end_addr; |
51 | int result; | 51 | int result; |
52 | |||
52 | result = 0; | 53 | result = 0; |
53 | addr &= PAGE_MASK; | 54 | addr &= PAGE_MASK; |
54 | end_addr = addr + LEVEL3_SIZE; | 55 | end_addr = addr + LEVEL3_SIZE; |
55 | while((addr < last_addr) && (addr < end_addr)) { | 56 | while ((addr < last_addr) && (addr < end_addr)) { |
56 | struct page *page; | 57 | struct page *page; |
57 | u64 *level2p; | 58 | u64 *level2p; |
59 | |||
58 | page = kimage_alloc_control_pages(image, 0); | 60 | page = kimage_alloc_control_pages(image, 0); |
59 | if (!page) { | 61 | if (!page) { |
60 | result = -ENOMEM; | 62 | result = -ENOMEM; |
@@ -66,7 +68,7 @@ static int init_level3_page(struct kimage *image, | |||
66 | addr += LEVEL2_SIZE; | 68 | addr += LEVEL2_SIZE; |
67 | } | 69 | } |
68 | /* clear the unused entries */ | 70 | /* clear the unused entries */ |
69 | while(addr < end_addr) { | 71 | while (addr < end_addr) { |
70 | *(level3p++) = 0; | 72 | *(level3p++) = 0; |
71 | addr += LEVEL2_SIZE; | 73 | addr += LEVEL2_SIZE; |
72 | } | 74 | } |
@@ -75,17 +77,19 @@ out: | |||
75 | } | 77 | } |
76 | 78 | ||
77 | 79 | ||
78 | static int init_level4_page(struct kimage *image, | 80 | static int init_level4_page(struct kimage *image, u64 *level4p, |
79 | u64 *level4p, unsigned long addr, unsigned long last_addr) | 81 | unsigned long addr, unsigned long last_addr) |
80 | { | 82 | { |
81 | unsigned long end_addr; | 83 | unsigned long end_addr; |
82 | int result; | 84 | int result; |
85 | |||
83 | result = 0; | 86 | result = 0; |
84 | addr &= PAGE_MASK; | 87 | addr &= PAGE_MASK; |
85 | end_addr = addr + LEVEL4_SIZE; | 88 | end_addr = addr + LEVEL4_SIZE; |
86 | while((addr < last_addr) && (addr < end_addr)) { | 89 | while ((addr < last_addr) && (addr < end_addr)) { |
87 | struct page *page; | 90 | struct page *page; |
88 | u64 *level3p; | 91 | u64 *level3p; |
92 | |||
89 | page = kimage_alloc_control_pages(image, 0); | 93 | page = kimage_alloc_control_pages(image, 0); |
90 | if (!page) { | 94 | if (!page) { |
91 | result = -ENOMEM; | 95 | result = -ENOMEM; |
@@ -100,11 +104,11 @@ static int init_level4_page(struct kimage *image, | |||
100 | addr += LEVEL3_SIZE; | 104 | addr += LEVEL3_SIZE; |
101 | } | 105 | } |
102 | /* clear the unused entries */ | 106 | /* clear the unused entries */ |
103 | while(addr < end_addr) { | 107 | while (addr < end_addr) { |
104 | *(level4p++) = 0; | 108 | *(level4p++) = 0; |
105 | addr += LEVEL3_SIZE; | 109 | addr += LEVEL3_SIZE; |
106 | } | 110 | } |
107 | out: | 111 | out: |
108 | return result; | 112 | return result; |
109 | } | 113 | } |
110 | 114 | ||
@@ -113,7 +117,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable) | |||
113 | { | 117 | { |
114 | u64 *level4p; | 118 | u64 *level4p; |
115 | level4p = (u64 *)__va(start_pgtable); | 119 | level4p = (u64 *)__va(start_pgtable); |
116 | return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); | 120 | return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT); |
117 | } | 121 | } |
118 | 122 | ||
119 | static void set_idt(void *newidt, u16 limit) | 123 | static void set_idt(void *newidt, u16 limit) |
@@ -159,9 +163,10 @@ static void load_segments(void) | |||
159 | #undef __STR | 163 | #undef __STR |
160 | } | 164 | } |
161 | 165 | ||
162 | typedef NORET_TYPE void (*relocate_new_kernel_t)( | 166 | typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page, |
163 | unsigned long indirection_page, unsigned long control_code_buffer, | 167 | unsigned long control_code_buffer, |
164 | unsigned long start_address, unsigned long pgtable) ATTRIB_NORET; | 168 | unsigned long start_address, |
169 | unsigned long pgtable) ATTRIB_NORET; | ||
165 | 170 | ||
166 | const extern unsigned char relocate_new_kernel[]; | 171 | const extern unsigned char relocate_new_kernel[]; |
167 | const extern unsigned long relocate_new_kernel_size; | 172 | const extern unsigned long relocate_new_kernel_size; |
@@ -172,17 +177,17 @@ int machine_kexec_prepare(struct kimage *image) | |||
172 | int result; | 177 | int result; |
173 | 178 | ||
174 | /* Calculate the offsets */ | 179 | /* Calculate the offsets */ |
175 | start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; | 180 | start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; |
176 | control_code_buffer = start_pgtable + 4096UL; | 181 | control_code_buffer = start_pgtable + 4096UL; |
177 | 182 | ||
178 | /* Setup the identity mapped 64bit page table */ | 183 | /* Setup the identity mapped 64bit page table */ |
179 | result = init_pgtable(image, start_pgtable); | 184 | result = init_pgtable(image, start_pgtable); |
180 | if (result) { | 185 | if (result) |
181 | return result; | 186 | return result; |
182 | } | ||
183 | 187 | ||
184 | /* Place the code in the reboot code buffer */ | 188 | /* Place the code in the reboot code buffer */ |
185 | memcpy(__va(control_code_buffer), relocate_new_kernel, relocate_new_kernel_size); | 189 | memcpy(__va(control_code_buffer), relocate_new_kernel, |
190 | relocate_new_kernel_size); | ||
186 | 191 | ||
187 | return 0; | 192 | return 0; |
188 | } | 193 | } |
@@ -207,8 +212,8 @@ NORET_TYPE void machine_kexec(struct kimage *image) | |||
207 | local_irq_disable(); | 212 | local_irq_disable(); |
208 | 213 | ||
209 | /* Calculate the offsets */ | 214 | /* Calculate the offsets */ |
210 | page_list = image->head; | 215 | page_list = image->head; |
211 | start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; | 216 | start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT; |
212 | control_code_buffer = start_pgtable + 4096UL; | 217 | control_code_buffer = start_pgtable + 4096UL; |
213 | 218 | ||
214 | /* Set the low half of the page table to my identity mapped | 219 | /* Set the low half of the page table to my identity mapped |