Diffstat (limited to 'kernel/kexec.c')
 kernel/kexec.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index c0613f7d6730..8d814cbc8109 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swap.h>
 #include <linux/kmsg_dump.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -144,7 +145,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
         /* Initialize the list of destination pages */
         INIT_LIST_HEAD(&image->dest_pages);
 
-        /* Initialize the list of unuseable pages */
+        /* Initialize the list of unusable pages */
         INIT_LIST_HEAD(&image->unuseable_pages);
 
         /* Read in the segments */
@@ -163,7 +164,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
          * just verifies it is an address we can use.
          *
          * Since the kernel does everything in page size chunks ensure
-         * the destination addreses are page aligned.  Too many
+         * the destination addresses are page aligned.  Too many
          * special cases crop of when we don't do this.  The most
          * insidious is getting overlapping destination addresses
          * simply because addresses are changed to page size
@@ -454,7 +455,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
         /* Deal with the destination pages I have inadvertently allocated.
          *
          * Ideally I would convert multi-page allocations into single
-         * page allocations, and add everyting to image->dest_pages.
+         * page allocations, and add everything to image->dest_pages.
          *
          * For now it is simpler to just free the pages.
          */
@@ -602,7 +603,7 @@ static void kimage_free_extra_pages(struct kimage *image)
         /* Walk through and free any extra destination pages I may have */
         kimage_free_page_list(&image->dest_pages);
 
-        /* Walk through and free any unuseable pages I have cached */
+        /* Walk through and free any unusable pages I have cached */
         kimage_free_page_list(&image->unuseable_pages);
 
 }
@@ -816,7 +817,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 
                 ptr = kmap(page);
                 /* Start with a clear page */
-                memset(ptr, 0, PAGE_SIZE);
+                clear_page(ptr);
                 ptr += maddr & ~PAGE_MASK;
                 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                 if (mchunk > mbytes)
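For context: clear_page() is the kernel's page-zeroing helper, which architectures may back with an optimized routine; on the page-aligned mapping returned by kmap() above it does the same work as the memset() it replaces. A minimal sketch of the generic fallback shape (illustrative name, not the arch-specific implementations):

/* Illustrative generic fallback: zero exactly one page. */
static inline void clear_page_generic(void *page)
{
        memset(page, 0, PAGE_SIZE);
}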
@@ -1099,7 +1100,8 @@ size_t crash_get_memory_size(void)
         return size;
 }
 
-static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+void __weak crash_free_reserved_phys_range(unsigned long begin,
+                                        unsigned long end)
 {
         unsigned long addr;
 
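Renaming the helper and marking it __weak turns it into a generic default that an architecture can override by providing a non-weak definition with the same signature; the linker then selects the arch version. A minimal sketch of the override pattern (hypothetical arch file, not part of this patch):

/* e.g. in an arch's machine_kexec.c (hypothetical): a regular, non-weak
 * definition with this signature replaces the __weak generic one from
 * kernel/kexec.c at link time. */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
        /* architecture-specific freeing of the shrunk crash-kernel range */
}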
@@ -1135,7 +1137,7 @@ int crash_shrink_memory(unsigned long new_size)
         start = roundup(start, PAGE_SIZE);
         end = roundup(start + new_size, PAGE_SIZE);
 
-        free_reserved_phys_range(end, crashk_res.end);
+        crash_free_reserved_phys_range(end, crashk_res.end);
 
         if ((start == end) && (crashk_res.parent != NULL))
                 release_resource(&crashk_res);
@@ -1529,8 +1531,7 @@ int kernel_kexec(void)
                 if (error)
                         goto Enable_cpus;
                 local_irq_disable();
-                /* Suspend system devices */
-                error = sysdev_suspend(PMSG_FREEZE);
+                error = syscore_suspend();
                 if (error)
                         goto Enable_irqs;
         } else
@@ -1545,7 +1546,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
         if (kexec_image->preserve_context) {
-                sysdev_resume();
+                syscore_resume();
  Enable_irqs:
                 local_irq_enable();
  Enable_cpus:
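syscore_suspend() and syscore_resume() run the callbacks of every registered struct syscore_ops on one CPU with interrupts disabled, which is why the kexec-jump path above no longer needs the PMSG_FREEZE argument of the old sysdev_suspend() call. A minimal sketch of how code hooks into this path (hypothetical ops, assuming <linux/syscore_ops.h>):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int my_state_suspend(void)
{
        /* called by syscore_suspend() on one CPU, with IRQs disabled */
        return 0;       /* a non-zero return aborts the suspend/kexec jump */
}

static void my_state_resume(void)
{
        /* called by syscore_resume(); undo what my_state_suspend() did */
}

static struct syscore_ops my_syscore_ops = {
        .suspend = my_state_suspend,
        .resume  = my_state_resume,
};

static int __init my_syscore_init(void)
{
        register_syscore_ops(&my_syscore_ops);
        return 0;
}
subsys_initcall(my_syscore_init);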