author		zhong jiang <zhongjiang@huawei.com>	2016-08-02 17:06:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-02 19:35:31 -0400
commit		1730f146604ea426e54938cdbcf87df1047ef0dc (patch)
tree		71502274a92fad2d3503b5bd2d7d12909bdda06e
parent		c0253115968c35f3e1ee497282efb75ccf29fb98 (diff)
kexec: add restriction on kexec_load() segment sizes
I hit the following issue when running trinity on my system. The kernel is version 3.4, but mainline has the same issue. The root cause is that the segment size is too large, so the kernel spends too long trying to allocate a page. Other cases will block until the test case quits. Also, OOM conditions will occur.

Call Trace:
 __alloc_pages_nodemask+0x14c/0x8f0
 alloc_pages_current+0xaf/0x120
 kimage_alloc_pages+0x10/0x60
 kimage_alloc_control_pages+0x5d/0x270
 machine_kexec_prepare+0xe5/0x6c0
 ? kimage_free_page_list+0x52/0x70
 sys_kexec_load+0x141/0x600
 ? vfs_write+0x100/0x180
 system_call_fastpath+0x16/0x1b

The patch changes sanity_check_segment_list() to verify that the combined size of all segments does not exceed half of memory.

[akpm@linux-foundation.org: fix for kexec-return-error-number-directly.patch, update comment]
Link: http://lkml.kernel.org/r/1469625474-53904-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Suggested-by: Eric W. Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Young <dyoung@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
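For illustration only (not part of the patch): a minimal userspace sketch of the kind of request the new check rejects, using the raw kexec_load(2) syscall. The entry address and segment values here are hypothetical, and the call requires CAP_SYS_BOOT (unprivileged callers get EPERM before any segment checking).

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kexec.h>

int main(void)
{
	static char buf[4096];
	struct kexec_segment seg = {
		.buf   = buf,
		.bufsz = sizeof(buf),
		.mem   = (void *)0x100000,	/* page-aligned destination */
		.memsz = 1UL << 40,		/* 1 TiB: more than half of RAM on any test box */
	};

	/* glibc ships no wrapper for kexec_load(2); invoke it directly. */
	long ret = syscall(SYS_kexec_load, 0x100000UL, 1UL, &seg,
			   (unsigned long)KEXEC_ARCH_DEFAULT);
	if (ret < 0)
		printf("kexec_load: %s\n", strerror(errno));	/* EINVAL with this patch */
	return 0;
}

Before this patch, a request like the one above could keep the kernel grinding in the page allocator; with the check in place it fails fast with -EINVAL.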
-rw-r--r--	kernel/kexec_core.c	17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 704534029a00..561675589511 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -146,6 +146,7 @@ EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  * allocating pages whose destination address we do not care about.
  */
 #define KIMAGE_NO_DEST (-1UL)
+#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
 
 static struct page *kimage_alloc_page(struct kimage *image,
 				       gfp_t gfp_mask,
@@ -155,6 +156,7 @@ int sanity_check_segment_list(struct kimage *image)
 {
 	int i;
 	unsigned long nr_segments = image->nr_segments;
+	unsigned long total_pages = 0;
 
 	/*
 	 * Verify we have good destination addresses. The caller is
@@ -215,6 +217,21 @@ int sanity_check_segment_list(struct kimage *image)
 	}
 
 	/*
+	 * Verify that no more than half of memory will be consumed. If the
+	 * request from userspace is too large, a large amount of time will be
+	 * wasted allocating pages, which can cause a soft lockup.
+	 */
+	for (i = 0; i < nr_segments; i++) {
+		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
+			return -EINVAL;
+
+		total_pages += PAGE_COUNT(image->segment[i].memsz);
+	}
+
+	if (total_pages > totalram_pages / 2)
+		return -EINVAL;
+
+	/*
 	 * Verify we have good destination addresses. Normally
 	 * the caller is responsible for making certain we don't
 	 * attempt to load the new image into invalid or reserved
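For reference, PAGE_COUNT() rounds a byte count up to whole pages, so each segment's footprint is charged conservatively. The per-segment test rejects a single oversized segment immediately, while the running total catches several moderate segments whose combined footprint crosses the same half-of-memory limit. A standalone sketch of the rounding, assuming 4 KiB pages (PAGE_SHIFT of 12):

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	assert(PAGE_COUNT(0UL)    == 0);	/* empty segment needs no pages */
	assert(PAGE_COUNT(1UL)    == 1);	/* a partial page rounds up */
	assert(PAGE_COUNT(4096UL) == 1);	/* exactly one page */
	assert(PAGE_COUNT(4097UL) == 2);	/* one extra byte costs a second page */
	return 0;
}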