diff options
author | Maneesh Soni <maneesh@in.ibm.com> | 2005-06-25 17:58:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-25 19:24:55 -0400 |
commit | 72414d3f1d22fc3e311b162fca95c430048d38ce (patch) | |
tree | 46850947c1602357dd3c51d8d6ebaa5805507f9f /kernel/kexec.c | |
parent | 4f339ecb30c759f94a29992d4635d9194132b6cf (diff) |
[PATCH] kexec code cleanup
o The following patch provides purely cosmetic changes and corrects CodingStyle
guideline issues, such as those below, in kexec-related files
o braces for one line "if" statements, "for" loops,
o more than 80 column wide lines,
o no space after "while", "for" and "switch" keywords
o Changes:
o take-2: Removed the extra tab before the "case" keywords.
o take-3: Put the operator at the end of the line and a space before "*/"
Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/kexec.c')
-rw-r--r-- | kernel/kexec.c | 302 |
1 files changed, 160 insertions, 142 deletions
diff --git a/kernel/kexec.c b/kernel/kexec.c index 277f22afe74..7843548cf2d 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -87,12 +87,15 @@ int kexec_should_crash(struct task_struct *p) | |||
87 | */ | 87 | */ |
88 | #define KIMAGE_NO_DEST (-1UL) | 88 | #define KIMAGE_NO_DEST (-1UL) |
89 | 89 | ||
90 | static int kimage_is_destination_range( | 90 | static int kimage_is_destination_range(struct kimage *image, |
91 | struct kimage *image, unsigned long start, unsigned long end); | 91 | unsigned long start, unsigned long end); |
92 | static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest); | 92 | static struct page *kimage_alloc_page(struct kimage *image, |
93 | unsigned int gfp_mask, | ||
94 | unsigned long dest); | ||
93 | 95 | ||
94 | static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, | 96 | static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, |
95 | unsigned long nr_segments, struct kexec_segment __user *segments) | 97 | unsigned long nr_segments, |
98 | struct kexec_segment __user *segments) | ||
96 | { | 99 | { |
97 | size_t segment_bytes; | 100 | size_t segment_bytes; |
98 | struct kimage *image; | 101 | struct kimage *image; |
@@ -102,9 +105,9 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, | |||
102 | /* Allocate a controlling structure */ | 105 | /* Allocate a controlling structure */ |
103 | result = -ENOMEM; | 106 | result = -ENOMEM; |
104 | image = kmalloc(sizeof(*image), GFP_KERNEL); | 107 | image = kmalloc(sizeof(*image), GFP_KERNEL); |
105 | if (!image) { | 108 | if (!image) |
106 | goto out; | 109 | goto out; |
107 | } | 110 | |
108 | memset(image, 0, sizeof(*image)); | 111 | memset(image, 0, sizeof(*image)); |
109 | image->head = 0; | 112 | image->head = 0; |
110 | image->entry = &image->head; | 113 | image->entry = &image->head; |
@@ -145,6 +148,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, | |||
145 | result = -EADDRNOTAVAIL; | 148 | result = -EADDRNOTAVAIL; |
146 | for (i = 0; i < nr_segments; i++) { | 149 | for (i = 0; i < nr_segments; i++) { |
147 | unsigned long mstart, mend; | 150 | unsigned long mstart, mend; |
151 | |||
148 | mstart = image->segment[i].mem; | 152 | mstart = image->segment[i].mem; |
149 | mend = mstart + image->segment[i].memsz; | 153 | mend = mstart + image->segment[i].memsz; |
150 | if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) | 154 | if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) |
@@ -159,12 +163,13 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, | |||
159 | * easy explanation as one segment stops on another. | 163 | * easy explanation as one segment stops on another. |
160 | */ | 164 | */ |
161 | result = -EINVAL; | 165 | result = -EINVAL; |
162 | for(i = 0; i < nr_segments; i++) { | 166 | for (i = 0; i < nr_segments; i++) { |
163 | unsigned long mstart, mend; | 167 | unsigned long mstart, mend; |
164 | unsigned long j; | 168 | unsigned long j; |
169 | |||
165 | mstart = image->segment[i].mem; | 170 | mstart = image->segment[i].mem; |
166 | mend = mstart + image->segment[i].memsz; | 171 | mend = mstart + image->segment[i].memsz; |
167 | for(j = 0; j < i; j++) { | 172 | for (j = 0; j < i; j++) { |
168 | unsigned long pstart, pend; | 173 | unsigned long pstart, pend; |
169 | pstart = image->segment[j].mem; | 174 | pstart = image->segment[j].mem; |
170 | pend = pstart + image->segment[j].memsz; | 175 | pend = pstart + image->segment[j].memsz; |
@@ -180,25 +185,25 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, | |||
180 | * later on. | 185 | * later on. |
181 | */ | 186 | */ |
182 | result = -EINVAL; | 187 | result = -EINVAL; |
183 | for(i = 0; i < nr_segments; i++) { | 188 | for (i = 0; i < nr_segments; i++) { |
184 | if (image->segment[i].bufsz > image->segment[i].memsz) | 189 | if (image->segment[i].bufsz > image->segment[i].memsz) |
185 | goto out; | 190 | goto out; |
186 | } | 191 | } |
187 | 192 | ||
188 | |||
189 | result = 0; | 193 | result = 0; |
190 | out: | 194 | out: |
191 | if (result == 0) { | 195 | if (result == 0) |
192 | *rimage = image; | 196 | *rimage = image; |
193 | } else { | 197 | else |
194 | kfree(image); | 198 | kfree(image); |
195 | } | 199 | |
196 | return result; | 200 | return result; |
197 | 201 | ||
198 | } | 202 | } |
199 | 203 | ||
200 | static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, | 204 | static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, |
201 | unsigned long nr_segments, struct kexec_segment __user *segments) | 205 | unsigned long nr_segments, |
206 | struct kexec_segment __user *segments) | ||
202 | { | 207 | { |
203 | int result; | 208 | int result; |
204 | struct kimage *image; | 209 | struct kimage *image; |
@@ -206,9 +211,9 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, | |||
206 | /* Allocate and initialize a controlling structure */ | 211 | /* Allocate and initialize a controlling structure */ |
207 | image = NULL; | 212 | image = NULL; |
208 | result = do_kimage_alloc(&image, entry, nr_segments, segments); | 213 | result = do_kimage_alloc(&image, entry, nr_segments, segments); |
209 | if (result) { | 214 | if (result) |
210 | goto out; | 215 | goto out; |
211 | } | 216 | |
212 | *rimage = image; | 217 | *rimage = image; |
213 | 218 | ||
214 | /* | 219 | /* |
@@ -218,7 +223,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, | |||
218 | */ | 223 | */ |
219 | result = -ENOMEM; | 224 | result = -ENOMEM; |
220 | image->control_code_page = kimage_alloc_control_pages(image, | 225 | image->control_code_page = kimage_alloc_control_pages(image, |
221 | get_order(KEXEC_CONTROL_CODE_SIZE)); | 226 | get_order(KEXEC_CONTROL_CODE_SIZE)); |
222 | if (!image->control_code_page) { | 227 | if (!image->control_code_page) { |
223 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); | 228 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); |
224 | goto out; | 229 | goto out; |
@@ -226,16 +231,17 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, | |||
226 | 231 | ||
227 | result = 0; | 232 | result = 0; |
228 | out: | 233 | out: |
229 | if (result == 0) { | 234 | if (result == 0) |
230 | *rimage = image; | 235 | *rimage = image; |
231 | } else { | 236 | else |
232 | kfree(image); | 237 | kfree(image); |
233 | } | 238 | |
234 | return result; | 239 | return result; |
235 | } | 240 | } |
236 | 241 | ||
237 | static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | 242 | static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, |
238 | unsigned long nr_segments, struct kexec_segment *segments) | 243 | unsigned long nr_segments, |
244 | struct kexec_segment *segments) | ||
239 | { | 245 | { |
240 | int result; | 246 | int result; |
241 | struct kimage *image; | 247 | struct kimage *image; |
@@ -250,9 +256,8 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | |||
250 | 256 | ||
251 | /* Allocate and initialize a controlling structure */ | 257 | /* Allocate and initialize a controlling structure */ |
252 | result = do_kimage_alloc(&image, entry, nr_segments, segments); | 258 | result = do_kimage_alloc(&image, entry, nr_segments, segments); |
253 | if (result) { | 259 | if (result) |
254 | goto out; | 260 | goto out; |
255 | } | ||
256 | 261 | ||
257 | /* Enable the special crash kernel control page | 262 | /* Enable the special crash kernel control page |
258 | * allocation policy. | 263 | * allocation policy. |
@@ -272,6 +277,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | |||
272 | result = -EADDRNOTAVAIL; | 277 | result = -EADDRNOTAVAIL; |
273 | for (i = 0; i < nr_segments; i++) { | 278 | for (i = 0; i < nr_segments; i++) { |
274 | unsigned long mstart, mend; | 279 | unsigned long mstart, mend; |
280 | |||
275 | mstart = image->segment[i].mem; | 281 | mstart = image->segment[i].mem; |
276 | mend = mstart + image->segment[i].memsz - 1; | 282 | mend = mstart + image->segment[i].memsz - 1; |
277 | /* Ensure we are within the crash kernel limits */ | 283 | /* Ensure we are within the crash kernel limits */ |
@@ -279,7 +285,6 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | |||
279 | goto out; | 285 | goto out; |
280 | } | 286 | } |
281 | 287 | ||
282 | |||
283 | /* | 288 | /* |
284 | * Find a location for the control code buffer, and add | 289 | * Find a location for the control code buffer, and add |
285 | * the vector of segments so that it's pages will also be | 290 | * the vector of segments so that it's pages will also be |
@@ -287,80 +292,84 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | |||
287 | */ | 292 | */ |
288 | result = -ENOMEM; | 293 | result = -ENOMEM; |
289 | image->control_code_page = kimage_alloc_control_pages(image, | 294 | image->control_code_page = kimage_alloc_control_pages(image, |
290 | get_order(KEXEC_CONTROL_CODE_SIZE)); | 295 | get_order(KEXEC_CONTROL_CODE_SIZE)); |
291 | if (!image->control_code_page) { | 296 | if (!image->control_code_page) { |
292 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); | 297 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); |
293 | goto out; | 298 | goto out; |
294 | } | 299 | } |
295 | 300 | ||
296 | result = 0; | 301 | result = 0; |
297 | out: | 302 | out: |
298 | if (result == 0) { | 303 | if (result == 0) |
299 | *rimage = image; | 304 | *rimage = image; |
300 | } else { | 305 | else |
301 | kfree(image); | 306 | kfree(image); |
302 | } | 307 | |
303 | return result; | 308 | return result; |
304 | } | 309 | } |
305 | 310 | ||
306 | static int kimage_is_destination_range( | 311 | static int kimage_is_destination_range(struct kimage *image, |
307 | struct kimage *image, unsigned long start, unsigned long end) | 312 | unsigned long start, |
313 | unsigned long end) | ||
308 | { | 314 | { |
309 | unsigned long i; | 315 | unsigned long i; |
310 | 316 | ||
311 | for (i = 0; i < image->nr_segments; i++) { | 317 | for (i = 0; i < image->nr_segments; i++) { |
312 | unsigned long mstart, mend; | 318 | unsigned long mstart, mend; |
319 | |||
313 | mstart = image->segment[i].mem; | 320 | mstart = image->segment[i].mem; |
314 | mend = mstart + image->segment[i].memsz; | 321 | mend = mstart + image->segment[i].memsz; |
315 | if ((end > mstart) && (start < mend)) { | 322 | if ((end > mstart) && (start < mend)) |
316 | return 1; | 323 | return 1; |
317 | } | ||
318 | } | 324 | } |
325 | |||
319 | return 0; | 326 | return 0; |
320 | } | 327 | } |
321 | 328 | ||
322 | static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order) | 329 | static struct page *kimage_alloc_pages(unsigned int gfp_mask, |
330 | unsigned int order) | ||
323 | { | 331 | { |
324 | struct page *pages; | 332 | struct page *pages; |
333 | |||
325 | pages = alloc_pages(gfp_mask, order); | 334 | pages = alloc_pages(gfp_mask, order); |
326 | if (pages) { | 335 | if (pages) { |
327 | unsigned int count, i; | 336 | unsigned int count, i; |
328 | pages->mapping = NULL; | 337 | pages->mapping = NULL; |
329 | pages->private = order; | 338 | pages->private = order; |
330 | count = 1 << order; | 339 | count = 1 << order; |
331 | for(i = 0; i < count; i++) { | 340 | for (i = 0; i < count; i++) |
332 | SetPageReserved(pages + i); | 341 | SetPageReserved(pages + i); |
333 | } | ||
334 | } | 342 | } |
343 | |||
335 | return pages; | 344 | return pages; |
336 | } | 345 | } |
337 | 346 | ||
338 | static void kimage_free_pages(struct page *page) | 347 | static void kimage_free_pages(struct page *page) |
339 | { | 348 | { |
340 | unsigned int order, count, i; | 349 | unsigned int order, count, i; |
350 | |||
341 | order = page->private; | 351 | order = page->private; |
342 | count = 1 << order; | 352 | count = 1 << order; |
343 | for(i = 0; i < count; i++) { | 353 | for (i = 0; i < count; i++) |
344 | ClearPageReserved(page + i); | 354 | ClearPageReserved(page + i); |
345 | } | ||
346 | __free_pages(page, order); | 355 | __free_pages(page, order); |
347 | } | 356 | } |
348 | 357 | ||
349 | static void kimage_free_page_list(struct list_head *list) | 358 | static void kimage_free_page_list(struct list_head *list) |
350 | { | 359 | { |
351 | struct list_head *pos, *next; | 360 | struct list_head *pos, *next; |
361 | |||
352 | list_for_each_safe(pos, next, list) { | 362 | list_for_each_safe(pos, next, list) { |
353 | struct page *page; | 363 | struct page *page; |
354 | 364 | ||
355 | page = list_entry(pos, struct page, lru); | 365 | page = list_entry(pos, struct page, lru); |
356 | list_del(&page->lru); | 366 | list_del(&page->lru); |
357 | |||
358 | kimage_free_pages(page); | 367 | kimage_free_pages(page); |
359 | } | 368 | } |
360 | } | 369 | } |
361 | 370 | ||
362 | static struct page *kimage_alloc_normal_control_pages( | 371 | static struct page *kimage_alloc_normal_control_pages(struct kimage *image, |
363 | struct kimage *image, unsigned int order) | 372 | unsigned int order) |
364 | { | 373 | { |
365 | /* Control pages are special, they are the intermediaries | 374 | /* Control pages are special, they are the intermediaries |
366 | * that are needed while we copy the rest of the pages | 375 | * that are needed while we copy the rest of the pages |
@@ -387,6 +396,7 @@ static struct page *kimage_alloc_normal_control_pages( | |||
387 | */ | 396 | */ |
388 | do { | 397 | do { |
389 | unsigned long pfn, epfn, addr, eaddr; | 398 | unsigned long pfn, epfn, addr, eaddr; |
399 | |||
390 | pages = kimage_alloc_pages(GFP_KERNEL, order); | 400 | pages = kimage_alloc_pages(GFP_KERNEL, order); |
391 | if (!pages) | 401 | if (!pages) |
392 | break; | 402 | break; |
@@ -395,12 +405,12 @@ static struct page *kimage_alloc_normal_control_pages( | |||
395 | addr = pfn << PAGE_SHIFT; | 405 | addr = pfn << PAGE_SHIFT; |
396 | eaddr = epfn << PAGE_SHIFT; | 406 | eaddr = epfn << PAGE_SHIFT; |
397 | if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || | 407 | if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || |
398 | kimage_is_destination_range(image, addr, eaddr)) | 408 | kimage_is_destination_range(image, addr, eaddr)) { |
399 | { | ||
400 | list_add(&pages->lru, &extra_pages); | 409 | list_add(&pages->lru, &extra_pages); |
401 | pages = NULL; | 410 | pages = NULL; |
402 | } | 411 | } |
403 | } while(!pages); | 412 | } while (!pages); |
413 | |||
404 | if (pages) { | 414 | if (pages) { |
405 | /* Remember the allocated page... */ | 415 | /* Remember the allocated page... */ |
406 | list_add(&pages->lru, &image->control_pages); | 416 | list_add(&pages->lru, &image->control_pages); |
@@ -420,12 +430,12 @@ static struct page *kimage_alloc_normal_control_pages( | |||
420 | * For now it is simpler to just free the pages. | 430 | * For now it is simpler to just free the pages. |
421 | */ | 431 | */ |
422 | kimage_free_page_list(&extra_pages); | 432 | kimage_free_page_list(&extra_pages); |
423 | return pages; | ||
424 | 433 | ||
434 | return pages; | ||
425 | } | 435 | } |
426 | 436 | ||
427 | static struct page *kimage_alloc_crash_control_pages( | 437 | static struct page *kimage_alloc_crash_control_pages(struct kimage *image, |
428 | struct kimage *image, unsigned int order) | 438 | unsigned int order) |
429 | { | 439 | { |
430 | /* Control pages are special, they are the intermediaries | 440 | /* Control pages are special, they are the intermediaries |
431 | * that are needed while we copy the rest of the pages | 441 | * that are needed while we copy the rest of the pages |
@@ -450,21 +460,22 @@ static struct page *kimage_alloc_crash_control_pages( | |||
450 | */ | 460 | */ |
451 | unsigned long hole_start, hole_end, size; | 461 | unsigned long hole_start, hole_end, size; |
452 | struct page *pages; | 462 | struct page *pages; |
463 | |||
453 | pages = NULL; | 464 | pages = NULL; |
454 | size = (1 << order) << PAGE_SHIFT; | 465 | size = (1 << order) << PAGE_SHIFT; |
455 | hole_start = (image->control_page + (size - 1)) & ~(size - 1); | 466 | hole_start = (image->control_page + (size - 1)) & ~(size - 1); |
456 | hole_end = hole_start + size - 1; | 467 | hole_end = hole_start + size - 1; |
457 | while(hole_end <= crashk_res.end) { | 468 | while (hole_end <= crashk_res.end) { |
458 | unsigned long i; | 469 | unsigned long i; |
459 | if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) { | 470 | |
471 | if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) | ||
460 | break; | 472 | break; |
461 | } | 473 | if (hole_end > crashk_res.end) |
462 | if (hole_end > crashk_res.end) { | ||
463 | break; | 474 | break; |
464 | } | ||
465 | /* See if I overlap any of the segments */ | 475 | /* See if I overlap any of the segments */ |
466 | for(i = 0; i < image->nr_segments; i++) { | 476 | for (i = 0; i < image->nr_segments; i++) { |
467 | unsigned long mstart, mend; | 477 | unsigned long mstart, mend; |
478 | |||
468 | mstart = image->segment[i].mem; | 479 | mstart = image->segment[i].mem; |
469 | mend = mstart + image->segment[i].memsz - 1; | 480 | mend = mstart + image->segment[i].memsz - 1; |
470 | if ((hole_end >= mstart) && (hole_start <= mend)) { | 481 | if ((hole_end >= mstart) && (hole_start <= mend)) { |
@@ -480,18 +491,19 @@ static struct page *kimage_alloc_crash_control_pages( | |||
480 | break; | 491 | break; |
481 | } | 492 | } |
482 | } | 493 | } |
483 | if (pages) { | 494 | if (pages) |
484 | image->control_page = hole_end; | 495 | image->control_page = hole_end; |
485 | } | 496 | |
486 | return pages; | 497 | return pages; |
487 | } | 498 | } |
488 | 499 | ||
489 | 500 | ||
490 | struct page *kimage_alloc_control_pages( | 501 | struct page *kimage_alloc_control_pages(struct kimage *image, |
491 | struct kimage *image, unsigned int order) | 502 | unsigned int order) |
492 | { | 503 | { |
493 | struct page *pages = NULL; | 504 | struct page *pages = NULL; |
494 | switch(image->type) { | 505 | |
506 | switch (image->type) { | ||
495 | case KEXEC_TYPE_DEFAULT: | 507 | case KEXEC_TYPE_DEFAULT: |
496 | pages = kimage_alloc_normal_control_pages(image, order); | 508 | pages = kimage_alloc_normal_control_pages(image, order); |
497 | break; | 509 | break; |
@@ -499,43 +511,46 @@ struct page *kimage_alloc_control_pages( | |||
499 | pages = kimage_alloc_crash_control_pages(image, order); | 511 | pages = kimage_alloc_crash_control_pages(image, order); |
500 | break; | 512 | break; |
501 | } | 513 | } |
514 | |||
502 | return pages; | 515 | return pages; |
503 | } | 516 | } |
504 | 517 | ||
505 | static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) | 518 | static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) |
506 | { | 519 | { |
507 | if (*image->entry != 0) { | 520 | if (*image->entry != 0) |
508 | image->entry++; | 521 | image->entry++; |
509 | } | 522 | |
510 | if (image->entry == image->last_entry) { | 523 | if (image->entry == image->last_entry) { |
511 | kimage_entry_t *ind_page; | 524 | kimage_entry_t *ind_page; |
512 | struct page *page; | 525 | struct page *page; |
526 | |||
513 | page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); | 527 | page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); |
514 | if (!page) { | 528 | if (!page) |
515 | return -ENOMEM; | 529 | return -ENOMEM; |
516 | } | 530 | |
517 | ind_page = page_address(page); | 531 | ind_page = page_address(page); |
518 | *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; | 532 | *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; |
519 | image->entry = ind_page; | 533 | image->entry = ind_page; |
520 | image->last_entry = | 534 | image->last_entry = ind_page + |
521 | ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); | 535 | ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); |
522 | } | 536 | } |
523 | *image->entry = entry; | 537 | *image->entry = entry; |
524 | image->entry++; | 538 | image->entry++; |
525 | *image->entry = 0; | 539 | *image->entry = 0; |
540 | |||
526 | return 0; | 541 | return 0; |
527 | } | 542 | } |
528 | 543 | ||
529 | static int kimage_set_destination( | 544 | static int kimage_set_destination(struct kimage *image, |
530 | struct kimage *image, unsigned long destination) | 545 | unsigned long destination) |
531 | { | 546 | { |
532 | int result; | 547 | int result; |
533 | 548 | ||
534 | destination &= PAGE_MASK; | 549 | destination &= PAGE_MASK; |
535 | result = kimage_add_entry(image, destination | IND_DESTINATION); | 550 | result = kimage_add_entry(image, destination | IND_DESTINATION); |
536 | if (result == 0) { | 551 | if (result == 0) |
537 | image->destination = destination; | 552 | image->destination = destination; |
538 | } | 553 | |
539 | return result; | 554 | return result; |
540 | } | 555 | } |
541 | 556 | ||
@@ -546,9 +561,9 @@ static int kimage_add_page(struct kimage *image, unsigned long page) | |||
546 | 561 | ||
547 | page &= PAGE_MASK; | 562 | page &= PAGE_MASK; |
548 | result = kimage_add_entry(image, page | IND_SOURCE); | 563 | result = kimage_add_entry(image, page | IND_SOURCE); |
549 | if (result == 0) { | 564 | if (result == 0) |
550 | image->destination += PAGE_SIZE; | 565 | image->destination += PAGE_SIZE; |
551 | } | 566 | |
552 | return result; | 567 | return result; |
553 | } | 568 | } |
554 | 569 | ||
@@ -564,10 +579,11 @@ static void kimage_free_extra_pages(struct kimage *image) | |||
564 | } | 579 | } |
565 | static int kimage_terminate(struct kimage *image) | 580 | static int kimage_terminate(struct kimage *image) |
566 | { | 581 | { |
567 | if (*image->entry != 0) { | 582 | if (*image->entry != 0) |
568 | image->entry++; | 583 | image->entry++; |
569 | } | 584 | |
570 | *image->entry = IND_DONE; | 585 | *image->entry = IND_DONE; |
586 | |||
571 | return 0; | 587 | return 0; |
572 | } | 588 | } |
573 | 589 | ||
@@ -591,26 +607,24 @@ static void kimage_free(struct kimage *image) | |||
591 | 607 | ||
592 | if (!image) | 608 | if (!image) |
593 | return; | 609 | return; |
610 | |||
594 | kimage_free_extra_pages(image); | 611 | kimage_free_extra_pages(image); |
595 | for_each_kimage_entry(image, ptr, entry) { | 612 | for_each_kimage_entry(image, ptr, entry) { |
596 | if (entry & IND_INDIRECTION) { | 613 | if (entry & IND_INDIRECTION) { |
597 | /* Free the previous indirection page */ | 614 | /* Free the previous indirection page */ |
598 | if (ind & IND_INDIRECTION) { | 615 | if (ind & IND_INDIRECTION) |
599 | kimage_free_entry(ind); | 616 | kimage_free_entry(ind); |
600 | } | ||
601 | /* Save this indirection page until we are | 617 | /* Save this indirection page until we are |
602 | * done with it. | 618 | * done with it. |
603 | */ | 619 | */ |
604 | ind = entry; | 620 | ind = entry; |
605 | } | 621 | } |
606 | else if (entry & IND_SOURCE) { | 622 | else if (entry & IND_SOURCE) |
607 | kimage_free_entry(entry); | 623 | kimage_free_entry(entry); |
608 | } | ||
609 | } | 624 | } |
610 | /* Free the final indirection page */ | 625 | /* Free the final indirection page */ |
611 | if (ind & IND_INDIRECTION) { | 626 | if (ind & IND_INDIRECTION) |
612 | kimage_free_entry(ind); | 627 | kimage_free_entry(ind); |
613 | } | ||
614 | 628 | ||
615 | /* Handle any machine specific cleanup */ | 629 | /* Handle any machine specific cleanup */ |
616 | machine_kexec_cleanup(image); | 630 | machine_kexec_cleanup(image); |
@@ -620,26 +634,28 @@ static void kimage_free(struct kimage *image) | |||
620 | kfree(image); | 634 | kfree(image); |
621 | } | 635 | } |
622 | 636 | ||
623 | static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page) | 637 | static kimage_entry_t *kimage_dst_used(struct kimage *image, |
638 | unsigned long page) | ||
624 | { | 639 | { |
625 | kimage_entry_t *ptr, entry; | 640 | kimage_entry_t *ptr, entry; |
626 | unsigned long destination = 0; | 641 | unsigned long destination = 0; |
627 | 642 | ||
628 | for_each_kimage_entry(image, ptr, entry) { | 643 | for_each_kimage_entry(image, ptr, entry) { |
629 | if (entry & IND_DESTINATION) { | 644 | if (entry & IND_DESTINATION) |
630 | destination = entry & PAGE_MASK; | 645 | destination = entry & PAGE_MASK; |
631 | } | ||
632 | else if (entry & IND_SOURCE) { | 646 | else if (entry & IND_SOURCE) { |
633 | if (page == destination) { | 647 | if (page == destination) |
634 | return ptr; | 648 | return ptr; |
635 | } | ||
636 | destination += PAGE_SIZE; | 649 | destination += PAGE_SIZE; |
637 | } | 650 | } |
638 | } | 651 | } |
652 | |||
639 | return 0; | 653 | return 0; |
640 | } | 654 | } |
641 | 655 | ||
642 | static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination) | 656 | static struct page *kimage_alloc_page(struct kimage *image, |
657 | unsigned int gfp_mask, | ||
658 | unsigned long destination) | ||
643 | { | 659 | { |
644 | /* | 660 | /* |
645 | * Here we implement safeguards to ensure that a source page | 661 | * Here we implement safeguards to ensure that a source page |
@@ -679,11 +695,11 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas | |||
679 | 695 | ||
680 | /* Allocate a page, if we run out of memory give up */ | 696 | /* Allocate a page, if we run out of memory give up */ |
681 | page = kimage_alloc_pages(gfp_mask, 0); | 697 | page = kimage_alloc_pages(gfp_mask, 0); |
682 | if (!page) { | 698 | if (!page) |
683 | return 0; | 699 | return 0; |
684 | } | ||
685 | /* If the page cannot be used file it away */ | 700 | /* If the page cannot be used file it away */ |
686 | if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { | 701 | if (page_to_pfn(page) > |
702 | (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { | ||
687 | list_add(&page->lru, &image->unuseable_pages); | 703 | list_add(&page->lru, &image->unuseable_pages); |
688 | continue; | 704 | continue; |
689 | } | 705 | } |
@@ -694,7 +710,8 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas | |||
694 | break; | 710 | break; |
695 | 711 | ||
696 | /* If the page is not a destination page use it */ | 712 | /* If the page is not a destination page use it */ |
697 | if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE)) | 713 | if (!kimage_is_destination_range(image, addr, |
714 | addr + PAGE_SIZE)) | ||
698 | break; | 715 | break; |
699 | 716 | ||
700 | /* | 717 | /* |
@@ -727,11 +744,12 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas | |||
727 | list_add(&page->lru, &image->dest_pages); | 744 | list_add(&page->lru, &image->dest_pages); |
728 | } | 745 | } |
729 | } | 746 | } |
747 | |||
730 | return page; | 748 | return page; |
731 | } | 749 | } |
732 | 750 | ||
733 | static int kimage_load_normal_segment(struct kimage *image, | 751 | static int kimage_load_normal_segment(struct kimage *image, |
734 | struct kexec_segment *segment) | 752 | struct kexec_segment *segment) |
735 | { | 753 | { |
736 | unsigned long maddr; | 754 | unsigned long maddr; |
737 | unsigned long ubytes, mbytes; | 755 | unsigned long ubytes, mbytes; |
@@ -745,34 +763,36 @@ static int kimage_load_normal_segment(struct kimage *image, | |||
745 | maddr = segment->mem; | 763 | maddr = segment->mem; |
746 | 764 | ||
747 | result = kimage_set_destination(image, maddr); | 765 | result = kimage_set_destination(image, maddr); |
748 | if (result < 0) { | 766 | if (result < 0) |
749 | goto out; | 767 | goto out; |
750 | } | 768 | |
751 | while(mbytes) { | 769 | while (mbytes) { |
752 | struct page *page; | 770 | struct page *page; |
753 | char *ptr; | 771 | char *ptr; |
754 | size_t uchunk, mchunk; | 772 | size_t uchunk, mchunk; |
773 | |||
755 | page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); | 774 | page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); |
756 | if (page == 0) { | 775 | if (page == 0) { |
757 | result = -ENOMEM; | 776 | result = -ENOMEM; |
758 | goto out; | 777 | goto out; |
759 | } | 778 | } |
760 | result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT); | 779 | result = kimage_add_page(image, page_to_pfn(page) |
761 | if (result < 0) { | 780 | << PAGE_SHIFT); |
781 | if (result < 0) | ||
762 | goto out; | 782 | goto out; |
763 | } | 783 | |
764 | ptr = kmap(page); | 784 | ptr = kmap(page); |
765 | /* Start with a clear page */ | 785 | /* Start with a clear page */ |
766 | memset(ptr, 0, PAGE_SIZE); | 786 | memset(ptr, 0, PAGE_SIZE); |
767 | ptr += maddr & ~PAGE_MASK; | 787 | ptr += maddr & ~PAGE_MASK; |
768 | mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); | 788 | mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); |
769 | if (mchunk > mbytes) { | 789 | if (mchunk > mbytes) |
770 | mchunk = mbytes; | 790 | mchunk = mbytes; |
771 | } | 791 | |
772 | uchunk = mchunk; | 792 | uchunk = mchunk; |
773 | if (uchunk > ubytes) { | 793 | if (uchunk > ubytes) |
774 | uchunk = ubytes; | 794 | uchunk = ubytes; |
775 | } | 795 | |
776 | result = copy_from_user(ptr, buf, uchunk); | 796 | result = copy_from_user(ptr, buf, uchunk); |
777 | kunmap(page); | 797 | kunmap(page); |
778 | if (result) { | 798 | if (result) { |
@@ -784,12 +804,12 @@ static int kimage_load_normal_segment(struct kimage *image, | |||
784 | buf += mchunk; | 804 | buf += mchunk; |
785 | mbytes -= mchunk; | 805 | mbytes -= mchunk; |
786 | } | 806 | } |
787 | out: | 807 | out: |
788 | return result; | 808 | return result; |
789 | } | 809 | } |
790 | 810 | ||
791 | static int kimage_load_crash_segment(struct kimage *image, | 811 | static int kimage_load_crash_segment(struct kimage *image, |
792 | struct kexec_segment *segment) | 812 | struct kexec_segment *segment) |
793 | { | 813 | { |
794 | /* For crash dumps kernels we simply copy the data from | 814 | /* For crash dumps kernels we simply copy the data from |
795 | * user space to it's destination. | 815 | * user space to it's destination. |
@@ -805,10 +825,11 @@ static int kimage_load_crash_segment(struct kimage *image, | |||
805 | ubytes = segment->bufsz; | 825 | ubytes = segment->bufsz; |
806 | mbytes = segment->memsz; | 826 | mbytes = segment->memsz; |
807 | maddr = segment->mem; | 827 | maddr = segment->mem; |
808 | while(mbytes) { | 828 | while (mbytes) { |
809 | struct page *page; | 829 | struct page *page; |
810 | char *ptr; | 830 | char *ptr; |
811 | size_t uchunk, mchunk; | 831 | size_t uchunk, mchunk; |
832 | |||
812 | page = pfn_to_page(maddr >> PAGE_SHIFT); | 833 | page = pfn_to_page(maddr >> PAGE_SHIFT); |
813 | if (page == 0) { | 834 | if (page == 0) { |
814 | result = -ENOMEM; | 835 | result = -ENOMEM; |
@@ -817,9 +838,9 @@ static int kimage_load_crash_segment(struct kimage *image, | |||
817 | ptr = kmap(page); | 838 | ptr = kmap(page); |
818 | ptr += maddr & ~PAGE_MASK; | 839 | ptr += maddr & ~PAGE_MASK; |
819 | mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); | 840 | mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); |
820 | if (mchunk > mbytes) { | 841 | if (mchunk > mbytes) |
821 | mchunk = mbytes; | 842 | mchunk = mbytes; |
822 | } | 843 | |
823 | uchunk = mchunk; | 844 | uchunk = mchunk; |
824 | if (uchunk > ubytes) { | 845 | if (uchunk > ubytes) { |
825 | uchunk = ubytes; | 846 | uchunk = ubytes; |
@@ -837,15 +858,16 @@ static int kimage_load_crash_segment(struct kimage *image, | |||
837 | buf += mchunk; | 858 | buf += mchunk; |
838 | mbytes -= mchunk; | 859 | mbytes -= mchunk; |
839 | } | 860 | } |
840 | out: | 861 | out: |
841 | return result; | 862 | return result; |
842 | } | 863 | } |
843 | 864 | ||
844 | static int kimage_load_segment(struct kimage *image, | 865 | static int kimage_load_segment(struct kimage *image, |
845 | struct kexec_segment *segment) | 866 | struct kexec_segment *segment) |
846 | { | 867 | { |
847 | int result = -ENOMEM; | 868 | int result = -ENOMEM; |
848 | switch(image->type) { | 869 | |
870 | switch (image->type) { | ||
849 | case KEXEC_TYPE_DEFAULT: | 871 | case KEXEC_TYPE_DEFAULT: |
850 | result = kimage_load_normal_segment(image, segment); | 872 | result = kimage_load_normal_segment(image, segment); |
851 | break; | 873 | break; |
@@ -853,6 +875,7 @@ static int kimage_load_segment(struct kimage *image, | |||
853 | result = kimage_load_crash_segment(image, segment); | 875 | result = kimage_load_crash_segment(image, segment); |
854 | break; | 876 | break; |
855 | } | 877 | } |
878 | |||
856 | return result; | 879 | return result; |
857 | } | 880 | } |
858 | 881 | ||
@@ -885,9 +908,9 @@ static struct kimage *kexec_crash_image = NULL; | |||
885 | */ | 908 | */ |
886 | static int kexec_lock = 0; | 909 | static int kexec_lock = 0; |
887 | 910 | ||
888 | asmlinkage long sys_kexec_load(unsigned long entry, | 911 | asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, |
889 | unsigned long nr_segments, struct kexec_segment __user *segments, | 912 | struct kexec_segment __user *segments, |
890 | unsigned long flags) | 913 | unsigned long flags) |
891 | { | 914 | { |
892 | struct kimage **dest_image, *image; | 915 | struct kimage **dest_image, *image; |
893 | int locked; | 916 | int locked; |
@@ -907,9 +930,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, | |||
907 | /* Verify we are on the appropriate architecture */ | 930 | /* Verify we are on the appropriate architecture */ |
908 | if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && | 931 | if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) && |
909 | ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) | 932 | ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT)) |
910 | { | ||
911 | return -EINVAL; | 933 | return -EINVAL; |
912 | } | ||
913 | 934 | ||
914 | /* Put an artificial cap on the number | 935 | /* Put an artificial cap on the number |
915 | * of segments passed to kexec_load. | 936 | * of segments passed to kexec_load. |
@@ -929,58 +950,59 @@ asmlinkage long sys_kexec_load(unsigned long entry, | |||
929 | * KISS: always take the mutex. | 950 | * KISS: always take the mutex. |
930 | */ | 951 | */ |
931 | locked = xchg(&kexec_lock, 1); | 952 | locked = xchg(&kexec_lock, 1); |
932 | if (locked) { | 953 | if (locked) |
933 | return -EBUSY; | 954 | return -EBUSY; |
934 | } | 955 | |
935 | dest_image = &kexec_image; | 956 | dest_image = &kexec_image; |
936 | if (flags & KEXEC_ON_CRASH) { | 957 | if (flags & KEXEC_ON_CRASH) |
937 | dest_image = &kexec_crash_image; | 958 | dest_image = &kexec_crash_image; |
938 | } | ||
939 | if (nr_segments > 0) { | 959 | if (nr_segments > 0) { |
940 | unsigned long i; | 960 | unsigned long i; |
961 | |||
941 | /* Loading another kernel to reboot into */ | 962 | /* Loading another kernel to reboot into */ |
942 | if ((flags & KEXEC_ON_CRASH) == 0) { | 963 | if ((flags & KEXEC_ON_CRASH) == 0) |
943 | result = kimage_normal_alloc(&image, entry, nr_segments, segments); | 964 | result = kimage_normal_alloc(&image, entry, |
944 | } | 965 | nr_segments, segments); |
945 | /* Loading another kernel to switch to if this one crashes */ | 966 | /* Loading another kernel to switch to if this one crashes */ |
946 | else if (flags & KEXEC_ON_CRASH) { | 967 | else if (flags & KEXEC_ON_CRASH) { |
947 | /* Free any current crash dump kernel before | 968 | /* Free any current crash dump kernel before |
948 | * we corrupt it. | 969 | * we corrupt it. |
949 | */ | 970 | */ |
950 | kimage_free(xchg(&kexec_crash_image, NULL)); | 971 | kimage_free(xchg(&kexec_crash_image, NULL)); |
951 | result = kimage_crash_alloc(&image, entry, nr_segments, segments); | 972 | result = kimage_crash_alloc(&image, entry, |
973 | nr_segments, segments); | ||
952 | } | 974 | } |
953 | if (result) { | 975 | if (result) |
954 | goto out; | 976 | goto out; |
955 | } | 977 | |
956 | result = machine_kexec_prepare(image); | 978 | result = machine_kexec_prepare(image); |
957 | if (result) { | 979 | if (result) |
958 | goto out; | 980 | goto out; |
959 | } | 981 | |
960 | for(i = 0; i < nr_segments; i++) { | 982 | for (i = 0; i < nr_segments; i++) { |
961 | result = kimage_load_segment(image, &image->segment[i]); | 983 | result = kimage_load_segment(image, &image->segment[i]); |
962 | if (result) { | 984 | if (result) |
963 | goto out; | 985 | goto out; |
964 | } | ||
965 | } | 986 | } |
966 | result = kimage_terminate(image); | 987 | result = kimage_terminate(image); |
967 | if (result) { | 988 | if (result) |
968 | goto out; | 989 | goto out; |
969 | } | ||
970 | } | 990 | } |
971 | /* Install the new kernel, and Uninstall the old */ | 991 | /* Install the new kernel, and Uninstall the old */ |
972 | image = xchg(dest_image, image); | 992 | image = xchg(dest_image, image); |
973 | 993 | ||
974 | out: | 994 | out: |
975 | xchg(&kexec_lock, 0); /* Release the mutex */ | 995 | xchg(&kexec_lock, 0); /* Release the mutex */ |
976 | kimage_free(image); | 996 | kimage_free(image); |
997 | |||
977 | return result; | 998 | return result; |
978 | } | 999 | } |
979 | 1000 | ||
980 | #ifdef CONFIG_COMPAT | 1001 | #ifdef CONFIG_COMPAT |
981 | asmlinkage long compat_sys_kexec_load(unsigned long entry, | 1002 | asmlinkage long compat_sys_kexec_load(unsigned long entry, |
982 | unsigned long nr_segments, struct compat_kexec_segment __user *segments, | 1003 | unsigned long nr_segments, |
983 | unsigned long flags) | 1004 | struct compat_kexec_segment __user *segments, |
1005 | unsigned long flags) | ||
984 | { | 1006 | { |
985 | struct compat_kexec_segment in; | 1007 | struct compat_kexec_segment in; |
986 | struct kexec_segment out, __user *ksegments; | 1008 | struct kexec_segment out, __user *ksegments; |
@@ -989,20 +1011,17 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, | |||
989 | /* Don't allow clients that don't understand the native | 1011 | /* Don't allow clients that don't understand the native |
990 | * architecture to do anything. | 1012 | * architecture to do anything. |
991 | */ | 1013 | */ |
992 | if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) { | 1014 | if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) |
993 | return -EINVAL; | 1015 | return -EINVAL; |
994 | } | ||
995 | 1016 | ||
996 | if (nr_segments > KEXEC_SEGMENT_MAX) { | 1017 | if (nr_segments > KEXEC_SEGMENT_MAX) |
997 | return -EINVAL; | 1018 | return -EINVAL; |
998 | } | ||
999 | 1019 | ||
1000 | ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); | 1020 | ksegments = compat_alloc_user_space(nr_segments * sizeof(out)); |
1001 | for (i=0; i < nr_segments; i++) { | 1021 | for (i=0; i < nr_segments; i++) { |
1002 | result = copy_from_user(&in, &segments[i], sizeof(in)); | 1022 | result = copy_from_user(&in, &segments[i], sizeof(in)); |
1003 | if (result) { | 1023 | if (result) |
1004 | return -EFAULT; | 1024 | return -EFAULT; |
1005 | } | ||
1006 | 1025 | ||
1007 | out.buf = compat_ptr(in.buf); | 1026 | out.buf = compat_ptr(in.buf); |
1008 | out.bufsz = in.bufsz; | 1027 | out.bufsz = in.bufsz; |
@@ -1010,9 +1029,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, | |||
1010 | out.memsz = in.memsz; | 1029 | out.memsz = in.memsz; |
1011 | 1030 | ||
1012 | result = copy_to_user(&ksegments[i], &out, sizeof(out)); | 1031 | result = copy_to_user(&ksegments[i], &out, sizeof(out)); |
1013 | if (result) { | 1032 | if (result) |
1014 | return -EFAULT; | 1033 | return -EFAULT; |
1015 | } | ||
1016 | } | 1034 | } |
1017 | 1035 | ||
1018 | return sys_kexec_load(entry, nr_segments, ksegments, flags); | 1036 | return sys_kexec_load(entry, nr_segments, ksegments, flags); |