Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/compat.c          1
 -rw-r--r--  kernel/fork.c          394
 -rw-r--r--  kernel/module.c          3
 -rw-r--r--  kernel/power/console.c   4
 -rw-r--r--  kernel/power/power.h    16
 -rw-r--r--  kernel/power/swsusp.c    6
 -rw-r--r--  kernel/signal.c          9
 -rw-r--r--  kernel/sys.c             2
 -rw-r--r--  kernel/timer.c           2

 9 files changed, 354 insertions, 83 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 1867290c37e3..8c9cd88b6785 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -23,7 +23,6 @@
 #include <linux/security.h>
 
 #include <asm/uaccess.h>
-#include <asm/bug.h>
 
 int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index 7f0ab5ee948c..8e88b374cee9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -446,6 +446,55 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
         }
 }
 
+/*
+ * Allocate a new mm structure and copy contents from the
+ * mm structure of the passed in task structure.
+ */
+static struct mm_struct *dup_mm(struct task_struct *tsk)
+{
+        struct mm_struct *mm, *oldmm = current->mm;
+        int err;
+
+        if (!oldmm)
+                return NULL;
+
+        mm = allocate_mm();
+        if (!mm)
+                goto fail_nomem;
+
+        memcpy(mm, oldmm, sizeof(*mm));
+
+        if (!mm_init(mm))
+                goto fail_nomem;
+
+        if (init_new_context(tsk, mm))
+                goto fail_nocontext;
+
+        err = dup_mmap(mm, oldmm);
+        if (err)
+                goto free_pt;
+
+        mm->hiwater_rss = get_mm_rss(mm);
+        mm->hiwater_vm = mm->total_vm;
+
+        return mm;
+
+free_pt:
+        mmput(mm);
+
+fail_nomem:
+        return NULL;
+
+fail_nocontext:
+        /*
+         * If init_new_context() failed, we cannot use mmput() to free the mm
+         * because it calls destroy_context()
+         */
+        mm_free_pgd(mm);
+        free_mm(mm);
+        return NULL;
+}
+
 static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 {
         struct mm_struct * mm, *oldmm;
@@ -473,43 +522,17 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
         }
 
         retval = -ENOMEM;
-        mm = allocate_mm();
+        mm = dup_mm(tsk);
         if (!mm)
                 goto fail_nomem;
 
-        /* Copy the current MM stuff.. */
-        memcpy(mm, oldmm, sizeof(*mm));
-        if (!mm_init(mm))
-                goto fail_nomem;
-
-        if (init_new_context(tsk,mm))
-                goto fail_nocontext;
-
-        retval = dup_mmap(mm, oldmm);
-        if (retval)
-                goto free_pt;
-
-        mm->hiwater_rss = get_mm_rss(mm);
-        mm->hiwater_vm = mm->total_vm;
-
 good_mm:
         tsk->mm = mm;
         tsk->active_mm = mm;
         return 0;
 
-free_pt:
-        mmput(mm);
 fail_nomem:
         return retval;
-
-fail_nocontext:
-        /*
-         * If init_new_context() failed, we cannot use mmput() to free the mm
-         * because it calls destroy_context()
-         */
-        mm_free_pgd(mm);
-        free_mm(mm);
-        return retval;
 }
 
 static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
@@ -597,32 +620,17 @@ out:
         return newf;
 }
 
-static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+/*
+ * Allocate a new files structure and copy contents from the
+ * passed in files structure.
+ */
+static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 {
-        struct files_struct *oldf, *newf;
+        struct files_struct *newf;
         struct file **old_fds, **new_fds;
-        int open_files, size, i, error = 0, expand;
+        int open_files, size, i, expand;
         struct fdtable *old_fdt, *new_fdt;
 
-        /*
-         * A background process may not have any files ...
-         */
-        oldf = current->files;
-        if (!oldf)
-                goto out;
-
-        if (clone_flags & CLONE_FILES) {
-                atomic_inc(&oldf->count);
-                goto out;
-        }
-
-        /*
-         * Note: we may be using current for both targets (See exec.c)
-         * This works because we cache current->files (old) as oldf. Don't
-         * break this.
-         */
-        tsk->files = NULL;
-        error = -ENOMEM;
         newf = alloc_files();
         if (!newf)
                 goto out;
@@ -651,9 +659,9 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
         if (expand) {
                 spin_unlock(&oldf->file_lock);
                 spin_lock(&newf->file_lock);
-                error = expand_files(newf, open_files-1);
+                *errorp = expand_files(newf, open_files-1);
                 spin_unlock(&newf->file_lock);
-                if (error < 0)
+                if (*errorp < 0)
                         goto out_release;
                 new_fdt = files_fdtable(newf);
                 /*
@@ -702,10 +710,8 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
                 memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
         }
 
-        tsk->files = newf;
-        error = 0;
 out:
-        return error;
+        return newf;
 
 out_release:
         free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
@@ -715,6 +721,40 @@ out_release:
         goto out;
 }
 
+static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
+{
+        struct files_struct *oldf, *newf;
+        int error = 0;
+
+        /*
+         * A background process may not have any files ...
+         */
+        oldf = current->files;
+        if (!oldf)
+                goto out;
+
+        if (clone_flags & CLONE_FILES) {
+                atomic_inc(&oldf->count);
+                goto out;
+        }
+
+        /*
+         * Note: we may be using current for both targets (See exec.c)
+         * This works because we cache current->files (old) as oldf. Don't
+         * break this.
+         */
+        tsk->files = NULL;
+        error = -ENOMEM;
+        newf = dup_fd(oldf, &error);
+        if (!newf)
+                goto out;
+
+        tsk->files = newf;
+        error = 0;
+out:
+        return error;
+}
+
 /*
  * Helper to unshare the files of the current task.
  * We don't want to expose copy_files internals to
@@ -1323,3 +1363,249 @@ void __init proc_caches_init(void)
                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                         SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 }
+
+
+/*
+ * Check constraints on flags passed to the unshare system call and
+ * force unsharing of additional process context as appropriate.
+ */
+static inline void check_unshare_flags(unsigned long *flags_ptr)
+{
+        /*
+         * If unsharing a thread from a thread group, must also
+         * unshare vm.
+         */
+        if (*flags_ptr & CLONE_THREAD)
+                *flags_ptr |= CLONE_VM;
+
+        /*
+         * If unsharing vm, must also unshare signal handlers.
+         */
+        if (*flags_ptr & CLONE_VM)
+                *flags_ptr |= CLONE_SIGHAND;
+
+        /*
+         * If unsharing signal handlers and the task was created
+         * using CLONE_THREAD, then must unshare the thread
+         */
+        if ((*flags_ptr & CLONE_SIGHAND) &&
+            (atomic_read(&current->signal->count) > 1))
+                *flags_ptr |= CLONE_THREAD;
+
+        /*
+         * If unsharing namespace, must also unshare filesystem information.
+         */
+        if (*flags_ptr & CLONE_NEWNS)
+                *flags_ptr |= CLONE_FS;
+}
+
+/*
+ * Unsharing of tasks created with CLONE_THREAD is not supported yet
+ */
+static int unshare_thread(unsigned long unshare_flags)
+{
+        if (unshare_flags & CLONE_THREAD)
+                return -EINVAL;
+
+        return 0;
+}
+
+/*
+ * Unshare the filesystem structure if it is being shared
+ */
+static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+{
+        struct fs_struct *fs = current->fs;
+
+        if ((unshare_flags & CLONE_FS) &&
+            (fs && atomic_read(&fs->count) > 1)) {
+                *new_fsp = __copy_fs_struct(current->fs);
+                if (!*new_fsp)
+                        return -ENOMEM;
+        }
+
+        return 0;
+}
+
+/*
+ * Unshare the namespace structure if it is being shared
+ */
+static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
+{
+        struct namespace *ns = current->namespace;
+
+        if ((unshare_flags & CLONE_NEWNS) &&
+            (ns && atomic_read(&ns->count) > 1)) {
+                if (!capable(CAP_SYS_ADMIN))
+                        return -EPERM;
+
+                *new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
+                if (!*new_nsp)
+                        return -ENOMEM;
+        }
+
+        return 0;
+}
+
+/*
+ * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
+ * supported yet
+ */
+static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
+{
+        struct sighand_struct *sigh = current->sighand;
+
+        if ((unshare_flags & CLONE_SIGHAND) &&
+            (sigh && atomic_read(&sigh->count) > 1))
+                return -EINVAL;
+        else
+                return 0;
+}
+
+/*
+ * Unshare vm if it is being shared
+ */
+static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
+{
+        struct mm_struct *mm = current->mm;
+
+        if ((unshare_flags & CLONE_VM) &&
+            (mm && atomic_read(&mm->mm_users) > 1)) {
+                *new_mmp = dup_mm(current);
+                if (!*new_mmp)
+                        return -ENOMEM;
+        }
+
+        return 0;
+}
+
+/*
+ * Unshare file descriptor table if it is being shared
+ */
+static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
+{
+        struct files_struct *fd = current->files;
+        int error = 0;
+
+        if ((unshare_flags & CLONE_FILES) &&
+            (fd && atomic_read(&fd->count) > 1)) {
+                *new_fdp = dup_fd(fd, &error);
+                if (!*new_fdp)
+                        return error;
+        }
+
+        return 0;
+}
+
+/*
+ * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
+ * supported yet
+ */
+static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
+{
+        if (unshare_flags & CLONE_SYSVSEM)
+                return -EINVAL;
+
+        return 0;
+}
+
+/*
+ * unshare allows a process to 'unshare' part of the process
+ * context which was originally shared using clone.  copy_*
+ * functions used by do_fork() cannot be used here directly
+ * because they modify an inactive task_struct that is being
+ * constructed. Here we are modifying the current, active,
+ * task_struct.
+ */
+asmlinkage long sys_unshare(unsigned long unshare_flags)
+{
+        int err = 0;
+        struct fs_struct *fs, *new_fs = NULL;
+        struct namespace *ns, *new_ns = NULL;
+        struct sighand_struct *sigh, *new_sigh = NULL;
+        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
+        struct files_struct *fd, *new_fd = NULL;
+        struct sem_undo_list *new_ulist = NULL;
+
+        check_unshare_flags(&unshare_flags);
+
+        if ((err = unshare_thread(unshare_flags)))
+                goto bad_unshare_out;
+        if ((err = unshare_fs(unshare_flags, &new_fs)))
+                goto bad_unshare_cleanup_thread;
+        if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
+                goto bad_unshare_cleanup_fs;
+        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
+                goto bad_unshare_cleanup_ns;
+        if ((err = unshare_vm(unshare_flags, &new_mm)))
+                goto bad_unshare_cleanup_sigh;
+        if ((err = unshare_fd(unshare_flags, &new_fd)))
+                goto bad_unshare_cleanup_vm;
+        if ((err = unshare_semundo(unshare_flags, &new_ulist)))
+                goto bad_unshare_cleanup_fd;
+
+        if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist) {
+
+                task_lock(current);
+
+                if (new_fs) {
+                        fs = current->fs;
+                        current->fs = new_fs;
+                        new_fs = fs;
+                }
+
+                if (new_ns) {
+                        ns = current->namespace;
+                        current->namespace = new_ns;
+                        new_ns = ns;
+                }
+
+                if (new_sigh) {
+                        sigh = current->sighand;
+                        current->sighand = new_sigh;
+                        new_sigh = sigh;
+                }
+
+                if (new_mm) {
+                        mm = current->mm;
+                        active_mm = current->active_mm;
+                        current->mm = new_mm;
+                        current->active_mm = new_mm;
+                        activate_mm(active_mm, new_mm);
+                        new_mm = mm;
+                }
+
+                if (new_fd) {
+                        fd = current->files;
+                        current->files = new_fd;
+                        new_fd = fd;
+                }
+
+                task_unlock(current);
+        }
+
+bad_unshare_cleanup_fd:
+        if (new_fd)
+                put_files_struct(new_fd);
+
+bad_unshare_cleanup_vm:
+        if (new_mm)
+                mmput(new_mm);
+
+bad_unshare_cleanup_sigh:
+        if (new_sigh)
+                if (atomic_dec_and_test(&new_sigh->count))
+                        kmem_cache_free(sighand_cachep, new_sigh);
+
+bad_unshare_cleanup_ns:
+        if (new_ns)
+                put_namespace(new_ns);
+
+bad_unshare_cleanup_fs:
+        if (new_fs)
+                put_fs_struct(new_fs);
+
+bad_unshare_cleanup_thread:
+bad_unshare_out:
+        return err;
+}
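
The unshare_*() helpers and sys_unshare() added above are easiest to see from the caller's side. The userspace sketch below is illustrative only and is not part of the patch; it assumes the syscall number is exposed as SYS_unshare and simply requests a private mount namespace, letting check_unshare_flags() pull in CLONE_FS automatically. CLONE_NEWNS also requires CAP_SYS_ADMIN, so an unprivileged run fails with EPERM.

/* Hypothetical usage sketch for the unshare() system call added above. */
#define _GNU_SOURCE
#include <sched.h>           /* CLONE_NEWNS */
#include <stdio.h>
#include <sys/syscall.h>     /* SYS_unshare -- assumed to be wired up */
#include <unistd.h>

int main(void)
{
        /* Request a private mount namespace; check_unshare_flags() in the
         * kernel forces CLONE_FS along with it. */
        if (syscall(SYS_unshare, CLONE_NEWNS) == -1) {
                perror("unshare(CLONE_NEWNS)");  /* EPERM without CAP_SYS_ADMIN */
                return 1;
        }
        printf("mount namespace unshared; new mounts stay private to this process\n");
        return 0;
}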
diff --git a/kernel/module.c b/kernel/module.c
index e058aedf6b93..5aad477ddc79 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1670,6 +1670,9 @@ static struct module *load_module(void __user *umod,
                 goto free_mod;
         }
 
+        /* Userspace could have altered the string after the strlen_user() */
+        args[arglen - 1] = '\0';
+
         if (find_module(mod->name)) {
                 err = -EEXIST;
                 goto free_mod;
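
The load_module() hunk above closes a time-of-check/time-of-use gap: arglen was measured with strlen_user(), but userspace can rewrite the buffer before the copy is used, so the kernel forcibly terminates its own copy. As a generic illustration of that pattern (a hypothetical helper, not the module loader's actual code), one might write:

#include <asm/uaccess.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, for illustration only: copy a user string whose
 * length (including the trailing NUL) was measured earlier.  Userspace
 * may have changed the buffer since that measurement, so re-terminate
 * the kernel copy instead of trusting the earlier strlen_user() result.
 */
static char *copy_user_string(const char __user *ustr, unsigned long len)
{
        char *buf;

        if (!len)
                return NULL;
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return NULL;
        if (copy_from_user(buf, ustr, len)) {
                kfree(buf);
                return NULL;
        }
        buf[len - 1] = '\0';    /* same idea as args[arglen - 1] = '\0' above */
        return buf;
}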
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 579d239d129f..623786d44159 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,7 +9,9 @@
 #include <linux/console.h>
 #include "power.h"
 
-#ifdef SUSPEND_CONSOLE
+#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
+
 static int orig_fgconsole, orig_kmsg;
 
 int pm_prepare_console(void)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index d8f0d1a76bae..388dba680841 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,14 +1,6 @@
 #include <linux/suspend.h>
 #include <linux/utsname.h>
 
-/* With SUSPEND_CONSOLE defined suspend looks *really* cool, but
-   we probably do not take enough locks for switching consoles, etc,
-   so bad things might happen.
-*/
-#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
-#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
-#endif
-
 struct swsusp_info {
         struct new_utsname uts;
         u32 version_code;
@@ -42,14 +34,6 @@ static struct subsys_attribute _name##_attr = { \
 
 extern struct subsystem power_subsys;
 
-#ifdef SUSPEND_CONSOLE
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
-#else
-static int pm_prepare_console(void) { return 0; }
-static void pm_restore_console(void) {}
-#endif
-
 /* References to section boundaries */
 extern const void __nosave_begin, __nosave_end;
 
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 59c91c148e82..4e90905f0e87 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -743,7 +743,6 @@ static int submit(int rw, pgoff_t page_off, void *page)
         if (!bio)
                 return -ENOMEM;
         bio->bi_sector = page_off * (PAGE_SIZE >> 9);
-        bio_get(bio);
         bio->bi_bdev = resume_bdev;
         bio->bi_end_io = end_io;
 
@@ -753,14 +752,13 @@ static int submit(int rw, pgoff_t page_off, void *page)
                 goto Done;
         }
 
-        if (rw == WRITE)
-                bio_set_pages_dirty(bio);
 
         atomic_set(&io_done, 1);
         submit_bio(rw | (1 << BIO_RW_SYNC), bio);
         while (atomic_read(&io_done))
                 yield();
-
+        if (rw == READ)
+                bio_set_pages_dirty(bio);
 Done:
         bio_put(bio);
         return error;
diff --git a/kernel/signal.c b/kernel/signal.c
index b373fc2420da..ea154104a00b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2430,7 +2430,7 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 }
 
 int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
+do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
         struct k_sigaction *k;
         sigset_t mask;
@@ -2454,6 +2454,8 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                 *oact = *k;
 
         if (act) {
+                sigdelsetmask(&act->sa.sa_mask,
+                              sigmask(SIGKILL) | sigmask(SIGSTOP));
                 /*
                  * POSIX 3.3.1.3:
                  * "Setting a signal action to SIG_IGN for a signal that is
@@ -2479,8 +2481,6 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                         read_lock(&tasklist_lock);
                         spin_lock_irq(&t->sighand->siglock);
                         *k = *act;
-                        sigdelsetmask(&k->sa.sa_mask,
-                                      sigmask(SIGKILL) | sigmask(SIGSTOP));
                         sigemptyset(&mask);
                         sigaddset(&mask, sig);
                         rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2495,8 +2495,6 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                 }
 
                 *k = *act;
-                sigdelsetmask(&k->sa.sa_mask,
-                              sigmask(SIGKILL) | sigmask(SIGSTOP));
         }
 
         spin_unlock_irq(&current->sighand->siglock);
@@ -2702,6 +2700,7 @@ sys_signal(int sig, __sighandler_t handler)
 
         new_sa.sa.sa_handler = handler;
         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+        sigemptyset(&new_sa.sa.sa_mask);
 
         ret = do_sigaction(sig, &new_sa, &old_sa);
 
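
The signal.c changes above move the SIGKILL/SIGSTOP sanitising of sa_mask to a single spot before the action is copied anywhere, and make sys_signal() initialise sa_mask instead of installing stack garbage. The userspace sketch below is illustrative only; it shows the equivalent discipline when calling sigaction(2) directly and why asking to block SIGKILL in sa_mask is silently discarded by the kernel.

#define _XOPEN_SOURCE 700
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_int(int sig)
{
        (void)sig;               /* handler body does not matter here */
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_int;
        sa.sa_flags = SA_RESETHAND | SA_NODEFER;  /* userspace names for SA_ONESHOT | SA_NOMASK */
        sigemptyset(&sa.sa_mask);                 /* the initialisation sys_signal() gains above */
        sigaddset(&sa.sa_mask, SIGKILL);          /* stripped by do_sigaction(), as in the hunk above */

        if (sigaction(SIGINT, &sa, NULL) == -1) {
                perror("sigaction");
                return 1;
        }
        pause();                 /* first Ctrl-C runs on_int(); SA_RESETHAND then restores SIG_DFL */
        return 0;
}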
diff --git a/kernel/sys.c b/kernel/sys.c
index 0929c698affc..f91218a5463e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -428,7 +428,7 @@ void kernel_kexec(void)
 {
 #ifdef CONFIG_KEXEC
         struct kimage *image;
-        image = xchg(&kexec_image, 0);
+        image = xchg(&kexec_image, NULL);
         if (!image) {
                 return;
         }
diff --git a/kernel/timer.c b/kernel/timer.c
index 4f1cb0ab5251..b9dad3994676 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -495,7 +495,7 @@ unsigned long next_timer_interrupt(void)
         base = &__get_cpu_var(tvec_bases);
         spin_lock(&base->t_base.lock);
         expires = base->timer_jiffies + (LONG_MAX >> 1);
-        list = 0;
+        list = NULL;
 
         /* Look for timer events in tv1. */
         j = base->timer_jiffies & TVR_MASK;