Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c             33
-rw-r--r--  kernel/fork.c             60
-rw-r--r--  kernel/sched.c            55
-rw-r--r--  kernel/time.c              1
-rw-r--r--  kernel/time/tick-sched.c   1
5 files changed, 39 insertions, 111 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 073005b1cfb2..97f609f574b1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -507,10 +507,9 @@ void put_files_struct(struct files_struct *files)
 	}
 }
 
-EXPORT_SYMBOL(put_files_struct);
-
-void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
+void reset_files_struct(struct files_struct *files)
 {
+	struct task_struct *tsk = current;
 	struct files_struct *old;
 
 	old = tsk->files;
@@ -519,9 +518,8 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
 	task_unlock(tsk);
 	put_files_struct(old);
 }
-EXPORT_SYMBOL(reset_files_struct);
 
-static void __exit_files(struct task_struct *tsk)
+void exit_files(struct task_struct *tsk)
 {
 	struct files_struct * files = tsk->files;
 
@@ -533,12 +531,7 @@ static void __exit_files(struct task_struct *tsk)
 	}
 }
 
-void exit_files(struct task_struct *tsk)
-{
-	__exit_files(tsk);
-}
-
-static void __put_fs_struct(struct fs_struct *fs)
+void put_fs_struct(struct fs_struct *fs)
 {
 	/* No need to hold fs->lock if we are killing it */
 	if (atomic_dec_and_test(&fs->count)) {
@@ -550,12 +543,7 @@ static void __put_fs_struct(struct fs_struct *fs)
 	}
 }
 
-void put_fs_struct(struct fs_struct *fs)
-{
-	__put_fs_struct(fs);
-}
-
-static void __exit_fs(struct task_struct *tsk)
+void exit_fs(struct task_struct *tsk)
 {
 	struct fs_struct * fs = tsk->fs;
 
@@ -563,15 +551,10 @@ static void __exit_fs(struct task_struct *tsk)
 		task_lock(tsk);
 		tsk->fs = NULL;
 		task_unlock(tsk);
-		__put_fs_struct(fs);
+		put_fs_struct(fs);
 	}
 }
 
-void exit_fs(struct task_struct *tsk)
-{
-	__exit_fs(tsk);
-}
-
 EXPORT_SYMBOL_GPL(exit_fs);
 
 /*
@@ -967,8 +950,8 @@ NORET_TYPE void do_exit(long code)
 	if (group_dead)
 		acct_process();
 	exit_sem(tsk);
-	__exit_files(tsk);
-	__exit_fs(tsk);
+	exit_files(tsk);
+	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
 	cgroup_exit(tsk, 1);
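
Note (not part of the diff): with reset_files_struct() now operating on current and exit_files()/exit_fs() being the direct entry points, a caller that wants to install a private descriptor table no longer passes a task_struct. A minimal sketch of the new calling convention follows; install_private_fdtable() is a hypothetical helper used only for illustration, and the declarations are assumed to come from the usual <linux/file.h>/<linux/sched.h> headers of this tree.

/* Hypothetical helper, illustration only -- not part of this patch. */
static int install_private_fdtable(void)
{
	int err;
	struct files_struct *copy;

	copy = dup_fd(current->files, &err);	/* duplicate current's table */
	if (!copy)
		return err;

	/* Installs 'copy' on current and drops the displaced table. */
	reset_files_struct(copy);
	return 0;
}
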
diff --git a/kernel/fork.c b/kernel/fork.c
index 89fe414645e9..c674aa8d3c31 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -521,7 +521,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
  * Allocate a new mm structure and copy contents from the
  * mm structure of the passed in task structure.
  */
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+struct mm_struct *dup_mm(struct task_struct *tsk)
 {
 	struct mm_struct *mm, *oldmm = current->mm;
 	int err;
@@ -805,12 +805,6 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
 		goto out;
 	}
 
-	/*
-	 * Note: we may be using current for both targets (See exec.c)
-	 * This works because we cache current->files (old) as oldf. Don't
-	 * break this.
-	 */
-	tsk->files = NULL;
 	newf = dup_fd(oldf, &error);
 	if (!newf)
 		goto out;
@@ -846,34 +840,6 @@ static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
 	return 0;
 }
 
-/*
- * Helper to unshare the files of the current task.
- * We don't want to expose copy_files internals to
- * the exec layer of the kernel.
- */
-
-int unshare_files(void)
-{
-	struct files_struct *files = current->files;
-	int rc;
-
-	BUG_ON(!files);
-
-	/* This can race but the race causes us to copy when we don't
-	   need to and drop the copy */
-	if(atomic_read(&files->count) == 1)
-	{
-		atomic_inc(&files->count);
-		return 0;
-	}
-	rc = copy_files(0, current);
-	if(rc)
-		current->files = files;
-	return rc;
-}
-
-EXPORT_SYMBOL(unshare_files);
-
 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct sighand_struct *sig;
@@ -1811,3 +1777,27 @@ bad_unshare_cleanup_thread:
 bad_unshare_out:
 	return err;
 }
+
+/*
+ * Helper to unshare the files of the current task.
+ * We don't want to expose copy_files internals to
+ * the exec layer of the kernel.
+ */
+
+int unshare_files(struct files_struct **displaced)
+{
+	struct task_struct *task = current;
+	struct files_struct *copy = NULL;
+	int error;
+
+	error = unshare_fd(CLONE_FILES, &copy);
+	if (error || !copy) {
+		*displaced = NULL;
+		return error;
+	}
+	*displaced = task->files;
+	task_lock(task);
+	task->files = copy;
+	task_unlock(task);
+	return 0;
+}
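
Note (not part of the diff): unshare_files() now hands the displaced descriptor table back to the caller instead of dropping or restoring it internally, so the caller decides when to release it with put_files_struct(). A minimal sketch of the intended call pattern follows; exec_with_private_fdtable() is a hypothetical caller used only for illustration.

/* Hypothetical caller, illustration only -- not part of this patch. */
static int exec_with_private_fdtable(void)
{
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);	/* may leave displaced == NULL */
	if (retval)
		return retval;

	/* ... work that must not be visible through a shared fd table ... */

	if (displaced)
		put_files_struct(displaced);	/* drop the table we displaced */
	return 0;
}
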
diff --git a/kernel/sched.c b/kernel/sched.c
index 57ba7ea9b744..740fb409e5bb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1657,42 +1657,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
 }
 
 /*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-	int i, max_cpu = smp_processor_id();
-	unsigned long rq_weight = 0;
-	unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-	for_each_possible_cpu(i)
-		rq_weight += tg->cfs_rq[i]->load.weight;
-
-	for_each_possible_cpu(i) {
-		/*
-		 * divide shares proportional to the rq_weights.
-		 */
-		shares = tg->shares * tg->cfs_rq[i]->load.weight;
-		shares /= rq_weight + 1;
-
-		tg->cfs_rq[i]->shares = shares;
-
-		if (shares > max_shares) {
-			max_shares = shares;
-			max_cpu = i;
-		}
-		shares_rem -= shares;
-	}
-
-	/*
-	 * Ensure it all adds up to tg->shares; we can loose a few
-	 * due to rounding down when computing the per-cpu shares.
-	 */
-	if (shares_rem)
-		tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
-/*
  * Compute the weight of this group on the given cpus.
  */
 static
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
 	unsigned long shares = 0;
 	int i;
 
-again:
 	for_each_cpu_mask(i, sd->span)
 		shares += tg->cfs_rq[i]->shares;
 
-	/*
-	 * When the span doesn't have any shares assigned, but does have
-	 * tasks to run do a machine wide rebalance (should be rare).
-	 */
-	if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-		__aggregate_redistribute_shares(tg);
-		goto again;
-	}
+	if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+		shares = tg->shares;
 
 	aggregate(tg, sd)->shares = shares;
 }
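
Note (not part of the diff): the machine-wide redistribute-and-retry loop is replaced by a simple clamp. If the span's per-cpu shares sum to zero while the aggregated rq_weight is non-zero, or if stale per-cpu values push the sum above tg->shares, the sum is pinned to tg->shares. A small illustrative fragment with made-up values, mirroring the names in aggregate_group_shares():

/* Illustration only; values are invented for the example. */
unsigned long tg_shares = 1024;	/* tg->shares: the group's total weight */
unsigned long shares = 0;	/* sum of tg->cfs_rq[i]->shares over sd->span */
unsigned long rq_weight = 2048;	/* aggregate(tg, sd)->rq_weight */

if ((!shares && rq_weight) || shares > tg_shares)
	shares = tg_shares;	/* was: __aggregate_redistribute_shares() + goto again */
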
@@ -7035,6 +6992,7 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 /**
  * sched_domain_node_span - get a cpumask for a node's sched_domain
  * @node: node whose cpumask we're constructing
+ * @span: resulting cpumask
  *
  * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
@@ -7990,11 +7948,6 @@ void __init sched_init_smp(void)
 #else
 void __init sched_init_smp(void)
 {
-#if defined(CONFIG_NUMA)
-	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
-								GFP_KERNEL);
-	BUG_ON(sched_group_nodes_bycpu == NULL);
-#endif
 	sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
@@ -8127,7 +8080,7 @@ void __init sched_init(void)
 	 * we use alloc_bootmem().
 	 */
 	if (alloc_size) {
-		ptr = (unsigned long)alloc_bootmem_low(alloc_size);
+		ptr = (unsigned long)alloc_bootmem(alloc_size);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		init_task_group.se = (struct sched_entity **)ptr;
diff --git a/kernel/time.c b/kernel/time.c
index a5ec013b6c80..35d373a98782 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -379,6 +379,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
 	ts->tv_sec = sec;
 	ts->tv_nsec = nsec;
 }
+EXPORT_SYMBOL(set_normalized_timespec);
 
 /**
  * ns_to_timespec - Convert nanoseconds to timespec
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d358d4e3a958..b854a895591e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -393,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
 		sub_preempt_count(HARDIRQ_OFFSET);
 	}
 
+	touch_softlockup_watchdog();
 	/*
 	 * Cancel the scheduled timer and restore the tick
 	 */