Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c            142
-rw-r--r--  kernel/kprobes.c            62
-rw-r--r--  kernel/printk.c             59
-rw-r--r--  kernel/profile.c             4
-rw-r--r--  kernel/ptrace.c             13
-rw-r--r--  kernel/resource.c           26
-rw-r--r--  kernel/sched.c              37
-rw-r--r--  kernel/sysctl.c              8
-rw-r--r--  kernel/time.c                6
-rw-r--r--  kernel/time/timekeeping.c    7
10 files changed, 179 insertions, 185 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 57e6448b171e..0864f4097930 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -581,26 +581,28 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
 
 /*
  * Return in *pmask the portion of a cpusets's mems_allowed that
- * are online.  If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online mems.  If we get
- * all the way to the top and still haven't found any online mems,
- * return node_online_map.
+ * are online, with memory.  If none are online with memory, walk
+ * up the cpuset hierarchy until we find one that does have some
+ * online mems.  If we get all the way to the top and still haven't
+ * found any online mems, return node_states[N_HIGH_MEMORY].
  *
  * One way or another, we guarantee to return some non-empty subset
- * of node_online_map.
+ * of node_states[N_HIGH_MEMORY].
  *
  * Call with callback_mutex held.
  */
 
 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 {
-	while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
+	while (cs && !nodes_intersects(cs->mems_allowed,
+					node_states[N_HIGH_MEMORY]))
 		cs = cs->parent;
 	if (cs)
-		nodes_and(*pmask, cs->mems_allowed, node_online_map);
+		nodes_and(*pmask, cs->mems_allowed,
+				node_states[N_HIGH_MEMORY]);
 	else
-		*pmask = node_online_map;
-	BUG_ON(!nodes_intersects(*pmask, node_online_map));
+		*pmask = node_states[N_HIGH_MEMORY];
+	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
 }
 
 /**
@@ -753,68 +755,13 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 }
 
 /*
- * For a given cpuset cur, partition the system as follows
- * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
- *    exclusive child cpusets
- * b. All cpus in the current cpuset's cpus_allowed that are not part of any
- *    exclusive child cpusets
- * Build these two partitions by calling partition_sched_domains
- *
- * Call with manage_mutex held.  May nest a call to the
- * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
- * Must not be called holding callback_mutex, because we must
- * not call lock_cpu_hotplug() while holding callback_mutex.
- */
-
-static void update_cpu_domains(struct cpuset *cur)
-{
-	struct cpuset *c, *par = cur->parent;
-	cpumask_t pspan, cspan;
-
-	if (par == NULL || cpus_empty(cur->cpus_allowed))
-		return;
-
-	/*
-	 * Get all cpus from parent's cpus_allowed not part of exclusive
-	 * children
-	 */
-	pspan = par->cpus_allowed;
-	list_for_each_entry(c, &par->children, sibling) {
-		if (is_cpu_exclusive(c))
-			cpus_andnot(pspan, pspan, c->cpus_allowed);
-	}
-	if (!is_cpu_exclusive(cur)) {
-		cpus_or(pspan, pspan, cur->cpus_allowed);
-		if (cpus_equal(pspan, cur->cpus_allowed))
-			return;
-		cspan = CPU_MASK_NONE;
-	} else {
-		if (cpus_empty(pspan))
-			return;
-		cspan = cur->cpus_allowed;
-		/*
-		 * Get all cpus from current cpuset's cpus_allowed not part
-		 * of exclusive children
-		 */
-		list_for_each_entry(c, &cur->children, sibling) {
-			if (is_cpu_exclusive(c))
-				cpus_andnot(cspan, cspan, c->cpus_allowed);
-		}
-	}
-
-	lock_cpu_hotplug();
-	partition_sched_domains(&pspan, &cspan);
-	unlock_cpu_hotplug();
-}
-
-/*
  * Call with manage_mutex held.  May take callback_mutex during call.
  */
 
 static int update_cpumask(struct cpuset *cs, char *buf)
 {
 	struct cpuset trialcs;
-	int retval, cpus_unchanged;
+	int retval;
 
 	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
 	if (cs == &top_cpuset)
@@ -841,12 +788,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
 		return retval;
-	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
 	mutex_lock(&callback_mutex);
 	cs->cpus_allowed = trialcs.cpus_allowed;
 	mutex_unlock(&callback_mutex);
-	if (is_cpu_exclusive(cs) && !cpus_unchanged)
-		update_cpu_domains(cs);
 	return 0;
 }
 
@@ -924,7 +868,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	int fudge;
 	int retval;
 
-	/* top_cpuset.mems_allowed tracks node_online_map; it's read-only */
+	/*
+	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
+	 * it's read-only
+	 */
 	if (cs == &top_cpuset)
 		return -EACCES;
 
@@ -941,8 +888,21 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		retval = nodelist_parse(buf, trialcs.mems_allowed);
 		if (retval < 0)
 			goto done;
+		if (!nodes_intersects(trialcs.mems_allowed,
+						node_states[N_HIGH_MEMORY])) {
+			/*
+			 * error if only memoryless nodes specified.
+			 */
+			retval = -ENOSPC;
+			goto done;
+		}
 	}
-	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
+	/*
+	 * Exclude memoryless nodes.  We know that trialcs.mems_allowed
+	 * contains at least one node with memory.
+	 */
+	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
+						node_states[N_HIGH_MEMORY]);
 	oldmem = cs->mems_allowed;
 	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
 		retval = 0;		/* Too easy - nothing to do */
@@ -1067,7 +1027,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 {
 	int turning_on;
 	struct cpuset trialcs;
-	int err, cpu_exclusive_changed;
+	int err;
 
 	turning_on = (simple_strtoul(buf, NULL, 10) != 0);
 
@@ -1080,14 +1040,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 	err = validate_change(cs, &trialcs);
 	if (err < 0)
 		return err;
-	cpu_exclusive_changed =
-		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
 	mutex_lock(&callback_mutex);
 	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
-	if (cpu_exclusive_changed)
-		update_cpu_domains(cs);
 	return 0;
 }
 
@@ -1445,7 +1401,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
 	ssize_t retval = 0;
 	char *s;
 
-	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
+	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
 		return -ENOMEM;
 
 	s = page;
@@ -1947,17 +1903,6 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
 }
 
-/*
- * Locking note on the strange update_flag() call below:
- *
- * If the cpuset being removed is marked cpu_exclusive, then simulate
- * turning cpu_exclusive off, which will call update_cpu_domains().
- * The lock_cpu_hotplug() call in update_cpu_domains() must not be
- * made while holding callback_mutex.  Elsewhere the kernel nests
- * callback_mutex inside lock_cpu_hotplug() calls.  So the reverse
- * nesting would risk an ABBA deadlock.
- */
-
 static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 {
 	struct cpuset *cs = dentry->d_fsdata;
@@ -1977,13 +1922,6 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 		mutex_unlock(&manage_mutex);
 		return -EBUSY;
 	}
-	if (is_cpu_exclusive(cs)) {
-		int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
-		if (retval < 0) {
-			mutex_unlock(&manage_mutex);
-			return retval;
-		}
-	}
 	parent = cs->parent;
 	mutex_lock(&callback_mutex);
 	set_bit(CS_REMOVED, &cs->flags);
@@ -2098,8 +2036,9 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
 
 /*
  * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
- * cpu_online_map and node_online_map.  Force the top cpuset to track
- * whats online after any CPU or memory node hotplug or unplug event.
+ * cpu_online_map and node_states[N_HIGH_MEMORY].  Force the top cpuset to
+ * track what's online after any CPU or memory node hotplug or unplug
+ * event.
  *
  * To ensure that we don't remove a CPU or node from the top cpuset
  * that is currently in use by a child cpuset (which would violate
@@ -2119,7 +2058,7 @@ static void common_cpu_mem_hotplug_unplug(void)
 
 	guarantee_online_cpus_mems_in_subtree(&top_cpuset);
 	top_cpuset.cpus_allowed = cpu_online_map;
-	top_cpuset.mems_allowed = node_online_map;
+	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	mutex_unlock(&callback_mutex);
 	mutex_unlock(&manage_mutex);
@@ -2147,8 +2086,9 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * Keep top_cpuset.mems_allowed tracking node_online_map.
- * Call this routine anytime after you change node_online_map.
+ * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
+ * Call this routine anytime after you change
+ * node_states[N_HIGH_MEMORY].
  * See also the previous routine cpuset_handle_cpuhp().
  */
 
@@ -2167,7 +2107,7 @@ void cpuset_track_online_nodes(void)
 void __init cpuset_init_smp(void)
 {
 	top_cpuset.cpus_allowed = cpu_online_map;
-	top_cpuset.mems_allowed = node_online_map;
+	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 
 	hotcpu_notifier(cpuset_handle_cpuhp, 0);
 }
@@ -2309,7 +2249,7 @@ void cpuset_init_current_mems_allowed(void)
  *
  * Description: Returns the nodemask_t mems_allowed of the cpuset
  * attached to the specified @tsk.  Guaranteed to return some non-empty
- * subset of node_online_map, even if this means going outside the
+ * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
  * tasks cpuset.
  **/
 
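The cpuset side of this series is a mechanical substitution: every place that used node_online_map now uses node_states[N_HIGH_MEMORY], so the masks cpusets hand out are guaranteed to contain nodes that actually have memory, not merely online (possibly memoryless) nodes. A minimal sketch of the resulting idiom (the helper name is hypothetical, not part of this patch):

	#include <linux/nodemask.h>

	/*
	 * Hypothetical helper: true if 'mask' covers at least one node
	 * with (high) memory, the invariant guarantee_online_mems()
	 * now maintains for *pmask.
	 */
	static inline int mask_has_memory(nodemask_t mask)
	{
		return nodes_intersects(mask, node_states[N_HIGH_MEMORY]);
	}
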
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 4b8a4493c541..e3a5d817ac9b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -64,7 +64,6 @@
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
 
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
-static struct notifier_block kprobe_page_fault_nb = {
-	.notifier_call = kprobe_exceptions_notify,
-	.priority = 0x7fffffff /* we need to notified first */
-};
-
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
-		if (!ret)
-			atomic_inc(&kprobe_count);
 		goto out;
 	}
 
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (kprobe_enabled) {
-		if (atomic_add_return(1, &kprobe_count) == \
-			(ARCH_INACTIVE_KPROBE_COUNT + 1))
-			register_page_fault_notifier(&kprobe_page_fault_nb);
-
+	if (kprobe_enabled)
 		arch_arm_kprobe(p);
-	}
+
 out:
 	mutex_unlock(&kprobe_mutex);
 
@@ -658,16 +646,6 @@ valid_p:
 		}
 		mutex_unlock(&kprobe_mutex);
 	}
-
-	/* Call unregister_page_fault_notifier()
-	 * if no probes are active
-	 */
-	mutex_lock(&kprobe_mutex);
-	if (atomic_add_return(-1, &kprobe_count) == \
-		ARCH_INACTIVE_KPROBE_COUNT)
-		unregister_page_fault_notifier(&kprobe_page_fault_nb);
-	mutex_unlock(&kprobe_mutex);
-	return;
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -738,6 +716,18 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 	int ret = 0;
 	struct kretprobe_instance *inst;
 	int i;
+	void *addr = rp->kp.addr;
+
+	if (kretprobe_blacklist_size) {
+		if (addr == NULL)
+			kprobe_lookup_name(rp->kp.symbol_name, addr);
+		addr += rp->kp.offset;
+
+		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+			if (kretprobe_blacklist[i].addr == addr)
+				return -EINVAL;
+		}
+	}
 
 	rp->kp.pre_handler = pre_handler_kretprobe;
 	rp->kp.post_handler = NULL;
@@ -815,7 +805,17 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
-	atomic_set(&kprobe_count, 0);
+
+	if (kretprobe_blacklist_size) {
+		/* lookup the function address from its name */
+		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+			kprobe_lookup_name(kretprobe_blacklist[i].name,
+					   kretprobe_blacklist[i].addr);
+			if (!kretprobe_blacklist[i].addr)
+				printk("kretprobe: lookup failed: %s\n",
+				       kretprobe_blacklist[i].name);
+		}
+	}
 
 	/* By default, kprobes are enabled */
 	kprobe_enabled = true;
@@ -921,13 +921,6 @@ static void __kprobes enable_all_kprobes(void)
 	if (kprobe_enabled)
 		goto already_enabled;
 
-	/*
-	 * Re-register the page fault notifier only if there are any
-	 * active probes at the time of enabling kprobes globally
-	 */
-	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
-		register_page_fault_notifier(&kprobe_page_fault_nb);
-
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +961,7 @@ static void __kprobes disable_all_kprobes(void)
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
-
-	mutex_lock(&kprobe_mutex);
-	/* Unconditionally unregister the page_fault notifier */
-	unregister_page_fault_notifier(&kprobe_page_fault_nb);
+	return;
 
 already_disabled:
 	mutex_unlock(&kprobe_mutex);
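Two independent kprobes changes land here: the page-fault notifier bookkeeping (kprobe_count, ARCH_INACTIVE_KPROBE_COUNT) is gone, so arming is now a bare arch_arm_kprobe() call, and register_kretprobe() refuses targets listed in the per-arch kretprobe_blacklist[] with -EINVAL. A minimal registration sketch (target symbol and handler are hypothetical, loosely following the usual kretprobe usage):

	#include <linux/module.h>
	#include <linux/kprobes.h>

	/* Hypothetical handler: runs when the probed function returns. */
	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		printk(KERN_INFO "probed function returned\n");
		return 0;
	}

	static struct kretprobe my_kretprobe = {
		.handler	= ret_handler,
		.kp.symbol_name	= "do_fork",	/* hypothetical target */
		.maxactive	= 20,
	};

	static int __init my_probe_init(void)
	{
		/* Fails with -EINVAL if the target is on kretprobe_blacklist[]. */
		return register_kretprobe(&my_kretprobe);
	}
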
diff --git a/kernel/printk.c b/kernel/printk.c
index 8451dfc31d25..b2b5c3a22a36 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -22,6 +22,8 @@
 #include <linux/tty_driver.h>
 #include <linux/console.h>
 #include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/nmi.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/interrupt.h>		/* For in_interrupt() */
@@ -162,6 +164,61 @@ out:
 
 __setup("log_buf_len=", log_buf_len_setup);
 
+#ifdef CONFIG_BOOT_PRINTK_DELAY
+
+static unsigned int boot_delay; /* msecs delay after each printk during bootup */
+static unsigned long long printk_delay_msec; /* per msec, based on boot_delay */
+
+static int __init boot_delay_setup(char *str)
+{
+	unsigned long lpj;
+	unsigned long long loops_per_msec;
+
+	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
+	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
+
+	get_option(&str, &boot_delay);
+	if (boot_delay > 10 * 1000)
+		boot_delay = 0;
+
+	printk_delay_msec = loops_per_msec;
+	printk(KERN_DEBUG "boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
+		"HZ: %d, printk_delay_msec: %llu\n",
+		boot_delay, preset_lpj, lpj, HZ, printk_delay_msec);
+	return 1;
+}
+__setup("boot_delay=", boot_delay_setup);
+
+static void boot_delay_msec(void)
+{
+	unsigned long long k;
+	unsigned long timeout;
+
+	if (boot_delay == 0 || system_state != SYSTEM_BOOTING)
+		return;
+
+	k = (unsigned long long)printk_delay_msec * boot_delay;
+
+	timeout = jiffies + msecs_to_jiffies(boot_delay);
+	while (k) {
+		k--;
+		cpu_relax();
+		/*
+		 * use (volatile) jiffies to prevent
+		 * compiler reduction; loop termination via jiffies
+		 * is secondary and may or may not happen.
+		 */
+		if (time_after(jiffies, timeout))
+			break;
+		touch_nmi_watchdog();
+	}
+}
+#else
+static inline void boot_delay_msec(void)
+{
+}
+#endif
+
 /*
  * Commands to do_syslog:
  *
@@ -527,6 +584,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	static char printk_buf[1024];
 	static int log_level_unknown = 1;
 
+	boot_delay_msec();
+
 	preempt_disable();
 	if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
 		/* If a crash is occurring during printk() on this CPU,
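boot_delay_msec() only throttles while system_state == SYSTEM_BOOTING, and boot_delay_setup() clamps anything above ten seconds back to zero. Assuming CONFIG_BOOT_PRINTK_DELAY is enabled, a boot that delays each printk by 100 ms would be requested from the kernel command line, optionally presetting lpj so the msec estimate is calibrated:

	linux ... lpj=<preset value> boot_delay=100
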
diff --git a/kernel/profile.c b/kernel/profile.c
index cb1e37d2dac3..6f69bf792d96 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -346,7 +346,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
 		per_cpu(cpu_profile_flip, cpu) = 0;
 		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
 			page = alloc_pages_node(node,
-					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
 				return NOTIFY_BAD;
@@ -354,7 +354,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
 		}
 		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
 			page = alloc_pages_node(node,
-					GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+					GFP_KERNEL | __GFP_ZERO,
 					0);
 			if (!page)
 				goto out_free;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 3eca7a55f2ee..a73ebd3b9d4c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -386,6 +386,9 @@ int ptrace_request(struct task_struct *child, long request,
 	case PTRACE_SETSIGINFO:
 		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
 		break;
+	case PTRACE_DETACH:	 /* detach a process that was attached. */
+		ret = ptrace_detach(child, data);
+		break;
 	default:
 		break;
 	}
@@ -450,6 +453,10 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
 	return child;
 }
 
+#ifndef arch_ptrace_attach
+#define arch_ptrace_attach(child)	do { } while (0)
+#endif
+
 #ifndef __ARCH_SYS_PTRACE
 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
 {
@@ -473,6 +480,12 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
 
 	if (request == PTRACE_ATTACH) {
 		ret = ptrace_attach(child);
+		/*
+		 * Some architectures need to do book-keeping after
+		 * a ptrace attach.
+		 */
+		if (!ret)
+			arch_ptrace_attach(child);
 		goto out_put_task_struct;
 	}
 
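arch_ptrace_attach() defaults to a no-op, so only architectures that define the macro before sys_ptrace() is compiled see any behavior change. A hypothetical per-arch override (names illustrative, not from this patch) would look like:

	/* in <asm/ptrace.h>, for an arch with lazily-synced debug state: */
	extern void my_arch_sync_debug_state(struct task_struct *child);
	#define arch_ptrace_attach(child)	my_arch_sync_debug_state(child)
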
diff --git a/kernel/resource.c b/kernel/resource.c
index 9bd14fd3e6de..a358142ff48f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -234,7 +234,7 @@ EXPORT_SYMBOL(release_resource);
  * the caller must specify res->start, res->end, res->flags.
  * If found, returns 0, res is overwritten, if not found, returns -1.
  */
-int find_next_system_ram(struct resource *res)
+static int find_next_system_ram(struct resource *res)
 {
 	resource_size_t start, end;
 	struct resource *p;
@@ -267,6 +267,30 @@ int find_next_system_ram(struct resource *res)
 	res->end = p->end;
 	return 0;
 }
+int
+walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
+			int (*func)(unsigned long, unsigned long, void *))
+{
+	struct resource res;
+	unsigned long pfn, len;
+	u64 orig_end;
+	int ret = -1;
+	res.start = (u64) start_pfn << PAGE_SHIFT;
+	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
+	res.flags = IORESOURCE_MEM;
+	orig_end = res.end;
+	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
+		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+		len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
+		ret = (*func)(pfn, len, arg);
+		if (ret)
+			break;
+		res.start = res.end + 1;
+		res.end = orig_end;
+	}
+	return ret;
+}
+
 #endif
 
 /*
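walk_memory_resource() hands each "System RAM" chunk inside the pfn range to the callback and stops at the first non-zero return, which it also passes back to the caller. A minimal caller sketch (callback and the base_pfn/npages variables are hypothetical):

	/* Hypothetical callback: tally System RAM pages in the range. */
	static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
				   void *arg)
	{
		unsigned long *total = arg;

		*total += nr_pages;
		return 0;	/* non-zero would abort the walk */
	}

	unsigned long total = 0;
	int ret = walk_memory_resource(base_pfn, npages, &total, count_ram_pages);
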
diff --git a/kernel/sched.c b/kernel/sched.c
index bba57adb9504..0da2b2635c54 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5869,7 +5869,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 				 struct sched_group **sg)
 {
 	int group;
-	cpumask_t mask = cpu_sibling_map[cpu];
+	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
 	cpus_and(mask, mask, *cpu_map);
 	group = first_cpu(mask);
 	if (sg)
@@ -5898,7 +5898,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
 	cpus_and(mask, mask, *cpu_map);
 	group = first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_t mask = cpu_sibling_map[cpu];
+	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
 	cpus_and(mask, mask, *cpu_map);
 	group = first_cpu(mask);
 #else
@@ -6132,7 +6132,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			p = sd;
 			sd = &per_cpu(cpu_domains, i);
 			*sd = SD_SIBLING_INIT;
-			sd->span = cpu_sibling_map[i];
+			sd->span = per_cpu(cpu_sibling_map, i);
 			cpus_and(sd->span, sd->span, *cpu_map);
 			sd->parent = p;
 			p->child = sd;
@@ -6143,7 +6143,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
 	for_each_cpu_mask(i, *cpu_map) {
-		cpumask_t this_sibling_map = cpu_sibling_map[i];
+		cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
 		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
@@ -6348,35 +6348,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	arch_destroy_sched_domains(cpu_map);
 }
 
-/*
- * Partition sched domains as specified by the cpumasks below.
- * This attaches all cpus from the cpumasks to the NULL domain,
- * waits for a RCU quiescent period, recalculates sched
- * domain information and then attaches them back to the
- * correct sched domains
- * Call with hotplug lock held
- */
-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
-{
-	cpumask_t change_map;
-	int err = 0;
-
-	cpus_and(*partition1, *partition1, cpu_online_map);
-	cpus_and(*partition2, *partition2, cpu_online_map);
-	cpus_or(change_map, *partition1, *partition2);
-
-	/* Detach sched domains from all of the affected cpus */
-	detach_destroy_domains(&change_map);
-	if (!cpus_empty(*partition1))
-		err = build_sched_domains(partition1);
-	if (!err && !cpus_empty(*partition2))
-		err = build_sched_domains(partition2);
-
-	register_sched_domain_sysctl();
-
-	return err;
-}
-
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 static int arch_reinit_sched_domains(void)
 {
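cpu_sibling_map moves from a statically-sized array to a per-CPU variable, so every reader switches to the per_cpu() accessor; partition_sched_domains() can be deleted because its only caller, the cpuset update_cpu_domains() removed above, is gone. The conversion pattern, as it appears throughout this file:

	/* before: array indexing */
	cpumask_t mask = cpu_sibling_map[cpu];

	/* after: per-CPU accessor */
	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
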
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ec14aa8ac51f..96efbb859997 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -880,6 +880,14 @@ static ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &hugetlb_treat_movable_handler,
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "hugetlb_dynamic_pool",
+		.data		= &hugetlb_dynamic_pool,
+		.maxlen		= sizeof(hugetlb_dynamic_pool),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #endif
 	{
 		.ctl_name	= VM_LOWMEM_RESERVE_RATIO,
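The new entry uses CTL_UNNUMBERED, so it is reachable only through /proc/sys and needs no binary sysctl number. Assuming the backing hugetlb_dynamic_pool variable is wired up on the mm side, the pool would be toggled at runtime with, e.g.:

	echo 1 > /proc/sys/vm/hugetlb_dynamic_pool
	sysctl -w vm.hugetlb_dynamic_pool=1
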
diff --git a/kernel/time.c b/kernel/time.c
index 2289a8d68314..1afcc78dc3b1 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -57,11 +57,7 @@ EXPORT_SYMBOL(sys_tz);
  */
 asmlinkage long sys_time(time_t __user * tloc)
 {
-	time_t i;
-	struct timespec tv;
-
-	getnstimeofday(&tv);
-	i = tv.tv_sec;
+	time_t i = get_seconds();
 
 	if (tloc) {
 		if (put_user(i,tloc))
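get_seconds() simply returns xtime.tv_sec, a word-sized read, so sys_time() no longer pays for the seqlock loop and clocksource read that getnstimeofday() performs only to throw the nanoseconds away. The simplification in isolation:

	/* before: full nanosecond-resolution read */
	struct timespec tv;
	getnstimeofday(&tv);
	i = tv.tv_sec;

	/* after: seconds-granularity read */
	i = get_seconds();
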
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4ad79f6bdec6..7e8983aecf83 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -49,19 +49,12 @@ struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
 static unsigned long total_sleep_time;		/* seconds */
 EXPORT_SYMBOL(xtime);
 
-
-#ifdef CONFIG_NO_HZ
 static struct timespec xtime_cache __attribute__ ((aligned (16)));
 static inline void update_xtime_cache(u64 nsec)
 {
 	xtime_cache = xtime;
 	timespec_add_ns(&xtime_cache, nsec);
 }
-#else
-#define xtime_cache xtime
-/* We do *not* want to evaluate the argument for this case */
-#define update_xtime_cache(n) do { } while (0)
-#endif
 
 static struct clocksource *clock;	/* pointer to current clocksource */
 