Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c            |  2
-rw-r--r--  kernel/dma-coherent.c      | 47
-rw-r--r--  kernel/fork.c              | 16
-rw-r--r--  kernel/hrtimer.c           |  4
-rw-r--r--  kernel/kallsyms.c          | 16
-rw-r--r--  kernel/kprobes.c           |  2
-rw-r--r--  kernel/posix-cpu-timers.c  | 70
-rw-r--r--  kernel/power/Makefile      |  3
-rw-r--r--  kernel/rcuclassic.c        |  2
-rw-r--r--  kernel/rcutree.c           |  2
-rw-r--r--  kernel/relay.c             |  4
-rw-r--r--  kernel/resource.c          |  1
-rw-r--r--  kernel/sched.c             | 13
-rw-r--r--  kernel/sched_fair.c        | 37
-rw-r--r--  kernel/sched_stats.h       | 33
-rw-r--r--  kernel/softlockup.c        |  9
-rw-r--r--  kernel/sysctl.c            | 11
-rw-r--r--  kernel/time/tick-sched.c   |  2
-rw-r--r--  kernel/workqueue.c         | 20
19 files changed, 136 insertions(+), 158 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 647c77a88fcb..a85678865c5e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -568,7 +568,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * load balancing domains (sched domains) as specified by that partial
  * partition.
  *
- * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
  * for a background explanation of this.
  *
  * Does not return errors, on the theory that the callers of this
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 038707404b76..962a3b574f21 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @size: size of requested memory area
  * @dma_handle: This will be filled with the correct dma handle
  * @ret: This pointer will be filled with the virtual address
- *       to allocated area.
+ *       to allocated area.
  *
  * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
        mem = dev->dma_mem;
        if (!mem)
                return 0;
-       if (unlikely(size > mem->size))
-               return 0;
+
+       *ret = NULL;
+
+       if (unlikely(size > (mem->size << PAGE_SHIFT)))
+               goto err;
 
        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-       if (pageno >= 0) {
-               /*
-                * Memory was found in the per-device arena.
-                */
-               *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-               *ret = mem->virt_base + (pageno << PAGE_SHIFT);
-               memset(*ret, 0, size);
-       } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
-               /*
-                * The per-device arena is exhausted and we are not
-                * permitted to fall back to generic memory.
-                */
-               *ret = NULL;
-       } else {
-               /*
-                * The per-device arena is exhausted and we are
-                * permitted to fall back to generic memory.
-                */
-               return 0;
-       }
+       if (unlikely(pageno < 0))
+               goto err;
+
+       /*
+        * Memory was found in the per-device area.
+        */
+       *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+       *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+       memset(*ret, 0, size);
+
        return 1;
+
+err:
+       /*
+        * In the case where the allocation can not be satisfied from the
+        * per-device area, try to fall back to generic memory if the
+        * constraints allow it.
+        */
+       return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
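The rework above also fixes a units bug: mem->size counts pages, while the requested size is in bytes, so the old guard "size > mem->size" compared bytes against pages and let oversized requests through. A minimal userspace sketch of the corrected comparison (the PAGE_SHIFT value and the pool struct here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */

/* Illustrative stand-in for struct dma_coherent_mem: size is in pages. */
struct coherent_pool {
        size_t size;                    /* pool size, in pages */
};

/* Compare bytes against bytes by converting the pool size with PAGE_SHIFT. */
static bool request_fits(const struct coherent_pool *mem, size_t bytes)
{
        return bytes <= ((size_t)mem->size << PAGE_SHIFT);
}

With both failure paths funnelled through err:, the return value "mem->flags & DMA_MEMORY_EXCLUSIVE" reads naturally: nonzero means "handled here, allocation failed" (*ret stays NULL), zero means the caller may fall back to generic memory.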
diff --git a/kernel/fork.c b/kernel/fork.c
index bf0cef8bbdf2..242a706e7721 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
        struct signal_struct *sig;
-       int ret;
 
        if (clone_flags & CLONE_THREAD) {
-               ret = thread_group_cputime_clone_thread(current);
-               if (likely(!ret)) {
-                       atomic_inc(&current->signal->count);
-                       atomic_inc(&current->signal->live);
-               }
-               return ret;
+               atomic_inc(&current->signal->count);
+               atomic_inc(&current->signal->live);
+               return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+       if (sig)
+               posix_cpu_timers_init_group(sig);
+
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);
 
-       posix_cpu_timers_init_group(sig);
-
        acct_init_pacct(&sig->pacct);
 
        tty_audit_fork(sig);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2dc30c59c5fd..f33afb0407bc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -614,7 +614,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-       /* Retrigger the CPU local events: */
+       WARN_ONCE(!irqs_disabled(),
+                 KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
        retrigger_next_event(NULL);
 }
 
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index e694afa0eb8c..7b8b0f21a5b1 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,19 +30,20 @@
 #define all_var 0
 #endif
 
-extern const unsigned long kallsyms_addresses[];
-extern const u8 kallsyms_names[];
+/* These will be re-linked against their real values during the second link stage */
+extern const unsigned long kallsyms_addresses[] __attribute__((weak));
+extern const u8 kallsyms_names[] __attribute__((weak));
 
 /* tell the compiler that the count isn't in the small data section if the arch
  * has one (eg: FRV)
  */
 extern const unsigned long kallsyms_num_syms
-__attribute__((__section__(".rodata")));
+__attribute__((weak, section(".rodata")));
 
-extern const u8 kallsyms_token_table[];
-extern const u16 kallsyms_token_index[];
+extern const u8 kallsyms_token_table[] __attribute__((weak));
+extern const u16 kallsyms_token_index[] __attribute__((weak));
 
-extern const unsigned long kallsyms_markers[];
+extern const unsigned long kallsyms_markers[] __attribute__((weak));
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
@@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr,
        unsigned long symbol_start = 0, symbol_end = 0;
        unsigned long i, low, high, mid;
 
+       /* This kernel should never had been booted. */
+       BUG_ON(!kallsyms_addresses);
+
        /* do a binary search on the sorted kallsyms_addresses array */
        low = 0;
        high = kallsyms_num_syms;
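The weak attributes above let the first-stage vmlinux link succeed before the kallsyms tables have been generated; an undefined weak symbol resolves to address zero, which is exactly what the new BUG_ON(!kallsyms_addresses) tests for. A standalone sketch of that linker behaviour (plain C, not kernel code):

/* If no object file defines this array, the linker resolves the weak
 * reference to address 0 rather than failing the link. */
extern const unsigned long kallsyms_addresses[] __attribute__((weak));

int kallsyms_tables_present(void)
{
        /* For an undefined weak array, the array's address itself is 0. */
        return kallsyms_addresses != 0;
}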
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b9cbdc0127a..7ba8cd9845cb 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -123,7 +123,7 @@ static int collect_garbage_slots(void);
 static int __kprobes check_safety(void)
 {
        int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a47832..fa07da94d7be 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
 #include <linux/kernel_stat.h>
 
 /*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields.  Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group.  Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-       struct signal_struct *sig = tsk->signal;
-       struct task_cputime *cputime;
-
-       /*
-        * If we have multiple threads and we don't already have a
-        * per-CPU task_cputime struct (checked in the caller), allocate
-        * one and fill it in with the times accumulated so far.  We may
-        * race with another thread so recheck after we pick up the sighand
-        * lock.
-        */
-       cputime = alloc_percpu(struct task_cputime);
-       if (cputime == NULL)
-               return -ENOMEM;
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (sig->cputime.totals) {
-               spin_unlock_irq(&tsk->sighand->siglock);
-               free_percpu(cputime);
-               return 0;
-       }
-       sig->cputime.totals = cputime;
-       cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-       cputime->utime = tsk->utime;
-       cputime->stime = tsk->stime;
-       cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-       spin_unlock_irq(&tsk->sighand->siglock);
-       return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk:       The task we use to identify the thread group.
- * @times:     task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-       struct task_struct *tsk,
-       struct task_cputime *times)
-{
-       struct task_cputime *totals, *tot;
-       int i;
-
-       totals = tsk->signal->cputime.totals;
-       if (!totals) {
-               times->utime = tsk->utime;
-               times->stime = tsk->stime;
-               times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-               return;
-       }
-
-       times->stime = times->utime = cputime_zero;
-       times->sum_exec_runtime = 0;
-       for_each_possible_cpu(i) {
-               tot = per_cpu_ptr(totals, i);
-               times->utime = cputime_add(times->utime, tot->utime);
-               times->stime = cputime_add(times->stime, tot->stime);
-               times->sum_exec_runtime += tot->sum_exec_runtime;
-       }
-}
-
-/*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
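The deleted helpers are not simply dropped: the kernel/sched_stats.h hunks below switch signal->cputime.totals from a per-CPU pointer to a single lock-protected structure, which makes the per-CPU summing loop above unnecessary. The replacement accessor itself lies outside this diff; purely as a hedged sketch, a locked equivalent of thread_group_cputime() under that layout would look roughly like:

/* Sketch only: assumes totals is an embedded struct guarded by a spinlock,
 * as implied by the kernel/sched_stats.h changes below. */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct task_cputime *totals = &tsk->signal->cputime.totals;
        unsigned long flags;

        spin_lock_irqsave(&totals->lock, flags);
        *times = *totals;
        spin_unlock_irqrestore(&totals->lock, flags);
}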
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 597823b5b700..d7a10167a25b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -DDEBUG
 endif
 
 obj-y                          := main.o
-obj-$(CONFIG_PM_SLEEP)         += process.o console.o
+obj-$(CONFIG_PM_SLEEP)         += console.o
+obj-$(CONFIG_FREEZER)          += process.o
 obj-$(CONFIG_HIBERNATION)      += swsusp.o disk.o snapshot.o swap.o user.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)      += poweroff.o
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 490934fc7ac3..bd5a9003497c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user)
        raise_rcu_softirq();
 }
 
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
 {
        unsigned long flags;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f2d8638e6c60..b2fd602a6f6f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu)
  * access due to the fact that this CPU cannot possibly have any RCU
  * callbacks in flight yet.
  */
-static void
+static void __cpuinit
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
        unsigned long flags;
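Both RCU hunks (rcuclassic.c above and rcutree.c here) make the same one-word change: rcu_init_percpu_data() runs only while a CPU is being brought up, so it can be tagged __cpuinit and discarded after boot on kernels without CPU hotplug. A standalone illustration of the mechanism (the macro here is a simplification of the kernel's, not kernel code):

/* __cpuinit places code in a section the kernel can free after boot
 * when CONFIG_HOTPLUG_CPU is off; on hotplug kernels it is kept. */
#define __cpuinit __attribute__((__section__(".cpuinit.text")))

static void __cpuinit example_cpu_bringup(int cpu)
{
        (void)cpu;      /* reached only from the boot/CPU-online path */
}

int main(void)
{
        example_cpu_bringup(0);
        return 0;
}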
diff --git a/kernel/relay.c b/kernel/relay.c
index 09ac2008f77b..9d79b7854fa6 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan,
 
        mutex_lock(&relay_channels_mutex);
        /* Is chan already set up? */
-       if (unlikely(chan->has_base_filename))
+       if (unlikely(chan->has_base_filename)) {
+               mutex_unlock(&relay_channels_mutex);
                return -EEXIST;
+       }
        chan->has_base_filename = 1;
        chan->parent = parent;
        curr_cpu = get_cpu();
diff --git a/kernel/resource.c b/kernel/resource.c
index ca6a1536b205..fd5d7d574bb9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -620,6 +620,7 @@ resource_size_t resource_alignment(struct resource *res)
  * @start: resource start address
  * @n: resource region size
  * @name: reserving caller's ID string
+ * @flags: IO resource flags
  */
 struct resource * __request_region(struct resource *parent,
                                   resource_size_t start, resource_size_t n,
diff --git a/kernel/sched.c b/kernel/sched.c
index eb1931eef587..52bbf1c842a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO                2
-#define WMULT_IDLEPRIO         (1 << 31)
+#define WEIGHT_IDLEPRIO                3
+#define WMULT_IDLEPRIO         1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
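The two constants stay coupled: WMULT_* is the precomputed fixed-point inverse 2^32 / weight, which lets the scheduler multiply and shift instead of dividing on hot paths. 1431655765 is exactly floor(2^32 / 3), matching the new weight of 3, just as the old (1 << 31) was 2^32 / 2. A quick check in plain C:

#include <stdio.h>

int main(void)
{
        /* New pair: weight 3 -> inverse 2^32 / 3 */
        printf("%llu\n", (1ULL << 32) / 3);    /* 1431655765 */
        /* Old pair: weight 2 -> inverse 2^32 / 2 == 1 << 31 */
        printf("%llu\n", (1ULL << 32) / 2);    /* 2147483648 */
        return 0;
}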
@@ -4440,7 +4440,7 @@ void __kprobes sub_preempt_count(int val)
        /*
         * Underflow?
         */
-       if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
                return;
        /*
         * Is the spinlock portion underflowing?
@@ -9050,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
                runtime = d->rt_runtime;
        }
 
+#ifdef CONFIG_USER_SCHED
+       if (tg == &root_task_group) {
+               period = global_rt_period();
+               runtime = global_rt_runtime();
+       }
+#endif
+
        /*
         * Cannot have more runtime than the period.
         */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c75557..5cc1c162044f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
                                                   struct sched_entity,
                                                   run_node);
 
-               if (vruntime == cfs_rq->min_vruntime)
+               if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
        for_each_sched_entity(se) {
-               struct load_weight *load = &cfs_rq->load;
+               struct load_weight *load;
+
+               cfs_rq = cfs_rq_of(se);
+               load = &cfs_rq->load;
 
                if (unlikely(!se->on_rq)) {
                        struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
                unsigned long thresh = sysctl_sched_latency;
 
                /*
-                * convert the sleeper threshold into virtual time
+                * Convert the sleeper threshold into virtual time.
+                * SCHED_IDLE is a special sub-class.  We care about
+                * fairness only relative to other SCHED_IDLE tasks,
+                * all of which have the same weight.
                 */
-               if (sched_feat(NORMALIZED_SLEEPER))
+               if (sched_feat(NORMALIZED_SLEEPER) &&
+                               task_of(se)->policy != SCHED_IDLE)
                        thresh = calc_delta_fair(thresh, se);
 
                vruntime -= thresh;
@@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->last = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->last = se;
+       }
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-       for_each_sched_entity(se)
-               cfs_rq_of(se)->next = se;
+       if (likely(task_of(se)->policy != SCHED_IDLE)) {
+               for_each_sched_entity(se)
+                       cfs_rq_of(se)->next = se;
+       }
 }
 
 /*
@@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
                return;
 
        /*
-        * Batch tasks do not preempt (their preemption is driven by
+        * Batch and idle tasks do not preempt (their preemption is driven by
         * the tick):
         */
-       if (unlikely(p->policy == SCHED_BATCH))
+       if (unlikely(p->policy != SCHED_NORMAL))
                return;
 
+       /* Idle tasks are by definition preempted by everybody. */
+       if (unlikely(curr->policy == SCHED_IDLE)) {
+               resched_task(curr);
+               return;
+       }
+
        if (!sched_feat(WAKEUP_PREEMPT))
                return;
 
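Taken together, the sched_fair.c hunks give SCHED_IDLE consistent second-class treatment: idle tasks get no normalized-sleeper credit, never become last/next buddies, never preempt on wakeup, and are themselves preempted by any waking SCHED_NORMAL task. A standalone condensation of that wakeup policy (the enum values are illustrative, not the kernel's):

#include <stdio.h>

enum policy { SCHED_NORMAL, SCHED_BATCH, SCHED_IDLE };

/* 1 = preempt current, 0 = never, -1 = defer to the buddy/feature checks. */
static int wakeup_preempt_policy(enum policy woken, enum policy curr)
{
        if (woken != SCHED_NORMAL)      /* batch and idle tasks do not preempt */
                return 0;
        if (curr == SCHED_IDLE)         /* idle tasks are preempted by everybody */
                return 1;
        return -1;
}

int main(void)
{
        printf("%d\n", wakeup_preempt_policy(SCHED_NORMAL, SCHED_IDLE)); /* 1 */
        printf("%d\n", wakeup_preempt_policy(SCHED_IDLE, SCHED_NORMAL)); /* 0 */
        return 0;
}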
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d1226..8ab0cef8ecab 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
 {
+       struct task_cputime *times;
        struct signal_struct *sig;
 
        /* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
                return;
 
        sig = tsk->signal;
-       if (sig->cputime.totals) {
-               struct task_cputime *times;
+       times = &sig->cputime.totals;
 
-               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-               times->utime = cputime_add(times->utime, cputime);
-               put_cpu_no_resched();
-       }
+       spin_lock(&times->lock);
+       times->utime = cputime_add(times->utime, cputime);
+       spin_unlock(&times->lock);
 }
 
 /**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
 {
+       struct task_cputime *times;
        struct signal_struct *sig;
 
        /* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
                return;
 
        sig = tsk->signal;
-       if (sig->cputime.totals) {
-               struct task_cputime *times;
+       times = &sig->cputime.totals;
 
-               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-               times->stime = cputime_add(times->stime, cputime);
-               put_cpu_no_resched();
-       }
+       spin_lock(&times->lock);
+       times->stime = cputime_add(times->stime, cputime);
+       spin_unlock(&times->lock);
 }
 
 /**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
 {
+       struct task_cputime *times;
        struct signal_struct *sig;
 
        sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
        if (unlikely(!sig))
                return;
 
-       if (sig->cputime.totals) {
-               struct task_cputime *times;
+       times = &sig->cputime.totals;
 
-               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-               times->sum_exec_runtime += ns;
-               put_cpu_no_resched();
-       }
+       spin_lock(&times->lock);
+       times->sum_exec_runtime += ns;
+       spin_unlock(&times->lock);
 }
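All three helpers above make the same transformation: drop the NULL check and the per_cpu_ptr()/get_cpu() dance, and serialize with a spinlock on a single shared totals structure instead. The matching header change is not part of this diff; a hedged reconstruction of the shapes it implies:

/* Reconstruction only -- the real definitions live in
 * include/linux/sched.h, outside this diff. */
struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
        spinlock_t lock;                /* presumably new: guards the fields above */
};

struct thread_group_cputime {
        struct task_cputime totals;     /* previously: struct task_cputime *totals, per-CPU */
};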
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9188c66278a..85d5a2455103 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -16,6 +16,7 @@
 #include <linux/lockdep.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 
 #include <asm/irq_regs.h>
 
@@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void)
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
+int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+                            struct file *filp, void __user *buffer,
+                            size_t *lenp, loff_t *ppos)
+{
+       touch_all_softlockup_watchdogs();
+       return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+}
+
 /*
  * This callback runs from the timer interrupt, and checks
  * whether the watchdog thread has hung or not:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 3e38b74b6124..790f9d785663 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -144,6 +144,7 @@ extern int acct_parm[];
 
 #ifdef CONFIG_IA64
 extern int no_unaligned_warning;
+extern int unaligned_dump_stack;
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
@@ -781,6 +782,14 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "unaligned-dump-stack",
+               .data           = &unaligned_dump_stack,
+               .maxlen         = sizeof (int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
 #endif
 #ifdef CONFIG_DETECT_SOFTLOCKUP
        {
@@ -800,7 +809,7 @@ static struct ctl_table kern_table[] = {
                .data           = &softlockup_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
+               .proc_handler   = &proc_dosoftlockup_thresh,
                .strategy       = &sysctl_intvec,
                .extra1         = &neg_one,
                .extra2         = &sixty,
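The new table entry exposes the IA64 unaligned_dump_stack flag by name only (CTL_UNNUMBERED), so it is reached through procfs. A userspace sketch; the path is inferred from the procname above and the knob only exists on CONFIG_IA64 kernels:

#include <stdio.h>

int main(void)
{
        /* Assumed path: /proc/sys/kernel/ plus the "unaligned-dump-stack" procname. */
        FILE *f = fopen("/proc/sys/kernel/unaligned-dump-stack", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "1\n");      /* dump a stack trace on each unaligned access */
        return fclose(f) ? 1 : 0;
}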
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1b6c05bd0d0a..d3f1ef4d5cbe 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz);
  * value. We do this unconditionally on any cpu, as we don't know whether the
  * cpu, which has the update task assigned is in a long sleep.
  */
-void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(void)
 {
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2f445833ae37..1f0c509b40d3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -971,6 +971,8 @@ undo:
 }
 
 #ifdef CONFIG_SMP
+static struct workqueue_struct *work_on_cpu_wq __read_mostly;
+
 struct work_for_cpu {
        struct work_struct work;
        long (*fn)(void *);
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w)
  * @fn: the function to run
  * @arg: the function arg
  *
- * This will return -EINVAL in the cpu is not online, or the return value
- * of @fn otherwise.
+ * This will return the value @fn returns.
+ * It is up to the caller to ensure that the cpu doesn't go offline.
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
        INIT_WORK(&wfc.work, do_work_for_cpu);
        wfc.fn = fn;
        wfc.arg = arg;
-       get_online_cpus();
-       if (unlikely(!cpu_online(cpu)))
-               wfc.ret = -EINVAL;
-       else {
-               schedule_work_on(cpu, &wfc.work);
-               flush_work(&wfc.work);
-       }
-       put_online_cpus();
+       queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
+       flush_work(&wfc.work);
 
        return wfc.ret;
 }
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void)
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
+#ifdef CONFIG_SMP
+       work_on_cpu_wq = create_workqueue("work_on_cpu");
+       BUG_ON(!work_on_cpu_wq);
+#endif
 }
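With the online check gone, work_on_cpu() now trusts its caller: the work item is queued on the dedicated work_on_cpu workqueue, and the target CPU must stay online for the duration, as the updated comment states. A sketch of a caller against the signature shown above (the callback and its contents are hypothetical):

/* Hypothetical caller: fn runs in process context on the chosen CPU and
 * its long return value is passed back through work_on_cpu(). The caller
 * is responsible for keeping that CPU online across the call. */
static long read_local_state(void *arg)
{
        (void)arg;
        return 0;       /* e.g. sample a per-CPU register or variable here */
}

static long query_cpu(unsigned int cpu)
{
        return work_on_cpu(cpu, read_local_state, NULL);
}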