author     Ingo Molnar <mingo@elte.hu>  2008-11-11 03:16:20 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-11-11 03:16:20 -0500
commit     45b86a96f17cb2900f291129b0e67287400e45b2 (patch)
tree       f7968bf36e3947ee42251f7eebc6ea5f24aca202 /kernel
parent     072ba49838b42c873c496d72c91bb237914cf3b6 (diff)
parent     4143c5cb36331155a1823af8b3a8c761a59fed71 (diff)
Merge branch 'devel' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c              1
-rw-r--r--  kernel/cpu.c                 3
-rw-r--r--  kernel/sched.c              13
-rw-r--r--  kernel/sched_fair.c         76
-rw-r--r--  kernel/sched_features.h      1
-rw-r--r--  kernel/smp.c                18
-rw-r--r--  kernel/timer.c             129
-rw-r--r--  kernel/trace/ring_buffer.c   2
-rw-r--r--  kernel/trace/trace.c        17
-rw-r--r--  kernel/workqueue.c          45
10 files changed, 235 insertions, 70 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 35eebd5510c2..358e77564e6f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2497,7 +2497,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	list_del(&cgrp->sibling);
 	spin_lock(&cgrp->dentry->d_lock);
 	d = dget(cgrp->dentry);
-	cgrp->dentry = NULL;
 	spin_unlock(&d->d_lock);
 
 	cgroup_d_remove_dir(d);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 86d49045daed..5a732c5ef08b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -499,3 +499,6 @@ const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
 #endif
 };
 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
+
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
+EXPORT_SYMBOL(cpu_all_bits);
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc6f462..57c933ffbee1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,7 +397,7 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next;
+	struct sched_entity *curr, *next, *last;
 
 	unsigned long nr_spread_over;
 
@@ -1805,7 +1805,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+	if (sched_feat(CACHE_HOT_BUDDY) &&
+			(&p->se == cfs_rq_of(&p->se)->next ||
+			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
 
 	if (p->sched_class != &fair_sched_class)
@@ -6875,15 +6877,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
-	for (tmp = sd; tmp; tmp = tmp->parent) {
+	for (tmp = sd; tmp; ) {
 		struct sched_domain *parent = tmp->parent;
 		if (!parent)
 			break;
+
 		if (sd_parent_degenerate(tmp, parent)) {
 			tmp->parent = parent->parent;
 			if (parent->parent)
 				parent->parent->child = tmp;
-		}
+		} else
+			tmp = tmp->parent;
 	}
 
 	if (sd && sd_degenerate(sd)) {
@@ -7672,6 +7676,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 error:
 	free_sched_groups(cpu_map, tmpmask);
 	SCHED_CPUMASK_FREE((void *)allmasks);
+	kfree(rd);
 	return -ENOMEM;
 #endif
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce514afd78ff..51aa3e102acb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -341,23 +341,20 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->rb_leftmost = next_node;
 	}
 
-	if (cfs_rq->next == se)
-		cfs_rq->next = NULL;
-
 	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
-static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
-{
-	return cfs_rq->rb_leftmost;
-}
-
 static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 {
-	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
+	struct rb_node *left = cfs_rq->rb_leftmost;
+
+	if (!left)
+		return NULL;
+
+	return rb_entry(left, struct sched_entity, run_node);
 }
 
-static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 
@@ -741,6 +738,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 #endif
 	}
 
+	if (cfs_rq->last == se)
+		cfs_rq->last = NULL;
+
+	if (cfs_rq->next == se)
+		cfs_rq->next = NULL;
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	account_entity_dequeue(cfs_rq, se);
@@ -794,24 +797,15 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static int
 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 
-static struct sched_entity *
-pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
-		return se;
-
-	return cfs_rq->next;
-}
-
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
-	struct sched_entity *se = NULL;
+	struct sched_entity *se = __pick_next_entity(cfs_rq);
 
-	if (first_fair(cfs_rq)) {
-		se = __pick_next_entity(cfs_rq);
-		se = pick_next(cfs_rq, se);
-		set_next_entity(cfs_rq, se);
-	}
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
+		return cfs_rq->next;
+
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
+		return cfs_rq->last;
 
 	return se;
 }
@@ -1325,26 +1319,53 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	return 0;
 }
 
+static void set_last_buddy(struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		cfs_rq_of(se)->last = se;
+}
+
+static void set_next_buddy(struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		cfs_rq_of(se)->next = se;
+}
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 {
 	struct task_struct *curr = rq->curr;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 
 	if (unlikely(rt_prio(p->prio))) {
+		struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+
 		update_rq_clock(rq);
 		update_curr(cfs_rq);
 		resched_task(curr);
 		return;
 	}
 
+	if (unlikely(p->sched_class != &fair_sched_class))
+		return;
+
 	if (unlikely(se == pse))
 		return;
 
-	cfs_rq_of(pse)->next = pse;
+	/*
+	 * Only set the backward buddy when the current task is still on the
+	 * rq. This can happen when a wakeup gets interleaved with schedule on
+	 * the ->pre_schedule() or idle_balance() point, either of which can
+	 * drop the rq lock.
+	 *
+	 * Also, during early boot the idle thread is in the fair class, for
+	 * obvious reasons its a bad idea to schedule back to the idle thread.
+	 */
+	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
+		set_last_buddy(se);
+	set_next_buddy(pse);
 
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task
@@ -1396,6 +1417,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
 
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index fda016218296..da5d93b5d2c6 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -12,3 +12,4 @@ SCHED_FEAT(LB_BIAS, 1)
 SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
 SCHED_FEAT(ASYM_EFF_LOAD, 1)
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
+SCHED_FEAT(LAST_BUDDY, 1)
diff --git a/kernel/smp.c b/kernel/smp.c
index f362a8553777..75c8dde58c55 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -51,10 +51,6 @@ static void csd_flag_wait(struct call_single_data *data)
 {
 	/* Wait for response */
 	do {
-		/*
-		 * We need to see the flags store in the IPI handler
-		 */
-		smp_mb();
 		if (!(data->flags & CSD_FLAG_WAIT))
 			break;
 		cpu_relax();
@@ -76,6 +72,11 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
 	list_add_tail(&data->list, &dst->list);
 	spin_unlock_irqrestore(&dst->lock, flags);
 
+	/*
+	 * Make the list addition visible before sending the ipi.
+	 */
+	smp_mb();
+
 	if (ipi)
 		arch_send_call_function_single_ipi(cpu);
 
@@ -157,7 +158,7 @@ void generic_smp_call_function_single_interrupt(void)
 	 * Need to see other stores to list head for checking whether
 	 * list is empty without holding q->lock
 	 */
-	smp_mb();
+	smp_read_barrier_depends();
 	while (!list_empty(&q->list)) {
 		unsigned int data_flags;
 
@@ -191,7 +192,7 @@ void generic_smp_call_function_single_interrupt(void)
 		/*
 		 * See comment on outer loop
 		 */
-		smp_mb();
+		smp_read_barrier_depends();
 	}
 }
 
@@ -370,6 +371,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
 	list_add_tail_rcu(&data->csd.list, &call_function_queue);
 	spin_unlock_irqrestore(&call_function_lock, flags);
 
+	/*
+	 * Make the list addition visible before sending the ipi.
+	 */
+	smp_mb();
+
 	/* Send a message to all CPUs in the map */
 	arch_send_call_function_ipi(mask);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index 56becf373c58..dbd50fabe4c7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -112,27 +112,8 @@ timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
 				      tbase_get_deferrable(timer->base));
 }
 
-/**
- * __round_jiffies - function to round jiffies to a full second
- * @j: the time in (absolute) jiffies that should be rounded
- * @cpu: the processor number on which the timeout will happen
- *
- * __round_jiffies() rounds an absolute time in the future (in jiffies)
- * up or down to (approximately) full seconds. This is useful for timers
- * for which the exact time they fire does not matter too much, as long as
- * they fire approximately every X seconds.
- *
- * By rounding these timers to whole seconds, all such timers will fire
- * at the same time, rather than at various times spread out. The goal
- * of this is to have the CPU wake up less, which saves power.
- *
- * The exact rounding is skewed for each processor to avoid all
- * processors firing at the exact same time, which could lead
- * to lock contention or spurious cache line bouncing.
- *
- * The return value is the rounded version of the @j parameter.
- */
-unsigned long __round_jiffies(unsigned long j, int cpu)
+static unsigned long round_jiffies_common(unsigned long j, int cpu,
+		bool force_up)
 {
 	int rem;
 	unsigned long original = j;
@@ -154,8 +135,9 @@ unsigned long __round_jiffies(unsigned long j, int cpu)
 	 * due to delays of the timer irq, long irq off times etc etc) then
 	 * we should round down to the whole second, not up. Use 1/4th second
 	 * as cutoff for this rounding as an extreme upper bound for this.
+	 * But never round down if @force_up is set.
 	 */
-	if (rem < HZ/4) /* round down */
+	if (rem < HZ/4 && !force_up) /* round down */
 		j = j - rem;
 	else /* round up */
 		j = j - rem + HZ;
@@ -167,6 +149,31 @@ unsigned long __round_jiffies(unsigned long j, int cpu)
 		return original;
 	return j;
 }
+
+/**
+ * __round_jiffies - function to round jiffies to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * __round_jiffies() rounds an absolute time in the future (in jiffies)
+ * up or down to (approximately) full seconds. This is useful for timers
+ * for which the exact time they fire does not matter too much, as long as
+ * they fire approximately every X seconds.
+ *
+ * By rounding these timers to whole seconds, all such timers will fire
+ * at the same time, rather than at various times spread out. The goal
+ * of this is to have the CPU wake up less, which saves power.
+ *
+ * The exact rounding is skewed for each processor to avoid all
+ * processors firing at the exact same time, which could lead
+ * to lock contention or spurious cache line bouncing.
+ *
+ * The return value is the rounded version of the @j parameter.
+ */
+unsigned long __round_jiffies(unsigned long j, int cpu)
+{
+	return round_jiffies_common(j, cpu, false);
+}
 EXPORT_SYMBOL_GPL(__round_jiffies);
 
 /**
@@ -191,13 +198,10 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
  */
 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 {
-	/*
-	 * In theory the following code can skip a jiffy in case jiffies
-	 * increments right between the addition and the later subtraction.
-	 * However since the entire point of this function is to use approximate
-	 * timeouts, it's entirely ok to not handle that.
-	 */
-	return __round_jiffies(j + jiffies, cpu) - jiffies;
+	unsigned long j0 = jiffies;
+
+	/* Use j0 because jiffies might change while we run */
+	return round_jiffies_common(j + j0, cpu, false) - j0;
 }
 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
 
@@ -218,7 +222,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
  */
 unsigned long round_jiffies(unsigned long j)
 {
-	return __round_jiffies(j, raw_smp_processor_id());
+	return round_jiffies_common(j, raw_smp_processor_id(), false);
 }
 EXPORT_SYMBOL_GPL(round_jiffies);
 
@@ -243,6 +247,71 @@ unsigned long round_jiffies_relative(unsigned long j)
 }
 EXPORT_SYMBOL_GPL(round_jiffies_relative);
 
+/**
+ * __round_jiffies_up - function to round jiffies up to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * This is the same as __round_jiffies() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long __round_jiffies_up(unsigned long j, int cpu)
+{
+	return round_jiffies_common(j, cpu, true);
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up);
+
+/**
+ * __round_jiffies_up_relative - function to round jiffies up to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ * @cpu: the processor number on which the timeout will happen
+ *
+ * This is the same as __round_jiffies_relative() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
+{
+	unsigned long j0 = jiffies;
+
+	/* Use j0 because jiffies might change while we run */
+	return round_jiffies_common(j + j0, cpu, true) - j0;
+}
+EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
+
+/**
+ * round_jiffies_up - function to round jiffies up to a full second
+ * @j: the time in (absolute) jiffies that should be rounded
+ *
+ * This is the same as round_jiffies() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long round_jiffies_up(unsigned long j)
+{
+	return round_jiffies_common(j, raw_smp_processor_id(), true);
+}
+EXPORT_SYMBOL_GPL(round_jiffies_up);
+
+/**
+ * round_jiffies_up_relative - function to round jiffies up to a full second
+ * @j: the time in (relative) jiffies that should be rounded
+ *
+ * This is the same as round_jiffies_relative() except that it will never
+ * round down. This is useful for timeouts for which the exact time
+ * of firing does not matter too much, as long as they don't fire too
+ * early.
+ */
+unsigned long round_jiffies_up_relative(unsigned long j)
+{
+	return __round_jiffies_up_relative(j, raw_smp_processor_id());
+}
+EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
+
 
 static inline void set_running_timer(struct tvec_base *base,
 				     struct timer_list *timer)
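The new round_jiffies_up*() helpers behave like the existing round_jiffies*() family, but they never round down, so an expiry can be coalesced onto a whole second without ever firing early. A minimal usage sketch follows; the timer name and the 5-second period are hypothetical and not part of this patch.

/* Hypothetical example: arm a timeout that may be coalesced onto a
 * whole second but must never expire early. */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timeout;		/* hypothetical timer */

static void my_timeout_rearm(void)
{
	/* round_jiffies_up() never rounds down, unlike round_jiffies() */
	mod_timer(&my_timeout, round_jiffies_up(jiffies + 5 * HZ));
}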
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3f3380638646..2f76193c3489 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1060,7 +1060,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* Did the write stamp get updated already? */
 	if (unlikely(ts < cpu_buffer->write_stamp))
-		goto again;
+		delta = 0;
 
 	if (test_time_stamp(delta)) {
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 974973e39e87..697eda36b86a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2676,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int ret, cpu;
 	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
@@ -2704,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		goto out;
 	}
 
+	/* disable all cpu buffers */
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_inc(&max_tr.data[cpu]->disabled);
+	}
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2735,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	if (tracing_disabled)
 		cnt = -ENOMEM;
  out:
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_dec(&max_tr.data[cpu]->disabled);
+	}
+
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f928f2a87b9b..d4dc69ddebd7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -970,6 +970,51 @@ undo:
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+struct work_for_cpu {
+	struct work_struct work;
+	long (*fn)(void *);
+	void *arg;
+	long ret;
+};
+
+static void do_work_for_cpu(struct work_struct *w)
+{
+	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
+
+	wfc->ret = wfc->fn(wfc->arg);
+}
+
+/**
+ * work_on_cpu - run a function in user context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
+ *
+ * This will return -EINVAL in the cpu is not online, or the return value
+ * of @fn otherwise.
+ */
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+	struct work_for_cpu wfc;
+
+	INIT_WORK(&wfc.work, do_work_for_cpu);
+	wfc.fn = fn;
+	wfc.arg = arg;
+	get_online_cpus();
+	if (unlikely(!cpu_online(cpu)))
+		wfc.ret = -EINVAL;
+	else {
+		schedule_work_on(cpu, &wfc.work);
+		flush_work(&wfc.work);
+	}
+	put_online_cpus();
+
+	return wfc.ret;
+}
+EXPORT_SYMBOL_GPL(work_on_cpu);
+#endif /* CONFIG_SMP */
+
 void __init init_workqueues(void)
 {
 	cpu_populated_map = cpu_online_map;
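The new work_on_cpu() helper gives callers a process-context way to run a function on a specific CPU without open-coding per-CPU work items. A minimal usage sketch, with a hypothetical callback that is not part of this patch, might look like:

/* Hypothetical example of calling work_on_cpu(). */
#include <linux/workqueue.h>

static long my_percpu_read(void *arg)
{
	/* runs in a workqueue thread bound to the requested CPU */
	return 0;
}

static long my_caller(unsigned int cpu)
{
	/* returns -EINVAL if @cpu is offline, else my_percpu_read()'s value */
	return work_on_cpu(cpu, my_percpu_read, NULL);
}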