author		David S. Miller <davem@davemloft.net>	2012-09-28 14:40:49 -0400
committer	David S. Miller <davem@davemloft.net>	2012-09-28 14:40:49 -0400
commit		6a06e5e1bb217be077e1f8ee2745b4c5b1aa02db (patch)
tree		8faea23112a11f52524eb413f71b7b02712d8b53 /kernel
parent		d9f72f359e00a45a6cd7cc2d5121b04b9dc927e1 (diff)
parent		6672d90fe779dc0dfffe027c3ede12609df091c2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/team/team.c
	drivers/net/usb/qmi_wwan.c
	net/batman-adv/bat_iv_ogm.c
	net/ipv4/fib_frontend.c
	net/ipv4/route.c
	net/l2tp/l2tp_netlink.c

The team, fib_frontend, route, and l2tp_netlink conflicts were simply
overlapping changes.  qmi_wwan and bat_iv_ogm were of the "use HEAD"
variety.

With help from Antonio Quartulli.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/pid_namespace.c		 6
-rw-r--r--	kernel/sched/core.c		39
-rw-r--r--	kernel/sched/fair.c		28
-rw-r--r--	kernel/time/timekeeping.c	19
-rw-r--r--	kernel/workqueue.c		37
5 files changed, 57 insertions(+), 72 deletions(-)
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index baa528d7dfbd..478bad2745e3 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -234,15 +234,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
 	 */
 
 	tmp.data = &current->nsproxy->pid_ns->last_pid;
-	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
+	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 }
 
+extern int pid_max;
+static int zero = 0;
 static struct ctl_table pid_ns_ctl_table[] = {
 	{
 		.procname = "ns_last_pid",
 		.maxlen = sizeof(int),
 		.mode = 0666, /* permissions are checked in the handler */
 		.proc_handler = pid_ns_ctl_handler,
+		.extra1 = &zero,
+		.extra2 = &pid_max,
 	},
 	{ }
 };
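
Note: the change above swaps proc_dointvec() for proc_dointvec_minmax(), which rejects writes outside the [*extra1, *extra2] window, so ns_last_pid is now clamped to [0, pid_max]. As a minimal sketch of the same pattern (my_value, my_min, and my_max are hypothetical names, not part of this commit; registration of the table is omitted):

    #include <linux/sysctl.h>

    /* Hypothetical example values; none of these exist in the commit. */
    static int my_value;
    static int my_min;		/* lower bound, 0 */
    static int my_max = 100;	/* upper bound */

    static struct ctl_table my_table[] = {
    	{
    		.procname	= "my_value",
    		.data		= &my_value,
    		.maxlen		= sizeof(int),
    		.mode		= 0644,
    		.proc_handler	= proc_dointvec_minmax,
    		.extra1		= &my_min,	/* writes below *extra1 fail */
    		.extra2		= &my_max,	/* writes above *extra2 fail */
    	},
    	{ }
    };

Out-of-range writes make proc_dointvec_minmax() return -EINVAL, so an invalid value never reaches the stored variable.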
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a4ea245f3d85..649c9f876cb1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6014,11 +6014,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
 	 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
 	 * allows us to avoid some pointer chasing select_idle_sibling().
 	 *
-	 * Iterate domains and sched_groups downward, assigning CPUs to be
-	 * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing
-	 * due to random perturbation self canceling, ie sw buddies pull
-	 * their counterpart to their CPU's hw counterpart.
-	 *
 	 * Also keep a unique ID per domain (we use the first cpu number in
 	 * the cpumask of the domain), this allows us to quickly tell if
 	 * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6032,40 +6027,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-
+	if (sd)
 		id = cpumask_first(sched_domain_span(sd));
-	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
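
Note: with the buddy wiring reverted, update_top_cache_domain() only records the highest domain sharing package resources (the last-level-cache domain) and the first CPU of its span as a per-CPU id. That id is exactly what cpus_share_cache() compares; a paraphrased sketch of that check, as it looked in kernels of this period:

    bool cpus_share_cache(int this_cpu, int that_cpu)
    {
    	/* same LLC domain iff both CPUs resolved to the same first-CPU id */
    	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
    }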
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 42d9df6a5ca4..96e2b18b6283 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
+	struct sched_group *sg;
+	int i;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
@@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		return prev_cpu;
 
 	/*
-	 * Otherwise, check assigned siblings to find an elegible idle cpu.
+	 * Otherwise, iterate the domains and find an elegible idle cpu.
 	 */
 	sd = rcu_dereference(per_cpu(sd_llc, target));
-
 	for_each_lower_domain(sd) {
-		if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p)))
-			continue;
-		if (idle_cpu(sd->idle_buddy))
-			return sd->idle_buddy;
-	}
+		sg = sd->groups;
+		do {
+			if (!cpumask_intersects(sched_group_cpus(sg),
+						tsk_cpus_allowed(p)))
+				goto next;
 
+			for_each_cpu(i, sched_group_cpus(sg)) {
+				if (!idle_cpu(i))
+					goto next;
+			}
+
+			target = cpumask_first_and(sched_group_cpus(sg),
+					tsk_cpus_allowed(p));
+			goto done;
+next:
+			sg = sg->next;
+		} while (sg != sd->groups);
+	}
+done:
 	return target;
 }
 
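
Note: instead of chasing the removed per-domain idle_buddy pointer, the restored select_idle_sibling() walks each sched_group of the LLC domain and picks a group whose CPUs are all idle, returning its first allowed CPU. A stand-alone model of that scan, with plain arrays standing in for cpumasks (every name here is hypothetical, not kernel code):

    #include <stdbool.h>
    #include <stddef.h>

    struct group { const int *cpus; size_t n; };

    static int pick_idle_group_cpu(const struct group *groups, size_t ngroups,
    			       bool (*is_idle)(int), bool (*is_allowed)(int),
    			       int fallback)
    {
    	for (size_t g = 0; g < ngroups; g++) {
    		bool intersects = false, all_idle = true;

    		for (size_t i = 0; i < groups[g].n; i++) {
    			intersects |= is_allowed(groups[g].cpus[i]);
    			all_idle &= is_idle(groups[g].cpus[i]);
    		}
    		if (!intersects || !all_idle)
    			continue;	/* the kernel's "goto next" */

    		/* first allowed CPU of the fully idle group */
    		for (size_t i = 0; i < groups[g].n; i++)
    			if (is_allowed(groups[g].cpus[i]))
    				return groups[g].cpus[i];
    	}
    	return fallback;	/* no fully idle group: keep the old target */
    }

As in the kernel loop, a group is skipped when it has no overlap with the task's allowed mask or when any of its CPUs is busy; if no group qualifies, the original target survives as the fallback.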
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 34e5eac81424..d3b91e75cecd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -303,10 +303,11 @@ void getnstimeofday(struct timespec *ts)
 		seq = read_seqbegin(&tk->lock);
 
 		ts->tv_sec = tk->xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(tk);
+		nsecs = timekeeping_get_ns(tk);
 
 	} while (read_seqretry(&tk->lock, seq));
 
+	ts->tv_nsec = 0;
 	timespec_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getnstimeofday);
@@ -345,6 +346,7 @@ void ktime_get_ts(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	struct timespec tomono;
+	s64 nsec;
 	unsigned int seq;
 
 	WARN_ON(timekeeping_suspended);
@@ -352,13 +354,14 @@ void ktime_get_ts(struct timespec *ts)
 	do {
 		seq = read_seqbegin(&tk->lock);
 		ts->tv_sec = tk->xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(tk);
+		nsec = timekeeping_get_ns(tk);
 		tomono = tk->wall_to_monotonic;
 
 	} while (read_seqretry(&tk->lock, seq));
 
-	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
-				ts->tv_nsec + tomono.tv_nsec);
+	ts->tv_sec += tomono.tv_sec;
+	ts->tv_nsec = 0;
+	timespec_add_ns(ts, nsec + tomono.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
@@ -1244,6 +1247,7 @@ void get_monotonic_boottime(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	struct timespec tomono, sleep;
+	s64 nsec;
 	unsigned int seq;
 
 	WARN_ON(timekeeping_suspended);
@@ -1251,14 +1255,15 @@ void get_monotonic_boottime(struct timespec *ts)
 	do {
 		seq = read_seqbegin(&tk->lock);
 		ts->tv_sec = tk->xtime_sec;
-		ts->tv_nsec = timekeeping_get_ns(tk);
+		nsec = timekeeping_get_ns(tk);
 		tomono = tk->wall_to_monotonic;
 		sleep = tk->total_sleep_time;
 
 	} while (read_seqretry(&tk->lock, seq));
 
-	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
-				ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
+	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
+	ts->tv_nsec = 0;
+	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
 
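
Note: all three timekeeping hunks apply one fix: timekeeping_get_ns() may return a nanosecond count at or above NSEC_PER_SEC, so assigning it straight to ts->tv_nsec could publish an unnormalized timespec. The patched code accumulates the raw count in a 64-bit value, zeroes tv_nsec, and lets timespec_add_ns() carry whole seconds into tv_sec. A userspace model of that normalization (timespec_add_ns_model is a made-up name; like the kernel helper, it assumes ns >= 0 and a tv_nsec already below NSEC_PER_SEC):

    #include <stdint.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    static void timespec_add_ns_model(struct timespec *ts, int64_t ns)
    {
    	ts->tv_sec += ns / NSEC_PER_SEC;
    	ts->tv_nsec += ns % NSEC_PER_SEC;
    	if (ts->tv_nsec >= NSEC_PER_SEC) {	/* at most one carry needed */
    		ts->tv_nsec -= NSEC_PER_SEC;
    		ts->tv_sec++;
    	}
    }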
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1e1373bcb3e3..3c5a79e2134c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1349,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	if (worker_maybe_bind_and_lock(worker))
-		worker_clr_flags(worker, WORKER_REBIND);
+	worker_maybe_bind_and_lock(worker);
+
+	/*
+	 * %WORKER_REBIND must be cleared even if the above binding failed;
+	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
+	 * stuck by calling idle_worker_rebind() prematurely. If CPU went
+	 * down again inbetween, %WORKER_UNBOUND would be set, so clearing
+	 * %WORKER_REBIND is always safe.
+	 */
+	worker_clr_flags(worker, WORKER_REBIND);
 
 	spin_unlock_irq(&gcwq->lock);
 }
@@ -3568,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-	struct completion completion;
+	struct work_struct work;
 	long (*fn)(void *);
 	void *arg;
 	long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-	struct work_for_cpu *wfc = _wfc;
+	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
 	wfc->ret = wfc->fn(wfc->arg);
-	complete(&wfc->completion);
-	return 0;
 }
 
 /**
@@ -3594,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-	struct task_struct *sub_thread;
-	struct work_for_cpu wfc = {
-		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-		.fn = fn,
-		.arg = arg,
-	};
+	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-	if (IS_ERR(sub_thread))
-		return PTR_ERR(sub_thread);
-	kthread_bind(sub_thread, cpu);
-	wake_up_process(sub_thread);
-	wait_for_completion(&wfc.completion);
+	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+	schedule_work_on(cpu, &wfc.work);
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
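
Note: work_on_cpu() now queues an on-stack work item on the target CPU and waits with flush_work(), rather than hand-rolling a bound kthread plus completion; the synchronous semantics are preserved because flush_work() only returns once the item has finished executing. A hedged usage sketch (query_cpu() and example() are made-up names, not from this commit):

    #include <linux/printk.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    /* Made-up callback: runs in workqueue context, bound to the CPU
     * passed to work_on_cpu(). */
    static long query_cpu(void *arg)
    {
    	return (long)smp_processor_id();
    }

    static void example(void)
    {
    	/* Prints 2 on a system where CPU 2 is online. */
    	pr_info("ran on cpu %ld\n", work_on_cpu(2, query_cpu, NULL));
    }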