author | Jiri Kosina <jkosina@suse.cz> | 2011-07-11 08:15:48 -0400
committer | Jiri Kosina <jkosina@suse.cz> | 2011-07-11 08:15:55 -0400
commit | b7e9c223be8ce335e30f2cf6ba588e6a4092275c (patch)
tree | 2d1e3b75606abc18df7ad65e51ac3f90cd68b38d /kernel
parent | c172d82500a6cf3c32d1e650722a1055d72ce858 (diff)
parent | e3bbfa78bab125f58b831b5f7f45b5a305091d72 (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply pending patches that
are based on newer code already present upstream.
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/exit.c | 31
-rw-r--r-- | kernel/gcov/Kconfig | 3
-rw-r--r-- | kernel/irq/manage.c | 3
-rw-r--r-- | kernel/jump_label.c | 14
-rw-r--r-- | kernel/kmod.c | 16
-rw-r--r-- | kernel/power/snapshot.c | 6
-rw-r--r-- | kernel/power/user.c | 4
-rw-r--r-- | kernel/rcutree.c | 398
-rw-r--r-- | kernel/rcutree.h | 12
-rw-r--r-- | kernel/rcutree_plugin.h | 419
-rw-r--r-- | kernel/rcutree_trace.c | 32
-rw-r--r-- | kernel/resource.c | 116
-rw-r--r-- | kernel/sched.c | 9
-rw-r--r-- | kernel/sched_rt.c | 6
-rw-r--r-- | kernel/signal.c | 2
-rw-r--r-- | kernel/smp.c | 5
-rw-r--r-- | kernel/softirq.c | 2
-rw-r--r-- | kernel/taskstats.c | 15
-rw-r--r-- | kernel/time/alarmtimer.c | 158
-rw-r--r-- | kernel/time/clocksource.c | 24
-rw-r--r-- | kernel/trace/ftrace.c | 9
-rw-r--r-- | kernel/trace/trace_kprobe.c | 8
-rw-r--r-- | kernel/trace/trace_printk.c | 5
23 files changed, 743 insertions, 554 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 07dc154fc799..14c9b63a96c3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -560,29 +560,28 @@ void exit_files(struct task_struct *tsk)
 
 #ifdef CONFIG_MM_OWNER
 /*
- * Task p is exiting and it owned mm, lets find a new owner for it
+ * A task is exiting. If it owned this mm, find a new owner for the mm.
  */
-static inline int
-mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
-{
-	/*
-	 * If there are other users of the mm and the owner (us) is exiting
-	 * we need to find a new owner to take on the responsibility.
-	 */
-	if (atomic_read(&mm->mm_users) <= 1)
-		return 0;
-	if (mm->owner != p)
-		return 0;
-	return 1;
-}
-
 void mm_update_next_owner(struct mm_struct *mm)
 {
 	struct task_struct *c, *g, *p = current;
 
 retry:
-	if (!mm_need_new_owner(mm, p))
+	/*
+	 * If the exiting or execing task is not the owner, it's
+	 * someone else's problem.
+	 */
+	if (mm->owner != p)
 		return;
+	/*
+	 * The current owner is exiting/execing and there are no other
+	 * candidates.  Do not leave the mm pointing to a possibly
+	 * freed task structure.
+	 */
+	if (atomic_read(&mm->mm_users) <= 1) {
+		mm->owner = NULL;
+		return;
+	}
 
 	read_lock(&tasklist_lock);
 	/*
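
For readers skimming the hunk above, here is a condensed, userspace-style sketch of the new owner-handoff checks. This is illustration only, not part of the commit; the stand-in struct and plain int field are assumptions, and the real code uses struct mm_struct, atomic_t and tasklist_lock:

```c
struct task;                      /* stand-in for struct task_struct */
struct mm {
	int mm_users;             /* stand-in for atomic_t mm_users */
	struct task *owner;
};

static void update_next_owner(struct mm *mm, struct task *exiting)
{
	/* Not the owner: the exit is someone else's problem. */
	if (mm->owner != exiting)
		return;

	/* Owner was the last user: clear the pointer rather than leave it stale. */
	if (mm->mm_users <= 1) {
		mm->owner = NULL;
		return;
	}

	/* Otherwise the kernel searches for a new owner under tasklist_lock. */
}
```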
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index b8cadf70b1fb..5bf924d80b5c 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -2,7 +2,8 @@ menu "GCOV-based kernel profiling"
 
 config GCOV_KERNEL
 	bool "Enable gcov-based kernel profiling"
-	depends on DEBUG_FS && CONSTRUCTORS
+	depends on DEBUG_FS
+	select CONSTRUCTORS
 	default n
 	---help---
 	This option enables gcov-based code profiling (e.g. for code coverage
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d64bafb1afd0..0a7840aeb0fb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 	int ret = 0;
 
+	if (!desc)
+		return -EINVAL;
+
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index fa27e750dbc0..a8ce45097f3d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
 
 static void jump_label_update(struct jump_label_key *key, int enable)
 {
-	struct jump_entry *entry = key->entries;
-
-	/* if there are no users, entry can be NULL */
-	if (entry)
-		__jump_label_update(key, entry, __stop___jump_table, enable);
+	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
 
 #ifdef CONFIG_MODULES
+	struct module *mod = __module_address((jump_label_t)key);
+
 	__jump_label_mod_update(key, enable);
+
+	if (mod)
+		stop = mod->jump_entries + mod->num_jump_entries;
 #endif
+	/* if there are no users, entry can be NULL */
+	if (entry)
+		__jump_label_update(key, entry, stop, enable);
 }
 
 #endif
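
A brief sketch (not from this commit) of the boundary rule the rewritten jump_label_update() now follows: a key defined in a module must be updated against that module's own jump-entry table rather than the core kernel's __stop___jump_table. The struct and function names below are illustrative stand-ins:

```c
struct jump_entry;                     /* opaque here; defined by jump_label */

/* Illustrative stand-in for the module fields consulted above. */
struct mod_entries {
	struct jump_entry *jump_entries;
	unsigned int num_jump_entries;
};

/* Pick the end-of-table sentinel: the module's table end when the key
 * belongs to a module, otherwise the core kernel's table end. */
static struct jump_entry *pick_stop(struct jump_entry *core_stop,
				    struct mod_entries *mod)
{
	return mod ? mod->jump_entries + mod->num_jump_entries : core_stop;
}
```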
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ad6a81c58b44..47613dfb7b28 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
 	 */
 	set_user_nice(current, 0);
 
-	if (sub_info->init) {
-		retval = sub_info->init(sub_info);
-		if (retval)
-			goto fail;
-	}
-
 	retval = -ENOMEM;
 	new = prepare_kernel_cred(current);
 	if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
 					     new->cap_inheritable);
 	spin_unlock(&umh_sysctl_lock);
 
+	if (sub_info->init) {
+		retval = sub_info->init(sub_info, new);
+		if (retval) {
+			abort_creds(new);
+			goto fail;
+		}
+	}
+
 	commit_creds(new);
 
 	retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * context in which call_usermodehelper_exec is called.
  */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-		    int (*init)(struct subprocess_info *info),
+		    int (*init)(struct subprocess_info *info, struct cred *new),
 		    void (*cleanup)(struct subprocess_info *info),
 		    void *data)
 {
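
For context (illustration only, not code from this commit), a sketch of a caller-side init hook under the changed interface: the callback now also receives the not-yet-committed credentials, so it can adjust them before commit_creds(), and a failure now aborts those credentials. The names example_umh_init and example_register are hypothetical:

```c
#include <linux/kmod.h>
#include <linux/cred.h>

/* Hypothetical init hook with the new signature; it runs in the helper's
 * context between prepare_kernel_cred() and commit_creds(). */
static int example_umh_init(struct subprocess_info *info, struct cred *new)
{
	/* A caller could adjust "new" here; a non-zero return aborts. */
	return 0;
}

/* Registration is unchanged apart from the callback's signature. */
static void example_register(struct subprocess_info *info)
{
	call_usermodehelper_setfns(info, example_umh_init, NULL, NULL);
}
```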
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ace55889f702..06efa54f93d6 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)
 		to_free_highmem = alloc_highmem - save;
 	} else {
 		to_free_highmem = 0;
-		to_free_normal -= save - alloc_highmem;
+		save -= alloc_highmem;
+		if (to_free_normal > save)
+			to_free_normal -= save;
+		else
+			to_free_normal = 0;
 	}
 
 	memory_bm_position_reset(&copy_bm);
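
The hunk above guards an unsigned subtraction. As a standalone illustration (made-up numbers, not kernel code), the old expression could underflow when more pages had to be kept than were queued to be freed, while the new form clamps at zero:

```c
#include <stdio.h>

int main(void)
{
	unsigned long to_free_normal = 10;   /* pages queued to be freed */
	unsigned long save = 50;             /* pages that must be kept  */

	/* Old behaviour: unsigned underflow wraps to a huge page count. */
	unsigned long old_result = to_free_normal - save;

	/* New behaviour: clamp to zero instead of underflowing. */
	unsigned long new_result = to_free_normal > save ?
				   to_free_normal - save : 0;

	printf("old=%lu new=%lu\n", old_result, new_result);
	return 0;
}
```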
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 7d02d33be699..42ddbc6f0de6 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		if (error)
 			pm_notifier_call_chain(PM_POST_RESTORE);
 	}
-	if (error)
+	if (error) {
+		free_basic_memory_bitmaps();
 		atomic_inc(&snapshot_device_available);
+	}
 	data->frozen = 0;
 	data->ready = 0;
 	data->platform_support = 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
87 | int rcu_scheduler_active __read_mostly; | 87 | int rcu_scheduler_active __read_mostly; |
88 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | 88 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); |
89 | 89 | ||
90 | #ifdef CONFIG_RCU_BOOST | ||
91 | |||
90 | /* | 92 | /* |
91 | * Control variables for per-CPU and per-rcu_node kthreads. These | 93 | * Control variables for per-CPU and per-rcu_node kthreads. These |
92 | * handle all flavors of RCU. | 94 | * handle all flavors of RCU. |
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); | |||
98 | DEFINE_PER_CPU(char, rcu_cpu_has_work); | 100 | DEFINE_PER_CPU(char, rcu_cpu_has_work); |
99 | static char rcu_kthreads_spawnable; | 101 | static char rcu_kthreads_spawnable; |
100 | 102 | ||
103 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
104 | |||
101 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); | 105 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); |
102 | static void invoke_rcu_cpu_kthread(void); | 106 | static void invoke_rcu_core(void); |
107 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | ||
103 | 108 | ||
104 | #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ | 109 | #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ |
105 | 110 | ||
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
1088 | int need_report = 0; | 1093 | int need_report = 0; |
1089 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); | 1094 | struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
1090 | struct rcu_node *rnp; | 1095 | struct rcu_node *rnp; |
1091 | struct task_struct *t; | ||
1092 | 1096 | ||
1093 | /* Stop the CPU's kthread. */ | 1097 | rcu_stop_cpu_kthread(cpu); |
1094 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1095 | if (t != NULL) { | ||
1096 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | ||
1097 | kthread_stop(t); | ||
1098 | } | ||
1099 | 1098 | ||
1100 | /* Exclude any attempts to start a new grace period. */ | 1099 | /* Exclude any attempts to start a new grace period. */ |
1101 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | 1100 | raw_spin_lock_irqsave(&rsp->onofflock, flags); |
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1231 | 1230 | ||
1232 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1231 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
1233 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | 1232 | if (cpu_has_callbacks_ready_to_invoke(rdp)) |
1234 | invoke_rcu_cpu_kthread(); | 1233 | invoke_rcu_core(); |
1235 | } | 1234 | } |
1236 | 1235 | ||
1237 | /* | 1236 | /* |
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user) | |||
1277 | } | 1276 | } |
1278 | rcu_preempt_check_callbacks(cpu); | 1277 | rcu_preempt_check_callbacks(cpu); |
1279 | if (rcu_pending(cpu)) | 1278 | if (rcu_pending(cpu)) |
1280 | invoke_rcu_cpu_kthread(); | 1279 | invoke_rcu_core(); |
1281 | } | 1280 | } |
1282 | 1281 | ||
1283 | #ifdef CONFIG_SMP | 1282 | #ifdef CONFIG_SMP |
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1442 | } | 1441 | } |
1443 | 1442 | ||
1444 | /* If there are callbacks ready, invoke them. */ | 1443 | /* If there are callbacks ready, invoke them. */ |
1445 | rcu_do_batch(rsp, rdp); | 1444 | if (cpu_has_callbacks_ready_to_invoke(rdp)) |
1445 | invoke_rcu_callbacks(rsp, rdp); | ||
1446 | } | 1446 | } |
1447 | 1447 | ||
1448 | /* | 1448 | /* |
1449 | * Do softirq processing for the current CPU. | 1449 | * Do softirq processing for the current CPU. |
1450 | */ | 1450 | */ |
1451 | static void rcu_process_callbacks(void) | 1451 | static void rcu_process_callbacks(struct softirq_action *unused) |
1452 | { | 1452 | { |
1453 | __rcu_process_callbacks(&rcu_sched_state, | 1453 | __rcu_process_callbacks(&rcu_sched_state, |
1454 | &__get_cpu_var(rcu_sched_data)); | 1454 | &__get_cpu_var(rcu_sched_data)); |
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void) | |||
1465 | * the current CPU with interrupts disabled, the rcu_cpu_kthread_task | 1465 | * the current CPU with interrupts disabled, the rcu_cpu_kthread_task |
1466 | * cannot disappear out from under us. | 1466 | * cannot disappear out from under us. |
1467 | */ | 1467 | */ |
1468 | static void invoke_rcu_cpu_kthread(void) | 1468 | static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) |
1469 | { | ||
1470 | unsigned long flags; | ||
1471 | |||
1472 | local_irq_save(flags); | ||
1473 | __this_cpu_write(rcu_cpu_has_work, 1); | ||
1474 | if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { | ||
1475 | local_irq_restore(flags); | ||
1476 | return; | ||
1477 | } | ||
1478 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | ||
1479 | local_irq_restore(flags); | ||
1480 | } | ||
1481 | |||
1482 | /* | ||
1483 | * Wake up the specified per-rcu_node-structure kthread. | ||
1484 | * Because the per-rcu_node kthreads are immortal, we don't need | ||
1485 | * to do anything to keep them alive. | ||
1486 | */ | ||
1487 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | ||
1488 | { | ||
1489 | struct task_struct *t; | ||
1490 | |||
1491 | t = rnp->node_kthread_task; | ||
1492 | if (t != NULL) | ||
1493 | wake_up_process(t); | ||
1494 | } | ||
1495 | |||
1496 | /* | ||
1497 | * Set the specified CPU's kthread to run RT or not, as specified by | ||
1498 | * the to_rt argument. The CPU-hotplug locks are held, so the task | ||
1499 | * is not going away. | ||
1500 | */ | ||
1501 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
1502 | { | ||
1503 | int policy; | ||
1504 | struct sched_param sp; | ||
1505 | struct task_struct *t; | ||
1506 | |||
1507 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1508 | if (t == NULL) | ||
1509 | return; | ||
1510 | if (to_rt) { | ||
1511 | policy = SCHED_FIFO; | ||
1512 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1513 | } else { | ||
1514 | policy = SCHED_NORMAL; | ||
1515 | sp.sched_priority = 0; | ||
1516 | } | ||
1517 | sched_setscheduler_nocheck(t, policy, &sp); | ||
1518 | } | ||
1519 | |||
1520 | /* | ||
1521 | * Timer handler to initiate the waking up of per-CPU kthreads that | ||
1522 | * have yielded the CPU due to excess numbers of RCU callbacks. | ||
1523 | * We wake up the per-rcu_node kthread, which in turn will wake up | ||
1524 | * the booster kthread. | ||
1525 | */ | ||
1526 | static void rcu_cpu_kthread_timer(unsigned long arg) | ||
1527 | { | ||
1528 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | ||
1529 | struct rcu_node *rnp = rdp->mynode; | ||
1530 | |||
1531 | atomic_or(rdp->grpmask, &rnp->wakemask); | ||
1532 | invoke_rcu_node_kthread(rnp); | ||
1533 | } | ||
1534 | |||
1535 | /* | ||
1536 | * Drop to non-real-time priority and yield, but only after posting a | ||
1537 | * timer that will cause us to regain our real-time priority if we | ||
1538 | * remain preempted. Either way, we restore our real-time priority | ||
1539 | * before returning. | ||
1540 | */ | ||
1541 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | ||
1542 | { | ||
1543 | struct sched_param sp; | ||
1544 | struct timer_list yield_timer; | ||
1545 | |||
1546 | setup_timer_on_stack(&yield_timer, f, arg); | ||
1547 | mod_timer(&yield_timer, jiffies + 2); | ||
1548 | sp.sched_priority = 0; | ||
1549 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | ||
1550 | set_user_nice(current, 19); | ||
1551 | schedule(); | ||
1552 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1553 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | ||
1554 | del_timer(&yield_timer); | ||
1555 | } | ||
1556 | |||
1557 | /* | ||
1558 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | ||
1559 | * This can happen while the corresponding CPU is either coming online | ||
1560 | * or going offline. We cannot wait until the CPU is fully online | ||
1561 | * before starting the kthread, because the various notifier functions | ||
1562 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | ||
1563 | * the corresponding CPU is online. | ||
1564 | * | ||
1565 | * Return 1 if the kthread needs to stop, 0 otherwise. | ||
1566 | * | ||
1567 | * Caller must disable bh. This function can momentarily enable it. | ||
1568 | */ | ||
1569 | static int rcu_cpu_kthread_should_stop(int cpu) | ||
1570 | { | ||
1571 | while (cpu_is_offline(cpu) || | ||
1572 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | ||
1573 | smp_processor_id() != cpu) { | ||
1574 | if (kthread_should_stop()) | ||
1575 | return 1; | ||
1576 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | ||
1577 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | ||
1578 | local_bh_enable(); | ||
1579 | schedule_timeout_uninterruptible(1); | ||
1580 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | ||
1581 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
1582 | local_bh_disable(); | ||
1583 | } | ||
1584 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1585 | return 0; | ||
1586 | } | ||
1587 | |||
1588 | /* | ||
1589 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | ||
1590 | * earlier RCU softirq. | ||
1591 | */ | ||
1592 | static int rcu_cpu_kthread(void *arg) | ||
1593 | { | ||
1594 | int cpu = (int)(long)arg; | ||
1595 | unsigned long flags; | ||
1596 | int spincnt = 0; | ||
1597 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | ||
1598 | char work; | ||
1599 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | ||
1600 | |||
1601 | for (;;) { | ||
1602 | *statusp = RCU_KTHREAD_WAITING; | ||
1603 | rcu_wait(*workp != 0 || kthread_should_stop()); | ||
1604 | local_bh_disable(); | ||
1605 | if (rcu_cpu_kthread_should_stop(cpu)) { | ||
1606 | local_bh_enable(); | ||
1607 | break; | ||
1608 | } | ||
1609 | *statusp = RCU_KTHREAD_RUNNING; | ||
1610 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | ||
1611 | local_irq_save(flags); | ||
1612 | work = *workp; | ||
1613 | *workp = 0; | ||
1614 | local_irq_restore(flags); | ||
1615 | if (work) | ||
1616 | rcu_process_callbacks(); | ||
1617 | local_bh_enable(); | ||
1618 | if (*workp != 0) | ||
1619 | spincnt++; | ||
1620 | else | ||
1621 | spincnt = 0; | ||
1622 | if (spincnt > 10) { | ||
1623 | *statusp = RCU_KTHREAD_YIELDING; | ||
1624 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | ||
1625 | spincnt = 0; | ||
1626 | } | ||
1627 | } | ||
1628 | *statusp = RCU_KTHREAD_STOPPED; | ||
1629 | return 0; | ||
1630 | } | ||
1631 | |||
1632 | /* | ||
1633 | * Spawn a per-CPU kthread, setting up affinity and priority. | ||
1634 | * Because the CPU hotplug lock is held, no other CPU will be attempting | ||
1635 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | ||
1636 | * attempting to access it during boot, but the locking in kthread_bind() | ||
1637 | * will enforce sufficient ordering. | ||
1638 | */ | ||
1639 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | ||
1640 | { | 1469 | { |
1641 | struct sched_param sp; | 1470 | if (likely(!rsp->boost)) { |
1642 | struct task_struct *t; | 1471 | rcu_do_batch(rsp, rdp); |
1643 | |||
1644 | if (!rcu_kthreads_spawnable || | ||
1645 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) | ||
1646 | return 0; | ||
1647 | t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); | ||
1648 | if (IS_ERR(t)) | ||
1649 | return PTR_ERR(t); | ||
1650 | kthread_bind(t, cpu); | ||
1651 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1652 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | ||
1653 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | ||
1654 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1655 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1656 | return 0; | ||
1657 | } | ||
1658 | |||
1659 | /* | ||
1660 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | ||
1661 | * kthreads when needed. We ignore requests to wake up kthreads | ||
1662 | * for offline CPUs, which is OK because force_quiescent_state() | ||
1663 | * takes care of this case. | ||
1664 | */ | ||
1665 | static int rcu_node_kthread(void *arg) | ||
1666 | { | ||
1667 | int cpu; | ||
1668 | unsigned long flags; | ||
1669 | unsigned long mask; | ||
1670 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
1671 | struct sched_param sp; | ||
1672 | struct task_struct *t; | ||
1673 | |||
1674 | for (;;) { | ||
1675 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | ||
1676 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | ||
1677 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | ||
1678 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1679 | mask = atomic_xchg(&rnp->wakemask, 0); | ||
1680 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
1681 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | ||
1682 | if ((mask & 0x1) == 0) | ||
1683 | continue; | ||
1684 | preempt_disable(); | ||
1685 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1686 | if (!cpu_online(cpu) || t == NULL) { | ||
1687 | preempt_enable(); | ||
1688 | continue; | ||
1689 | } | ||
1690 | per_cpu(rcu_cpu_has_work, cpu) = 1; | ||
1691 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1692 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1693 | preempt_enable(); | ||
1694 | } | ||
1695 | } | ||
1696 | /* NOTREACHED */ | ||
1697 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | ||
1698 | return 0; | ||
1699 | } | ||
1700 | |||
1701 | /* | ||
1702 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | ||
1703 | * served by the rcu_node in question. The CPU hotplug lock is still | ||
1704 | * held, so the value of rnp->qsmaskinit will be stable. | ||
1705 | * | ||
1706 | * We don't include outgoingcpu in the affinity set, use -1 if there is | ||
1707 | * no outgoing CPU. If there are no CPUs left in the affinity set, | ||
1708 | * this function allows the kthread to execute on any CPU. | ||
1709 | */ | ||
1710 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
1711 | { | ||
1712 | cpumask_var_t cm; | ||
1713 | int cpu; | ||
1714 | unsigned long mask = rnp->qsmaskinit; | ||
1715 | |||
1716 | if (rnp->node_kthread_task == NULL) | ||
1717 | return; | ||
1718 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | ||
1719 | return; | 1472 | return; |
1720 | cpumask_clear(cm); | ||
1721 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | ||
1722 | if ((mask & 0x1) && cpu != outgoingcpu) | ||
1723 | cpumask_set_cpu(cpu, cm); | ||
1724 | if (cpumask_weight(cm) == 0) { | ||
1725 | cpumask_setall(cm); | ||
1726 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | ||
1727 | cpumask_clear_cpu(cpu, cm); | ||
1728 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | ||
1729 | } | 1473 | } |
1730 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | 1474 | invoke_rcu_callbacks_kthread(); |
1731 | rcu_boost_kthread_setaffinity(rnp, cm); | ||
1732 | free_cpumask_var(cm); | ||
1733 | } | 1475 | } |
1734 | 1476 | ||
1735 | /* | 1477 | static void invoke_rcu_core(void) |
1736 | * Spawn a per-rcu_node kthread, setting priority and affinity. | ||
1737 | * Called during boot before online/offline can happen, or, if | ||
1738 | * during runtime, with the main CPU-hotplug locks held. So only | ||
1739 | * one of these can be executing at a time. | ||
1740 | */ | ||
1741 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | ||
1742 | struct rcu_node *rnp) | ||
1743 | { | 1478 | { |
1744 | unsigned long flags; | 1479 | raise_softirq(RCU_SOFTIRQ); |
1745 | int rnp_index = rnp - &rsp->node[0]; | ||
1746 | struct sched_param sp; | ||
1747 | struct task_struct *t; | ||
1748 | |||
1749 | if (!rcu_kthreads_spawnable || | ||
1750 | rnp->qsmaskinit == 0) | ||
1751 | return 0; | ||
1752 | if (rnp->node_kthread_task == NULL) { | ||
1753 | t = kthread_create(rcu_node_kthread, (void *)rnp, | ||
1754 | "rcun%d", rnp_index); | ||
1755 | if (IS_ERR(t)) | ||
1756 | return PTR_ERR(t); | ||
1757 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1758 | rnp->node_kthread_task = t; | ||
1759 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1760 | sp.sched_priority = 99; | ||
1761 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1762 | } | ||
1763 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | ||
1764 | } | 1480 | } |
1765 | 1481 | ||
1766 | static void rcu_wake_one_boost_kthread(struct rcu_node *rnp); | ||
1767 | |||
1768 | /* | ||
1769 | * Spawn all kthreads -- called as soon as the scheduler is running. | ||
1770 | */ | ||
1771 | static int __init rcu_spawn_kthreads(void) | ||
1772 | { | ||
1773 | int cpu; | ||
1774 | struct rcu_node *rnp; | ||
1775 | struct task_struct *t; | ||
1776 | |||
1777 | rcu_kthreads_spawnable = 1; | ||
1778 | for_each_possible_cpu(cpu) { | ||
1779 | per_cpu(rcu_cpu_has_work, cpu) = 0; | ||
1780 | if (cpu_online(cpu)) { | ||
1781 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
1782 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1783 | if (t) | ||
1784 | wake_up_process(t); | ||
1785 | } | ||
1786 | } | ||
1787 | rnp = rcu_get_root(rcu_state); | ||
1788 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1789 | if (rnp->node_kthread_task) | ||
1790 | wake_up_process(rnp->node_kthread_task); | ||
1791 | if (NUM_RCU_NODES > 1) { | ||
1792 | rcu_for_each_leaf_node(rcu_state, rnp) { | ||
1793 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1794 | t = rnp->node_kthread_task; | ||
1795 | if (t) | ||
1796 | wake_up_process(t); | ||
1797 | rcu_wake_one_boost_kthread(rnp); | ||
1798 | } | ||
1799 | } | ||
1800 | return 0; | ||
1801 | } | ||
1802 | early_initcall(rcu_spawn_kthreads); | ||
1803 | |||
1804 | static void | 1482 | static void |
1805 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | 1483 | __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), |
1806 | struct rcu_state *rsp) | 1484 | struct rcu_state *rsp) |
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu) | |||
2207 | rcu_preempt_init_percpu_data(cpu); | 1885 | rcu_preempt_init_percpu_data(cpu); |
2208 | } | 1886 | } |
2209 | 1887 | ||
2210 | static void __cpuinit rcu_prepare_kthreads(int cpu) | ||
2211 | { | ||
2212 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
2213 | struct rcu_node *rnp = rdp->mynode; | ||
2214 | |||
2215 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | ||
2216 | if (rcu_kthreads_spawnable) { | ||
2217 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
2218 | if (rnp->node_kthread_task == NULL) | ||
2219 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
2220 | } | ||
2221 | } | ||
2222 | |||
2223 | /* | ||
2224 | * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state, | ||
2225 | * but the RCU threads are woken on demand, and if demand is low this | ||
2226 | * could be a while triggering the hung task watchdog. | ||
2227 | * | ||
2228 | * In order to avoid this, poke all tasks once the CPU is fully | ||
2229 | * up and running. | ||
2230 | */ | ||
2231 | static void __cpuinit rcu_online_kthreads(int cpu) | ||
2232 | { | ||
2233 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
2234 | struct rcu_node *rnp = rdp->mynode; | ||
2235 | struct task_struct *t; | ||
2236 | |||
2237 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
2238 | if (t) | ||
2239 | wake_up_process(t); | ||
2240 | |||
2241 | t = rnp->node_kthread_task; | ||
2242 | if (t) | ||
2243 | wake_up_process(t); | ||
2244 | |||
2245 | rcu_wake_one_boost_kthread(rnp); | ||
2246 | } | ||
2247 | |||
2248 | /* | 1888 | /* |
2249 | * Handle CPU online/offline notification events. | 1889 | * Handle CPU online/offline notification events. |
2250 | */ | 1890 | */ |
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
2262 | rcu_prepare_kthreads(cpu); | 1902 | rcu_prepare_kthreads(cpu); |
2263 | break; | 1903 | break; |
2264 | case CPU_ONLINE: | 1904 | case CPU_ONLINE: |
2265 | rcu_online_kthreads(cpu); | ||
2266 | case CPU_DOWN_FAILED: | 1905 | case CPU_DOWN_FAILED: |
2267 | rcu_node_kthread_setaffinity(rnp, -1); | 1906 | rcu_node_kthread_setaffinity(rnp, -1); |
2268 | rcu_cpu_kthread_setrt(cpu, 1); | 1907 | rcu_cpu_kthread_setrt(cpu, 1); |
@@ -2410,6 +2049,7 @@ void __init rcu_init(void) | |||
2410 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); | 2049 | rcu_init_one(&rcu_sched_state, &rcu_sched_data); |
2411 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); | 2050 | rcu_init_one(&rcu_bh_state, &rcu_bh_data); |
2412 | __rcu_init_preempt(); | 2051 | __rcu_init_preempt(); |
2052 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
2413 | 2053 | ||
2414 | /* | 2054 | /* |
2415 | * We don't need protection against CPU-hotplug here because | 2055 | * We don't need protection against CPU-hotplug here because |
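
As a reading aid for the large kernel/rcutree.c hunk above (this is a condensed restatement, not additional code from the commit): callback processing is routed back through the RCU_SOFTIRQ handler by default, and the per-CPU kthread path is taken only when an rcu_state is subject to priority boosting:

```c
/* Condensed restatement of the new dispatch path (illustrative only). */
static void invoke_rcu_core(void)
{
	raise_softirq(RCU_SOFTIRQ);            /* softirq is the default path */
}

static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);        /* run callbacks from softirq */
		return;
	}
	invoke_rcu_callbacks_kthread();        /* CONFIG_RCU_BOOST: wake the kthread */
}
```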
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b4aaea..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
369 | /* period because */ | 369 | /* period because */ |
370 | /* force_quiescent_state() */ | 370 | /* force_quiescent_state() */ |
371 | /* was running. */ | 371 | /* was running. */ |
372 | u8 boost; /* Subject to priority boost. */ | ||
372 | unsigned long gpnum; /* Current gp number. */ | 373 | unsigned long gpnum; /* Current gp number. */ |
373 | unsigned long completed; /* # of last completed gp. */ | 374 | unsigned long completed; /* # of last completed gp. */ |
374 | 375 | ||
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); | |||
426 | #ifdef CONFIG_HOTPLUG_CPU | 427 | #ifdef CONFIG_HOTPLUG_CPU |
427 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | 428 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, |
428 | unsigned long flags); | 429 | unsigned long flags); |
430 | static void rcu_stop_cpu_kthread(int cpu); | ||
429 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 431 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
430 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); | 432 | static void rcu_print_detail_task_stall(struct rcu_state *rsp); |
431 | static void rcu_print_task_stall(struct rcu_node *rnp); | 433 | static void rcu_print_task_stall(struct rcu_node *rnp); |
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void); | |||
450 | static void __init __rcu_init_preempt(void); | 452 | static void __init __rcu_init_preempt(void); |
451 | static void rcu_needs_cpu_flush(void); | 453 | static void rcu_needs_cpu_flush(void); |
452 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); | 454 | static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); |
455 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); | ||
456 | static void invoke_rcu_callbacks_kthread(void); | ||
457 | #ifdef CONFIG_RCU_BOOST | ||
458 | static void rcu_preempt_do_callbacks(void); | ||
453 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | 459 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, |
454 | cpumask_var_t cm); | 460 | cpumask_var_t cm); |
455 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); | ||
456 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | 461 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, |
457 | struct rcu_node *rnp, | 462 | struct rcu_node *rnp, |
458 | int rnp_index); | 463 | int rnp_index); |
464 | static void invoke_rcu_node_kthread(struct rcu_node *rnp); | ||
465 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg); | ||
466 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
467 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt); | ||
468 | static void __cpuinit rcu_prepare_kthreads(int cpu); | ||
459 | 469 | ||
460 | #endif /* #ifndef RCU_TREE_NONCORE */ | 470 | #endif /* #ifndef RCU_TREE_NONCORE */ |
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
602 | &__get_cpu_var(rcu_preempt_data)); | 602 | &__get_cpu_var(rcu_preempt_data)); |
603 | } | 603 | } |
604 | 604 | ||
605 | #ifdef CONFIG_RCU_BOOST | ||
606 | |||
607 | static void rcu_preempt_do_callbacks(void) | ||
608 | { | ||
609 | rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data)); | ||
610 | } | ||
611 | |||
612 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
613 | |||
605 | /* | 614 | /* |
606 | * Queue a preemptible-RCU callback for invocation after a grace period. | 615 | * Queue a preemptible-RCU callback for invocation after a grace period. |
607 | */ | 616 | */ |
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) | |||
1249 | } | 1258 | } |
1250 | 1259 | ||
1251 | /* | 1260 | /* |
1261 | * Wake up the per-CPU kthread to invoke RCU callbacks. | ||
1262 | */ | ||
1263 | static void invoke_rcu_callbacks_kthread(void) | ||
1264 | { | ||
1265 | unsigned long flags; | ||
1266 | |||
1267 | local_irq_save(flags); | ||
1268 | __this_cpu_write(rcu_cpu_has_work, 1); | ||
1269 | if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) { | ||
1270 | local_irq_restore(flags); | ||
1271 | return; | ||
1272 | } | ||
1273 | wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); | ||
1274 | local_irq_restore(flags); | ||
1275 | } | ||
1276 | |||
1277 | /* | ||
1252 | * Set the affinity of the boost kthread. The CPU-hotplug locks are | 1278 | * Set the affinity of the boost kthread. The CPU-hotplug locks are |
1253 | * held, so no one should be messing with the existence of the boost | 1279 | * held, so no one should be messing with the existence of the boost |
1254 | * kthread. | 1280 | * kthread. |
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |||
1288 | 1314 | ||
1289 | if (&rcu_preempt_state != rsp) | 1315 | if (&rcu_preempt_state != rsp) |
1290 | return 0; | 1316 | return 0; |
1317 | rsp->boost = 1; | ||
1291 | if (rnp->boost_kthread_task != NULL) | 1318 | if (rnp->boost_kthread_task != NULL) |
1292 | return 0; | 1319 | return 0; |
1293 | t = kthread_create(rcu_boost_kthread, (void *)rnp, | 1320 | t = kthread_create(rcu_boost_kthread, (void *)rnp, |
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | |||
1299 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1326 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1300 | sp.sched_priority = RCU_KTHREAD_PRIO; | 1327 | sp.sched_priority = RCU_KTHREAD_PRIO; |
1301 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | 1328 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
1329 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | ||
1302 | return 0; | 1330 | return 0; |
1303 | } | 1331 | } |
1304 | 1332 | ||
1305 | static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) | 1333 | #ifdef CONFIG_HOTPLUG_CPU |
1334 | |||
1335 | /* | ||
1336 | * Stop the RCU's per-CPU kthread when its CPU goes offline,. | ||
1337 | */ | ||
1338 | static void rcu_stop_cpu_kthread(int cpu) | ||
1306 | { | 1339 | { |
1307 | if (rnp->boost_kthread_task) | 1340 | struct task_struct *t; |
1308 | wake_up_process(rnp->boost_kthread_task); | 1341 | |
1342 | /* Stop the CPU's kthread. */ | ||
1343 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1344 | if (t != NULL) { | ||
1345 | per_cpu(rcu_cpu_kthread_task, cpu) = NULL; | ||
1346 | kthread_stop(t); | ||
1347 | } | ||
1348 | } | ||
1349 | |||
1350 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1351 | |||
1352 | static void rcu_kthread_do_work(void) | ||
1353 | { | ||
1354 | rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); | ||
1355 | rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | ||
1356 | rcu_preempt_do_callbacks(); | ||
1357 | } | ||
1358 | |||
1359 | /* | ||
1360 | * Wake up the specified per-rcu_node-structure kthread. | ||
1361 | * Because the per-rcu_node kthreads are immortal, we don't need | ||
1362 | * to do anything to keep them alive. | ||
1363 | */ | ||
1364 | static void invoke_rcu_node_kthread(struct rcu_node *rnp) | ||
1365 | { | ||
1366 | struct task_struct *t; | ||
1367 | |||
1368 | t = rnp->node_kthread_task; | ||
1369 | if (t != NULL) | ||
1370 | wake_up_process(t); | ||
1371 | } | ||
1372 | |||
1373 | /* | ||
1374 | * Set the specified CPU's kthread to run RT or not, as specified by | ||
1375 | * the to_rt argument. The CPU-hotplug locks are held, so the task | ||
1376 | * is not going away. | ||
1377 | */ | ||
1378 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
1379 | { | ||
1380 | int policy; | ||
1381 | struct sched_param sp; | ||
1382 | struct task_struct *t; | ||
1383 | |||
1384 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1385 | if (t == NULL) | ||
1386 | return; | ||
1387 | if (to_rt) { | ||
1388 | policy = SCHED_FIFO; | ||
1389 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1390 | } else { | ||
1391 | policy = SCHED_NORMAL; | ||
1392 | sp.sched_priority = 0; | ||
1393 | } | ||
1394 | sched_setscheduler_nocheck(t, policy, &sp); | ||
1395 | } | ||
1396 | |||
1397 | /* | ||
1398 | * Timer handler to initiate the waking up of per-CPU kthreads that | ||
1399 | * have yielded the CPU due to excess numbers of RCU callbacks. | ||
1400 | * We wake up the per-rcu_node kthread, which in turn will wake up | ||
1401 | * the booster kthread. | ||
1402 | */ | ||
1403 | static void rcu_cpu_kthread_timer(unsigned long arg) | ||
1404 | { | ||
1405 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); | ||
1406 | struct rcu_node *rnp = rdp->mynode; | ||
1407 | |||
1408 | atomic_or(rdp->grpmask, &rnp->wakemask); | ||
1409 | invoke_rcu_node_kthread(rnp); | ||
1410 | } | ||
1411 | |||
1412 | /* | ||
1413 | * Drop to non-real-time priority and yield, but only after posting a | ||
1414 | * timer that will cause us to regain our real-time priority if we | ||
1415 | * remain preempted. Either way, we restore our real-time priority | ||
1416 | * before returning. | ||
1417 | */ | ||
1418 | static void rcu_yield(void (*f)(unsigned long), unsigned long arg) | ||
1419 | { | ||
1420 | struct sched_param sp; | ||
1421 | struct timer_list yield_timer; | ||
1422 | |||
1423 | setup_timer_on_stack(&yield_timer, f, arg); | ||
1424 | mod_timer(&yield_timer, jiffies + 2); | ||
1425 | sp.sched_priority = 0; | ||
1426 | sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); | ||
1427 | set_user_nice(current, 19); | ||
1428 | schedule(); | ||
1429 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1430 | sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); | ||
1431 | del_timer(&yield_timer); | ||
1432 | } | ||
1433 | |||
1434 | /* | ||
1435 | * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. | ||
1436 | * This can happen while the corresponding CPU is either coming online | ||
1437 | * or going offline. We cannot wait until the CPU is fully online | ||
1438 | * before starting the kthread, because the various notifier functions | ||
1439 | * can wait for RCU grace periods. So we park rcu_cpu_kthread() until | ||
1440 | * the corresponding CPU is online. | ||
1441 | * | ||
1442 | * Return 1 if the kthread needs to stop, 0 otherwise. | ||
1443 | * | ||
1444 | * Caller must disable bh. This function can momentarily enable it. | ||
1445 | */ | ||
1446 | static int rcu_cpu_kthread_should_stop(int cpu) | ||
1447 | { | ||
1448 | while (cpu_is_offline(cpu) || | ||
1449 | !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || | ||
1450 | smp_processor_id() != cpu) { | ||
1451 | if (kthread_should_stop()) | ||
1452 | return 1; | ||
1453 | per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; | ||
1454 | per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); | ||
1455 | local_bh_enable(); | ||
1456 | schedule_timeout_uninterruptible(1); | ||
1457 | if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) | ||
1458 | set_cpus_allowed_ptr(current, cpumask_of(cpu)); | ||
1459 | local_bh_disable(); | ||
1460 | } | ||
1461 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1462 | return 0; | ||
1463 | } | ||
1464 | |||
1465 | /* | ||
1466 | * Per-CPU kernel thread that invokes RCU callbacks. This replaces the | ||
1467 | * earlier RCU softirq. | ||
1468 | */ | ||
1469 | static int rcu_cpu_kthread(void *arg) | ||
1470 | { | ||
1471 | int cpu = (int)(long)arg; | ||
1472 | unsigned long flags; | ||
1473 | int spincnt = 0; | ||
1474 | unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); | ||
1475 | char work; | ||
1476 | char *workp = &per_cpu(rcu_cpu_has_work, cpu); | ||
1477 | |||
1478 | for (;;) { | ||
1479 | *statusp = RCU_KTHREAD_WAITING; | ||
1480 | rcu_wait(*workp != 0 || kthread_should_stop()); | ||
1481 | local_bh_disable(); | ||
1482 | if (rcu_cpu_kthread_should_stop(cpu)) { | ||
1483 | local_bh_enable(); | ||
1484 | break; | ||
1485 | } | ||
1486 | *statusp = RCU_KTHREAD_RUNNING; | ||
1487 | per_cpu(rcu_cpu_kthread_loops, cpu)++; | ||
1488 | local_irq_save(flags); | ||
1489 | work = *workp; | ||
1490 | *workp = 0; | ||
1491 | local_irq_restore(flags); | ||
1492 | if (work) | ||
1493 | rcu_kthread_do_work(); | ||
1494 | local_bh_enable(); | ||
1495 | if (*workp != 0) | ||
1496 | spincnt++; | ||
1497 | else | ||
1498 | spincnt = 0; | ||
1499 | if (spincnt > 10) { | ||
1500 | *statusp = RCU_KTHREAD_YIELDING; | ||
1501 | rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); | ||
1502 | spincnt = 0; | ||
1503 | } | ||
1504 | } | ||
1505 | *statusp = RCU_KTHREAD_STOPPED; | ||
1506 | return 0; | ||
1507 | } | ||
1508 | |||
1509 | /* | ||
1510 | * Spawn a per-CPU kthread, setting up affinity and priority. | ||
1511 | * Because the CPU hotplug lock is held, no other CPU will be attempting | ||
1512 | * to manipulate rcu_cpu_kthread_task. There might be another CPU | ||
1513 | * attempting to access it during boot, but the locking in kthread_bind() | ||
1514 | * will enforce sufficient ordering. | ||
1515 | * | ||
1516 | * Please note that we cannot simply refuse to wake up the per-CPU | ||
1517 | * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, | ||
1518 | * which can result in softlockup complaints if the task ends up being | ||
1519 | * idle for more than a couple of minutes. | ||
1520 | * | ||
1521 | * However, please note also that we cannot bind the per-CPU kthread to its | ||
1522 | * CPU until that CPU is fully online. We also cannot wait until the | ||
1523 | * CPU is fully online before we create its per-CPU kthread, as this would | ||
1524 | * deadlock the system when CPU notifiers tried waiting for grace | ||
1525 | * periods. So we bind the per-CPU kthread to its CPU only if the CPU | ||
1526 | * is online. If its CPU is not yet fully online, then the code in | ||
1527 | * rcu_cpu_kthread() will wait until it is fully online, and then do | ||
1528 | * the binding. | ||
1529 | */ | ||
1530 | static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) | ||
1531 | { | ||
1532 | struct sched_param sp; | ||
1533 | struct task_struct *t; | ||
1534 | |||
1535 | if (!rcu_kthreads_spawnable || | ||
1536 | per_cpu(rcu_cpu_kthread_task, cpu) != NULL) | ||
1537 | return 0; | ||
1538 | t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu); | ||
1539 | if (IS_ERR(t)) | ||
1540 | return PTR_ERR(t); | ||
1541 | if (cpu_online(cpu)) | ||
1542 | kthread_bind(t, cpu); | ||
1543 | per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; | ||
1544 | WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); | ||
1545 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1546 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1547 | per_cpu(rcu_cpu_kthread_task, cpu) = t; | ||
1548 | wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ | ||
1549 | return 0; | ||
1550 | } | ||
1551 | |||
1552 | /* | ||
1553 | * Per-rcu_node kthread, which is in charge of waking up the per-CPU | ||
1554 | * kthreads when needed. We ignore requests to wake up kthreads | ||
1555 | * for offline CPUs, which is OK because force_quiescent_state() | ||
1556 | * takes care of this case. | ||
1557 | */ | ||
1558 | static int rcu_node_kthread(void *arg) | ||
1559 | { | ||
1560 | int cpu; | ||
1561 | unsigned long flags; | ||
1562 | unsigned long mask; | ||
1563 | struct rcu_node *rnp = (struct rcu_node *)arg; | ||
1564 | struct sched_param sp; | ||
1565 | struct task_struct *t; | ||
1566 | |||
1567 | for (;;) { | ||
1568 | rnp->node_kthread_status = RCU_KTHREAD_WAITING; | ||
1569 | rcu_wait(atomic_read(&rnp->wakemask) != 0); | ||
1570 | rnp->node_kthread_status = RCU_KTHREAD_RUNNING; | ||
1571 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1572 | mask = atomic_xchg(&rnp->wakemask, 0); | ||
1573 | rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ | ||
1574 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { | ||
1575 | if ((mask & 0x1) == 0) | ||
1576 | continue; | ||
1577 | preempt_disable(); | ||
1578 | t = per_cpu(rcu_cpu_kthread_task, cpu); | ||
1579 | if (!cpu_online(cpu) || t == NULL) { | ||
1580 | preempt_enable(); | ||
1581 | continue; | ||
1582 | } | ||
1583 | per_cpu(rcu_cpu_has_work, cpu) = 1; | ||
1584 | sp.sched_priority = RCU_KTHREAD_PRIO; | ||
1585 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1586 | preempt_enable(); | ||
1587 | } | ||
1588 | } | ||
1589 | /* NOTREACHED */ | ||
1590 | rnp->node_kthread_status = RCU_KTHREAD_STOPPED; | ||
1591 | return 0; | ||
1592 | } | ||
1593 | |||
1594 | /* | ||
1595 | * Set the per-rcu_node kthread's affinity to cover all CPUs that are | ||
1596 | * served by the rcu_node in question. The CPU hotplug lock is still | ||
1597 | * held, so the value of rnp->qsmaskinit will be stable. | ||
1598 | * | ||
1599 | * We don't include outgoingcpu in the affinity set, use -1 if there is | ||
1600 | * no outgoing CPU. If there are no CPUs left in the affinity set, | ||
1601 | * this function allows the kthread to execute on any CPU. | ||
1602 | */ | ||
1603 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
1604 | { | ||
1605 | cpumask_var_t cm; | ||
1606 | int cpu; | ||
1607 | unsigned long mask = rnp->qsmaskinit; | ||
1608 | |||
1609 | if (rnp->node_kthread_task == NULL) | ||
1610 | return; | ||
1611 | if (!alloc_cpumask_var(&cm, GFP_KERNEL)) | ||
1612 | return; | ||
1613 | cpumask_clear(cm); | ||
1614 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) | ||
1615 | if ((mask & 0x1) && cpu != outgoingcpu) | ||
1616 | cpumask_set_cpu(cpu, cm); | ||
1617 | if (cpumask_weight(cm) == 0) { | ||
1618 | cpumask_setall(cm); | ||
1619 | for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) | ||
1620 | cpumask_clear_cpu(cpu, cm); | ||
1621 | WARN_ON_ONCE(cpumask_weight(cm) == 0); | ||
1622 | } | ||
1623 | set_cpus_allowed_ptr(rnp->node_kthread_task, cm); | ||
1624 | rcu_boost_kthread_setaffinity(rnp, cm); | ||
1625 | free_cpumask_var(cm); | ||
1626 | } | ||
1627 | |||
1628 | /* | ||
1629 | * Spawn a per-rcu_node kthread, setting priority and affinity. | ||
1630 | * Called during boot before online/offline can happen, or, if | ||
1631 | * during runtime, with the main CPU-hotplug locks held. So only | ||
1632 | * one of these can be executing at a time. | ||
1633 | */ | ||
1634 | static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, | ||
1635 | struct rcu_node *rnp) | ||
1636 | { | ||
1637 | unsigned long flags; | ||
1638 | int rnp_index = rnp - &rsp->node[0]; | ||
1639 | struct sched_param sp; | ||
1640 | struct task_struct *t; | ||
1641 | |||
1642 | if (!rcu_kthreads_spawnable || | ||
1643 | rnp->qsmaskinit == 0) | ||
1644 | return 0; | ||
1645 | if (rnp->node_kthread_task == NULL) { | ||
1646 | t = kthread_create(rcu_node_kthread, (void *)rnp, | ||
1647 | "rcun%d", rnp_index); | ||
1648 | if (IS_ERR(t)) | ||
1649 | return PTR_ERR(t); | ||
1650 | raw_spin_lock_irqsave(&rnp->lock, flags); | ||
1651 | rnp->node_kthread_task = t; | ||
1652 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | ||
1653 | sp.sched_priority = 99; | ||
1654 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); | ||
1655 | wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ | ||
1656 | } | ||
1657 | return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); | ||
1658 | } | ||
1659 | |||
1660 | /* | ||
1661 | * Spawn all kthreads -- called as soon as the scheduler is running. | ||
1662 | */ | ||
1663 | static int __init rcu_spawn_kthreads(void) | ||
1664 | { | ||
1665 | int cpu; | ||
1666 | struct rcu_node *rnp; | ||
1667 | |||
1668 | rcu_kthreads_spawnable = 1; | ||
1669 | for_each_possible_cpu(cpu) { | ||
1670 | per_cpu(rcu_cpu_has_work, cpu) = 0; | ||
1671 | if (cpu_online(cpu)) | ||
1672 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
1673 | } | ||
1674 | rnp = rcu_get_root(rcu_state); | ||
1675 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1676 | if (NUM_RCU_NODES > 1) { | ||
1677 | rcu_for_each_leaf_node(rcu_state, rnp) | ||
1678 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1679 | } | ||
1680 | return 0; | ||
1681 | } | ||
1682 | early_initcall(rcu_spawn_kthreads); | ||
1683 | |||
1684 | static void __cpuinit rcu_prepare_kthreads(int cpu) | ||
1685 | { | ||
1686 | struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); | ||
1687 | struct rcu_node *rnp = rdp->mynode; | ||
1688 | |||
1689 | /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */ | ||
1690 | if (rcu_kthreads_spawnable) { | ||
1691 | (void)rcu_spawn_one_cpu_kthread(cpu); | ||
1692 | if (rnp->node_kthread_task == NULL) | ||
1693 | (void)rcu_spawn_one_node_kthread(rcu_state, rnp); | ||
1694 | } | ||
1309 | } | 1695 | } |
1310 | 1696 | ||
1311 | #else /* #ifdef CONFIG_RCU_BOOST */ | 1697 | #else /* #ifdef CONFIG_RCU_BOOST */ |
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) | |||
1315 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1701 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1316 | } | 1702 | } |
1317 | 1703 | ||
1318 | static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, | 1704 | static void invoke_rcu_callbacks_kthread(void) |
1319 | cpumask_var_t cm) | ||
1320 | { | 1705 | { |
1706 | WARN_ON_ONCE(1); | ||
1321 | } | 1707 | } |
1322 | 1708 | ||
1323 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) | 1709 | static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) |
1324 | { | 1710 | { |
1325 | } | 1711 | } |
1326 | 1712 | ||
1327 | static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, | 1713 | #ifdef CONFIG_HOTPLUG_CPU |
1328 | struct rcu_node *rnp, | 1714 | |
1329 | int rnp_index) | 1715 | static void rcu_stop_cpu_kthread(int cpu) |
1716 | { | ||
1717 | } | ||
1718 | |||
1719 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1720 | |||
1721 | static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) | ||
1722 | { | ||
1723 | } | ||
1724 | |||
1725 | static void rcu_cpu_kthread_setrt(int cpu, int to_rt) | ||
1330 | { | 1726 | { |
1331 | return 0; | ||
1332 | } | 1727 | } |
1333 | 1728 | ||
1334 | static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) | 1729 | static void __cpuinit rcu_prepare_kthreads(int cpu) |
1335 | { | 1730 | { |
1336 | } | 1731 | } |
1337 | 1732 | ||
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | |||
1509 | * | 1904 | * |
1510 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 1905 | * Because it is not legal to invoke rcu_process_callbacks() with irqs |
1511 | * disabled, we do one pass of force_quiescent_state(), then do a | 1906 | * disabled, we do one pass of force_quiescent_state(), then do a |
1512 | * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked | 1907 | * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked |
1513 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. | 1908 | * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. |
1514 | */ | 1909 | */ |
1515 | int rcu_needs_cpu(int cpu) | 1910 | int rcu_needs_cpu(int cpu) |
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu) | |||
1560 | 1955 | ||
1561 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ | 1956 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ |
1562 | if (c) | 1957 | if (c) |
1563 | invoke_rcu_cpu_kthread(); | 1958 | invoke_rcu_core(); |
1564 | return c; | 1959 | return c; |
1565 | } | 1960 | } |
1566 | 1961 | ||
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
46 | #define RCU_TREE_NONCORE | 46 | #define RCU_TREE_NONCORE |
47 | #include "rcutree.h" | 47 | #include "rcutree.h" |
48 | 48 | ||
49 | #ifdef CONFIG_RCU_BOOST | ||
50 | |||
49 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); | 51 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); |
50 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); | 52 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); |
51 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); | 53 | DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); |
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status) | |||
58 | return "SRWOY"[kthread_status]; | 60 | return "SRWOY"[kthread_status]; |
59 | } | 61 | } |
60 | 62 | ||
63 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
64 | |||
61 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | 65 | static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) |
62 | { | 66 | { |
63 | if (!rdp->beenonline) | 67 | if (!rdp->beenonline) |
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
76 | rdp->dynticks_fqs); | 80 | rdp->dynticks_fqs); |
77 | #endif /* #ifdef CONFIG_NO_HZ */ | 81 | #endif /* #ifdef CONFIG_NO_HZ */ |
78 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); | 82 | seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); |
79 | seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", | 83 | seq_printf(m, " ql=%ld qs=%c%c%c%c", |
80 | rdp->qlen, | 84 | rdp->qlen, |
81 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != | 85 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != |
82 | rdp->nxttail[RCU_NEXT_TAIL]], | 86 | rdp->nxttail[RCU_NEXT_TAIL]], |
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) | |||
84 | rdp->nxttail[RCU_NEXT_READY_TAIL]], | 88 | rdp->nxttail[RCU_NEXT_READY_TAIL]], |
85 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != | 89 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != |
86 | rdp->nxttail[RCU_WAIT_TAIL]], | 90 | rdp->nxttail[RCU_WAIT_TAIL]], |
87 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], | 91 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); |
92 | #ifdef CONFIG_RCU_BOOST | ||
93 | seq_printf(m, " kt=%d/%c/%d ktl=%x", | ||
88 | per_cpu(rcu_cpu_has_work, rdp->cpu), | 94 | per_cpu(rcu_cpu_has_work, rdp->cpu), |
89 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | 95 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, |
90 | rdp->cpu)), | 96 | rdp->cpu)), |
91 | per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), | 97 | per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), |
92 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, | 98 | per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); |
93 | rdp->blimit); | 99 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
100 | seq_printf(m, " b=%ld", rdp->blimit); | ||
94 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", | 101 | seq_printf(m, " ci=%lu co=%lu ca=%lu\n", |
95 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | 102 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); |
96 | } | 103 | } |
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) | |||
147 | rdp->dynticks_fqs); | 154 | rdp->dynticks_fqs); |
148 | #endif /* #ifdef CONFIG_NO_HZ */ | 155 | #endif /* #ifdef CONFIG_NO_HZ */ |
149 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); | 156 | seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); |
150 | seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, | 157 | seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen, |
151 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != | 158 | ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != |
152 | rdp->nxttail[RCU_NEXT_TAIL]], | 159 | rdp->nxttail[RCU_NEXT_TAIL]], |
153 | ".R"[rdp->nxttail[RCU_WAIT_TAIL] != | 160 | ".R"[rdp->nxttail[RCU_WAIT_TAIL] != |
154 | rdp->nxttail[RCU_NEXT_READY_TAIL]], | 161 | rdp->nxttail[RCU_NEXT_READY_TAIL]], |
155 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != | 162 | ".W"[rdp->nxttail[RCU_DONE_TAIL] != |
156 | rdp->nxttail[RCU_WAIT_TAIL]], | 163 | rdp->nxttail[RCU_WAIT_TAIL]], |
157 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], | 164 | ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); |
165 | #ifdef CONFIG_RCU_BOOST | ||
166 | seq_printf(m, ",%d,\"%c\"", | ||
158 | per_cpu(rcu_cpu_has_work, rdp->cpu), | 167 | per_cpu(rcu_cpu_has_work, rdp->cpu), |
159 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, | 168 | convert_kthread_status(per_cpu(rcu_cpu_kthread_status, |
160 | rdp->cpu)), | 169 | rdp->cpu))); |
161 | rdp->blimit); | 170 | #endif /* #ifdef CONFIG_RCU_BOOST */ |
171 | seq_printf(m, ",%ld", rdp->blimit); | ||
162 | seq_printf(m, ",%lu,%lu,%lu\n", | 172 | seq_printf(m, ",%lu,%lu,%lu\n", |
163 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); | 173 | rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); |
164 | } | 174 | } |
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) | |||
169 | #ifdef CONFIG_NO_HZ | 179 | #ifdef CONFIG_NO_HZ |
170 | seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); | 180 | seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); |
171 | #endif /* #ifdef CONFIG_NO_HZ */ | 181 | #endif /* #ifdef CONFIG_NO_HZ */ |
172 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); | 182 | seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\""); |
183 | #ifdef CONFIG_RCU_BOOST | ||
184 | seq_puts(m, "\"kt\",\"ktl\""); | ||
185 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
186 | seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n"); | ||
173 | #ifdef CONFIG_TREE_PREEMPT_RCU | 187 | #ifdef CONFIG_TREE_PREEMPT_RCU |
174 | seq_puts(m, "\"rcu_preempt:\"\n"); | 188 | seq_puts(m, "\"rcu_preempt:\"\n"); |
175 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); | 189 | PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); |
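The rcutree_trace.c hunks above split one large seq_printf() into an unconditional prefix, an optional middle that only exists when CONFIG_RCU_BOOST is compiled in, and the tail of the record. A minimal userspace sketch of that output-splitting pattern follows; the SHOW_KTHREAD_STATS option and the field names are invented for illustration and are not taken from the kernel sources.

    #include <stdio.h>

    /* #define SHOW_KTHREAD_STATS 1 */

    static void print_one_record(long qlen, int kthread_cpu, long blimit)
    {
        printf(" ql=%ld", qlen);            /* always present */
    #ifdef SHOW_KTHREAD_STATS
        printf(" kt=%d", kthread_cpu);      /* only when the option is enabled */
    #else
        (void)kthread_cpu;                  /* silence unused-parameter warning */
    #endif
        printf(" b=%ld\n", blimit);         /* tail of the record */
    }

    int main(void)
    {
        print_one_record(3, 1, 10);
        return 0;
    }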
diff --git a/kernel/resource.c b/kernel/resource.c index 798e2fae2a06..3ff40178dce7 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -38,6 +38,14 @@ struct resource iomem_resource = { | |||
38 | }; | 38 | }; |
39 | EXPORT_SYMBOL(iomem_resource); | 39 | EXPORT_SYMBOL(iomem_resource); |
40 | 40 | ||
41 | /* constraints to be met while allocating resources */ | ||
42 | struct resource_constraint { | ||
43 | resource_size_t min, max, align; | ||
44 | resource_size_t (*alignf)(void *, const struct resource *, | ||
45 | resource_size_t, resource_size_t); | ||
46 | void *alignf_data; | ||
47 | }; | ||
48 | |||
41 | static DEFINE_RWLOCK(resource_lock); | 49 | static DEFINE_RWLOCK(resource_lock); |
42 | 50 | ||
43 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) | 51 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) |
@@ -384,16 +392,13 @@ static bool resource_contains(struct resource *res1, struct resource *res2) | |||
384 | } | 392 | } |
385 | 393 | ||
386 | /* | 394 | /* |
387 | * Find empty slot in the resource tree given range and alignment. | 395 | * Find empty slot in the resource tree with the given range and |
396 | * alignment constraints | ||
388 | */ | 397 | */ |
389 | static int find_resource(struct resource *root, struct resource *new, | 398 | static int __find_resource(struct resource *root, struct resource *old, |
390 | resource_size_t size, resource_size_t min, | 399 | struct resource *new, |
391 | resource_size_t max, resource_size_t align, | 400 | resource_size_t size, |
392 | resource_size_t (*alignf)(void *, | 401 | struct resource_constraint *constraint) |
393 | const struct resource *, | ||
394 | resource_size_t, | ||
395 | resource_size_t), | ||
396 | void *alignf_data) | ||
397 | { | 402 | { |
398 | struct resource *this = root->child; | 403 | struct resource *this = root->child; |
399 | struct resource tmp = *new, avail, alloc; | 404 | struct resource tmp = *new, avail, alloc; |
@@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new, | |||
404 | * Skip past an allocated resource that starts at 0, since the assignment | 409 | * Skip past an allocated resource that starts at 0, since the assignment |
405 | * of this->start - 1 to tmp->end below would cause an underflow. | 410 | * of this->start - 1 to tmp->end below would cause an underflow. |
406 | */ | 411 | */ |
407 | if (this && this->start == 0) { | 412 | if (this && this->start == root->start) { |
408 | tmp.start = this->end + 1; | 413 | tmp.start = (this == old) ? old->start : this->end + 1; |
409 | this = this->sibling; | 414 | this = this->sibling; |
410 | } | 415 | } |
411 | for(;;) { | 416 | for(;;) { |
412 | if (this) | 417 | if (this) |
413 | tmp.end = this->start - 1; | 418 | tmp.end = (this == old) ? this->end : this->start - 1; |
414 | else | 419 | else |
415 | tmp.end = root->end; | 420 | tmp.end = root->end; |
416 | 421 | ||
417 | resource_clip(&tmp, min, max); | 422 | resource_clip(&tmp, constraint->min, constraint->max); |
418 | arch_remove_reservations(&tmp); | 423 | arch_remove_reservations(&tmp); |
419 | 424 | ||
420 | /* Check for overflow after ALIGN() */ | 425 | /* Check for overflow after ALIGN() */ |
421 | avail = *new; | 426 | avail = *new; |
422 | avail.start = ALIGN(tmp.start, align); | 427 | avail.start = ALIGN(tmp.start, constraint->align); |
423 | avail.end = tmp.end; | 428 | avail.end = tmp.end; |
424 | if (avail.start >= tmp.start) { | 429 | if (avail.start >= tmp.start) { |
425 | alloc.start = alignf(alignf_data, &avail, size, align); | 430 | alloc.start = constraint->alignf(constraint->alignf_data, &avail, |
431 | size, constraint->align); | ||
426 | alloc.end = alloc.start + size - 1; | 432 | alloc.end = alloc.start + size - 1; |
427 | if (resource_contains(&avail, &alloc)) { | 433 | if (resource_contains(&avail, &alloc)) { |
428 | new->start = alloc.start; | 434 | new->start = alloc.start; |
@@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new, | |||
432 | } | 438 | } |
433 | if (!this) | 439 | if (!this) |
434 | break; | 440 | break; |
435 | tmp.start = this->end + 1; | 441 | if (this != old) |
442 | tmp.start = this->end + 1; | ||
436 | this = this->sibling; | 443 | this = this->sibling; |
437 | } | 444 | } |
438 | return -EBUSY; | 445 | return -EBUSY; |
439 | } | 446 | } |
440 | 447 | ||
448 | /* | ||
449 | * Find empty slot in the resource tree given range and alignment. | ||
450 | */ | ||
451 | static int find_resource(struct resource *root, struct resource *new, | ||
452 | resource_size_t size, | ||
453 | struct resource_constraint *constraint) | ||
454 | { | ||
455 | return __find_resource(root, NULL, new, size, constraint); | ||
456 | } | ||
457 | |||
441 | /** | 458 | /** |
442 | * allocate_resource - allocate empty slot in the resource tree given range & alignment | 459 | * reallocate_resource - allocate a slot in the resource tree given range & alignment. |
460 | * The resource will be relocated if the new size cannot be reallocated in the | ||
461 | * current location. | ||
462 | * | ||
463 | * @root: root resource descriptor | ||
464 | * @old: resource descriptor desired by caller | ||
465 | * @newsize: new size of the resource descriptor | ||
466 | * @constraint: the size and alignment constraints to be met. | ||
467 | */ | ||
468 | int reallocate_resource(struct resource *root, struct resource *old, | ||
469 | resource_size_t newsize, | ||
470 | struct resource_constraint *constraint) | ||
471 | { | ||
472 | int err=0; | ||
473 | struct resource new = *old; | ||
474 | struct resource *conflict; | ||
475 | |||
476 | write_lock(&resource_lock); | ||
477 | |||
478 | if ((err = __find_resource(root, old, &new, newsize, constraint))) | ||
479 | goto out; | ||
480 | |||
481 | if (resource_contains(&new, old)) { | ||
482 | old->start = new.start; | ||
483 | old->end = new.end; | ||
484 | goto out; | ||
485 | } | ||
486 | |||
487 | if (old->child) { | ||
488 | err = -EBUSY; | ||
489 | goto out; | ||
490 | } | ||
491 | |||
492 | if (resource_contains(old, &new)) { | ||
493 | old->start = new.start; | ||
494 | old->end = new.end; | ||
495 | } else { | ||
496 | __release_resource(old); | ||
497 | *old = new; | ||
498 | conflict = __request_resource(root, old); | ||
499 | BUG_ON(conflict); | ||
500 | } | ||
501 | out: | ||
502 | write_unlock(&resource_lock); | ||
503 | return err; | ||
504 | } | ||
505 | |||
506 | |||
507 | /** | ||
508 | * allocate_resource - allocate empty slot in the resource tree given range & alignment. | ||
509 | * The resource will be reallocated with a new size if it was already allocated | ||
443 | * @root: root resource descriptor | 510 | * @root: root resource descriptor |
444 | * @new: resource descriptor desired by caller | 511 | * @new: resource descriptor desired by caller |
445 | * @size: requested resource region size | 512 | * @size: requested resource region size |
@@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new, | |||
459 | void *alignf_data) | 526 | void *alignf_data) |
460 | { | 527 | { |
461 | int err; | 528 | int err; |
529 | struct resource_constraint constraint; | ||
462 | 530 | ||
463 | if (!alignf) | 531 | if (!alignf) |
464 | alignf = simple_align_resource; | 532 | alignf = simple_align_resource; |
465 | 533 | ||
534 | constraint.min = min; | ||
535 | constraint.max = max; | ||
536 | constraint.align = align; | ||
537 | constraint.alignf = alignf; | ||
538 | constraint.alignf_data = alignf_data; | ||
539 | |||
540 | if ( new->parent ) { | ||
541 | /* resource is already allocated, try reallocating with | ||
542 | the new constraints */ | ||
543 | return reallocate_resource(root, new, size, &constraint); | ||
544 | } | ||
545 | |||
466 | write_lock(&resource_lock); | 546 | write_lock(&resource_lock); |
467 | err = find_resource(root, new, size, min, max, align, alignf, alignf_data); | 547 | err = find_resource(root, new, size, &constraint); |
468 | if (err >= 0 && __request_resource(root, new)) | 548 | if (err >= 0 && __request_resource(root, new)) |
469 | err = -EBUSY; | 549 | err = -EBUSY; |
470 | write_unlock(&resource_lock); | 550 | write_unlock(&resource_lock); |
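The resource.c hunks fold a long parameter list (min, max, align, alignf, alignf_data) into one struct resource_constraint and pass a single pointer down to the finder, which also makes the new reallocate_resource() path possible. A standalone sketch of that "bundle the constraints into a struct" refactoring is below; the types, field names, and the simple_align helper are simplified stand-ins, not the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    struct span { unsigned long start, end; };

    struct constraint {
        unsigned long min, max, align;
        unsigned long (*alignf)(void *data, const struct span *avail,
                                unsigned long size, unsigned long align);
        void *alignf_data;
    };

    static unsigned long simple_align(void *data, const struct span *avail,
                                      unsigned long size, unsigned long align)
    {
        (void)data; (void)size;
        /* round the start of the available span up to the alignment */
        return (avail->start + align - 1) & ~(align - 1);
    }

    static int find_span(const struct span *avail, unsigned long size,
                         const struct constraint *c, struct span *out)
    {
        unsigned long start = c->alignf(c->alignf_data, avail, size, c->align);
        if (start < c->min)
            start = (c->min + c->align - 1) & ~(c->align - 1);
        if (start + size - 1 > avail->end || start + size - 1 > c->max)
            return -1;                      /* does not fit under the constraints */
        out->start = start;
        out->end = start + size - 1;
        return 0;
    }

    int main(void)
    {
        struct span avail = { 0x1003, 0xffff }, got;
        struct constraint c = { 0x1000, 0xffff, 0x100, simple_align, NULL };
        if (find_span(&avail, 0x200, &c, &got) == 0)
            printf("allocated [%#lx, %#lx]\n", got.start, got.end);
        return 0;
    }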
diff --git a/kernel/sched.c b/kernel/sched.c index 71e5a25a8a58..ad8ab90bb301 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock); | |||
292 | * (The default weight is 1024 - so there's no practical | 292 | * (The default weight is 1024 - so there's no practical |
293 | * limitation from this.) | 293 | * limitation from this.) |
294 | */ | 294 | */ |
295 | #define MIN_SHARES 2 | 295 | #define MIN_SHARES (1UL << 1) |
296 | #define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION)) | 296 | #define MAX_SHARES (1UL << 18) |
297 | 297 | ||
298 | static int root_task_group_load = ROOT_TASK_GROUP_LOAD; | 298 | static int root_task_group_load = ROOT_TASK_GROUP_LOAD; |
299 | #endif | 299 | #endif |
@@ -8449,10 +8449,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) | |||
8449 | if (!tg->se[0]) | 8449 | if (!tg->se[0]) |
8450 | return -EINVAL; | 8450 | return -EINVAL; |
8451 | 8451 | ||
8452 | if (shares < MIN_SHARES) | 8452 | shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); |
8453 | shares = MIN_SHARES; | ||
8454 | else if (shares > MAX_SHARES) | ||
8455 | shares = MAX_SHARES; | ||
8456 | 8453 | ||
8457 | mutex_lock(&shares_mutex); | 8454 | mutex_lock(&shares_mutex); |
8458 | if (tg->shares == shares) | 8455 | if (tg->shares == shares) |
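The sched.c hunk replaces an if/else-if ladder with a single clamp() over scaled bounds. A self-contained sketch of the same idiom follows; the local clamp() macro, SHARES_RESOLUTION value, and scale_load() stand-in are illustrative assumptions, not the kernel's definitions.

    #include <stdio.h>

    #define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

    #define SHARES_RESOLUTION 10                 /* assumed, for illustration only */
    #define scale_load(w)     ((w) << SHARES_RESOLUTION)

    #define MIN_SHARES (1UL << 1)
    #define MAX_SHARES (1UL << 18)

    int main(void)
    {
        unsigned long shares = 1UL << 30;        /* deliberately out of range */
        shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
        printf("clamped shares = %lu\n", shares);
        return 0;
    }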
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 88725c939e0b..10d018212bab 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -1096,7 +1096,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag | |||
1096 | * to move current somewhere else, making room for our non-migratable | 1096 | * to move current somewhere else, making room for our non-migratable |
1097 | * task. | 1097 | * task. |
1098 | */ | 1098 | */ |
1099 | if (p->prio == rq->curr->prio && !need_resched()) | 1099 | if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) |
1100 | check_preempt_equal_prio(rq, p); | 1100 | check_preempt_equal_prio(rq, p); |
1101 | #endif | 1101 | #endif |
1102 | } | 1102 | } |
@@ -1239,6 +1239,10 @@ static int find_lowest_rq(struct task_struct *task) | |||
1239 | int this_cpu = smp_processor_id(); | 1239 | int this_cpu = smp_processor_id(); |
1240 | int cpu = task_cpu(task); | 1240 | int cpu = task_cpu(task); |
1241 | 1241 | ||
1242 | /* Make sure the mask is initialized first */ | ||
1243 | if (unlikely(!lowest_mask)) | ||
1244 | return -1; | ||
1245 | |||
1242 | if (task->rt.nr_cpus_allowed == 1) | 1246 | if (task->rt.nr_cpus_allowed == 1) |
1243 | return -1; /* No other targets possible */ | 1247 | return -1; /* No other targets possible */ |
1244 | 1248 | ||
diff --git a/kernel/signal.c b/kernel/signal.c index 86c32b884f8e..ff7678603328 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2365,7 +2365,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |||
2365 | /** | 2365 | /** |
2366 | * sys_rt_sigprocmask - change the list of currently blocked signals | 2366 | * sys_rt_sigprocmask - change the list of currently blocked signals |
2367 | * @how: whether to add, remove, or set signals | 2367 | * @how: whether to add, remove, or set signals |
2368 | * @set: stores pending signals | 2368 | * @nset: stores pending signals |
2369 | * @oset: previous value of signal mask if non-null | 2369 | * @oset: previous value of signal mask if non-null |
2370 | * @sigsetsize: size of sigset_t type | 2370 | * @sigsetsize: size of sigset_t type |
2371 | */ | 2371 | */ |
diff --git a/kernel/smp.c b/kernel/smp.c index 73a195193558..fb67dfa8394e 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = { | |||
74 | .notifier_call = hotplug_cfd, | 74 | .notifier_call = hotplug_cfd, |
75 | }; | 75 | }; |
76 | 76 | ||
77 | static int __cpuinit init_call_single_data(void) | 77 | void __init call_function_init(void) |
78 | { | 78 | { |
79 | void *cpu = (void *)(long)smp_processor_id(); | 79 | void *cpu = (void *)(long)smp_processor_id(); |
80 | int i; | 80 | int i; |
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void) | |||
88 | 88 | ||
89 | hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); | 89 | hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); |
90 | register_cpu_notifier(&hotplug_cfd_notifier); | 90 | register_cpu_notifier(&hotplug_cfd_notifier); |
91 | |||
92 | return 0; | ||
93 | } | 91 | } |
94 | early_initcall(init_call_single_data); | ||
95 | 92 | ||
96 | /* | 93 | /* |
97 | * csd_lock/csd_unlock used to serialize access to per-cpu csd resources | 94 | * csd_lock/csd_unlock used to serialize access to per-cpu csd resources |
diff --git a/kernel/softirq.c b/kernel/softirq.c index 13960170cad4..40cf63ddd4b3 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd); | |||
58 | 58 | ||
59 | char *softirq_to_name[NR_SOFTIRQS] = { | 59 | char *softirq_to_name[NR_SOFTIRQS] = { |
60 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", | 60 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", |
61 | "TASKLET", "SCHED", "HRTIMER" | 61 | "TASKLET", "SCHED", "HRTIMER", "RCU" |
62 | }; | 62 | }; |
63 | 63 | ||
64 | /* | 64 | /* |
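The softirq.c hunk adds "RCU" to a name table that must stay in one-to-one correspondence with the softirq enum. A small sketch of keeping such a table honest with a compile-time size check; the enum values and names here are illustrative, and the kernel itself does not use this exact assertion.

    #include <stdio.h>

    enum softirq_id { SI_HI, SI_TIMER, SI_TASKLET, SI_RCU, NR_SOFTIRQS };

    static const char *softirq_to_name[] = { "HI", "TIMER", "TASKLET", "RCU" };

    /* fails to compile if the table and the enum drift apart */
    _Static_assert(sizeof(softirq_to_name) / sizeof(softirq_to_name[0]) == NR_SOFTIRQS,
                   "softirq name table out of sync with enum");

    int main(void)
    {
        for (int i = 0; i < NR_SOFTIRQS; i++)
            printf("%d: %s\n", i, softirq_to_name[i]);
        return 0;
    }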
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 9ffea360a778..fc0f22005417 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -285,16 +285,18 @@ ret: | |||
285 | static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) | 285 | static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) |
286 | { | 286 | { |
287 | struct listener_list *listeners; | 287 | struct listener_list *listeners; |
288 | struct listener *s, *tmp; | 288 | struct listener *s, *tmp, *s2; |
289 | unsigned int cpu; | 289 | unsigned int cpu; |
290 | 290 | ||
291 | if (!cpumask_subset(mask, cpu_possible_mask)) | 291 | if (!cpumask_subset(mask, cpu_possible_mask)) |
292 | return -EINVAL; | 292 | return -EINVAL; |
293 | 293 | ||
294 | s = NULL; | ||
294 | if (isadd == REGISTER) { | 295 | if (isadd == REGISTER) { |
295 | for_each_cpu(cpu, mask) { | 296 | for_each_cpu(cpu, mask) { |
296 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, | 297 | if (!s) |
297 | cpu_to_node(cpu)); | 298 | s = kmalloc_node(sizeof(struct listener), |
299 | GFP_KERNEL, cpu_to_node(cpu)); | ||
298 | if (!s) | 300 | if (!s) |
299 | goto cleanup; | 301 | goto cleanup; |
300 | s->pid = pid; | 302 | s->pid = pid; |
@@ -303,9 +305,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) | |||
303 | 305 | ||
304 | listeners = &per_cpu(listener_array, cpu); | 306 | listeners = &per_cpu(listener_array, cpu); |
305 | down_write(&listeners->sem); | 307 | down_write(&listeners->sem); |
308 | list_for_each_entry_safe(s2, tmp, &listeners->list, list) { | ||
309 | if (s2->pid == pid) | ||
310 | goto next_cpu; | ||
311 | } | ||
306 | list_add(&s->list, &listeners->list); | 312 | list_add(&s->list, &listeners->list); |
313 | s = NULL; | ||
314 | next_cpu: | ||
307 | up_write(&listeners->sem); | 315 | up_write(&listeners->sem); |
308 | } | 316 | } |
317 | kfree(s); | ||
309 | return 0; | 318 | return 0; |
310 | } | 319 | } |
311 | 320 | ||
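The taskstats.c hunk guards against registering the same listener twice: the node is allocated lazily, the per-CPU list is scanned for an existing pid before list_add(), an unused node is carried over to the next CPU, and any leftover is freed at the end. A userspace sketch of that shape, with a plain singly linked list standing in for the kernel's listener lists:

    #include <stdio.h>
    #include <stdlib.h>

    struct listener { int pid; struct listener *next; };

    static int register_listener(struct listener **heads, int nlists, int pid)
    {
        struct listener *s = NULL;

        for (int i = 0; i < nlists; i++) {
            if (!s) {
                s = malloc(sizeof(*s));
                if (!s)
                    return -1;
                s->pid = pid;
            }
            /* already registered on this list? keep the spare node for later */
            int dup = 0;
            for (struct listener *p = heads[i]; p; p = p->next)
                if (p->pid == pid) { dup = 1; break; }
            if (dup)
                continue;
            s->next = heads[i];
            heads[i] = s;
            s = NULL;                        /* consumed; allocate again if needed */
        }
        free(s);                             /* leftover spare, if any */
        return 0;
    }

    int main(void)
    {
        struct listener *lists[2] = { NULL, NULL };
        register_listener(lists, 2, 42);
        register_listener(lists, 2, 42);     /* second call changes nothing */
        printf("list 0 head pid = %d\n", lists[0]->pid);
        return 0;
    }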
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 2d966244ea60..59f369f98a04 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -42,15 +42,75 @@ static struct alarm_base { | |||
42 | clockid_t base_clockid; | 42 | clockid_t base_clockid; |
43 | } alarm_bases[ALARM_NUMTYPE]; | 43 | } alarm_bases[ALARM_NUMTYPE]; |
44 | 44 | ||
45 | /* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ | ||
46 | static ktime_t freezer_delta; | ||
47 | static DEFINE_SPINLOCK(freezer_delta_lock); | ||
48 | |||
45 | #ifdef CONFIG_RTC_CLASS | 49 | #ifdef CONFIG_RTC_CLASS |
46 | /* rtc timer and device for setting alarm wakeups at suspend */ | 50 | /* rtc timer and device for setting alarm wakeups at suspend */ |
47 | static struct rtc_timer rtctimer; | 51 | static struct rtc_timer rtctimer; |
48 | static struct rtc_device *rtcdev; | 52 | static struct rtc_device *rtcdev; |
49 | #endif | 53 | static DEFINE_SPINLOCK(rtcdev_lock); |
50 | 54 | ||
51 | /* freezer delta & lock used to handle clock_nanosleep triggered wakeups */ | 55 | /** |
52 | static ktime_t freezer_delta; | 56 | * has_wakealarm - check rtc device has wakealarm ability |
53 | static DEFINE_SPINLOCK(freezer_delta_lock); | 57 | * @dev: current device |
58 | * @name_ptr: name to be returned | ||
59 | * | ||
60 | * This helper function checks to see if the rtc device can wake | ||
61 | * from suspend. | ||
62 | */ | ||
63 | static int has_wakealarm(struct device *dev, void *name_ptr) | ||
64 | { | ||
65 | struct rtc_device *candidate = to_rtc_device(dev); | ||
66 | |||
67 | if (!candidate->ops->set_alarm) | ||
68 | return 0; | ||
69 | if (!device_may_wakeup(candidate->dev.parent)) | ||
70 | return 0; | ||
71 | |||
72 | *(const char **)name_ptr = dev_name(dev); | ||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * alarmtimer_get_rtcdev - Return selected rtcdevice | ||
78 | * | ||
79 | * This function returns the rtc device to use for wakealarms. | ||
80 | * If one has not already been chosen, it checks to see if a | ||
81 | * functional rtc device is available. | ||
82 | */ | ||
83 | static struct rtc_device *alarmtimer_get_rtcdev(void) | ||
84 | { | ||
85 | struct device *dev; | ||
86 | char *str; | ||
87 | unsigned long flags; | ||
88 | struct rtc_device *ret; | ||
89 | |||
90 | spin_lock_irqsave(&rtcdev_lock, flags); | ||
91 | if (!rtcdev) { | ||
92 | /* Find an rtc device and init the rtc_timer */ | ||
93 | dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); | ||
94 | /* If we have a device then str is valid. See has_wakealarm() */ | ||
95 | if (dev) { | ||
96 | rtcdev = rtc_class_open(str); | ||
97 | /* | ||
98 | * Drop the reference we got in class_find_device, | ||
99 | * rtc_open takes its own. | ||
100 | */ | ||
101 | put_device(dev); | ||
102 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
103 | } | ||
104 | } | ||
105 | ret = rtcdev; | ||
106 | spin_unlock_irqrestore(&rtcdev_lock, flags); | ||
107 | |||
108 | return ret; | ||
109 | } | ||
110 | #else | ||
111 | #define alarmtimer_get_rtcdev() (0) | ||
112 | #define rtcdev (0) | ||
113 | #endif | ||
54 | 114 | ||
55 | 115 | ||
56 | /** | 116 | /** |
@@ -166,6 +226,7 @@ static int alarmtimer_suspend(struct device *dev) | |||
166 | struct rtc_time tm; | 226 | struct rtc_time tm; |
167 | ktime_t min, now; | 227 | ktime_t min, now; |
168 | unsigned long flags; | 228 | unsigned long flags; |
229 | struct rtc_device *rtc; | ||
169 | int i; | 230 | int i; |
170 | 231 | ||
171 | spin_lock_irqsave(&freezer_delta_lock, flags); | 232 | spin_lock_irqsave(&freezer_delta_lock, flags); |
@@ -173,8 +234,9 @@ static int alarmtimer_suspend(struct device *dev) | |||
173 | freezer_delta = ktime_set(0, 0); | 234 | freezer_delta = ktime_set(0, 0); |
174 | spin_unlock_irqrestore(&freezer_delta_lock, flags); | 235 | spin_unlock_irqrestore(&freezer_delta_lock, flags); |
175 | 236 | ||
237 | rtc = rtcdev; | ||
176 | /* If we have no rtcdev, just return */ | 238 | /* If we have no rtcdev, just return */ |
177 | if (!rtcdev) | 239 | if (!rtc) |
178 | return 0; | 240 | return 0; |
179 | 241 | ||
180 | /* Find the soonest timer to expire*/ | 242 | /* Find the soonest timer to expire*/ |
@@ -199,12 +261,12 @@ static int alarmtimer_suspend(struct device *dev) | |||
199 | WARN_ON(min.tv64 < NSEC_PER_SEC); | 261 | WARN_ON(min.tv64 < NSEC_PER_SEC); |
200 | 262 | ||
201 | /* Setup an rtc timer to fire that far in the future */ | 263 | /* Setup an rtc timer to fire that far in the future */ |
202 | rtc_timer_cancel(rtcdev, &rtctimer); | 264 | rtc_timer_cancel(rtc, &rtctimer); |
203 | rtc_read_time(rtcdev, &tm); | 265 | rtc_read_time(rtc, &tm); |
204 | now = rtc_tm_to_ktime(tm); | 266 | now = rtc_tm_to_ktime(tm); |
205 | now = ktime_add(now, min); | 267 | now = ktime_add(now, min); |
206 | 268 | ||
207 | rtc_timer_start(rtcdev, &rtctimer, now, ktime_set(0, 0)); | 269 | rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0)); |
208 | 270 | ||
209 | return 0; | 271 | return 0; |
210 | } | 272 | } |
@@ -322,6 +384,9 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp) | |||
322 | { | 384 | { |
323 | clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; | 385 | clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid; |
324 | 386 | ||
387 | if (!alarmtimer_get_rtcdev()) | ||
388 | return -ENOTSUPP; | ||
389 | |||
325 | return hrtimer_get_res(baseid, tp); | 390 | return hrtimer_get_res(baseid, tp); |
326 | } | 391 | } |
327 | 392 | ||
@@ -336,6 +401,9 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp) | |||
336 | { | 401 | { |
337 | struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; | 402 | struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)]; |
338 | 403 | ||
404 | if (!alarmtimer_get_rtcdev()) | ||
405 | return -ENOTSUPP; | ||
406 | |||
339 | *tp = ktime_to_timespec(base->gettime()); | 407 | *tp = ktime_to_timespec(base->gettime()); |
340 | return 0; | 408 | return 0; |
341 | } | 409 | } |
@@ -351,6 +419,9 @@ static int alarm_timer_create(struct k_itimer *new_timer) | |||
351 | enum alarmtimer_type type; | 419 | enum alarmtimer_type type; |
352 | struct alarm_base *base; | 420 | struct alarm_base *base; |
353 | 421 | ||
422 | if (!alarmtimer_get_rtcdev()) | ||
423 | return -ENOTSUPP; | ||
424 | |||
354 | if (!capable(CAP_WAKE_ALARM)) | 425 | if (!capable(CAP_WAKE_ALARM)) |
355 | return -EPERM; | 426 | return -EPERM; |
356 | 427 | ||
@@ -385,6 +456,9 @@ static void alarm_timer_get(struct k_itimer *timr, | |||
385 | */ | 456 | */ |
386 | static int alarm_timer_del(struct k_itimer *timr) | 457 | static int alarm_timer_del(struct k_itimer *timr) |
387 | { | 458 | { |
459 | if (!rtcdev) | ||
460 | return -ENOTSUPP; | ||
461 | |||
388 | alarm_cancel(&timr->it.alarmtimer); | 462 | alarm_cancel(&timr->it.alarmtimer); |
389 | return 0; | 463 | return 0; |
390 | } | 464 | } |
@@ -402,6 +476,9 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, | |||
402 | struct itimerspec *new_setting, | 476 | struct itimerspec *new_setting, |
403 | struct itimerspec *old_setting) | 477 | struct itimerspec *old_setting) |
404 | { | 478 | { |
479 | if (!rtcdev) | ||
480 | return -ENOTSUPP; | ||
481 | |||
405 | /* Save old values */ | 482 | /* Save old values */ |
406 | old_setting->it_interval = | 483 | old_setting->it_interval = |
407 | ktime_to_timespec(timr->it.alarmtimer.period); | 484 | ktime_to_timespec(timr->it.alarmtimer.period); |
@@ -541,6 +618,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, | |||
541 | int ret = 0; | 618 | int ret = 0; |
542 | struct restart_block *restart; | 619 | struct restart_block *restart; |
543 | 620 | ||
621 | if (!alarmtimer_get_rtcdev()) | ||
622 | return -ENOTSUPP; | ||
623 | |||
544 | if (!capable(CAP_WAKE_ALARM)) | 624 | if (!capable(CAP_WAKE_ALARM)) |
545 | return -EPERM; | 625 | return -EPERM; |
546 | 626 | ||
@@ -638,65 +718,3 @@ static int __init alarmtimer_init(void) | |||
638 | } | 718 | } |
639 | device_initcall(alarmtimer_init); | 719 | device_initcall(alarmtimer_init); |
640 | 720 | ||
641 | #ifdef CONFIG_RTC_CLASS | ||
642 | /** | ||
643 | * has_wakealarm - check rtc device has wakealarm ability | ||
644 | * @dev: current device | ||
645 | * @name_ptr: name to be returned | ||
646 | * | ||
647 | * This helper function checks to see if the rtc device can wake | ||
648 | * from suspend. | ||
649 | */ | ||
650 | static int __init has_wakealarm(struct device *dev, void *name_ptr) | ||
651 | { | ||
652 | struct rtc_device *candidate = to_rtc_device(dev); | ||
653 | |||
654 | if (!candidate->ops->set_alarm) | ||
655 | return 0; | ||
656 | if (!device_may_wakeup(candidate->dev.parent)) | ||
657 | return 0; | ||
658 | |||
659 | *(const char **)name_ptr = dev_name(dev); | ||
660 | return 1; | ||
661 | } | ||
662 | |||
663 | /** | ||
664 | * alarmtimer_init_late - Late initializing of alarmtimer code | ||
665 | * | ||
666 | * This function locates a rtc device to use for wakealarms. | ||
667 | * Run as late_initcall to make sure rtc devices have been | ||
668 | * registered. | ||
669 | */ | ||
670 | static int __init alarmtimer_init_late(void) | ||
671 | { | ||
672 | struct device *dev; | ||
673 | char *str; | ||
674 | |||
675 | /* Find an rtc device and init the rtc_timer */ | ||
676 | dev = class_find_device(rtc_class, NULL, &str, has_wakealarm); | ||
677 | /* If we have a device then str is valid. See has_wakealarm() */ | ||
678 | if (dev) { | ||
679 | rtcdev = rtc_class_open(str); | ||
680 | /* | ||
681 | * Drop the reference we got in class_find_device, | ||
682 | * rtc_open takes its own. | ||
683 | */ | ||
684 | put_device(dev); | ||
685 | } | ||
686 | if (!rtcdev) { | ||
687 | printk(KERN_WARNING "No RTC device found, ALARM timers will" | ||
688 | " not wake from suspend"); | ||
689 | } | ||
690 | rtc_timer_init(&rtctimer, NULL, NULL); | ||
691 | |||
692 | return 0; | ||
693 | } | ||
694 | #else | ||
695 | static int __init alarmtimer_init_late(void) | ||
696 | { | ||
697 | printk(KERN_WARNING "Kernel not built with RTC support, ALARM timers" | ||
698 | " will not wake from suspend"); | ||
699 | return 0; | ||
700 | } | ||
701 | #endif | ||
702 | late_initcall(alarmtimer_init_late); | ||
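The alarmtimer.c change replaces the late_initcall RTC lookup with a lazy, lock-protected getter: the first caller that needs the device finds and caches it, later callers reuse the cached pointer, and every entry point bails out when no device exists. A userspace sketch of that memoized-getter shape, using a pthread mutex in place of the spinlock and an invented expensive_lookup() helper:

    #include <stdio.h>
    #include <pthread.h>

    struct device { const char *name; };

    static struct device *cached_dev;
    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct device *expensive_lookup(void)
    {
        static struct device rtc0 = { "rtc0" };    /* pretend we scanned a bus */
        return &rtc0;
    }

    static struct device *get_dev(void)
    {
        pthread_mutex_lock(&dev_lock);
        if (!cached_dev)
            cached_dev = expensive_lookup();       /* done at most once */
        pthread_mutex_unlock(&dev_lock);
        return cached_dev;
    }

    int main(void)
    {
        struct device *dev = get_dev();
        if (!dev) {
            fprintf(stderr, "no wakeup-capable device, feature unsupported\n");
            return 1;
        }
        printf("using %s\n", dev->name);
        return 0;
    }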
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 1c95fd677328..e0980f0d9a0a 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -185,7 +185,6 @@ static struct clocksource *watchdog; | |||
185 | static struct timer_list watchdog_timer; | 185 | static struct timer_list watchdog_timer; |
186 | static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); | 186 | static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); |
187 | static DEFINE_SPINLOCK(watchdog_lock); | 187 | static DEFINE_SPINLOCK(watchdog_lock); |
188 | static cycle_t watchdog_last; | ||
189 | static int watchdog_running; | 188 | static int watchdog_running; |
190 | 189 | ||
191 | static int clocksource_watchdog_kthread(void *data); | 190 | static int clocksource_watchdog_kthread(void *data); |
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data) | |||
254 | if (!watchdog_running) | 253 | if (!watchdog_running) |
255 | goto out; | 254 | goto out; |
256 | 255 | ||
257 | wdnow = watchdog->read(watchdog); | ||
258 | wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask, | ||
259 | watchdog->mult, watchdog->shift); | ||
260 | watchdog_last = wdnow; | ||
261 | |||
262 | list_for_each_entry(cs, &watchdog_list, wd_list) { | 256 | list_for_each_entry(cs, &watchdog_list, wd_list) { |
263 | 257 | ||
264 | /* Clocksource already marked unstable? */ | 258 | /* Clocksource already marked unstable? */ |
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data) | |||
268 | continue; | 262 | continue; |
269 | } | 263 | } |
270 | 264 | ||
265 | local_irq_disable(); | ||
271 | csnow = cs->read(cs); | 266 | csnow = cs->read(cs); |
267 | wdnow = watchdog->read(watchdog); | ||
268 | local_irq_enable(); | ||
272 | 269 | ||
273 | /* Clocksource initialized ? */ | 270 | /* Clocksource initialized ? */ |
274 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { | 271 | if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { |
275 | cs->flags |= CLOCK_SOURCE_WATCHDOG; | 272 | cs->flags |= CLOCK_SOURCE_WATCHDOG; |
276 | cs->wd_last = csnow; | 273 | cs->wd_last = wdnow; |
274 | cs->cs_last = csnow; | ||
277 | continue; | 275 | continue; |
278 | } | 276 | } |
279 | 277 | ||
280 | /* Check the deviation from the watchdog clocksource. */ | 278 | wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask, |
281 | cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & | 279 | watchdog->mult, watchdog->shift); |
280 | |||
281 | cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) & | ||
282 | cs->mask, cs->mult, cs->shift); | 282 | cs->mask, cs->mult, cs->shift); |
283 | cs->wd_last = csnow; | 283 | cs->cs_last = csnow; |
284 | cs->wd_last = wdnow; | ||
285 | |||
286 | /* Check the deviation from the watchdog clocksource. */ | ||
284 | if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { | 287 | if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { |
285 | clocksource_unstable(cs, cs_nsec - wd_nsec); | 288 | clocksource_unstable(cs, cs_nsec - wd_nsec); |
286 | continue; | 289 | continue; |
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void) | |||
318 | return; | 321 | return; |
319 | init_timer(&watchdog_timer); | 322 | init_timer(&watchdog_timer); |
320 | watchdog_timer.function = clocksource_watchdog; | 323 | watchdog_timer.function = clocksource_watchdog; |
321 | watchdog_last = watchdog->read(watchdog); | ||
322 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | 324 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; |
323 | add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); | 325 | add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); |
324 | watchdog_running = 1; | 326 | watchdog_running = 1; |
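The clocksource.c hunks drop the single global watchdog_last and keep a last reading per clocksource, so both deltas come from reads taken back to back with interrupts off. A sketch of the delta arithmetic itself: mask the cycle difference so a counter wraparound does not produce a huge value, then convert cycles to nanoseconds with the usual (delta * mult) >> shift scaling. The mult/shift values below are made up for the example.

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
    {
        return (cycles * mult) >> shift;
    }

    int main(void)
    {
        uint64_t mask = 0xffffffffULL;                 /* 32-bit counter */
        uint64_t last = 0xfffffff0ULL, now = 0x10ULL;  /* counter wrapped around */
        uint64_t delta = (now - last) & mask;          /* 0x20 cycles, not a huge value */
        /* mult/shift chosen so one cycle is roughly one nanosecond */
        printf("elapsed ~ %llu ns\n",
               (unsigned long long)cyc2ns(delta, 1 << 20, 20));
        return 0;
    }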
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1ee417fcbfa5..908038f57440 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash, | |||
2740 | { | 2740 | { |
2741 | char *func, *command, *next = buff; | 2741 | char *func, *command, *next = buff; |
2742 | struct ftrace_func_command *p; | 2742 | struct ftrace_func_command *p; |
2743 | int ret; | 2743 | int ret = -EINVAL; |
2744 | 2744 | ||
2745 | func = strsep(&next, ":"); | 2745 | func = strsep(&next, ":"); |
2746 | 2746 | ||
@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod, | |||
3330 | { | 3330 | { |
3331 | unsigned long *p; | 3331 | unsigned long *p; |
3332 | unsigned long addr; | 3332 | unsigned long addr; |
3333 | unsigned long flags; | ||
3333 | 3334 | ||
3334 | mutex_lock(&ftrace_lock); | 3335 | mutex_lock(&ftrace_lock); |
3335 | p = start; | 3336 | p = start; |
@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod, | |||
3346 | ftrace_record_ip(addr); | 3347 | ftrace_record_ip(addr); |
3347 | } | 3348 | } |
3348 | 3349 | ||
3350 | /* | ||
3351 | * Disable interrupts to prevent interrupts from executing | ||
3352 | * code that is being modified. | ||
3353 | */ | ||
3354 | local_irq_save(flags); | ||
3349 | ftrace_update_code(mod); | 3355 | ftrace_update_code(mod); |
3356 | local_irq_restore(flags); | ||
3350 | mutex_unlock(&ftrace_lock); | 3357 | mutex_unlock(&ftrace_lock); |
3351 | 3358 | ||
3352 | return 0; | 3359 | return 0; |
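The first ftrace.c hunk initializes ret to -EINVAL so the function returns a defined error when no registered command matches the parsed input. A tiny sketch of that default-error pattern; the command table here is invented for illustration.

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    static int run_command(const char *name)
    {
        static const char *known[] = { "enable", "disable" };
        int ret = -EINVAL;                   /* default if nothing matches */

        for (unsigned i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
            if (strcmp(name, known[i]) == 0) {
                ret = 0;                     /* a real handler would run here */
                break;
            }
        }
        return ret;
    }

    int main(void)
    {
        printf("enable -> %d, bogus -> %d\n",
               run_command("enable"), run_command("bogus"));
        return 0;
    }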
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f925c45f0afa..27d13b36b8be 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace); | |||
1870 | 1870 | ||
1871 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 1871 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
1872 | 1872 | ||
1873 | static int kprobe_trace_selftest_target(int a1, int a2, int a3, | 1873 | /* |
1874 | int a4, int a5, int a6) | 1874 | * The "__used" keeps gcc from removing the function symbol |
1875 | * from the kallsyms table. | ||
1876 | */ | ||
1877 | static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, | ||
1878 | int a4, int a5, int a6) | ||
1875 | { | 1879 | { |
1876 | return a1 + a2 + a3 + a4 + a5 + a6; | 1880 | return a1 + a2 + a3 + a4 + a5 + a6; |
1877 | } | 1881 | } |
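The trace_kprobe.c hunk marks the selftest target __used so the compiler keeps its symbol even though nothing calls it directly; the probe later finds it by name. The same GCC/Clang attribute works outside the kernel, as in this minimal sketch:

    #include <stdio.h>

    /* kept in the object file even if no call site references it */
    static __attribute__((used)) int probed_target(int a, int b)
    {
        return a + b;
    }

    int main(void)
    {
        /* a tracer would locate probed_target() by its symbol name at runtime */
        puts("built with probed_target retained in the symbol table");
        return 0;
    }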
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index dff763b7baf1..1f06468a10d7 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos) | |||
240 | const char **fmt = v; | 240 | const char **fmt = v; |
241 | int start_index; | 241 | int start_index; |
242 | 242 | ||
243 | if (!fmt) | ||
244 | fmt = __start___trace_bprintk_fmt + *pos; | ||
245 | |||
246 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; | 243 | start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; |
247 | 244 | ||
248 | if (*pos < start_index) | 245 | if (*pos < start_index) |
249 | return fmt; | 246 | return __start___trace_bprintk_fmt + *pos; |
250 | 247 | ||
251 | return find_next_mod_format(start_index, v, fmt, pos); | 248 | return find_next_mod_format(start_index, v, fmt, pos); |
252 | } | 249 | } |