Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c      | 10
-rw-r--r--  kernel/kprobes.c     | 15
-rw-r--r--  kernel/rcupreempt.c  |  2
-rw-r--r--  kernel/sched.c       | 34
-rw-r--r--  kernel/sched_rt.c    | 63
-rw-r--r--  kernel/sched_stats.h |  6
-rw-r--r--  kernel/softlockup.c  | 15
7 files changed, 100 insertions(+), 45 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 039baa4cd90c..9fceb97e989c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-        if ((int)val < 0)
-                val = -1;
+        if (val < -1 || val >= SD_LV_MAX)
+                return -EINVAL;
 
         if (val != cs->relax_domain_level) {
                 cs->relax_domain_level = val;
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
         top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
         scan_for_empty_cpusets(&top_cpuset);
 
+        /*
+         * Scheduler destroys domains on hotplug events.
+         * Rebuild them based on the current settings.
+         */
+        rebuild_sched_domains();
+
         cgroup_unlock();
 }
 
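The net effect of these two cpuset.c hunks: a relax_domain_level value written through cpuset is now rejected with -EINVAL when it falls outside -1 .. SD_LV_MAX-1 (previously any negative value was silently clamped to -1), and the CPU/memory hotplug path asks the scheduler to rebuild the domains that the hotplug event destroyed. A hypothetical, self-contained sketch of the validate-instead-of-clamp pattern (set_level and LEVEL_MAX are illustrative stand-ins, not kernel API):

        #include <stdio.h>
        #include <errno.h>

        #define LEVEL_MAX 6              /* stand-in for SD_LV_MAX */

        static long current_level = -1;

        /* Reject out-of-range input instead of clamping it. */
        static int set_level(long val)
        {
                if (val < -1 || val >= LEVEL_MAX)
                        return -EINVAL;
                current_level = val;
                return 0;
        }

        int main(void)
        {
                printf("%d\n", set_level(3));    /* 0: accepted       */
                printf("%d\n", set_level(-5));   /* -EINVAL: rejected */
                printf("level = %ld\n", current_level);
                return 0;
        }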
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1e0250cb9486..d4998f81e229 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -699,8 +699,9 @@ static int __register_kprobes(struct kprobe **kps, int num,
                 return -EINVAL;
         for (i = 0; i < num; i++) {
                 ret = __register_kprobe(kps[i], called_from);
-                if (ret < 0 && i > 0) {
-                        unregister_kprobes(kps, i);
+                if (ret < 0) {
+                        if (i > 0)
+                                unregister_kprobes(kps, i);
                         break;
                 }
         }
@@ -776,8 +777,9 @@ static int __register_jprobes(struct jprobe **jps, int num,
                         jp->kp.break_handler = longjmp_break_handler;
                         ret = __register_kprobe(&jp->kp, called_from);
                 }
-                if (ret < 0 && i > 0) {
-                        unregister_jprobes(jps, i);
+                if (ret < 0) {
+                        if (i > 0)
+                                unregister_jprobes(jps, i);
                         break;
                 }
         }
@@ -920,8 +922,9 @@ static int __register_kretprobes(struct kretprobe **rps, int num,
                 return -EINVAL;
         for (i = 0; i < num; i++) {
                 ret = __register_kretprobe(rps[i], called_from);
-                if (ret < 0 && i > 0) {
-                        unregister_kretprobes(rps, i);
+                if (ret < 0) {
+                        if (i > 0)
+                                unregister_kretprobes(rps, i);
                         break;
                 }
         }
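All three batch-registration helpers get the same fix: before, the error path only ran when i > 0, so a failure on the very first probe (i == 0) neither rolled anything back nor stopped the loop; now any failure breaks out, and the partial rollback runs only when something was actually registered. A hypothetical stand-alone sketch of that rollback pattern (fake_register/fake_unregister_many are illustrative, not kprobes API):

        #include <stdio.h>

        /* Pretend registration fails at index 'fail_at'. */
        static int fail_at;
        static int fake_register(int idx)       { return idx == fail_at ? -1 : 0; }
        static void fake_unregister_many(int n) { printf("  rolled back %d entries\n", n); }

        static int register_all(int num)
        {
                int i, ret = 0;

                for (i = 0; i < num; i++) {
                        ret = fake_register(i);
                        if (ret < 0) {
                                if (i > 0)                    /* only undo what succeeded */
                                        fake_unregister_many(i);
                                break;                        /* stop even when i == 0    */
                        }
                }
                return ret;
        }

        int main(void)
        {
                fail_at = 0; printf("fail at 0 -> %d\n", register_all(5));
                fail_at = 3; printf("fail at 3 -> %d\n", register_all(5));
                return 0;
        }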
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..5e02b7740702 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
         int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index eaf6751e7612..b048ad8a11af 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
         return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
         hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -6877,7 +6879,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-        default_relax_domain_level = simple_strtoul(str, NULL, 0);
+        unsigned long val;
+
+        val = simple_strtoul(str, NULL, 0);
+        if (val < SD_LV_MAX)
+                default_relax_domain_level = val;
+
         return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7236,6 +7243,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+        ndoms_cur = 0;
+        if (doms_cur != &fallback_doms)
+                kfree(doms_cur);
+        doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7382,6 +7401,7 @@ int arch_reinit_sched_domains(void)
         get_online_cpus();
         mutex_lock(&sched_domains_mutex);
         detach_destroy_domains(&cpu_online_map);
+        free_sched_domains();
         err = arch_init_sched_domains(&cpu_online_map);
         mutex_unlock(&sched_domains_mutex);
         put_online_cpus();
@@ -7467,6 +7487,7 @@ static int update_sched_domains(struct notifier_block *nfb,
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 detach_destroy_domains(&cpu_online_map);
+                free_sched_domains();
                 return NOTIFY_OK;
 
         case CPU_UP_CANCELED:
@@ -7485,8 +7506,16 @@ static int update_sched_domains(struct notifier_block *nfb,
                 return NOTIFY_DONE;
         }
 
+#ifndef CONFIG_CPUSETS
+        /*
+         * Create default domain partitioning if cpusets are disabled.
+         * Otherwise we let cpusets rebuild the domains based on the
+         * current setup.
+         */
+
         /* The hotplug lock is already held by cpu_up/cpu_down */
         arch_init_sched_domains(&cpu_online_map);
+#endif
 
         return NOTIFY_OK;
 }
@@ -7626,7 +7655,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
         else
                 rt_se->rt_rq = parent->my_q;
 
-        rt_se->rt_rq = &rq->rt;
         rt_se->my_q = rt_rq;
         rt_se->parent = parent;
         INIT_LIST_HEAD(&rt_se->run_list);
@@ -8348,7 +8376,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-        struct task_group *tgi, *parent = tg->parent;
+        struct task_group *tgi, *parent = tg ? tg->parent : NULL;
         unsigned long total = 0;
 
         if (!parent) {
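Two smaller points in the sched.c hunks are worth noting: setup_relax_domain_level() now ignores out-of-range boot values (e.g. relax_domain_level=2 on the kernel command line is accepted, while a value >= SD_LV_MAX leaves the default untouched), and the new free_sched_domains() follows a recurring "free the heap copy only if it is not the static fallback" pattern on doms_cur/fallback_doms. A hypothetical user-space sketch of that second pattern (doms/fallback here are illustrative stand-ins mirroring the variables in the hunk above):

        #include <stdlib.h>

        static unsigned long fallback[1];        /* static single-entry fallback           */
        static unsigned long *doms = fallback;   /* current array, possibly heap-allocated */
        static int ndoms = 1;

        /* Swap in a freshly allocated array of n masks. */
        static int set_domains(int n)
        {
                unsigned long *new_doms = calloc(n, sizeof(*new_doms));

                if (!new_doms)
                        return -1;
                if (doms != fallback)            /* never free the static fallback */
                        free(doms);
                doms = new_doms;
                ndoms = n;
                return 0;
        }

        /* Mirrors free_sched_domains(): drop any heap copy, fall back to the static array. */
        static void free_domains(void)
        {
                ndoms = 0;
                if (doms != fallback)
                        free(doms);
                doms = fallback;
        }

        int main(void)
        {
                set_domains(4);
                free_domains();   /* safe regardless of whether set_domains() ever ran */
                free_domains();   /* idempotent: the static fallback is never freed    */
                return 0;
        }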
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..1dad5bbb59b6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -449,13 +449,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
         struct rt_prio_array *array = &rt_rq->active;
         struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-        if (group_rq && rt_rq_throttled(group_rq))
+        /*
+         * Don't enqueue the group if its throttled, or when empty.
+         * The latter is a consequence of the former when a child group
+         * get throttled and the current group doesn't have any other
+         * active members.
+         */
+        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                 return;
 
         list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +470,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
         inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
         struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +486,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-        struct sched_rt_entity *rt_se, *back = NULL;
+        struct sched_rt_entity *back = NULL;
 
-        rt_se = &p->rt;
         for_each_sched_rt_entity(rt_se) {
                 rt_se->back = back;
                 back = rt_se;
@@ -492,7 +497,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
         for (rt_se = back; rt_se; rt_se = rt_se->back) {
                 if (on_rt_rq(rt_se))
-                        dequeue_rt_entity(rt_se);
+                        __dequeue_rt_entity(rt_se);
+        }
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+        dequeue_rt_stack(rt_se);
+        for_each_sched_rt_entity(rt_se)
+                __enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+        dequeue_rt_stack(rt_se);
+
+        for_each_sched_rt_entity(rt_se) {
+                struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+                if (rt_rq && rt_rq->rt_nr_running)
+                        __enqueue_rt_entity(rt_se);
         }
 }
 
@@ -506,32 +530,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         if (wakeup)
                 rt_se->timeout = 0;
 
-        dequeue_rt_stack(p);
-
-        /*
-         * enqueue everybody, bottom - up.
-         */
-        for_each_sched_rt_entity(rt_se)
-                enqueue_rt_entity(rt_se);
+        enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
         struct sched_rt_entity *rt_se = &p->rt;
-        struct rt_rq *rt_rq;
 
         update_curr_rt(rq);
-
-        dequeue_rt_stack(p);
-
-        /*
-         * re-enqueue all non-empty rt_rq entities.
-         */
-        for_each_sched_rt_entity(rt_se) {
-                rt_rq = group_rt_rq(rt_se);
-                if (rt_rq && rt_rq->rt_nr_running)
-                        enqueue_rt_entity(rt_se);
-        }
+        dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +549,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
         struct rt_prio_array *array = &rt_rq->active;
+        struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+        if (on_rt_rq(rt_se))
+                list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
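The sched_rt.c restructuring splits the per-queue work into __enqueue_rt_entity()/__dequeue_rt_entity() and gives the hierarchy walk a single owner: dequeue_rt_stack() records back-pointers while walking up the rt_se parent chain, then removes entries top-down, and the new enqueue_rt_entity()/dequeue_rt_entity() re-add the stack bottom-up, skipping group queues that ended up empty; requeue_rt_entity() additionally only moves an entity that is actually on a run queue. A hypothetical sketch of the "walk up recording back-pointers, then process top-down" technique on a plain parent-linked chain (illustrative types, not the kernel's):

        #include <stdio.h>

        struct entity {
                const char *name;
                struct entity *parent;   /* up the hierarchy   */
                struct entity *back;     /* filled in per walk */
        };

        /* Walk up from 'se' recording back-pointers, then visit top-down,
         * mirroring dequeue_rt_stack()'s two passes. */
        static void visit_top_down(struct entity *se, void (*fn)(struct entity *))
        {
                struct entity *back = NULL;

                for (; se; se = se->parent) {
                        se->back = back;
                        back = se;
                }
                for (se = back; se; se = se->back)
                        fn(se);
        }

        static void show(struct entity *se) { printf("%s\n", se->name); }

        int main(void)
        {
                struct entity root  = { "root",  NULL,   NULL };
                struct entity group = { "group", &root,  NULL };
                struct entity task  = { "task",  &group, NULL };

                visit_top_down(&task, show);   /* prints root, group, task */
                return 0;
        }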
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a38878e0e49d..80179ef7450e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
         t->sched_info.cpu_time += delta;
         rq_sched_info_depart(task_rq(t), delta);
+
+        if (t->state == TASK_RUNNING)
+                sched_info_queued(t);
 }
 
 /*
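The added comment and two lines encode one accounting rule: if a task leaves the CPU while still TASK_RUNNING it was preempted rather than put to sleep, so it is immediately waiting on the runqueue again and its wait-time clock must restart. A hypothetical, self-contained model of that rule (the struct and helpers are illustrative, not the kernel's sched_info code):

        #include <stdio.h>

        enum state { RUNNING, SLEEPING };

        struct task {
                enum state state;
                unsigned long last_queued;   /* when it last started waiting */
                unsigned long run_delay;     /* total time spent waiting     */
        };

        static unsigned long now;            /* fake clock, in ticks */

        static void info_queued(struct task *t) { t->last_queued = now; }
        static void info_arrive(struct task *t) { t->run_delay += now - t->last_queued; }

        static void info_depart(struct task *t)
        {
                /* ... charge CPU time here ... */
                if (t->state == RUNNING)      /* preempted, not sleeping: waits again */
                        info_queued(t);
        }

        int main(void)
        {
                struct task t = { RUNNING, 0, 0 };

                now = 0; info_queued(&t);     /* woken up, starts waiting      */
                now = 3; info_arrive(&t);     /* gets the CPU after 3 ticks    */
                now = 5; info_depart(&t);     /* preempted while still RUNNING */
                now = 9; info_arrive(&t);     /* runs again after 4 more ticks */
                printf("total wait: %lu ticks\n", t.run_delay);   /* prints 7  */
                return 0;
        }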
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92b..c828c2339cc9 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
         return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
         int this_cpu = raw_smp_processor_id();
 
         __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+        __raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
         unsigned long now;
 
         if (touch_timestamp == 0) {
-                touch_softlockup_watchdog();
+                __touch_softlockup_watchdog();
                 return;
         }
 
@@ -95,7 +100,7 @@ void softlockup_tick(void)
 
         /* do not print during early bootup: */
         if (unlikely(system_state != SYSTEM_RUNNING)) {
-                touch_softlockup_watchdog();
+                __touch_softlockup_watchdog();
                 return;
         }
 
@@ -214,7 +219,7 @@ static int watchdog(void *__bind_cpu)
         sched_setscheduler(current, SCHED_FIFO, &param);
 
         /* initialize timestamp */
-        touch_softlockup_watchdog();
+        __touch_softlockup_watchdog();
 
         set_current_state(TASK_INTERRUPTIBLE);
         /*
@@ -223,7 +228,7 @@ static int watchdog(void *__bind_cpu)
          * debug-printout triggers in softlockup_tick().
          */
         while (!kthread_should_stop()) {
-                touch_softlockup_watchdog();
+                __touch_softlockup_watchdog();
                 schedule();
 
                 if (kthread_should_stop())
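The softlockup change splits "touching" the watchdog in two: the internal __touch_softlockup_watchdog() still writes a real per-CPU timestamp, while the exported touch_softlockup_watchdog() now just stores 0, a sentinel that softlockup_tick() already treats as "freshly touched", deferring the actual clock read to the next tick on that CPU. A hypothetical single-threaded sketch of the sentinel-timestamp idea (stand-in globals, not the kernel's per-CPU code):

        #include <stdio.h>

        static unsigned long clock_ticks;      /* fake monotonic clock   */
        static unsigned long touch_timestamp;  /* 0 means "just touched" */

        static void __touch_watchdog(void) { touch_timestamp = clock_ticks; }

        /* Cheap exported touch: defer the clock read to the next tick. */
        static void touch_watchdog(void)   { touch_timestamp = 0; }

        static void watchdog_tick(void)
        {
                if (touch_timestamp == 0) {    /* sentinel: re-arm and skip the check */
                        __touch_watchdog();
                        return;
                }
                if (clock_ticks - touch_timestamp > 60)
                        printf("BUG: soft lockup detected at tick %lu\n", clock_ticks);
        }

        int main(void)
        {
                for (clock_ticks = 1; clock_ticks <= 115; clock_ticks++) {
                        if (clock_ticks == 50)
                                touch_watchdog();   /* a known-legitimate stall */
                        watchdog_tick();
                }
                return 0;
        }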
