Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c            |   3
-rw-r--r--  kernel/hrtimer.c         |  55
-rw-r--r--  kernel/kprobes.c         | 281
-rw-r--r--  kernel/lockdep.c         |   5
-rw-r--r--  kernel/module.c          |   3
-rw-r--r--  kernel/rcuclassic.c      |  23
-rw-r--r--  kernel/rcupreempt.c      |  48
-rw-r--r--  kernel/rcutree.c         |  20
-rw-r--r--  kernel/rcutree.h         |  10
-rw-r--r--  kernel/rcutree_trace.c   |   2
-rw-r--r--  kernel/sched.c           |  14
-rw-r--r--  kernel/softirq.c         |   2
-rw-r--r--  kernel/sysctl.c          |  27
-rw-r--r--  kernel/trace/kmemtrace.c | 319
-rw-r--r--  kernel/trace/trace.h     |   6
15 files changed, 603 insertions(+), 215 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 789b8862fe3b..abf9cf3b95c6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -837,8 +837,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	 */
 	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
 	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
-	     tsk->self_exec_id != tsk->parent_exec_id) &&
-	    !capable(CAP_KILL))
+	     tsk->self_exec_id != tsk->parent_exec_id))
 		tsk->exit_signal = SIGCHLD;
 
 	signal = tracehook_notify_death(tsk, &cookie, group_dead);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f394d2a42ca3..cb8a15c19583 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
  * and expiry check is done in the hrtimer_interrupt or in the softirq.
  */
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
+					    struct hrtimer_clock_base *base,
+					    int wakeup)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-		spin_unlock(&base->cpu_base->lock);
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-		spin_lock(&base->cpu_base->lock);
+		if (wakeup) {
+			spin_unlock(&base->cpu_base->lock);
+			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			spin_lock(&base->cpu_base->lock);
+		} else
+			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
+					    struct hrtimer_clock_base *base,
+					    int wakeup)
 {
 	return 0;
 }
@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 	return 0;
 }
 
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer:	the timer to be added
- * @tim:	expiry time
- * @delta_ns:	"slack" range for the timer
- * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int
-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
-			const enum hrtimer_mode mode)
+int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+		unsigned long delta_ns, const enum hrtimer_mode mode,
+		int wakeup)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 	 * XXX send_remote_softirq() ?
 	 */
 	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-		hrtimer_enqueue_reprogram(timer, new_base);
+		hrtimer_enqueue_reprogram(timer, new_base, wakeup);
 
 	unlock_hrtimer_base(timer, &flags);
 
 	return ret;
 }
+
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer:	the timer to be added
+ * @tim:	expiry time
+ * @delta_ns:	"slack" range for the timer
+ * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+		unsigned long delta_ns, const enum hrtimer_mode mode)
+{
+	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
+}
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
 
 /**
@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
 int
 hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 {
-	return hrtimer_start_range_ns(timer, tim, 0, mode);
+	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);
 
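The new wakeup argument controls how HRTIMER_SOFTIRQ is raised when the timer cannot be programmed directly: with wakeup == 1 the cpu_base lock is dropped so raise_softirq_irqoff() may wake ksoftirqd, while wakeup == 0 only marks the softirq pending via __raise_softirq_irqoff(). The latter is for callers already holding scheduler locks, which must not re-enter the wakeup path; kernel/sched.c below passes 0 for exactly this reason. A minimal sketch of such a caller (the wrapper function itself is hypothetical, not part of this patch):

/*
 * Start a timer while holding a runqueue-style lock: wakeup == 0 means
 * the softirq is only marked pending, so no ksoftirqd wakeup (and thus
 * no scheduler re-entry) can happen under the lock.
 */
static void start_timer_locked(struct hrtimer *timer, u64 delay_ns)
{
	__hrtimer_start_range_ns(timer, ns_to_ktime(delay_ns), 0,
				 HRTIMER_MODE_REL, 0);
}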
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5016bfb682b9..a5e74ddee0e2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -68,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 /* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
 
 static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -328,7 +328,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kprobe *kp;
 
 	list_for_each_entry_rcu(kp, &p->list, list) {
-		if (kp->pre_handler && !kprobe_gone(kp)) {
+		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
@@ -344,7 +344,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	struct kprobe *kp;
 
 	list_for_each_entry_rcu(kp, &p->list, list) {
-		if (kp->post_handler && !kprobe_gone(kp)) {
+		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
 			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
 			reset_kprobe_instance();
@@ -518,20 +518,28 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 /*
- * Add the new probe to old_p->list. Fail if this is the
+ * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
  */
-static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
 	if (p->break_handler) {
-		if (old_p->break_handler)
+		if (ap->break_handler)
 			return -EEXIST;
-		list_add_tail_rcu(&p->list, &old_p->list);
-		old_p->break_handler = aggr_break_handler;
+		list_add_tail_rcu(&p->list, &ap->list);
+		ap->break_handler = aggr_break_handler;
 	} else
-		list_add_rcu(&p->list, &old_p->list);
-	if (p->post_handler && !old_p->post_handler)
-		old_p->post_handler = aggr_post_handler;
+		list_add_rcu(&p->list, &ap->list);
+	if (p->post_handler && !ap->post_handler)
+		ap->post_handler = aggr_post_handler;
+
+	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arch_arm_kprobe(ap);
+	}
 	return 0;
 }
 
@@ -544,6 +552,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	copy_kprobe(p, ap);
 	flush_insn_slot(ap);
 	ap->addr = p->addr;
+	ap->flags = p->flags;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
 	/* We don't care the kprobe which has gone. */
@@ -566,44 +575,59 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 					  struct kprobe *p)
 {
 	int ret = 0;
-	struct kprobe *ap;
+	struct kprobe *ap = old_p;
 
-	if (kprobe_gone(old_p)) {
+	if (old_p->pre_handler != aggr_pre_handler) {
+		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
+		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+		if (!ap)
+			return -ENOMEM;
+		add_aggr_kprobe(ap, old_p);
+	}
+
+	if (kprobe_gone(ap)) {
 		/*
 		 * Attempting to insert new probe at the same location that
 		 * had a probe in the module vaddr area which already
 		 * freed. So, the instruction slot has already been
 		 * released. We need a new slot for the new probe.
 		 */
-		ret = arch_prepare_kprobe(old_p);
+		ret = arch_prepare_kprobe(ap);
 		if (ret)
+			/*
+			 * Even if fail to allocate new slot, don't need to
+			 * free aggr_probe. It will be used next time, or
+			 * freed by unregister_kprobe.
+			 */
 			return ret;
-	}
-	if (old_p->pre_handler == aggr_pre_handler) {
-		copy_kprobe(old_p, p);
-		ret = add_new_kprobe(old_p, p);
-		ap = old_p;
-	} else {
-		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
-		if (!ap) {
-			if (kprobe_gone(old_p))
-				arch_remove_kprobe(old_p);
-			return -ENOMEM;
-		}
-		add_aggr_kprobe(ap, old_p);
-		copy_kprobe(ap, p);
-		ret = add_new_kprobe(ap, p);
-	}
-	if (kprobe_gone(old_p)) {
+
 		/*
-		 * If the old_p has gone, its breakpoint has been disarmed.
-		 * We have to arm it again after preparing real kprobes.
+		 * Clear gone flag to prevent allocating new slot again, and
+		 * set disabled flag because it is not armed yet.
 		 */
-		ap->flags &= ~KPROBE_FLAG_GONE;
-		if (kprobe_enabled)
-			arch_arm_kprobe(ap);
+		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+			    | KPROBE_FLAG_DISABLED;
 	}
-	return ret;
+
+	copy_kprobe(ap, p);
+	return add_new_kprobe(ap, p);
+}
+
+/* Try to disable aggr_kprobe, and return 1 if succeeded.*/
+static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry_rcu(kp, &p->list, list) {
+		if (!kprobe_disabled(kp))
+			/*
+			 * There is an active probe on the list.
+			 * We can't disable aggr_kprobe.
+			 */
+			return 0;
+	}
+	p->flags |= KPROBE_FLAG_DISABLED;
+	return 1;
 }
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -664,7 +688,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		return -EINVAL;
 	}
 
-	p->flags = 0;
+	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+	p->flags &= KPROBE_FLAG_DISABLED;
+
 	/*
 	 * Check if are we probing a module.
 	 */
@@ -709,7 +735,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (kprobe_enabled)
+	if (!kprobes_all_disarmed && !kprobe_disabled(p))
 		arch_arm_kprobe(p);
 
 out_unlock_text:
@@ -722,26 +748,39 @@ out:
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kprobe);
 
-/*
- * Unregister a kprobe without a scheduler synchronization.
- */
-static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+/* Check passed kprobe is valid and return kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
 {
 	struct kprobe *old_p, *list_p;
 
 	old_p = get_kprobe(p->addr);
 	if (unlikely(!old_p))
-		return -EINVAL;
+		return NULL;
 
 	if (p != old_p) {
 		list_for_each_entry_rcu(list_p, &old_p->list, list)
 			if (list_p == p)
 			/* kprobe p is a valid probe */
-				goto valid_p;
-		return -EINVAL;
+				goto valid;
+		return NULL;
 	}
-valid_p:
+valid:
+	return old_p;
+}
+
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+{
+	struct kprobe *old_p, *list_p;
+
+	old_p = __get_valid_kprobe(p);
+	if (old_p == NULL)
+		return -EINVAL;
+
 	if (old_p == p ||
 	    (old_p->pre_handler == aggr_pre_handler &&
 	     list_is_singular(&old_p->list))) {
@@ -750,7 +789,7 @@ valid_p:
 	 * enabled and not gone - otherwise, the breakpoint would
 	 * already have been removed. We save on flushing icache.
 	 */
-	if (kprobe_enabled && !kprobe_gone(old_p)) {
+	if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
 		mutex_lock(&text_mutex);
 		arch_disarm_kprobe(p);
 		mutex_unlock(&text_mutex);
@@ -768,6 +807,11 @@ valid_p:
 		}
 noclean:
 		list_del_rcu(&p->list);
+		if (!kprobe_disabled(old_p)) {
+			try_to_disable_aggr_kprobe(old_p);
+			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
+				arch_disarm_kprobe(old_p);
+		}
 	}
 	return 0;
 }
@@ -803,11 +847,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kprobes);
 
 void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	unregister_kprobes(&p, 1);
 }
+EXPORT_SYMBOL_GPL(unregister_kprobe);
 
 void __kprobes unregister_kprobes(struct kprobe **kps, int num)
 {
@@ -826,6 +872,7 @@ void __kprobes unregister_kprobes(struct kprobe **kps, int num)
 		if (kps[i]->addr)
 			__unregister_kprobe_bottom(kps[i]);
 }
+EXPORT_SYMBOL_GPL(unregister_kprobes);
 
 static struct notifier_block kprobe_exceptions_nb = {
 	.notifier_call = kprobe_exceptions_notify,
@@ -865,16 +912,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_jprobes);
 
 int __kprobes register_jprobe(struct jprobe *jp)
 {
 	return register_jprobes(&jp, 1);
 }
+EXPORT_SYMBOL_GPL(register_jprobe);
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
 {
 	unregister_jprobes(&jp, 1);
 }
+EXPORT_SYMBOL_GPL(unregister_jprobe);
 
 void __kprobes unregister_jprobes(struct jprobe **jps, int num)
 {
@@ -894,6 +944,7 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num)
 			__unregister_kprobe_bottom(&jps[i]->kp);
 	}
 }
+EXPORT_SYMBOL_GPL(unregister_jprobes);
 
 #ifdef CONFIG_KRETPROBES
 /*
@@ -987,6 +1038,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 		free_rp_inst(rp);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kretprobe);
 
 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
@@ -1004,11 +1056,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_kretprobes);
 
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
 	unregister_kretprobes(&rp, 1);
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
 
 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 {
@@ -1030,24 +1084,30 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 
 #else /* CONFIG_KRETPROBES */
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(register_kretprobe);
 
 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(register_kretprobes);
+
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
 
 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
 {
 }
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 
 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 					   struct pt_regs *regs)
@@ -1061,6 +1121,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 static void __kprobes kill_kprobe(struct kprobe *p)
 {
 	struct kprobe *kp;
+
 	p->flags |= KPROBE_FLAG_GONE;
 	if (p->pre_handler == aggr_pre_handler) {
 		/*
@@ -1173,8 +1234,8 @@ static int __init init_kprobes(void)
 		}
 	}
 
-	/* By default, kprobes are enabled */
-	kprobe_enabled = true;
+	/* By default, kprobes are armed */
+	kprobes_all_disarmed = false;
 
 	err = arch_init_kprobes();
 	if (!err)
@@ -1202,12 +1263,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 	else
 		kprobe_type = "k";
 	if (sym)
-		seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
-			sym, offset, (modname ? modname : " "),
-			(kprobe_gone(p) ? "[GONE]" : ""));
+		seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+			p->addr, kprobe_type, sym, offset,
+			(modname ? modname : " "),
+			(kprobe_gone(p) ? "[GONE]" : ""),
+			((kprobe_disabled(p) && !kprobe_gone(p)) ?
+			 "[DISABLED]" : ""));
 	else
-		seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
-			(kprobe_gone(p) ? "[GONE]" : ""));
+		seq_printf(pi, "%p %s %p %s%s\n",
+			p->addr, kprobe_type, p->addr,
+			(kprobe_gone(p) ? "[GONE]" : ""),
+			((kprobe_disabled(p) && !kprobe_gone(p)) ?
+			 "[DISABLED]" : ""));
 }
 
 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1272,7 +1339,72 @@ static struct file_operations debugfs_kprobes_operations = {
 	.release        = seq_release,
 };
 
-static void __kprobes enable_all_kprobes(void)
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+	int ret = 0;
+	struct kprobe *p;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* Check whether specified probe is valid. */
+	p = __get_valid_kprobe(kp);
+	if (unlikely(p == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* If the probe is already disabled (or gone), just return */
+	if (kprobe_disabled(kp))
+		goto out;
+
+	kp->flags |= KPROBE_FLAG_DISABLED;
+	if (p != kp)
+		/* When kp != p, p is always enabled. */
+		try_to_disable_aggr_kprobe(p);
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p))
+		arch_disarm_kprobe(p);
+out:
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+	int ret = 0;
+	struct kprobe *p;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* Check whether specified probe is valid. */
+	p = __get_valid_kprobe(kp);
+	if (unlikely(p == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (kprobe_gone(kp)) {
+		/* This kprobe has gone, we couldn't enable it. */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p))
+		arch_arm_kprobe(p);
+
+	p->flags &= ~KPROBE_FLAG_DISABLED;
+	if (p != kp)
+		kp->flags &= ~KPROBE_FLAG_DISABLED;
+out:
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
+static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -1281,20 +1413,20 @@ static void __kprobes enable_all_kprobes(void)
 
 	mutex_lock(&kprobe_mutex);
 
-	/* If kprobes are already enabled, just return */
-	if (kprobe_enabled)
+	/* If kprobes are armed, just return */
+	if (!kprobes_all_disarmed)
 		goto already_enabled;
 
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
-			if (!kprobe_gone(p))
+			if (!kprobe_disabled(p))
 				arch_arm_kprobe(p);
 	}
 	mutex_unlock(&text_mutex);
 
-	kprobe_enabled = true;
+	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
 
 already_enabled:
@@ -1302,7 +1434,7 @@ already_enabled:
 	return;
 }
 
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -1311,17 +1443,17 @@ static void __kprobes disable_all_kprobes(void)
 
 	mutex_lock(&kprobe_mutex);
 
-	/* If kprobes are already disabled, just return */
-	if (!kprobe_enabled)
+	/* If kprobes are already disarmed, just return */
+	if (kprobes_all_disarmed)
 		goto already_disabled;
 
-	kprobe_enabled = false;
+	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
-			if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
+			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
 				arch_disarm_kprobe(p);
 		}
 	}
@@ -1347,7 +1479,7 @@ static ssize_t read_enabled_file_bool(struct file *file,
 {
 	char buf[3];
 
-	if (kprobe_enabled)
+	if (!kprobes_all_disarmed)
 		buf[0] = '1';
 	else
 		buf[0] = '0';
@@ -1370,12 +1502,12 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case 'y':
 	case 'Y':
 	case '1':
-		enable_all_kprobes();
+		arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
 	case '0':
-		disable_all_kprobes();
+		disarm_all_kprobes();
 		break;
 	}
 
@@ -1418,16 +1550,5 @@ late_initcall(debugfs_kprobe_init);
 
 module_init(init_kprobes);
 
-EXPORT_SYMBOL_GPL(register_kprobe);
-EXPORT_SYMBOL_GPL(unregister_kprobe);
-EXPORT_SYMBOL_GPL(register_kprobes);
-EXPORT_SYMBOL_GPL(unregister_kprobes);
-EXPORT_SYMBOL_GPL(register_jprobe);
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-EXPORT_SYMBOL_GPL(register_jprobes);
-EXPORT_SYMBOL_GPL(unregister_jprobes);
+/* defined in arch/.../kernel/kprobes.c */
 EXPORT_SYMBOL_GPL(jprobe_return);
-EXPORT_SYMBOL_GPL(register_kretprobe);
-EXPORT_SYMBOL_GPL(unregister_kretprobe);
-EXPORT_SYMBOL_GPL(register_kretprobes);
-EXPORT_SYMBOL_GPL(unregister_kretprobes);
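With this change a probe can be disabled and re-enabled without unregistering it, and a probe can even be registered in a pre-disabled state by setting KPROBE_FLAG_DISABLED before register_kprobe() (which now masks the flags instead of zeroing them). A hedged usage sketch of the new API; the handler, module init and probed symbol are hypothetical, chosen only for illustration:

#include <linux/kprobes.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	/* inspect regs here */
	return 0;
}

static struct kprobe my_probe = {
	.symbol_name	= "do_fork",
	.pre_handler	= my_pre_handler,
	.flags		= KPROBE_FLAG_DISABLED,	/* registered but not armed */
};

static int __init my_module_init(void)
{
	int ret = register_kprobe(&my_probe);	/* inserted, left disarmed */

	if (ret)
		return ret;
	return enable_kprobe(&my_probe);	/* arm when tracing should start */
}

disable_kprobe()/enable_kprobe() take kprobe_mutex and arm or disarm the breakpoint only when the whole aggregate probe at that address becomes inactive or active again, so individual children of an aggregated probe stay cheap to toggle.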
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81b5f33970b8..b0f011866969 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -793,6 +793,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return NULL;
 	}
 	class = lock_classes + nr_lock_classes++;
@@ -856,6 +857,7 @@ static struct lock_list *alloc_list_entry(void)
 
 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return NULL;
 	}
 	return list_entries + nr_list_entries++;
@@ -1682,6 +1684,7 @@ cache_hit:
 
 	printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
 	printk("turning off the locking correctness validator.\n");
+	dump_stack();
 	return 0;
 	}
 	chain = lock_chains + nr_lock_chains++;
@@ -2541,6 +2544,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 
@@ -2637,6 +2641,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		debug_locks_off();
 		printk("BUG: MAX_LOCK_DEPTH too low!\n");
 		printk("turning off the locking correctness validator.\n");
+		dump_stack();
 		return 0;
 	}
 
diff --git a/kernel/module.c b/kernel/module.c
index c268a771595c..05f014efa32c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1952,9 +1952,6 @@ static noinline struct module *load_module(void __user *umod,
 		if (strstarts(secstrings+sechdrs[i].sh_name, ".exit"))
 			sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
 #endif
-		/* Don't keep __versions around; it's just for loading. */
-		if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0)
-			sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
 	}
 
 	modindex = find_sec(hdr, sechdrs, secstrings,
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 654c640a6b9c..0f2b0b311304 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -65,6 +65,7 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
 	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
 	.cpumask = CPU_BITS_NONE,
 };
+
 static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cur = -300,
 	.completed = -300,
@@ -73,8 +74,26 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
 	.cpumask = CPU_BITS_NONE,
 };
 
-DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	rdp->passed_quiesc = 1;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	rdp->passed_quiesc = 1;
+}
 
 static int blimit = 10;
 static int qhimark = 10000;
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 5d59e850fb71..ce97a4df64d3 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
 	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
 };
 
+struct rcu_dyntick_sched {
+	int dynticks;
+	int dynticks_snap;
+	int sched_qs;
+	int sched_qs_snap;
+	int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+	.dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+	rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+				&rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
+
 static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
 	.completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
 	}
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
-	.dynticks = 1,
-};
-
 #ifdef CONFIG_NO_HZ
 static DEFINE_PER_CPU(int, rcu_update_flag);
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 97ce31579ec0..7f3266922572 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -78,6 +78,26 @@ DEFINE_PER_CPU(struct rcu_data, rcu_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	rdp->passed_quiesc = 1;
+	rdp->passed_quiesc_completed = rdp->completed;
+}
+
+void rcu_bh_qsctr_inc(int cpu)
+{
+	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+	rdp->passed_quiesc = 1;
+	rdp->passed_quiesc_completed = rdp->completed;
+}
+
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = 1,
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
new file mode 100644
index 000000000000..5e872bbf07f5
--- /dev/null
+++ b/kernel/rcutree.h
@@ -0,0 +1,10 @@
+
+/*
+ * RCU implementation internal declarations:
+ */
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index d6db3e837826..4ee954f6a8d5 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -43,6 +43,8 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
+#include "rcutree.h"
+
 static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
diff --git a/kernel/sched.c b/kernel/sched.c
index bec249885e17..6cc1fd5d5072 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
+		unsigned long delta;
+		ktime_t soft, hard;
+
 		if (hrtimer_active(&rt_b->rt_period_timer))
 			break;
 
 		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-		hrtimer_start_expires(&rt_b->rt_period_timer,
-				HRTIMER_MODE_ABS);
+
+		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
+		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
+		delta = ktime_to_ns(ktime_sub(hard, soft));
+		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
+				HRTIMER_MODE_ABS, 0);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
@@ -1146,7 +1153,8 @@ static __init void init_hrtick(void)
  */
 static void hrtick_start(struct rq *rq, u64 delay)
 {
-	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
+	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
+			HRTIMER_MODE_REL, 0);
 }
 
 static inline void init_hrtick(void)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d105a82543d0..2fecefacdc5b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -65,7 +65,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static inline void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 82350f8f04f6..72eb1a41dcab 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -97,10 +97,11 @@ static int neg_one = -1;
 #endif
 
 static int zero;
-static int one = 1;
-static int two = 2;
+static int __maybe_unused one = 1;
+static int __maybe_unused two = 2;
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
+static int one_thousand = 1000;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
@@ -1027,6 +1028,28 @@ static struct ctl_table vm_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nr_pdflush_threads_min",
+		.data		= &nr_pdflush_threads_min,
+		.maxlen		= sizeof nr_pdflush_threads_min,
+		.mode		= 0644 /* read-write */,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &one,
+		.extra2		= &nr_pdflush_threads_max,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nr_pdflush_threads_max",
+		.data		= &nr_pdflush_threads_max,
+		.maxlen		= sizeof nr_pdflush_threads_max,
+		.mode		= 0644 /* read-write */,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &nr_pdflush_threads_min,
+		.extra2		= &one_thousand,
+	},
+	{
 		.ctl_name	= VM_SWAPPINESS,
 		.procname	= "swappiness",
 		.data		= &vm_swappiness,
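The two new vm_table entries surface as /proc/sys/vm/nr_pdflush_threads_min and /proc/sys/vm/nr_pdflush_threads_max, cross-bounded against each other (min >= 1, max <= 1000) by proc_dointvec_minmax. A minimal userspace sketch of adjusting the lower bound (the chosen value is illustrative only):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/nr_pdflush_threads_min", "w");

	if (!f)
		return 1;
	fprintf(f, "4\n");	/* keep at least four pdflush threads around */
	return fclose(f) ? 1 : 0;
}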
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index ae201b3eda89..5011f4d91e37 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -6,14 +6,16 @@
  * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
  */
 
-#include <linux/dcache.h>
+#include <linux/tracepoint.h>
+#include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/dcache.h>
 #include <linux/fs.h>
-#include <linux/seq_file.h>
+
 #include <trace/kmemtrace.h>
 
-#include "trace.h"
 #include "trace_output.h"
+#include "trace.h"
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_KMEM_OPT_MINIMAL	0x1
@@ -25,14 +27,156 @@ static struct tracer_opt kmem_opts[] = {
 };
 
 static struct tracer_flags kmem_tracer_flags = {
 	.val = 0,
 	.opts = kmem_opts
 };
 
-
-static bool kmem_tracing_enabled __read_mostly;
 static struct trace_array *kmemtrace_array;
 
+/* Trace allocations */
+static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
+				   unsigned long call_site,
+				   const void *ptr,
+				   size_t bytes_req,
+				   size_t bytes_alloc,
+				   gfp_t gfp_flags,
+				   int node)
+{
+	struct trace_array *tr = kmemtrace_array;
+	struct kmemtrace_alloc_entry *entry;
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	if (!event)
+		return;
+
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+
+	entry->ent.type = TRACE_KMEM_ALLOC;
+	entry->type_id = type_id;
+	entry->call_site = call_site;
+	entry->ptr = ptr;
+	entry->bytes_req = bytes_req;
+	entry->bytes_alloc = bytes_alloc;
+	entry->gfp_flags = gfp_flags;
+	entry->node = node;
+
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	trace_wake_up();
+}
+
+static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
+				  unsigned long call_site,
+				  const void *ptr)
+{
+	struct trace_array *tr = kmemtrace_array;
+	struct kmemtrace_free_entry *entry;
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	tracing_generic_entry_update(&entry->ent, 0, 0);
+
+	entry->ent.type = TRACE_KMEM_FREE;
+	entry->type_id = type_id;
+	entry->call_site = call_site;
+	entry->ptr = ptr;
+
+	ring_buffer_unlock_commit(tr->buffer, event);
+
+	trace_wake_up();
+}
+
+static void kmemtrace_kmalloc(unsigned long call_site,
+			      const void *ptr,
+			      size_t bytes_req,
+			      size_t bytes_alloc,
+			      gfp_t gfp_flags)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
+				       const void *ptr,
+				       size_t bytes_req,
+				       size_t bytes_alloc,
+				       gfp_t gfp_flags)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, -1);
+}
+
+static void kmemtrace_kmalloc_node(unsigned long call_site,
+				   const void *ptr,
+				   size_t bytes_req,
+				   size_t bytes_alloc,
+				   gfp_t gfp_flags,
+				   int node)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
+					    const void *ptr,
+					    size_t bytes_req,
+					    size_t bytes_alloc,
+					    gfp_t gfp_flags,
+					    int node)
+{
+	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
+			bytes_req, bytes_alloc, gfp_flags, node);
+}
+
+static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
+{
+	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
+}
+
+static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
+{
+	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
+}
+
+static int kmemtrace_start_probes(void)
+{
+	int err;
+
+	err = register_trace_kmalloc(kmemtrace_kmalloc);
+	if (err)
+		return err;
+	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+	if (err)
+		return err;
+	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
+	if (err)
+		return err;
+	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+	if (err)
+		return err;
+	err = register_trace_kfree(kmemtrace_kfree);
+	if (err)
+		return err;
+	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+
+	return err;
+}
+
+static void kmemtrace_stop_probes(void)
+{
+	unregister_trace_kmalloc(kmemtrace_kmalloc);
+	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
+	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
+	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
+	unregister_trace_kfree(kmemtrace_kfree);
+	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
+}
+
 static int kmem_trace_init(struct trace_array *tr)
 {
 	int cpu;
@@ -41,14 +185,14 @@ static int kmem_trace_init(struct trace_array *tr)
 	for_each_cpu_mask(cpu, cpu_possible_map)
 		tracing_reset(tr, cpu);
 
-	kmem_tracing_enabled = true;
+	kmemtrace_start_probes();
 
 	return 0;
 }
 
 static void kmem_trace_reset(struct trace_array *tr)
 {
-	kmem_tracing_enabled = false;
+	kmemtrace_stop_probes();
 }
 
 static void kmemtrace_headers(struct seq_file *s)
@@ -66,47 +210,84 @@ static void kmemtrace_headers(struct seq_file *s) | |||
66 | } | 210 | } |
67 | 211 | ||
68 | /* | 212 | /* |
69 | * The two following functions give the original output from kmemtrace, | 213 | * The following functions give the original output from kmemtrace, |
70 | * or something close to....perhaps they need some missing things | 214 | * plus the origin CPU, since reordering occurs in-kernel now. |
71 | */ | 215 | */ |
216 | |||
217 | #define KMEMTRACE_USER_ALLOC 0 | ||
218 | #define KMEMTRACE_USER_FREE 1 | ||
219 | |||
220 | struct kmemtrace_user_event { | ||
221 | u8 event_id; | ||
222 | u8 type_id; | ||
223 | u16 event_size; | ||
224 | u32 cpu; | ||
225 | u64 timestamp; | ||
226 | unsigned long call_site; | ||
227 | unsigned long ptr; | ||
228 | }; | ||
229 | |||
230 | struct kmemtrace_user_event_alloc { | ||
231 | size_t bytes_req; | ||
232 | size_t bytes_alloc; | ||
233 | unsigned gfp_flags; | ||
234 | int node; | ||
235 | }; | ||
236 | |||
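The two structs above define the record layout handed to userspace: a fixed header (kmemtrace_user_event), optionally followed by an allocation payload (kmemtrace_user_event_alloc) when the event is an allocation, with event_size covering header plus payload. A hypothetical reader could walk the stream as below. This is a sketch only: it assumes the records are read back verbatim and that the reader is built for the same architecture/ABI as the kernel, since unsigned long, size_t, and struct padding are ABI-dependent.

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of the kernel record header; the layout only
 * matches when built for the same ABI as the producing kernel. */
struct kmemtrace_user_event {
	uint8_t		event_id;
	uint8_t		type_id;
	uint16_t	event_size;
	uint32_t	cpu;
	uint64_t	timestamp;
	unsigned long	call_site;
	unsigned long	ptr;
};

int main(void)
{
	struct kmemtrace_user_event ev;
	char skip[64];

	while (fread(&ev, sizeof(ev), 1, stdin) == 1) {
		printf("event %u type %u cpu %u ts %llu ptr %#lx\n",
		       ev.event_id, ev.type_id, ev.cpu,
		       (unsigned long long)ev.timestamp, ev.ptr);

		/* Discard any payload that follows the header. */
		size_t rest = ev.event_size > sizeof(ev) ?
			      ev.event_size - sizeof(ev) : 0;
		while (rest > 0) {
			size_t n = rest < sizeof(skip) ? rest : sizeof(skip);
			if (fread(skip, 1, n, stdin) != n)
				return 1;
			rest -= n;
		}
	}
	return 0;
}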
72 | static enum print_line_t | 237 | static enum print_line_t |
73 | kmemtrace_print_alloc_original(struct trace_iterator *iter, | 238 | kmemtrace_print_alloc_user(struct trace_iterator *iter, |
74 | struct kmemtrace_alloc_entry *entry) | 239 | struct kmemtrace_alloc_entry *entry) |
75 | { | 240 | { |
241 | struct kmemtrace_user_event_alloc *ev_alloc; | ||
76 | struct trace_seq *s = &iter->seq; | 242 | struct trace_seq *s = &iter->seq; |
77 | int ret; | 243 | struct kmemtrace_user_event *ev; |
244 | |||
245 | ev = trace_seq_reserve(s, sizeof(*ev)); | ||
246 | if (!ev) | ||
247 | return TRACE_TYPE_PARTIAL_LINE; | ||
78 | 248 | ||
79 | /* Taken from the old linux/kmemtrace.h */ | 249 | ev->event_id = KMEMTRACE_USER_ALLOC; |
80 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu " | 250 | ev->type_id = entry->type_id; |
81 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | 251 | ev->event_size = sizeof(*ev) + sizeof(*ev_alloc); |
82 | entry->type_id, entry->call_site, (unsigned long) entry->ptr, | 252 | ev->cpu = iter->cpu; |
83 | (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc, | 253 | ev->timestamp = iter->ts; |
84 | (unsigned long) entry->gfp_flags, entry->node); | 254 | ev->call_site = entry->call_site; |
255 | ev->ptr = (unsigned long)entry->ptr; | ||
85 | 256 | ||
86 | if (!ret) | 257 | ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc)); |
258 | if (!ev_alloc) | ||
87 | return TRACE_TYPE_PARTIAL_LINE; | 259 | return TRACE_TYPE_PARTIAL_LINE; |
88 | 260 | ||
261 | ev_alloc->bytes_req = entry->bytes_req; | ||
262 | ev_alloc->bytes_alloc = entry->bytes_alloc; | ||
263 | ev_alloc->gfp_flags = entry->gfp_flags; | ||
264 | ev_alloc->node = entry->node; | ||
265 | |||
89 | return TRACE_TYPE_HANDLED; | 266 | return TRACE_TYPE_HANDLED; |
90 | } | 267 | } |
91 | 268 | ||
92 | static enum print_line_t | 269 | static enum print_line_t |
93 | kmemtrace_print_free_original(struct trace_iterator *iter, | 270 | kmemtrace_print_free_user(struct trace_iterator *iter, |
94 | struct kmemtrace_free_entry *entry) | 271 | struct kmemtrace_free_entry *entry) |
95 | { | 272 | { |
96 | struct trace_seq *s = &iter->seq; | 273 | struct trace_seq *s = &iter->seq; |
97 | int ret; | 274 | struct kmemtrace_user_event *ev; |
98 | 275 | ||
99 | /* Taken from the old linux/kmemtrace.h */ | 276 | ev = trace_seq_reserve(s, sizeof(*ev)); |
100 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n", | 277 | if (!ev) |
101 | entry->type_id, entry->call_site, (unsigned long) entry->ptr); | ||
102 | |||
103 | if (!ret) | ||
104 | return TRACE_TYPE_PARTIAL_LINE; | 278 | return TRACE_TYPE_PARTIAL_LINE; |
105 | 279 | ||
280 | ev->event_id = KMEMTRACE_USER_FREE; | ||
281 | ev->type_id = entry->type_id; | ||
282 | ev->event_size = sizeof(*ev); | ||
283 | ev->cpu = iter->cpu; | ||
284 | ev->timestamp = iter->ts; | ||
285 | ev->call_site = entry->call_site; | ||
286 | ev->ptr = (unsigned long)entry->ptr; | ||
287 | |||
106 | return TRACE_TYPE_HANDLED; | 288 | return TRACE_TYPE_HANDLED; |
107 | } | 289 | } |
108 | 290 | ||
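Both printers rely on the same idiom: trace_seq_reserve() carves a fixed number of bytes out of the iterator's output buffer and returns a pointer to them (or NULL when the buffer is exhausted, yielding TRACE_TYPE_PARTIAL_LINE), and the caller then fills the record in place instead of formatting text. That reserve-and-fill step is what turns the trace output into a binary stream. The helper belongs to the tracing core; conceptually it does something like the following sketch, which is illustrative only, not the real implementation, and assumes a one-page trace_seq buffer as used elsewhere in ftrace:

static void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	/* Refuse the reservation if it would overflow the buffer. */
	if (s->len + len > PAGE_SIZE)
		return NULL;

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}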
109 | |||
110 | /* The following two functions provide a more minimalistic output */ | 291 |
111 | static enum print_line_t | 292 | static enum print_line_t |
112 | kmemtrace_print_alloc_compress(struct trace_iterator *iter, | 293 | kmemtrace_print_alloc_compress(struct trace_iterator *iter, |
@@ -178,7 +359,7 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter, | |||
178 | 359 | ||
179 | static enum print_line_t | 360 | static enum print_line_t |
180 | kmemtrace_print_free_compress(struct trace_iterator *iter, | 361 | kmemtrace_print_free_compress(struct trace_iterator *iter, |
181 | struct kmemtrace_free_entry *entry) | 362 | struct kmemtrace_free_entry *entry) |
182 | { | 363 | { |
183 | struct trace_seq *s = &iter->seq; | 364 | struct trace_seq *s = &iter->seq; |
184 | int ret; | 365 | int ret; |
@@ -239,20 +420,22 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | |||
239 | switch (entry->type) { | 420 | switch (entry->type) { |
240 | case TRACE_KMEM_ALLOC: { | 421 | case TRACE_KMEM_ALLOC: { |
241 | struct kmemtrace_alloc_entry *field; | 422 | struct kmemtrace_alloc_entry *field; |
423 | |||
242 | trace_assign_type(field, entry); | 424 | trace_assign_type(field, entry); |
243 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | 425 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) |
244 | return kmemtrace_print_alloc_compress(iter, field); | 426 | return kmemtrace_print_alloc_compress(iter, field); |
245 | else | 427 | else |
246 | return kmemtrace_print_alloc_original(iter, field); | 428 | return kmemtrace_print_alloc_user(iter, field); |
247 | } | 429 | } |
248 | 430 | ||
249 | case TRACE_KMEM_FREE: { | 431 | case TRACE_KMEM_FREE: { |
250 | struct kmemtrace_free_entry *field; | 432 | struct kmemtrace_free_entry *field; |
433 | |||
251 | trace_assign_type(field, entry); | 434 | trace_assign_type(field, entry); |
252 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | 435 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) |
253 | return kmemtrace_print_free_compress(iter, field); | 436 | return kmemtrace_print_free_compress(iter, field); |
254 | else | 437 | else |
255 | return kmemtrace_print_free_original(iter, field); | 438 | return kmemtrace_print_free_user(iter, field); |
256 | } | 439 | } |
257 | 440 | ||
258 | default: | 441 | default: |
@@ -260,70 +443,13 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | |||
260 | } | 443 | } |
261 | } | 444 | } |
262 | 445 | ||
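On the consuming side, the dispatch above has a mirror image: a reader of the binary stream branches on event_id from the record header rather than on the in-kernel entry type. Extending the hypothetical reader sketched earlier, the allocation payload could be decoded with a helper like this, called when event_id is KMEMTRACE_USER_ALLOC instead of skipping the payload (same ABI caveats as before):

#include <stddef.h>
#include <stdio.h>

/* Userspace mirror of the allocation payload. */
struct kmemtrace_user_event_alloc {
	size_t		bytes_req;
	size_t		bytes_alloc;
	unsigned	gfp_flags;
	int		node;
};

/* Reads and prints the payload that follows an ALLOC header;
 * returns 0 on success, -1 on a short read. */
static int read_alloc_payload(FILE *in)
{
	struct kmemtrace_user_event_alloc alloc;

	if (fread(&alloc, sizeof(alloc), 1, in) != 1)
		return -1;
	printf("  req %zu alloc %zu gfp %#x node %d\n",
	       alloc.bytes_req, alloc.bytes_alloc,
	       alloc.gfp_flags, alloc.node);
	return 0;
}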
263 | /* Trace allocations */ | ||
264 | void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, | ||
265 | unsigned long call_site, | ||
266 | const void *ptr, | ||
267 | size_t bytes_req, | ||
268 | size_t bytes_alloc, | ||
269 | gfp_t gfp_flags, | ||
270 | int node) | ||
271 | { | ||
272 | struct ring_buffer_event *event; | ||
273 | struct kmemtrace_alloc_entry *entry; | ||
274 | struct trace_array *tr = kmemtrace_array; | ||
275 | |||
276 | if (!kmem_tracing_enabled) | ||
277 | return; | ||
278 | |||
279 | event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC, | ||
280 | sizeof(*entry), 0, 0); | ||
281 | if (!event) | ||
282 | return; | ||
283 | entry = ring_buffer_event_data(event); | ||
284 | |||
285 | entry->call_site = call_site; | ||
286 | entry->ptr = ptr; | ||
287 | entry->bytes_req = bytes_req; | ||
288 | entry->bytes_alloc = bytes_alloc; | ||
289 | entry->gfp_flags = gfp_flags; | ||
290 | entry->node = node; | ||
291 | |||
292 | trace_buffer_unlock_commit(tr, event, 0, 0); | ||
293 | } | ||
294 | EXPORT_SYMBOL(kmemtrace_mark_alloc_node); | ||
295 | |||
296 | void kmemtrace_mark_free(enum kmemtrace_type_id type_id, | ||
297 | unsigned long call_site, | ||
298 | const void *ptr) | ||
299 | { | ||
300 | struct ring_buffer_event *event; | ||
301 | struct kmemtrace_free_entry *entry; | ||
302 | struct trace_array *tr = kmemtrace_array; | ||
303 | |||
304 | if (!kmem_tracing_enabled) | ||
305 | return; | ||
306 | |||
307 | event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE, | ||
308 | sizeof(*entry), 0, 0); | ||
309 | if (!event) | ||
310 | return; | ||
311 | entry = ring_buffer_event_data(event); | ||
312 | entry->type_id = type_id; | ||
313 | entry->call_site = call_site; | ||
314 | entry->ptr = ptr; | ||
315 | |||
316 | trace_buffer_unlock_commit(tr, event, 0, 0); | ||
317 | } | ||
318 | EXPORT_SYMBOL(kmemtrace_mark_free); | ||
319 | |||
320 | static struct tracer kmem_tracer __read_mostly = { | 446 | static struct tracer kmem_tracer __read_mostly = { |
321 | .name = "kmemtrace", | 447 | .name = "kmemtrace", |
322 | .init = kmem_trace_init, | 448 | .init = kmem_trace_init, |
323 | .reset = kmem_trace_reset, | 449 | .reset = kmem_trace_reset, |
324 | .print_line = kmemtrace_print_line, | 450 | .print_line = kmemtrace_print_line, |
325 | .print_header = kmemtrace_headers, | 451 | .print_header = kmemtrace_headers, |
326 | .flags = &kmem_tracer_flags | 452 | .flags = &kmem_tracer_flags |
327 | }; | 453 | }; |
328 | 454 | ||
329 | void kmemtrace_init(void) | 455 | void kmemtrace_init(void) |
@@ -335,5 +461,4 @@ static int __init init_kmem_tracer(void) | |||
335 | { | 461 | { |
336 | return register_tracer(&kmem_tracer); | 462 | return register_tracer(&kmem_tracer); |
337 | } | 463 | } |
338 | |||
339 | device_initcall(init_kmem_tracer); | 464 | device_initcall(init_kmem_tracer); |
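The deleted kmemtrace_mark_alloc_node()/kmemtrace_mark_free() exports were the old direct hooks that the slab allocators called into; after this patch the same information reaches kmemtrace through the tracepoints that kmemtrace_start_probes() attaches to. An allocator call site therefore reduces to firing a tracepoint, roughly like the hypothetical wrapper below; the real call sites live in the slab allocators outside this diff, and the tracepoint's argument order is assumed from the probe functions above (call_site, ptr, bytes_req, bytes_alloc, gfp_flags):

static void *kmalloc_traced(size_t size, gfp_t flags)
{
	void *ret = __kmalloc(size, flags);

	/*
	 * A no-op unless a probe such as kmemtrace_kmalloc() is
	 * currently registered on the kmalloc tracepoint.
	 */
	trace_kmalloc(_RET_IP_, ret, size, ksize(ret), flags);

	return ret;
}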
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index cb0ce3fc36d3..cbc168f1e43d 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -182,6 +182,12 @@ struct trace_power { | |||
182 | struct power_trace state_data; | 182 | struct power_trace state_data; |
183 | }; | 183 | }; |
184 | 184 | ||
185 | enum kmemtrace_type_id { | ||
186 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ | ||
187 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ | ||
188 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ | ||
189 | }; | ||
190 | |||
185 | struct kmemtrace_alloc_entry { | 191 | struct kmemtrace_alloc_entry { |
186 | struct trace_entry ent; | 192 | struct trace_entry ent; |
187 | enum kmemtrace_type_id type_id; | 193 | enum kmemtrace_type_id type_id; |