Diffstat (limited to 'arch/x86/kernel/ds.c')
 -rw-r--r--  arch/x86/kernel/ds.c | 72
 1 file changed, 41 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 5cd137ab2672..f03f117eff8c 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -193,12 +193,28 @@ static DEFINE_SPINLOCK(ds_lock);
  */
 static atomic_t tracers = ATOMIC_INIT(0);
 
-static inline void get_tracer(struct task_struct *task)
+static inline int get_tracer(struct task_struct *task)
 {
-	if (task)
+	int error;
+
+	spin_lock_irq(&ds_lock);
+
+	if (task) {
+		error = -EPERM;
+		if (atomic_read(&tracers) < 0)
+			goto out;
 		atomic_inc(&tracers);
-	else
+	} else {
+		error = -EPERM;
+		if (atomic_read(&tracers) > 0)
+			goto out;
 		atomic_dec(&tracers);
+	}
+
+	error = 0;
+out:
+	spin_unlock_irq(&ds_lock);
+	return error;
 }
 
 static inline void put_tracer(struct task_struct *task)
@@ -209,14 +225,6 @@ static inline void put_tracer(struct task_struct *task)
 		atomic_inc(&tracers);
 }
 
-static inline int check_tracer(struct task_struct *task)
-{
-	return task ?
-		(atomic_read(&tracers) >= 0) :
-		(atomic_read(&tracers) <= 0);
-}
-
-
 /*
  * The DS context is either attached to a thread or to a cpu:
  * - in the former case, the thread_struct contains a pointer to the
@@ -677,6 +685,10 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
 	if (ovfl)
 		goto out;
 
+	error = get_tracer(task);
+	if (error < 0)
+		goto out;
+
 	/*
 	 * Per-cpu tracing is typically requested using smp_call_function().
 	 * We must not sleep.
@@ -684,7 +696,7 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
 	error = -ENOMEM;
 	tracer = kzalloc(sizeof(*tracer), GFP_ATOMIC);
 	if (!tracer)
-		goto out;
+		goto out_put_tracer;
 	tracer->ovfl = ovfl;
 
 	error = ds_request(&tracer->ds, &tracer->trace.ds,
@@ -696,13 +708,8 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
 	spin_lock_irqsave(&ds_lock, irq);
 
 	error = -EPERM;
-	if (!check_tracer(task))
-		goto out_unlock;
-	get_tracer(task);
-
-	error = -EPERM;
 	if (tracer->ds.context->bts_master)
-		goto out_put_tracer;
+		goto out_unlock;
 	tracer->ds.context->bts_master = tracer;
 
 	spin_unlock_irqrestore(&ds_lock, irq);
@@ -716,13 +723,13 @@ struct bts_tracer *ds_request_bts(struct task_struct *task,
 
 	return tracer;
 
- out_put_tracer:
-	put_tracer(task);
  out_unlock:
 	spin_unlock_irqrestore(&ds_lock, irq);
 	ds_put_context(tracer->ds.context);
  out_tracer:
 	kfree(tracer);
+ out_put_tracer:
+	put_tracer(task);
  out:
 	return ERR_PTR(error);
 }
@@ -741,6 +748,10 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 	if (ovfl)
 		goto out;
 
+	error = get_tracer(task);
+	if (error < 0)
+		goto out;
+
 	/*
 	 * Per-cpu tracing is typically requested using smp_call_function().
 	 * We must not sleep.
@@ -748,7 +759,7 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 	error = -ENOMEM;
 	tracer = kzalloc(sizeof(*tracer), GFP_ATOMIC);
 	if (!tracer)
-		goto out;
+		goto out_put_tracer;
 	tracer->ovfl = ovfl;
 
 	error = ds_request(&tracer->ds, &tracer->trace.ds,
@@ -759,13 +770,8 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 	spin_lock_irqsave(&ds_lock, irq);
 
 	error = -EPERM;
-	if (!check_tracer(task))
-		goto out_unlock;
-	get_tracer(task);
-
-	error = -EPERM;
 	if (tracer->ds.context->pebs_master)
-		goto out_put_tracer;
+		goto out_unlock;
 	tracer->ds.context->pebs_master = tracer;
 
 	spin_unlock_irqrestore(&ds_lock, irq);
@@ -775,13 +781,13 @@ struct pebs_tracer *ds_request_pebs(struct task_struct *task,
 
 	return tracer;
 
- out_put_tracer:
-	put_tracer(task);
  out_unlock:
 	spin_unlock_irqrestore(&ds_lock, irq);
 	ds_put_context(tracer->ds.context);
  out_tracer:
 	kfree(tracer);
+ out_put_tracer:
+	put_tracer(task);
  out:
 	return ERR_PTR(error);
 }
@@ -804,8 +810,8 @@ void ds_release_bts(struct bts_tracer *tracer)
 	if (task && (task != current))
 		wait_task_context_switch(task);
 
-	put_tracer(task);
 	ds_put_context(tracer->ds.context);
+	put_tracer(task);
 
 	kfree(tracer);
 }
@@ -861,16 +867,20 @@ void ds_resume_bts(struct bts_tracer *tracer)
 
 void ds_release_pebs(struct pebs_tracer *tracer)
 {
+	struct task_struct *task;
+
 	if (!tracer)
 		return;
 
+	task = tracer->ds.context->task;
+
 	ds_suspend_pebs(tracer);
 
 	WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
 	tracer->ds.context->pebs_master = NULL;
 
-	put_tracer(tracer->ds.context->task);
 	ds_put_context(tracer->ds.context);
+	put_tracer(task);
 
 	kfree(tracer);
 }
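
For readability, here is roughly how the tracer accounting reads once the hunks above are applied. get_tracer() is reassembled from the post-image of the first hunk; put_tracer() is untouched by this patch and its body is inferred from the visible context lines, so take this as a sketch (indentation, label placement and comments are assumptions), not a byte-exact copy of ds.c:

	static inline int get_tracer(struct task_struct *task)
	{
		int error;

		spin_lock_irq(&ds_lock);

		if (task) {
			/* refuse a per-task tracer while per-cpu tracing holds the counter */
			error = -EPERM;
			if (atomic_read(&tracers) < 0)
				goto out;
			atomic_inc(&tracers);
		} else {
			/* refuse a per-cpu tracer while per-task tracing holds the counter */
			error = -EPERM;
			if (atomic_read(&tracers) > 0)
				goto out;
			atomic_dec(&tracers);
		}

		error = 0;
	out:
		spin_unlock_irq(&ds_lock);
		return error;
	}

	/* unchanged by this patch; body inferred from the surrounding context lines */
	static inline void put_tracer(struct task_struct *task)
	{
		if (task)
			atomic_dec(&tracers);
		else
			atomic_inc(&tracers);
	}

Folding the exclusivity check and the counter update into one step under ds_lock is what lets the request paths drop check_tracer(), take the reference before the GFP_ATOMIC allocation, and unwind it through the new out_put_tracer label on failure.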