diff options
 include/linux/kprobes.h |  2 +-
 kernel/kprobes.c        | 34 +++++++++++++++++-----------------
 2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index dd7c12e875bc..dce6e4dbeda7 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -181,7 +181,7 @@ struct kretprobe {
 	int nmissed;
 	size_t data_size;
 	struct hlist_head free_instances;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b30fd54eb985..2f193d0ba7f2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
 	return &(kretprobe_table_locks[hash].lock);
 }
@@ -1013,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
 	hlist_del(&ri->hlist);
 	INIT_HLIST_NODE(&ri->hlist);
 	if (likely(rp)) {
-		spin_lock(&rp->lock);
+		raw_spin_lock(&rp->lock);
 		hlist_add_head(&ri->hlist, &rp->free_instances);
-		spin_unlock(&rp->lock);
+		raw_spin_unlock(&rp->lock);
 	} else
 		/* Unregistering */
 		hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 	__acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	*head = &kretprobe_inst_table[hash];
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 					   unsigned long *flags)
 	__acquires(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	__releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
 					     unsigned long *flags)
 	__releases(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
 	hash = hash_ptr(current, KPROBE_HASH_BITS);
-	spin_lock_irqsave(&rp->lock, flags);
+	raw_spin_lock_irqsave(&rp->lock, flags);
 	if (!hlist_empty(&rp->free_instances)) {
 		ri = hlist_entry(rp->free_instances.first,
 				struct kretprobe_instance, hlist);
 		hlist_del(&ri->hlist);
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 
 		ri->rp = rp;
 		ri->task = current;
@@ -1685,7 +1685,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 		kretprobe_table_unlock(hash, &flags);
 	} else {
 		rp->nmissed++;
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 	}
 	return 0;
 }
@@ -1721,7 +1721,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 		rp->maxactive = num_possible_cpus();
 #endif
 	}
-	spin_lock_init(&rp->lock);
+	raw_spin_lock_init(&rp->lock);
 	INIT_HLIST_HEAD(&rp->free_instances);
 	for (i = 0; i < rp->maxactive; i++) {
 		inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@ static int __init init_kprobes(void)
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-		spin_lock_init(&(kretprobe_table_locks[i].lock));
+		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
 	}
 
 	/*