diff options
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r-- | kernel/kprobes.c | 35 |
1 files changed, 14 insertions, 21 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 550294d58a02..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void) | |||
334 | struct kprobe __kprobes *get_kprobe(void *addr) | 334 | struct kprobe __kprobes *get_kprobe(void *addr) |
335 | { | 335 | { |
336 | struct hlist_head *head; | 336 | struct hlist_head *head; |
337 | struct hlist_node *node; | ||
338 | struct kprobe *p; | 337 | struct kprobe *p; |
339 | 338 | ||
340 | head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; | 339 | head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; |
341 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 340 | hlist_for_each_entry_rcu(p, head, hlist) { |
342 | if (p->addr == addr) | 341 | if (p->addr == addr) |
343 | return p; | 342 | return p; |
344 | } | 343 | } |
@@ -799,7 +798,6 @@ out: | |||
799 | static void __kprobes optimize_all_kprobes(void) | 798 | static void __kprobes optimize_all_kprobes(void) |
800 | { | 799 | { |
801 | struct hlist_head *head; | 800 | struct hlist_head *head; |
802 | struct hlist_node *node; | ||
803 | struct kprobe *p; | 801 | struct kprobe *p; |
804 | unsigned int i; | 802 | unsigned int i; |
805 | 803 | ||
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void) | |||
810 | kprobes_allow_optimization = true; | 808 | kprobes_allow_optimization = true; |
811 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 809 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
812 | head = &kprobe_table[i]; | 810 | head = &kprobe_table[i]; |
813 | hlist_for_each_entry_rcu(p, node, head, hlist) | 811 | hlist_for_each_entry_rcu(p, head, hlist) |
814 | if (!kprobe_disabled(p)) | 812 | if (!kprobe_disabled(p)) |
815 | optimize_kprobe(p); | 813 | optimize_kprobe(p); |
816 | } | 814 | } |
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void) | |||
821 | static void __kprobes unoptimize_all_kprobes(void) | 819 | static void __kprobes unoptimize_all_kprobes(void) |
822 | { | 820 | { |
823 | struct hlist_head *head; | 821 | struct hlist_head *head; |
824 | struct hlist_node *node; | ||
825 | struct kprobe *p; | 822 | struct kprobe *p; |
826 | unsigned int i; | 823 | unsigned int i; |
827 | 824 | ||
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void) | |||
832 | kprobes_allow_optimization = false; | 829 | kprobes_allow_optimization = false; |
833 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 830 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
834 | head = &kprobe_table[i]; | 831 | head = &kprobe_table[i]; |
835 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 832 | hlist_for_each_entry_rcu(p, head, hlist) { |
836 | if (!kprobe_disabled(p)) | 833 | if (!kprobe_disabled(p)) |
837 | unoptimize_kprobe(p, false); | 834 | unoptimize_kprobe(p, false); |
838 | } | 835 | } |
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) | |||
1148 | { | 1145 | { |
1149 | struct kretprobe_instance *ri; | 1146 | struct kretprobe_instance *ri; |
1150 | struct hlist_head *head, empty_rp; | 1147 | struct hlist_head *head, empty_rp; |
1151 | struct hlist_node *node, *tmp; | 1148 | struct hlist_node *tmp; |
1152 | unsigned long hash, flags = 0; | 1149 | unsigned long hash, flags = 0; |
1153 | 1150 | ||
1154 | if (unlikely(!kprobes_initialized)) | 1151 | if (unlikely(!kprobes_initialized)) |
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) | |||
1159 | hash = hash_ptr(tk, KPROBE_HASH_BITS); | 1156 | hash = hash_ptr(tk, KPROBE_HASH_BITS); |
1160 | head = &kretprobe_inst_table[hash]; | 1157 | head = &kretprobe_inst_table[hash]; |
1161 | kretprobe_table_lock(hash, &flags); | 1158 | kretprobe_table_lock(hash, &flags); |
1162 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | 1159 | hlist_for_each_entry_safe(ri, tmp, head, hlist) { |
1163 | if (ri->task == tk) | 1160 | if (ri->task == tk) |
1164 | recycle_rp_inst(ri, &empty_rp); | 1161 | recycle_rp_inst(ri, &empty_rp); |
1165 | } | 1162 | } |
1166 | kretprobe_table_unlock(hash, &flags); | 1163 | kretprobe_table_unlock(hash, &flags); |
1167 | hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { | 1164 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { |
1168 | hlist_del(&ri->hlist); | 1165 | hlist_del(&ri->hlist); |
1169 | kfree(ri); | 1166 | kfree(ri); |
1170 | } | 1167 | } |
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) | |||
1173 | static inline void free_rp_inst(struct kretprobe *rp) | 1170 | static inline void free_rp_inst(struct kretprobe *rp) |
1174 | { | 1171 | { |
1175 | struct kretprobe_instance *ri; | 1172 | struct kretprobe_instance *ri; |
1176 | struct hlist_node *pos, *next; | 1173 | struct hlist_node *next; |
1177 | 1174 | ||
1178 | hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { | 1175 | hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) { |
1179 | hlist_del(&ri->hlist); | 1176 | hlist_del(&ri->hlist); |
1180 | kfree(ri); | 1177 | kfree(ri); |
1181 | } | 1178 | } |
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp) | |||
1185 | { | 1182 | { |
1186 | unsigned long flags, hash; | 1183 | unsigned long flags, hash; |
1187 | struct kretprobe_instance *ri; | 1184 | struct kretprobe_instance *ri; |
1188 | struct hlist_node *pos, *next; | 1185 | struct hlist_node *next; |
1189 | struct hlist_head *head; | 1186 | struct hlist_head *head; |
1190 | 1187 | ||
1191 | /* No race here */ | 1188 | /* No race here */ |
1192 | for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { | 1189 | for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { |
1193 | kretprobe_table_lock(hash, &flags); | 1190 | kretprobe_table_lock(hash, &flags); |
1194 | head = &kretprobe_inst_table[hash]; | 1191 | head = &kretprobe_inst_table[hash]; |
1195 | hlist_for_each_entry_safe(ri, pos, next, head, hlist) { | 1192 | hlist_for_each_entry_safe(ri, next, head, hlist) { |
1196 | if (ri->rp == rp) | 1193 | if (ri->rp == rp) |
1197 | ri->rp = NULL; | 1194 | ri->rp = NULL; |
1198 | } | 1195 | } |
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb, | |||
2028 | { | 2025 | { |
2029 | struct module *mod = data; | 2026 | struct module *mod = data; |
2030 | struct hlist_head *head; | 2027 | struct hlist_head *head; |
2031 | struct hlist_node *node; | ||
2032 | struct kprobe *p; | 2028 | struct kprobe *p; |
2033 | unsigned int i; | 2029 | unsigned int i; |
2034 | int checkcore = (val == MODULE_STATE_GOING); | 2030 | int checkcore = (val == MODULE_STATE_GOING); |
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb, | |||
2045 | mutex_lock(&kprobe_mutex); | 2041 | mutex_lock(&kprobe_mutex); |
2046 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 2042 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2047 | head = &kprobe_table[i]; | 2043 | head = &kprobe_table[i]; |
2048 | hlist_for_each_entry_rcu(p, node, head, hlist) | 2044 | hlist_for_each_entry_rcu(p, head, hlist) |
2049 | if (within_module_init((unsigned long)p->addr, mod) || | 2045 | if (within_module_init((unsigned long)p->addr, mod) || |
2050 | (checkcore && | 2046 | (checkcore && |
2051 | within_module_core((unsigned long)p->addr, mod))) { | 2047 | within_module_core((unsigned long)p->addr, mod))) { |
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v) | |||
2192 | static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) | 2188 | static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) |
2193 | { | 2189 | { |
2194 | struct hlist_head *head; | 2190 | struct hlist_head *head; |
2195 | struct hlist_node *node; | ||
2196 | struct kprobe *p, *kp; | 2191 | struct kprobe *p, *kp; |
2197 | const char *sym = NULL; | 2192 | const char *sym = NULL; |
2198 | unsigned int i = *(loff_t *) v; | 2193 | unsigned int i = *(loff_t *) v; |
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v) | |||
2201 | 2196 | ||
2202 | head = &kprobe_table[i]; | 2197 | head = &kprobe_table[i]; |
2203 | preempt_disable(); | 2198 | preempt_disable(); |
2204 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 2199 | hlist_for_each_entry_rcu(p, head, hlist) { |
2205 | sym = kallsyms_lookup((unsigned long)p->addr, NULL, | 2200 | sym = kallsyms_lookup((unsigned long)p->addr, NULL, |
2206 | &offset, &modname, namebuf); | 2201 | &offset, &modname, namebuf); |
2207 | if (kprobe_aggrprobe(p)) { | 2202 | if (kprobe_aggrprobe(p)) { |
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = { | |||
2236 | static void __kprobes arm_all_kprobes(void) | 2231 | static void __kprobes arm_all_kprobes(void) |
2237 | { | 2232 | { |
2238 | struct hlist_head *head; | 2233 | struct hlist_head *head; |
2239 | struct hlist_node *node; | ||
2240 | struct kprobe *p; | 2234 | struct kprobe *p; |
2241 | unsigned int i; | 2235 | unsigned int i; |
2242 | 2236 | ||
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void) | |||
2249 | /* Arming kprobes doesn't optimize kprobe itself */ | 2243 | /* Arming kprobes doesn't optimize kprobe itself */ |
2250 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 2244 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2251 | head = &kprobe_table[i]; | 2245 | head = &kprobe_table[i]; |
2252 | hlist_for_each_entry_rcu(p, node, head, hlist) | 2246 | hlist_for_each_entry_rcu(p, head, hlist) |
2253 | if (!kprobe_disabled(p)) | 2247 | if (!kprobe_disabled(p)) |
2254 | arm_kprobe(p); | 2248 | arm_kprobe(p); |
2255 | } | 2249 | } |
@@ -2265,7 +2259,6 @@ already_enabled: | |||
2265 | static void __kprobes disarm_all_kprobes(void) | 2259 | static void __kprobes disarm_all_kprobes(void) |
2266 | { | 2260 | { |
2267 | struct hlist_head *head; | 2261 | struct hlist_head *head; |
2268 | struct hlist_node *node; | ||
2269 | struct kprobe *p; | 2262 | struct kprobe *p; |
2270 | unsigned int i; | 2263 | unsigned int i; |
2271 | 2264 | ||
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void) | |||
2282 | 2275 | ||
2283 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 2276 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
2284 | head = &kprobe_table[i]; | 2277 | head = &kprobe_table[i]; |
2285 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 2278 | hlist_for_each_entry_rcu(p, head, hlist) { |
2286 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) | 2279 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) |
2287 | disarm_kprobe(p, false); | 2280 | disarm_kprobe(p, false); |
2288 | } | 2281 | } |