diff options
| -rw-r--r-- | kernel/kprobes.c | 30 |
1 file changed, 11 insertions, 19 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 16b5739c516a..6fe9dc6d1a81 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
| @@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = { | |||
| 103 | #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) | 103 | #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) |
| 104 | 104 | ||
| 105 | struct kprobe_insn_page { | 105 | struct kprobe_insn_page { |
| 106 | struct hlist_node hlist; | 106 | struct list_head list; |
| 107 | kprobe_opcode_t *insns; /* Page of instruction slots */ | 107 | kprobe_opcode_t *insns; /* Page of instruction slots */ |
| 108 | char slot_used[INSNS_PER_PAGE]; | 108 | char slot_used[INSNS_PER_PAGE]; |
| 109 | int nused; | 109 | int nused; |
| @@ -117,7 +117,7 @@ enum kprobe_slot_state { | |||
| 117 | }; | 117 | }; |
| 118 | 118 | ||
| 119 | static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ | 119 | static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ |
| 120 | static struct hlist_head kprobe_insn_pages; | 120 | static LIST_HEAD(kprobe_insn_pages); |
| 121 | static int kprobe_garbage_slots; | 121 | static int kprobe_garbage_slots; |
| 122 | static int collect_garbage_slots(void); | 122 | static int collect_garbage_slots(void); |
| 123 | 123 | ||
| @@ -152,10 +152,9 @@ loop_end: | |||
| 152 | static kprobe_opcode_t __kprobes *__get_insn_slot(void) | 152 | static kprobe_opcode_t __kprobes *__get_insn_slot(void) |
| 153 | { | 153 | { |
| 154 | struct kprobe_insn_page *kip; | 154 | struct kprobe_insn_page *kip; |
| 155 | struct hlist_node *pos; | ||
| 156 | 155 | ||
| 157 | retry: | 156 | retry: |
| 158 | hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { | 157 | list_for_each_entry(kip, &kprobe_insn_pages, list) { |
| 159 | if (kip->nused < INSNS_PER_PAGE) { | 158 | if (kip->nused < INSNS_PER_PAGE) { |
| 160 | int i; | 159 | int i; |
| 161 | for (i = 0; i < INSNS_PER_PAGE; i++) { | 160 | for (i = 0; i < INSNS_PER_PAGE; i++) { |
| @@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void) | |||
| 189 | kfree(kip); | 188 | kfree(kip); |
| 190 | return NULL; | 189 | return NULL; |
| 191 | } | 190 | } |
| 192 | INIT_HLIST_NODE(&kip->hlist); | 191 | INIT_LIST_HEAD(&kip->list); |
| 193 | hlist_add_head(&kip->hlist, &kprobe_insn_pages); | 192 | list_add(&kip->list, &kprobe_insn_pages); |
| 194 | memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE); | 193 | memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE); |
| 195 | kip->slot_used[0] = SLOT_USED; | 194 | kip->slot_used[0] = SLOT_USED; |
| 196 | kip->nused = 1; | 195 | kip->nused = 1; |
| @@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) | |||
| 219 | * so as not to have to set it up again the | 218 | * so as not to have to set it up again the |
| 220 | * next time somebody inserts a probe. | 219 | * next time somebody inserts a probe. |
| 221 | */ | 220 | */ |
| 222 | hlist_del(&kip->hlist); | 221 | if (!list_is_singular(&kprobe_insn_pages)) { |
| 223 | if (hlist_empty(&kprobe_insn_pages)) { | 222 | list_del(&kip->list); |
| 224 | INIT_HLIST_NODE(&kip->hlist); | ||
| 225 | hlist_add_head(&kip->hlist, | ||
| 226 | &kprobe_insn_pages); | ||
| 227 | } else { | ||
| 228 | module_free(NULL, kip->insns); | 223 | module_free(NULL, kip->insns); |
| 229 | kfree(kip); | 224 | kfree(kip); |
| 230 | } | 225 | } |
| @@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx) | |||
| 235 | 230 | ||
| 236 | static int __kprobes collect_garbage_slots(void) | 231 | static int __kprobes collect_garbage_slots(void) |
| 237 | { | 232 | { |
| 238 | struct kprobe_insn_page *kip; | 233 | struct kprobe_insn_page *kip, *next; |
| 239 | struct hlist_node *pos, *next; | ||
| 240 | 234 | ||
| 241 | /* Ensure no-one is preempted on the garbages */ | 235 | /* Ensure no-one is preempted on the garbages */ |
| 242 | if (check_safety()) | 236 | if (check_safety()) |
| 243 | return -EAGAIN; | 237 | return -EAGAIN; |
| 244 | 238 | ||
| 245 | hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { | 239 | list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) { |
| 246 | int i; | 240 | int i; |
| 247 | if (kip->ngarbage == 0) | 241 | if (kip->ngarbage == 0) |
| 248 | continue; | 242 | continue; |
| @@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void) | |||
| 260 | void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) | 254 | void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) |
| 261 | { | 255 | { |
| 262 | struct kprobe_insn_page *kip; | 256 | struct kprobe_insn_page *kip; |
| 263 | struct hlist_node *pos; | ||
| 264 | 257 | ||
| 265 | mutex_lock(&kprobe_insn_mutex); | 258 | mutex_lock(&kprobe_insn_mutex); |
| 266 | hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { | 259 | list_for_each_entry(kip, &kprobe_insn_pages, list) { |
| 267 | if (kip->insns <= slot && | 260 | if (kip->insns <= slot && |
| 268 | slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { | 261 | slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { |
| 269 | int i = (slot - kip->insns) / MAX_INSN_SIZE; | 262 | int i = (slot - kip->insns) / MAX_INSN_SIZE; |
| 270 | if (dirty) { | 263 | if (dirty) { |
| 271 | kip->slot_used[i] = SLOT_DIRTY; | 264 | kip->slot_used[i] = SLOT_DIRTY; |
| 272 | kip->ngarbage++; | 265 | kip->ngarbage++; |
| 273 | } else { | 266 | } else |
| 274 | collect_one_slot(kip, i); | 267 | collect_one_slot(kip, i); |
| 275 | } | ||
| 276 | break; | 268 | break; |
| 277 | } | 269 | } |
| 278 | } | 270 | } |
