aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/kprobes.c
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2013-09-11 17:24:11 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-11 18:58:52 -0400
commitc802d64a356b5cf349121ac4c5e005f037ce548d (patch)
tree654c5af4d00a40eeaa576acc1aee238e7c8a8a87 /kernel/kprobes.c
parentae79744975cb0b3b9c469fe1a05db37d2943c863 (diff)
kprobes: unify insn caches
The current kprobes insn caches allocate memory areas for insn slots with module_alloc(). The assumption is that the kernel image and module area are both within the same +/- 2GB memory area. This however is not true for s390 where the kernel image resides within the first 2GB (DMA memory area), but the module area is far away in the vmalloc area, usually somewhere close below the 4TB area. For new pc relative instructions s390 needs insn slots that are within +/- 2GB of each area. That way we can patch displacements of pc-relative instructions within the insn slots just like x86 and powerpc. The module area works already with the normal insn slot allocator, however there is currently no way to get insn slots that are within the first 2GB on s390 (aka DMA area). Therefore this patch set modifies the kprobes insn slot cache code in order to allow to specify a custom allocator for the insn slot cache pages. In addition architectures can now have private insn slot caches without the need to modify common code. Patch 1 unifies and simplifies the current insn and optinsn caches implementation. This is a preparation which allows to add more insn caches in a simple way. Patch 2 adds the possibility to specify a custom allocator. Patch 3 makes s390 use the new insn slot mechanisms and adds support for pc-relative instructions with long displacements. This patch (of 3): The two insn caches (insn, and optinsn) each have their own mutex and alloc/free functions (get_[opt]insn_slot() / free_[opt]insn_slot()). Since there is the need for yet another insn cache which satisfies DMA allocations on s390, unify and simplify the current implementation: - Move the per insn cache mutex into struct kprobe_insn_cache. - Move the alloc/free functions to kprobe.h so they are simply wrappers for the generic __get_insn_slot/__free_insn_slot functions. The implementation is done with a DEFINE_INSN_CACHE_OPS() macro which provides the alloc/free functions for each cache if needed. 
- move the struct kprobe_insn_cache to kprobe.h which allows to generate architecture specific insn slot caches outside of the core kprobes code. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--kernel/kprobes.c75
1 files changed, 21 insertions, 54 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6e33498d665c..9e4912dc5559 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -121,12 +121,6 @@ struct kprobe_insn_page {
121 (offsetof(struct kprobe_insn_page, slot_used) + \ 121 (offsetof(struct kprobe_insn_page, slot_used) + \
122 (sizeof(char) * (slots))) 122 (sizeof(char) * (slots)))
123 123
124struct kprobe_insn_cache {
125 struct list_head pages; /* list of kprobe_insn_page */
126 size_t insn_size; /* size of instruction slot */
127 int nr_garbage;
128};
129
130static int slots_per_page(struct kprobe_insn_cache *c) 124static int slots_per_page(struct kprobe_insn_cache *c)
131{ 125{
132 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 126 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +132,8 @@ enum kprobe_slot_state {
138 SLOT_USED = 2, 132 SLOT_USED = 2,
139}; 133};
140 134
141static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */ 135struct kprobe_insn_cache kprobe_insn_slots = {
142static struct kprobe_insn_cache kprobe_insn_slots = { 136 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
143 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 137 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
144 .insn_size = MAX_INSN_SIZE, 138 .insn_size = MAX_INSN_SIZE,
145 .nr_garbage = 0, 139 .nr_garbage = 0,
@@ -150,10 +144,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
150 * __get_insn_slot() - Find a slot on an executable page for an instruction. 144 * __get_insn_slot() - Find a slot on an executable page for an instruction.
151 * We allocate an executable page if there's no room on existing ones. 145 * We allocate an executable page if there's no room on existing ones.
152 */ 146 */
153static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c) 147kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
154{ 148{
155 struct kprobe_insn_page *kip; 149 struct kprobe_insn_page *kip;
150 kprobe_opcode_t *slot = NULL;
156 151
152 mutex_lock(&c->mutex);
157 retry: 153 retry:
158 list_for_each_entry(kip, &c->pages, list) { 154 list_for_each_entry(kip, &c->pages, list) {
159 if (kip->nused < slots_per_page(c)) { 155 if (kip->nused < slots_per_page(c)) {
@@ -162,7 +158,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
162 if (kip->slot_used[i] == SLOT_CLEAN) { 158 if (kip->slot_used[i] == SLOT_CLEAN) {
163 kip->slot_used[i] = SLOT_USED; 159 kip->slot_used[i] = SLOT_USED;
164 kip->nused++; 160 kip->nused++;
165 return kip->insns + (i * c->insn_size); 161 slot = kip->insns + (i * c->insn_size);
162 goto out;
166 } 163 }
167 } 164 }
168 /* kip->nused is broken. Fix it. */ 165 /* kip->nused is broken. Fix it. */
@@ -178,7 +175,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
178 /* All out of space. Need to allocate a new page. */ 175 /* All out of space. Need to allocate a new page. */
179 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 176 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
180 if (!kip) 177 if (!kip)
181 return NULL; 178 goto out;
182 179
183 /* 180 /*
184 * Use module_alloc so this page is within +/- 2GB of where the 181 * Use module_alloc so this page is within +/- 2GB of where the
@@ -188,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
188 kip->insns = module_alloc(PAGE_SIZE); 185 kip->insns = module_alloc(PAGE_SIZE);
189 if (!kip->insns) { 186 if (!kip->insns) {
190 kfree(kip); 187 kfree(kip);
191 return NULL; 188 goto out;
192 } 189 }
193 INIT_LIST_HEAD(&kip->list); 190 INIT_LIST_HEAD(&kip->list);
194 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 191 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -196,19 +193,10 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
196 kip->nused = 1; 193 kip->nused = 1;
197 kip->ngarbage = 0; 194 kip->ngarbage = 0;
198 list_add(&kip->list, &c->pages); 195 list_add(&kip->list, &c->pages);
199 return kip->insns; 196 slot = kip->insns;
200} 197out:
201 198 mutex_unlock(&c->mutex);
202 199 return slot;
203kprobe_opcode_t __kprobes *get_insn_slot(void)
204{
205 kprobe_opcode_t *ret = NULL;
206
207 mutex_lock(&kprobe_insn_mutex);
208 ret = __get_insn_slot(&kprobe_insn_slots);
209 mutex_unlock(&kprobe_insn_mutex);
210
211 return ret;
212} 200}
213 201
214/* Return 1 if all garbages are collected, otherwise 0. */ 202/* Return 1 if all garbages are collected, otherwise 0. */
@@ -255,11 +243,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
255 return 0; 243 return 0;
256} 244}
257 245
258static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, 246void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
259 kprobe_opcode_t *slot, int dirty) 247 kprobe_opcode_t *slot, int dirty)
260{ 248{
261 struct kprobe_insn_page *kip; 249 struct kprobe_insn_page *kip;
262 250
251 mutex_lock(&c->mutex);
263 list_for_each_entry(kip, &c->pages, list) { 252 list_for_each_entry(kip, &c->pages, list) {
264 long idx = ((long)slot - (long)kip->insns) / 253 long idx = ((long)slot - (long)kip->insns) /
265 (c->insn_size * sizeof(kprobe_opcode_t)); 254 (c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +261,23 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
272 collect_garbage_slots(c); 261 collect_garbage_slots(c);
273 } else 262 } else
274 collect_one_slot(kip, idx); 263 collect_one_slot(kip, idx);
275 return; 264 goto out;
276 } 265 }
277 } 266 }
278 /* Could not free this slot. */ 267 /* Could not free this slot. */
279 WARN_ON(1); 268 WARN_ON(1);
269out:
270 mutex_unlock(&c->mutex);
280} 271}
281 272
282void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
283{
284 mutex_lock(&kprobe_insn_mutex);
285 __free_insn_slot(&kprobe_insn_slots, slot, dirty);
286 mutex_unlock(&kprobe_insn_mutex);
287}
288#ifdef CONFIG_OPTPROBES 273#ifdef CONFIG_OPTPROBES
289/* For optimized_kprobe buffer */ 274/* For optimized_kprobe buffer */
290static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */ 275struct kprobe_insn_cache kprobe_optinsn_slots = {
291static struct kprobe_insn_cache kprobe_optinsn_slots = { 276 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
292 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 277 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
293 /* .insn_size is initialized later */ 278 /* .insn_size is initialized later */
294 .nr_garbage = 0, 279 .nr_garbage = 0,
295}; 280};
296/* Get a slot for optimized_kprobe buffer */
297kprobe_opcode_t __kprobes *get_optinsn_slot(void)
298{
299 kprobe_opcode_t *ret = NULL;
300
301 mutex_lock(&kprobe_optinsn_mutex);
302 ret = __get_insn_slot(&kprobe_optinsn_slots);
303 mutex_unlock(&kprobe_optinsn_mutex);
304
305 return ret;
306}
307
308void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
309{
310 mutex_lock(&kprobe_optinsn_mutex);
311 __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
312 mutex_unlock(&kprobe_optinsn_mutex);
313}
314#endif 281#endif
315#endif 282#endif
316 283