path: root/include/linux/kprobes.h
author     Heiko Carstens <heiko.carstens@de.ibm.com>     2013-09-11 17:24:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-09-11 18:58:52 -0400
commit     c802d64a356b5cf349121ac4c5e005f037ce548d (patch)
tree       654c5af4d00a40eeaa576acc1aee238e7c8a8a87 /include/linux/kprobes.h
parent     ae79744975cb0b3b9c469fe1a05db37d2943c863 (diff)
kprobes: unify insn caches
The current kprobes insn caches allocate memory areas for insn slots with module_alloc(). The assumption is that the kernel image and the module area are both within the same +/- 2GB memory area.

This however is not true for s390, where the kernel image resides within the first 2GB (DMA memory area), but the module area is far away in the vmalloc area, usually somewhere close below the 4TB area.

For new pc-relative instructions s390 needs insn slots that are within +/- 2GB of each area. That way we can patch displacements of pc-relative instructions within the insn slots just like x86 and powerpc.

The module area already works with the normal insn slot allocator, however there is currently no way to get insn slots that are within the first 2GB on s390 (aka the DMA area).

Therefore this patch set modifies the kprobes insn slot cache code in order to allow specifying a custom allocator for the insn slot cache pages. In addition, architectures can now have private insn slot caches without the need to modify common code.

Patch 1 unifies and simplifies the current insn and optinsn cache implementations. This is a preparation which allows adding more insn caches in a simple way.

Patch 2 adds the possibility to specify a custom allocator.

Patch 3 makes s390 use the new insn slot mechanisms and adds support for pc-relative instructions with long displacements.

This patch (of 3):

The two insn caches (insn and optinsn) each have their own mutex and alloc/free functions (get_[opt]insn_slot() / free_[opt]insn_slot()). Since there is a need for yet another insn cache which satisfies dma allocations on s390, unify and simplify the current implementation:

- Move the per insn cache mutex into struct kprobe_insn_cache.
- Move the alloc/free functions to kprobes.h so they are simply wrappers for the generic __get_insn_slot()/__free_insn_slot() functions. The implementation is done with a DEFINE_INSN_CACHE_OPS() macro which provides the alloc/free functions for each cache if needed.
- Move struct kprobe_insn_cache to kprobes.h, which allows architecture-specific insn slot caches to be generated outside of the core kprobes code.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
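As an illustration of the architecture-private caches this enables, here is a minimal sketch of how an architecture could declare and back its own insn slot cache with the unified structures. The cache name "myarch" and the file split are hypothetical; with this patch alone the slot pages are still allocated by the generic code, and a custom page allocator only becomes possible with patch 2 of the series.

/* Hypothetical arch header (pulled in via <linux/kprobes.h>):
 * DEFINE_INSN_CACHE_OPS(myarch) generates get_myarch_slot() and
 * free_myarch_slot() as inline wrappers around the generic
 * __get_insn_slot()/__free_insn_slot() helpers.
 */
DEFINE_INSN_CACHE_OPS(myarch);

/* Hypothetical arch C file: the cache instance the wrappers refer to. */
#include <linux/kprobes.h>

struct kprobe_insn_cache kprobe_myarch_slots = {
	.mutex		= __MUTEX_INITIALIZER(kprobe_myarch_slots.mutex),
	.pages		= LIST_HEAD_INIT(kprobe_myarch_slots.pages),
	.insn_size	= MAX_INSN_SIZE,
	.nr_garbage	= 0,
};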
Diffstat (limited to 'include/linux/kprobes.h')
-rw-r--r--  include/linux/kprobes.h  32
1 file changed, 28 insertions(+), 4 deletions(-)
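For orientation before the hunks below: existing callers keep using get_insn_slot() and free_insn_slot(), but those names are now produced by the new macro rather than declared directly. Expanding DEFINE_INSN_CACHE_OPS(insn) by hand gives roughly:

/* Hand expansion of DEFINE_INSN_CACHE_OPS(insn) from the hunk below. */
extern struct kprobe_insn_cache kprobe_insn_slots;

static inline kprobe_opcode_t *get_insn_slot(void)
{
	return __get_insn_slot(&kprobe_insn_slots);
}

static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
}

The same expansion with "optinsn" replaces the removed get_optinsn_slot() and free_optinsn_slot() declarations in the CONFIG_OPTPROBES section.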
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ca1d27a0d6a6..077f65321b5e 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -264,10 +264,34 @@ extern void arch_arm_kprobe(struct kprobe *p);
 extern void arch_disarm_kprobe(struct kprobe *p);
 extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
-extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
+struct kprobe_insn_cache {
+	struct mutex mutex;
+	struct list_head pages; /* list of kprobe_insn_page */
+	size_t insn_size; /* size of instruction slot */
+	int nr_garbage;
+};
+
+extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
+extern void __free_insn_slot(struct kprobe_insn_cache *c,
+			     kprobe_opcode_t *slot, int dirty);
+
+#define DEFINE_INSN_CACHE_OPS(__name)					\
+extern struct kprobe_insn_cache kprobe_##__name##_slots;		\
+									\
+static inline kprobe_opcode_t *get_##__name##_slot(void)		\
+{									\
+	return __get_insn_slot(&kprobe_##__name##_slots);		\
+}									\
+									\
+static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
+{									\
+	__free_insn_slot(&kprobe_##__name##_slots, slot, dirty);	\
+}									\
+
+DEFINE_INSN_CACHE_OPS(insn);
+
 #ifdef CONFIG_OPTPROBES
 /*
  * Internal structure for direct jump optimized probe
@@ -287,13 +311,13 @@ extern void arch_optimize_kprobes(struct list_head *oplist);
 extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list);
 extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
-extern kprobe_opcode_t *get_optinsn_slot(void);
-extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
 extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
					unsigned long addr);
 
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 
+DEFINE_INSN_CACHE_OPS(optinsn);
+
 #ifdef CONFIG_SYSCTL
 extern int sysctl_kprobes_optimization;
 extern int proc_kprobes_optimization_handler(struct ctl_table *table,