author    Paul Mundt <lethal@linux-sh.org>    2008-09-21 00:56:39 -0400
committer Paul Mundt <lethal@linux-sh.org>    2008-09-21 00:56:39 -0400
commit    3d58695edbfac785161bf282dc11fd42a483d6c9 (patch)
tree      08b2fc39eda15082d1dba142d6d76a9a05de7efe /arch
parent    8f2baee28093ea77c7cc8da45049fd94cc76998e (diff)
sh: Trivial trace_mark() instrumentation for core events.
This implements trace points across a number of core events that are
deemed interesting:

  - The page fault handler / TLB miss
  - IPC calls
  - Kernel thread creation

The original LTTng patch had the slow path instrumented, which fails to
account for the vast majority of events. In general, placing this in the
fast path is not a huge performance hit, as we don't take page faults
for kernel addresses.

The other bits of interest are some of the other trap handlers, as well
as the syscall entry/exit (which is better off being handled through the
tracehook API). Most of the other trap handlers are corner cases where
alternate means of notification exist, so there is little value in
placing extra trace points in these locations.

Based on top of the trace points provided both by the LTTng
instrumentation patch and the patch shipping in the ST-Linux tree,
albeit in a stripped-down form.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
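These markers are passive until something registers a probe on them. As a rough
illustration (not part of this patch), the sketch below shows how a module might
hook the kernel_arch_kthread_create marker through the markers API of this
kernel generation (marker_probe_register()/marker_probe_unregister() from
<linux/marker.h>). The probe and module names are made up for the example, and
a real consumer such as LTTng would record into a trace buffer rather than
printk().

/*
 * Illustrative sketch only: attach a probe to the kernel_arch_kthread_create
 * marker added by this patch. Assumes the 2.6.27-era markers API; the names
 * used here (kthread_create_probe, etc.) are hypothetical.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/marker.h>

/*
 * Probe callback: arguments arrive in the order given by the marker's
 * format string, here "pid %d fn %p".
 */
static void kthread_create_probe(void *probe_private, void *call_private,
				 const char *fmt, va_list *args)
{
	int pid = va_arg(*args, int);
	void *fn = va_arg(*args, void *);

	printk(KERN_DEBUG "kthread created: pid %d fn %p\n", pid, fn);
}

static int __init kthread_probe_init(void)
{
	/* The format string must match the one used at the trace_mark() site. */
	return marker_probe_register("kernel_arch_kthread_create",
				     "pid %d fn %p",
				     kthread_create_probe, NULL);
}

static void __exit kthread_probe_exit(void)
{
	marker_probe_unregister("kernel_arch_kthread_create",
				kthread_create_probe, NULL);
}

module_init(kthread_probe_init);
module_exit(kthread_probe_exit);
MODULE_LICENSE("GPL");

The probe's va_list is decoded in the order dictated by the marker's format
string, so the extraction in the callback has to stay in sync with the
instrumentation site.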
Diffstat (limited to 'arch')
-rw-r--r--   arch/sh/kernel/process_32.c    9
-rw-r--r--   arch/sh/kernel/process_64.c   10
-rw-r--r--   arch/sh/kernel/sys_sh.c        2
-rw-r--r--   arch/sh/mm/fault_32.c         57
4 files changed, 50 insertions, 28 deletions
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 914e543102df..7b013aa8c43f 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -169,6 +169,7 @@ __asm__(".align 5\n"
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {
 	struct pt_regs regs;
+	int pid;
 
 	memset(&regs, 0, sizeof(regs));
 	regs.regs[4] = (unsigned long)arg;
@@ -178,8 +179,12 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	regs.sr = (1 << 30);
 
 	/* Ok, create the new process.. */
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-		       &regs, 0, NULL, NULL);
+	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+		      &regs, 0, NULL, NULL);
+
+	trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);
+
+	return pid;
 }
 
 /*
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index d0dddc438c0c..b7aa09235b51 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -396,6 +396,7 @@ ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {
 	struct pt_regs regs;
+	int pid;
 
 	memset(&regs, 0, sizeof(regs));
 	regs.regs[2] = (unsigned long)arg;
@@ -404,8 +405,13 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 	regs.pc = (unsigned long)kernel_thread_helper;
 	regs.sr = (1 << 30);
 
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
-		       &regs, 0, NULL, NULL);
+	/* Ok, create the new process.. */
+	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
+		      &regs, 0, NULL, NULL);
+
+	trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);
+
+	return pid;
 }
 
 /*
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 0dfb88925add..38f098c9c72d 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -171,6 +171,8 @@ asmlinkage int sys_ipc(uint call, int first, int second,
 	version = call >> 16; /* hack for backward compatibility */
 	call &= 0xffff;
 
+	trace_mark(kernel_arch_ipc_call, "call %u first %d", call, first);
+
 	if (call <= SEMTIMEDOP)
 		switch (call) {
 		case SEMOP:
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 08a08ea5d69f..898d477e47c1 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,28 +15,13 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/marker.h>
 #include <asm/io_trapped.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
-	int ret = 0;
-
-#ifdef CONFIG_KPROBES
-	if (!user_mode(regs)) {
-		preempt_disable();
-		if (kprobe_running() && kprobe_fault_handler(regs, trap))
-			ret = 1;
-		preempt_enable();
-	}
-#endif
-
-	return ret;
-}
-
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -261,6 +246,25 @@ do_sigbus:
 		goto no_context;
 }
 
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
+{
+	int ret = 0;
+
+	trace_mark(kernel_arch_trap_entry, "trap_id %d ip #p%ld",
+		   trap >> 5, instruction_pointer(regs));
+
+#ifdef CONFIG_KPROBES
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, trap))
+			ret = 1;
+		preempt_enable();
+	}
+#endif
+
+	return ret;
+}
+
 #ifdef CONFIG_SH_STORE_QUEUES
 /*
  * This is a special case for the SH-4 store queues, as pages for this
@@ -284,15 +288,18 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	int ret = 0;
 
 	if (notify_page_fault(regs, lookup_exception_vector()))
-		return 0;
+		goto out;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
 		kgdb_bus_err_hook();
 #endif
 
+	ret = 1;
+
 	/*
 	 * We don't take page faults for P1, P2, and parts of P4, these
 	 * are always mapped, whether it be due to legacy behaviour in
@@ -302,24 +309,23 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 		pgd = pgd_offset_k(address);
 	} else {
 		if (unlikely(address >= TASK_SIZE || !current->mm))
-			return 1;
+			goto out;
 
 		pgd = pgd_offset(current->mm, address);
 	}
 
 	pud = pud_offset(pgd, address);
 	if (pud_none_or_clear_bad(pud))
-		return 1;
+		goto out;
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
+		goto out;
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		return 1;
+		goto out;
 	if (unlikely(writeaccess && !pte_write(entry)))
-		return 1;
+		goto out;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -336,5 +342,8 @@ asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
 
-	return 0;
+	ret = 0;
+out:
+	trace_mark(kernel_arch_trap_exit, MARK_NOARGS);
+	return ret;
 }
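Similarly, the trap entry/exit markers in the fault path only produce data once
probes are attached. A minimal, illustrative pairing is sketched below under the
same assumptions as the earlier example; printk() in a probe that fires on every
page fault is only acceptable for a toy example, and a real consumer would log
to a trace buffer instead.

/*
 * Illustrative sketch only: probes for the kernel_arch_trap_entry and
 * kernel_arch_trap_exit markers. The entry marker's arguments follow its
 * format string ("trap_id %d ip #p%ld"); the exit marker carries none.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/marker.h>

static void trap_entry_probe(void *probe_private, void *call_private,
			     const char *fmt, va_list *args)
{
	int trap_id = va_arg(*args, int);
	unsigned long ip = va_arg(*args, unsigned long);

	printk(KERN_DEBUG "trap entry: id %d ip %08lx\n", trap_id, ip);
}

static void trap_exit_probe(void *probe_private, void *call_private,
			    const char *fmt, va_list *args)
{
	printk(KERN_DEBUG "trap exit\n");
}

static int __init trap_probe_init(void)
{
	int ret;

	ret = marker_probe_register("kernel_arch_trap_entry",
				    "trap_id %d ip #p%ld",
				    trap_entry_probe, NULL);
	if (ret)
		return ret;

	ret = marker_probe_register("kernel_arch_trap_exit", MARK_NOARGS,
				    trap_exit_probe, NULL);
	if (ret)
		marker_probe_unregister("kernel_arch_trap_entry",
					trap_entry_probe, NULL);
	return ret;
}

static void __exit trap_probe_exit(void)
{
	marker_probe_unregister("kernel_arch_trap_entry", trap_entry_probe, NULL);
	marker_probe_unregister("kernel_arch_trap_exit", trap_exit_probe, NULL);
}

module_init(trap_probe_init);
module_exit(trap_probe_exit);
MODULE_LICENSE("GPL");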