From 95f567bb53b3592d722c340c56aaabd5fc78a135 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Mon, 1 Dec 2008 04:31:11 -0500 Subject: LITMUS: remove trailing whitespace for release --- litmus/ftdev.c | 2 +- litmus/litmus.c | 2 +- litmus/sched_pfair.c | 6 +++--- litmus/sched_task_trace.c | 2 +- litmus/srp.c | 8 ++++---- litmus/sync.c | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/litmus/ftdev.c b/litmus/ftdev.c index a62ccecf64..7c933ffdaa 100644 --- a/litmus/ftdev.c +++ b/litmus/ftdev.c @@ -110,7 +110,7 @@ static int ftdev_open(struct inode *in, struct file *filp) } if (ftdev->can_open && (err = ftdev->can_open(ftdev, buf_idx))) goto out; - + ftdm = ftdev->minor + buf_idx; filp->private_data = ftdm; diff --git a/litmus/litmus.c b/litmus/litmus.c index cd3c96a8f3..314bdda489 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -595,7 +595,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) goto out; ret = plugin->activate_plugin(); if (0 != ret) { - printk(KERN_INFO "Can't activate %s (%d).\n", + printk(KERN_INFO "Can't activate %s (%d).\n", plugin->plugin_name, ret); plugin = &linux_sched_plugin; } diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index d0c06e03a7..a733c95b9a 100755 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -434,7 +434,7 @@ static void schedule_next_quantum(quanta_t time) for (cpu = 0; cpu < NR_CPUS; cpu++) if (pstate[cpu]->linked) - PTRACE_TASK(pstate[cpu]->linked, + PTRACE_TASK(pstate[cpu]->linked, " linked on %d.\n", cpu); else PTRACE("(null) linked on %d.\n", cpu); @@ -511,7 +511,7 @@ static void pfair_tick(struct task_struct* t) cur = current_quantum(state); PTRACE("q %lu at %llu\n", cur, litmus_clock()); - /* Attempt to advance time. First CPU to get here + /* Attempt to advance time. First CPU to get here * will prepare the next quantum. 
*/ time = cmpxchg(&pfair_time, @@ -551,7 +551,7 @@ static void pfair_tick(struct task_struct* t) if (state->local != current && (is_realtime(current) || is_present(state->local))) - set_tsk_need_resched(current); + set_tsk_need_resched(current); } static int safe_to_schedule(struct task_struct* t, int cpu) diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 4b546a86d6..913d999dfe 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c @@ -148,7 +148,7 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id, unsigned } } -feather_callback void do_sched_trace_task_completion(unsigned long id, unsigned long _task, +feather_callback void do_sched_trace_task_completion(unsigned long id, unsigned long _task, unsigned long forced) { struct task_struct *t = (struct task_struct*) _task; diff --git a/litmus/srp.c b/litmus/srp.c index 6e670f9138..71639b9916 100644 --- a/litmus/srp.c +++ b/litmus/srp.c @@ -78,7 +78,7 @@ struct srp_semaphore { int cpu; /* cpu associated with this "semaphore" and resource */ }; -#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling) +#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling) static int srp_exceeds_ceiling(struct task_struct* first, struct srp* srp) @@ -86,7 +86,7 @@ static int srp_exceeds_ceiling(struct task_struct* first, return list_empty(&srp->ceiling) || get_rt_period(first) < system_ceiling(srp)->period || (get_rt_period(first) == system_ceiling(srp)->period && - first->pid < system_ceiling(srp)->pid) || + first->pid < system_ceiling(srp)->pid) || ceiling2sem(system_ceiling(srp))->owner == first; } @@ -176,7 +176,7 @@ static void do_srp_down(struct srp_semaphore* sem) } static void do_srp_up(struct srp_semaphore* sem) -{ +{ /* Determine new system priority ceiling for this CPU. 
*/ WARN_ON(!in_list(&sem->ceiling.list)); if (in_list(&sem->ceiling.list)) @@ -296,7 +296,7 @@ void srp_ceiling_block(void) do_ceiling_block(tsk); TRACE_CUR("finally exceeds system ceiling.\n"); } else - TRACE_CUR("is not priority ceiling blocked\n"); + TRACE_CUR("is not priority ceiling blocked\n"); preempt_enable(); } diff --git a/litmus/sync.c b/litmus/sync.c index 8b0ebea49a..d5069f9899 100644 --- a/litmus/sync.c +++ b/litmus/sync.c @@ -44,7 +44,7 @@ static long do_release_ts(lt_t start) spin_lock_irqsave(&ts_release.wait.lock, flags); TRACE("<<<<<< synchronous task system release >>>>>>\n"); - + sched_trace_sys_release(&start); list_for_each(pos, &ts_release.wait.task_list) { t = (struct task_struct*) list_entry(pos, -- cgit v1.2.2 From 8fc3da5666f0dc06885f4c2315537cdb88d7d586 Mon Sep 17 00:00:00 2001 From: Mitchell Jareo Date: Sun, 1 Mar 2009 12:39:53 -0500 Subject: fixed sizeof usage to conform to linux kernel coding style. so, struct x* p = kmalloc(sizeof(struct x), ....) becomes struct x* p = kmalloc(sizeof(*p), ...) for example. 
--- litmus/fdso.c | 7 +++---- litmus/fmlp.c | 2 +- litmus/ftdev.c | 22 +++++++++++----------- litmus/litmus.c | 4 ++-- litmus/rt_domain.c | 2 +- litmus/sched_cedf.c | 2 +- litmus/sched_pfair.c | 2 +- litmus/sync.c | 2 +- 8 files changed, 21 insertions(+), 22 deletions(-) diff --git a/litmus/fdso.c b/litmus/fdso.c index 81ab0afff3..bdc0466f9e 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c @@ -67,7 +67,7 @@ static struct inode_obj_id* alloc_inode_obj(struct inode* inode, if (!raw_obj) return NULL; - obj = kmalloc(sizeof(struct inode_obj_id), GFP_KERNEL); + obj = kmalloc(sizeof(*obj), GFP_KERNEL); if (!obj) return NULL; INIT_LIST_HEAD(&obj->list); @@ -134,9 +134,8 @@ static struct od_table_entry* get_od_entry(struct task_struct* t) table = t->od_table; if (!table) { - table = (struct od_table_entry*) - kzalloc(sizeof(struct od_table_entry) * - MAX_OBJECT_DESCRIPTORS, GFP_KERNEL); + table = kzalloc(sizeof(*table) * MAX_OBJECT_DESCRIPTORS, + GFP_KERNEL); t->od_table = table; } diff --git a/litmus/fmlp.c b/litmus/fmlp.c index f34eeea9ab..820af8e00e 100644 --- a/litmus/fmlp.c +++ b/litmus/fmlp.c @@ -23,7 +23,7 @@ static void* create_fmlp_semaphore(void) struct pi_semaphore* sem; int i; - sem = kmalloc(sizeof(struct pi_semaphore), GFP_KERNEL); + sem = kmalloc(sizeof(*sem), GFP_KERNEL); if (!sem) return NULL; atomic_set(&sem->count, 1); diff --git a/litmus/ftdev.c b/litmus/ftdev.c index 7c933ffdaa..1c1c241a0a 100644 --- a/litmus/ftdev.c +++ b/litmus/ftdev.c @@ -15,7 +15,7 @@ struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size) char* mem; int order = 0, pages = 1; - buf = kmalloc(sizeof(struct ft_buffer), GFP_KERNEL); + buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return NULL; @@ -65,7 +65,7 @@ struct ftdev_event { static int activate(struct ftdev_event** chain, int id) { - struct ftdev_event* ev = kmalloc(sizeof(struct ftdev_event), GFP_KERNEL); + struct ftdev_event* ev = kmalloc(sizeof(*ev), GFP_KERNEL); if (ev) { printk(KERN_INFO "Enabling 
feather-trace event %d.\n", (int) id); @@ -258,15 +258,15 @@ static ssize_t ftdev_write(struct file *filp, const char __user *from, cmd_t cmd; cmd_t id; - if (len % sizeof(cmd_t) || len < 2 * sizeof(cmd_t)) + if (len % sizeof(cmd) || len < 2 * sizeof(cmd)) goto out; - if (copy_from_user(&cmd, from, sizeof(cmd_t))) { + if (copy_from_user(&cmd, from, sizeof(cmd))) { err = -EFAULT; goto out; } - len -= sizeof(cmd_t); - from += sizeof(cmd_t); + len -= sizeof(cmd); + from += sizeof(cmd); if (cmd != FTDEV_ENABLE_CMD && cmd != FTDEV_DISABLE_CMD) goto out; @@ -276,22 +276,22 @@ static ssize_t ftdev_write(struct file *filp, const char __user *from, goto out; } - err = sizeof(cmd_t); + err = sizeof(cmd); while (len) { - if (copy_from_user(&id, from, sizeof(cmd_t))) { + if (copy_from_user(&id, from, sizeof(cmd))) { err = -EFAULT; goto out_unlock; } /* FIXME: check id against list of acceptable events */ - len -= sizeof(cmd_t); - from += sizeof(cmd_t); + len -= sizeof(cmd); + from += sizeof(cmd); if (cmd == FTDEV_DISABLE_CMD) deactivate(&ftdm->events, id); else if (activate(&ftdm->events, id) != 0) { err = -ENOMEM; goto out_unlock; } - err += sizeof(cmd_t); + err += sizeof(cmd); } out_unlock: diff --git a/litmus/litmus.c b/litmus/litmus.c index 314bdda489..5eb4c1c94e 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -247,7 +247,7 @@ static void __scheduler_signal(struct task_struct *t, unsigned int signo, { struct sched_sig* sig; - sig = kmalloc(GFP_ATOMIC, sizeof(struct sched_sig)); + sig = kmalloc(sizeof(*sig), GFP_ATOMIC); if (!sig) { TRACE_TASK(t, "dropping signal: %u\n", t); return; } @@ -500,7 +500,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore) // __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); /* Cleanup everything else. */ - memset(&p->rt_param, 0, sizeof(struct rt_task)); + memset(&p->rt_param, 0, sizeof(user_config)); /* Restore preserved fields. 
*/ if (restore) { diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index be4ef5ea6a..d0a243fc6f 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c @@ -80,7 +80,7 @@ static struct release_heap* get_release_heap(rt_domain_t *rt, lt_t release_time) if (!heap) { /* must create new node */ /* FIXME: use a kmemcache_t */ - rh = kmalloc(sizeof(struct release_heap), GFP_ATOMIC); + rh = kmalloc(sizeof(*rh), GFP_ATOMIC); if (unlikely(!rh)) /* Should be handled somehow. * For now, let's just hope there is diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 6c32e1c06c..ee5e5c8e07 100755 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -661,7 +661,7 @@ static void cedf_domain_init(int first_cpu, int last_cpu) int cpu; /* Create new domain for this cluster. */ - cedf_domain_t *new_cedf_domain = kmalloc(sizeof(cedf_domain_t), + cedf_domain_t *new_cedf_domain = kmalloc(sizeof(*new_cedf_domain), GFP_KERNEL); /* Initialize cluster domain. */ diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index a733c95b9a..053cd27ae4 100755 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c @@ -802,7 +802,7 @@ static long pfair_admit_task(struct task_struct* t) period = 1; } - param = kmalloc(sizeof(struct pfair_param) + + param = kmalloc(sizeof(*param) + quanta * sizeof(struct subtask), GFP_ATOMIC); if (!param) diff --git a/litmus/sync.c b/litmus/sync.c index d5069f9899..9c9941fc27 100644 --- a/litmus/sync.c +++ b/litmus/sync.c @@ -82,7 +82,7 @@ asmlinkage long sys_release_ts(lt_t __user *__delay) /* FIXME: check capabilities... */ - ret = copy_from_user(&delay, __delay, sizeof(lt_t)); + ret = copy_from_user(&delay, __delay, sizeof(delay)); if (ret == 0) ret = do_release_ts(litmus_clock() + delay); -- cgit v1.2.2 From acbe757a454b9cf0b810f5a4125da7243500bc96 Mon Sep 17 00:00:00 2001 From: "Bjoern B. 
Brandenburg" Date: Mon, 2 Mar 2009 16:16:35 -0500 Subject: add sys_null_call() to enable system call overhead tracing A simple noop system call to record kernel entry and exit times. --- arch/sparc64/kernel/systbls.S | 3 ++- arch/x86/kernel/syscall_table_32.S | 3 ++- include/litmus/unistd.h | 3 ++- litmus/litmus.c | 17 +++++++++++++++++ 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index 7fc7615b57..7dcbac74f7 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S @@ -165,7 +165,8 @@ sys_call_table: .word sys_set_rt_task_param, sys_get_rt_task_param, sys_complete_job, sys_register_np_flag, sys_exit_np /*320*/ .word sys_od_open, sys_od_close, sys_fmlp_down, sys_fmlp_up, sys_srp_down -/*325*/ .word sys_srp_up, sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release, sys_release_ts +/*325*/ .word sys_srp_up, sys_query_job_no, sys_wait_for_job_release, sys_wait_for_ts_release +/*330*/ .word sys_release_ts, sys_null_call #if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \ defined(CONFIG_SOLARIS_EMUL_MODULE) diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index f6fdb0a885..65496c26da 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S @@ -339,4 +339,5 @@ ENTRY(sys_call_table) .long sys_query_job_no .long sys_wait_for_job_release .long sys_wait_for_ts_release - .long sys_release_ts /* 339 */ + .long sys_release_ts + .long sys_null_call /* 340 */ diff --git a/include/litmus/unistd.h b/include/litmus/unistd.h index 8224235d95..5ef367f491 100644 --- a/include/litmus/unistd.h +++ b/include/litmus/unistd.h @@ -16,5 +16,6 @@ #define __NR_wait_for_job_release __LSC(12) #define __NR_wait_for_ts_release __LSC(13) #define __NR_release_ts __LSC(14) +#define __NR_null_call __LSC(15) -#define NR_litmus_syscalls 15 +#define NR_litmus_syscalls 16 diff --git a/litmus/litmus.c 
b/litmus/litmus.c index 314bdda489..35b0a48ccd 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -479,6 +479,23 @@ asmlinkage long sys_exit_np(void) #endif /* CONFIG_NP_SECTION */ +/* sys_null_call() is only used for determining raw system call + * overheads (kernel entry, kernel exit). It has no useful side effects. + * If ts is non-NULL, then the current Feather-Trace time is recorded. + */ +asmlinkage long sys_null_call(cycles_t __user *ts) +{ + long ret = 0; + cycles_t now; + + if (ts) { + now = get_cycles(); + ret = put_user(now, ts); + } + + return ret; +} + /* p is a real-time task. Re-init its state as a best-effort task. */ static void reinit_litmus_state(struct task_struct* p, int restore) { -- cgit v1.2.2