From 4b38febbd59fd33542a343991262119eb9860f5e Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Thu, 17 Dec 2009 21:23:36 -0500
Subject: [ported from 2008.3] Core LITMUS^RT infrastructure

Port 2008.3 Core LITMUS^RT infrastructure to Linux 2.6.32

litmus_sched_class implements 4 new methods:

- prio_changed: void
- switched_to: void
- get_rr_interval: return infinity (i.e., 0)
- select_task_rq: return current cpu
---
 include/linux/sched.h           |    7 ++
 include/litmus/feather_buffer.h |   94 ++++++++++++++++++++
 include/litmus/feather_trace.h  |   36 ++++++++
 include/litmus/heap.h           |   77 ++++++++++++++++
 include/litmus/jobs.h           |    9 ++
 include/litmus/litmus.h         |  177 +++++++++++++++++++++++++++++++++++++
 include/litmus/rt_param.h       |  175 ++++++++++++++++++++++++++++++++++++
 include/litmus/sched_plugin.h   |  159 +++++++++++++++++++++++++++++++++
 include/litmus/sched_trace.h    |  191 ++++++++++++++++++++++++++++++++++++++++
 include/litmus/trace.h          |  113 ++++++++++++++++++++++++
 10 files changed, 1038 insertions(+)
 create mode 100644 include/litmus/feather_buffer.h
 create mode 100644 include/litmus/feather_trace.h
 create mode 100644 include/litmus/heap.h
 create mode 100644 include/litmus/jobs.h
 create mode 100644 include/litmus/litmus.h
 create mode 100644 include/litmus/rt_param.h
 create mode 100644 include/litmus/sched_plugin.h
 create mode 100644 include/litmus/sched_trace.h
 create mode 100644 include/litmus/trace.h
(limited to 'include')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..bb046c0adf99 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,7 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+#define SCHED_LITMUS		6
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK     0x40000000
 
@@ -94,6 +95,8 @@ struct sched_param {
 
 #include
 
+#include
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1505,6 +1508,10 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+
+	/* LITMUS RT parameters and state */
+	struct rt_param rt_param;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h
new file mode 100644
index 000000000000..6c18277fdfc9
--- /dev/null
+++ b/include/litmus/feather_buffer.h
@@ -0,0 +1,94 @@
+#ifndef _FEATHER_BUFFER_H_
+#define _FEATHER_BUFFER_H_
+
+/* requires UINT_MAX and memcpy */
+
+#define SLOT_FREE	0
+#define SLOT_BUSY	1
+#define SLOT_READY	2
+
+struct ft_buffer {
+	unsigned int	slot_count;
+	unsigned int	slot_size;
+
+	int		free_count;
+	unsigned int	write_idx;
+	unsigned int	read_idx;
+
+	char*		slots;
+	void*		buffer_mem;
+	unsigned int	failed_writes;
+};
+
+static inline int init_ft_buffer(struct ft_buffer*	buf,
+				 unsigned int		slot_count,
+				 unsigned int		slot_size,
+				 char*			slots,
+				 void*			buffer_mem)
+{
+	int i = 0;
+	if (!slot_count || UINT_MAX % slot_count != slot_count - 1) {
+		/* The slot count must divide UINT_MAX + 1 so that, when it
+		 * wraps around, the index correctly points to 0.
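+		 * For example, a power-of-two slot_count such as 256
+		 * satisfies this check (UINT_MAX % 256 == 255), whereas
+		 * slot_count == 100 would be rejected.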
+ */ + return 0; + } else { + buf->slot_count = slot_count; + buf->slot_size = slot_size; + buf->slots = slots; + buf->buffer_mem = buffer_mem; + buf->free_count = slot_count; + buf->write_idx = 0; + buf->read_idx = 0; + buf->failed_writes = 0; + for (i = 0; i < slot_count; i++) + buf->slots[i] = SLOT_FREE; + return 1; + } +} + +static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr) +{ + int free = fetch_and_dec(&buf->free_count); + unsigned int idx; + if (free <= 0) { + fetch_and_inc(&buf->free_count); + *ptr = 0; + fetch_and_inc(&buf->failed_writes); + return 0; + } else { + idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count; + buf->slots[idx] = SLOT_BUSY; + *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size; + return 1; + } +} + +static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr) +{ + unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size; + buf->slots[idx] = SLOT_READY; +} + + +/* exclusive reader access is assumed */ +static inline int ft_buffer_read(struct ft_buffer* buf, void* dest) +{ + unsigned int idx; + if (buf->free_count == buf->slot_count) + /* nothing available */ + return 0; + idx = buf->read_idx % buf->slot_count; + if (buf->slots[idx] == SLOT_READY) { + memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size, + buf->slot_size); + buf->slots[idx] = SLOT_FREE; + buf->read_idx++; + fetch_and_inc(&buf->free_count); + return 1; + } else + return 0; +} + + +#endif diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h new file mode 100644 index 000000000000..3ac1ee5e0277 --- /dev/null +++ b/include/litmus/feather_trace.h @@ -0,0 +1,36 @@ +#ifndef _FEATHER_TRACE_H_ +#define _FEATHER_TRACE_H_ + + +int ft_enable_event(unsigned long id); +int ft_disable_event(unsigned long id); +int ft_is_event_enabled(unsigned long id); +int ft_disable_all_events(void); + +#ifndef __ARCH_HAS_FEATHER_TRACE +/* provide default implementation */ + +#define feather_callback + +#define MAX_EVENTS 1024 + +extern int ft_events[MAX_EVENTS]; + +#define ft_event(id, callback) \ + if (ft_events[id]) callback(); + +#define ft_event0(id, callback) \ + if (ft_events[id]) callback(id); + +#define ft_event1(id, callback, param) \ + if (ft_events[id]) callback(id, param); + +#define ft_event2(id, callback, param, param2) \ + if (ft_events[id]) callback(id, param, param2); + +#define ft_event3(id, callback, p, p2, p3) \ + if (ft_events[id]) callback(id, p, p2, p3); +#endif + + +#endif diff --git a/include/litmus/heap.h b/include/litmus/heap.h new file mode 100644 index 000000000000..da959b0bec9c --- /dev/null +++ b/include/litmus/heap.h @@ -0,0 +1,77 @@ +/* heaps.h -- Binomial Heaps + * + * (c) 2008, 2009 Bjoern Brandenburg + */ + +#ifndef HEAP_H +#define HEAP_H + +#define NOT_IN_HEAP UINT_MAX + +struct heap_node { + struct heap_node* parent; + struct heap_node* next; + struct heap_node* child; + + unsigned int degree; + void* value; + struct heap_node** ref; +}; + +struct heap { + struct heap_node* head; + /* We cache the minimum of the heap. + * This speeds up repeated peek operations. 
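+	 * (heap_peek() extracts the minimum and caches it here, and
+	 * heap_take() consumes the cached node when one is present;
+	 * this is why heap_empty() checks both head and min.)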
+ */ + struct heap_node* min; +}; + +typedef int (*heap_prio_t)(struct heap_node* a, struct heap_node* b); + +void heap_init(struct heap* heap); +void heap_node_init(struct heap_node** ref_to_heap_node_ptr, void* value); + +static inline int heap_node_in_heap(struct heap_node* h) +{ + return h->degree != NOT_IN_HEAP; +} + +static inline int heap_empty(struct heap* heap) +{ + return heap->head == NULL && heap->min == NULL; +} + +/* insert (and reinitialize) a node into the heap */ +void heap_insert(heap_prio_t higher_prio, + struct heap* heap, + struct heap_node* node); + +/* merge addition into target */ +void heap_union(heap_prio_t higher_prio, + struct heap* target, + struct heap* addition); + +struct heap_node* heap_peek(heap_prio_t higher_prio, + struct heap* heap); + +struct heap_node* heap_take(heap_prio_t higher_prio, + struct heap* heap); + +void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap); +int heap_decrease(heap_prio_t higher_prio, struct heap_node* node); + +void heap_delete(heap_prio_t higher_prio, + struct heap* heap, + struct heap_node* node); + +/* allocate from memcache */ +struct heap_node* heap_node_alloc(int gfp_flags); +void heap_node_free(struct heap_node* hn); + +/* allocate a heap node for value and insert into the heap */ +int heap_add(heap_prio_t higher_prio, struct heap* heap, + void* value, int gfp_flags); + +void* heap_take_del(heap_prio_t higher_prio, + struct heap* heap); +#endif diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h new file mode 100644 index 000000000000..9bd361ef3943 --- /dev/null +++ b/include/litmus/jobs.h @@ -0,0 +1,9 @@ +#ifndef __LITMUS_JOBS_H__ +#define __LITMUS_JOBS_H__ + +void prepare_for_next_period(struct task_struct *t); +void release_at(struct task_struct *t, lt_t start); +long complete_job(void); + +#endif + diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h new file mode 100644 index 000000000000..380fcb8acb33 --- /dev/null +++ b/include/litmus/litmus.h @@ -0,0 +1,177 @@ +/* + * Constant definitions related to + * scheduling policy. + */ + +#ifndef _LINUX_LITMUS_H_ +#define _LINUX_LITMUS_H_ + +#include +#include + +extern atomic_t release_master_cpu; + +extern atomic_t __log_seq_no; + +#define TRACE(fmt, args...) \ + sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \ + raw_smp_processor_id(), ## args) + +#define TRACE_TASK(t, fmt, args...) \ + TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args) + +#define TRACE_CUR(fmt, args...) \ + TRACE_TASK(current, fmt, ## args) + +#define TRACE_BUG_ON(cond) \ + do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \ + "called from %p current=%s/%d state=%d " \ + "flags=%x partition=%d cpu=%d rtflags=%d"\ + " job=%u knp=%d timeslice=%u\n", \ + #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \ + current->pid, current->state, current->flags, \ + get_partition(current), smp_processor_id(), get_rt_flags(current), \ + current->rt_param.job_params.job_no, current->rt_param.kernel_np, \ + current->rt.time_slice\ + ); } while(0); + + +/* in_list - is a given list_head queued on some list? 
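+ *
+ * Returns 1 only while the entry is queued on some list, i.e., after
+ * a list_add() and before the corresponding list_del() (which poisons
+ * next/prev); a head set up by INIT_LIST_HEAD() counts as not queued.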
+ */ +static inline int in_list(struct list_head* list) +{ + return !( /* case 1: deleted */ + (list->next == LIST_POISON1 && + list->prev == LIST_POISON2) + || + /* case 2: initialized */ + (list->next == list && + list->prev == list) + ); +} + +#define NO_CPU 0xffffffff + +void litmus_fork(struct task_struct *tsk); +void litmus_exec(void); +/* clean up real-time state of a task */ +void exit_litmus(struct task_struct *dead_tsk); + +long litmus_admit_task(struct task_struct *tsk); +void litmus_exit_task(struct task_struct *tsk); + +#define is_realtime(t) ((t)->policy == SCHED_LITMUS) +#define rt_transition_pending(t) \ + ((t)->rt_param.transition_pending) + +#define tsk_rt(t) (&(t)->rt_param) + +/* Realtime utility macros */ +#define get_rt_flags(t) (tsk_rt(t)->flags) +#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f)) +#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost) +#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) +#define get_rt_period(t) (tsk_rt(t)->task_params.period) +#define get_rt_phase(t) (tsk_rt(t)->task_params.phase) +#define get_partition(t) (tsk_rt(t)->task_params.cpu) +#define get_deadline(t) (tsk_rt(t)->job_params.deadline) +#define get_release(t) (tsk_rt(t)->job_params.release) +#define get_class(t) (tsk_rt(t)->task_params.cls) + +inline static int budget_exhausted(struct task_struct* t) +{ + return get_exec_time(t) >= get_exec_cost(t); +} + + +#define is_hrt(t) \ + (tsk_rt(t)->task_params.class == RT_CLASS_HARD) +#define is_srt(t) \ + (tsk_rt(t)->task_params.class == RT_CLASS_SOFT) +#define is_be(t) \ + (tsk_rt(t)->task_params.class == RT_CLASS_BEST_EFFORT) + +/* Our notion of time within LITMUS: kernel monotonic time. */ +static inline lt_t litmus_clock(void) +{ + return ktime_to_ns(ktime_get()); +} + +/* A macro to convert from nanoseconds to ktime_t. */ +#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) + +#define get_domain(t) (tsk_rt(t)->domain) + +/* Honor the flag in the preempt_count variable that is set + * when scheduling is in progress. + */ +#define is_running(t) \ + ((t)->state == TASK_RUNNING || \ + task_thread_info(t)->preempt_count & PREEMPT_ACTIVE) + +#define is_blocked(t) \ + (!is_running(t)) +#define is_released(t, now) \ + (lt_before_eq(get_release(t), now)) +#define is_tardy(t, now) \ + (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) + +/* real-time comparison macros */ +#define earlier_deadline(a, b) (lt_before(\ + (a)->rt_param.job_params.deadline,\ + (b)->rt_param.job_params.deadline)) +#define earlier_release(a, b) (lt_before(\ + (a)->rt_param.job_params.release,\ + (b)->rt_param.job_params.release)) + +#define make_np(t) do {t->rt_param.kernel_np++;} while(0); +#define take_np(t) do {t->rt_param.kernel_np--;} while(0); + +#ifdef CONFIG_SRP +void srp_ceiling_block(void); +#else +#define srp_ceiling_block() /* nothing */ +#endif + +#define heap2task(hn) ((struct task_struct*) hn->value) + +static inline int is_np(struct task_struct *t) +{ + return tsk_rt(t)->kernel_np; +} + +#define request_exit_np(t) + +static inline int is_present(struct task_struct* t) +{ + return t && tsk_rt(t)->present; +} + + +/* make the unit explicit */ +typedef unsigned long quanta_t; + +enum round { + FLOOR, + CEIL +}; + + +/* Tick period is used to convert ns-specified execution + * costs and periods into tick-based equivalents. 
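+ *
+ * Worked example, assuming a 1 ms quantum: time2quanta(2500000, FLOOR)
+ * yields 2 and time2quanta(2500000, CEIL) yields 3.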
+ */
+extern ktime_t tick_period;
+
+static inline quanta_t time2quanta(lt_t time, enum round round)
+{
+	s64 quantum_length = ktime_to_ns(tick_period);
+
+	if (do_div(time, quantum_length) && round == CEIL)
+		time++;
+	return (quanta_t) time;
+}
+
+/* By how much is cpu staggered behind CPU 0? */
+u64 cpu_stagger_offset(int cpu);
+
+#endif
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
new file mode 100644
index 000000000000..c599f848d1ed
--- /dev/null
+++ b/include/litmus/rt_param.h
@@ -0,0 +1,175 @@
+/*
+ * Definition of the scheduler plugin interface.
+ *
+ */
+#ifndef _LINUX_RT_PARAM_H_
+#define _LINUX_RT_PARAM_H_
+
+/* Litmus time type. */
+typedef unsigned long long lt_t;
+
+static inline int lt_after(lt_t a, lt_t b)
+{
+	return ((long long) b) - ((long long) a) < 0;
+}
+#define lt_before(a, b) lt_after(b, a)
+
+static inline int lt_after_eq(lt_t a, lt_t b)
+{
+	return ((long long) a) - ((long long) b) >= 0;
+}
+#define lt_before_eq(a, b) lt_after_eq(b, a)
+
+/* different types of clients */
+typedef enum {
+	RT_CLASS_HARD,
+	RT_CLASS_SOFT,
+	RT_CLASS_BEST_EFFORT
+} task_class_t;
+
+struct rt_task {
+	lt_t		exec_cost;
+	lt_t		period;
+	lt_t		phase;
+	unsigned int	cpu;
+	task_class_t	cls;
+};
+
+/* don't export internal data structures to user space (liblitmus) */
+#ifdef __KERNEL__
+
+struct _rt_domain;
+struct heap_node;
+struct release_heap;
+
+struct rt_job {
+	/* Time instant the job was or will be released. */
+	lt_t	release;
+	/* What is the current deadline? */
+	lt_t	deadline;
+
+	/* How much service has this job received so far? */
+	lt_t	exec_time;
+
+	/* Which job is this? This is used to let user space
+	 * specify which job to wait for, which is important if jobs
+	 * overrun. If we just call sys_sleep_next_period() then we
+	 * will unintentionally miss jobs after an overrun.
+	 *
+	 * Increase this sequence number when a job is released.
+	 */
+	unsigned int	job_no;
+};
+
+
+struct pfair_param;
+
+/* RT task parameters for scheduling extensions.
+ * These parameters are inherited during clone and therefore must
+ * be explicitly set up before the task set is launched.
+ */
+struct rt_param {
+	/* is the task sleeping? */
+	unsigned int		flags:8;
+
+	/* do we need to check for srp blocking? */
+	unsigned int		srp_non_recurse:1;
+
+	/* is the task present? (true if it can be scheduled) */
+	unsigned int		present:1;
+
+	/* user controlled parameters */
+	struct rt_task		task_params;
+
+	/* timing parameters */
+	struct rt_job		job_params;
+
+	/* Task representing the current "inherited" task
+	 * priority, assigned by inherit_priority and
+	 * return_priority in the scheduler plugins.
+	 * Could point to self if PI does not result in
+	 * an increased task priority.
+	 */
+	struct task_struct*	inh_task;
+
+	/* Don't just dereference this pointer in kernel space!
+	 * It might very well point to junk or nothing at all.
+	 * NULL indicates that the task has not requested any non-preemptable
+	 * section support.
+	 * Not inherited upon fork.
+	 */
+	short* np_flag;
+
+	/* re-use unused counter in plugins that don't need it */
+	union {
+		/* For the FMLP under PSN-EDF, it is required to make the task
+		 * non-preemptive from kernel space. In order not to interfere with
+		 * user space, this counter indicates the kernel space np setting.
+		 * kernel_np > 0 => task is non-preemptive
+		 */
+		unsigned int	kernel_np;
+
+		/* Used by GQ-EDF */
+		unsigned int	last_cpu;
+	};
+
+	/* This field can be used by plugins to store where the task
+	 * is currently scheduled.
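+	 * (A plugin might, for instance, set scheduled_on to
+	 * smp_processor_id() when it picks the task in its schedule()
+	 * callback, and reset it to NO_CPU once the task is preempted.)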
+	 * It is the responsibility of the plugin to avoid race
+	 * conditions.
+	 *
+	 * This is used by GSN-EDF and PFAIR.
+	 */
+	volatile int		scheduled_on;
+
+	/* Is the stack of the task currently in use? This is updated by
+	 * the LITMUS core.
+	 *
+	 * Be careful to avoid deadlocks!
+	 */
+	volatile int		stack_in_use;
+
+	/* This field can be used by plugins to store where the task
+	 * is currently linked. It is the responsibility of the plugin
+	 * to avoid race conditions.
+	 *
+	 * Used by GSN-EDF.
+	 */
+	volatile int		linked_on;
+
+	/* PFAIR/PD^2 state. Allocated on demand. */
+	struct pfair_param*	pfair;
+
+	/* Fields saved before BE->RT transition.
+	 */
+	int old_policy;
+	int old_prio;
+
+	/* ready queue for this task */
+	struct _rt_domain* domain;
+
+	/* heap element for this task
+	 *
+	 * Warning: Don't statically allocate this node. The heap
+	 * implementation swaps these between tasks, thus after
+	 * dequeuing from a heap you may end up with a different node
+	 * than the one you had when enqueuing the task. For the same
+	 * reason, don't obtain and store references to this node
+	 * other than this pointer (which is updated by the heap
+	 * implementation).
+	 */
+	struct heap_node*	heap_node;
+	struct release_heap*	rel_heap;
+
+	/* Used by rt_domain to queue task in release list.
+	 */
+	struct list_head list;
+};
+
+/* Possible RT flags */
+#define RT_F_RUNNING	0x00000000
+#define RT_F_SLEEP	0x00000001
+#define RT_F_EXIT_SEM	0x00000008
+
+#endif
+
+#endif
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
new file mode 100644
index 000000000000..94952f6ccbfa
--- /dev/null
+++ b/include/litmus/sched_plugin.h
@@ -0,0 +1,159 @@
+/*
+ * Definition of the scheduler plugin interface.
+ *
+ */
+#ifndef _LINUX_SCHED_PLUGIN_H_
+#define _LINUX_SCHED_PLUGIN_H_
+
+#include
+
+/* struct for semaphore with priority inheritance */
+struct pi_semaphore {
+	atomic_t count;
+	int sleepers;
+	wait_queue_head_t wait;
+	union {
+		/* highest-prio holder/waiter */
+		struct task_struct *task;
+		struct task_struct* cpu_task[NR_CPUS];
+	} hp;
+	/* current lock holder */
+	struct task_struct *holder;
+};
+
+/************************ setup/tear down ********************/
+
+typedef long (*activate_plugin_t) (void);
+typedef long (*deactivate_plugin_t) (void);
+
+
+
+/********************* scheduler invocation ******************/
+
+/* Plugin-specific realtime tick handler */
+typedef void (*scheduler_tick_t) (struct task_struct *cur);
+/* The plugin's main scheduling decision function */
+typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
+/* Clean up after the task switch has occurred.
+ * This function is called after every (even non-rt) task switch.
+ */
+typedef void (*finish_switch_t)(struct task_struct *prev);
+
+
+/********************* task state changes ********************/
+
+/* Called to set up a new real-time task.
+ * Release the first job, enqueue, etc.
+ * Task may already be running.
+ */
+typedef void (*task_new_t) (struct task_struct *task,
+			    int on_rq,
+			    int running);
+
+/* Called to re-introduce a task after blocking.
+ * Can potentially be called multiple times.
+ */
+typedef void (*task_wake_up_t) (struct task_struct *task);
+/* Called to notify the plugin of a blocking real-time task.
+ * It will only be called for real-time tasks and before schedule is called. */
+typedef void (*task_block_t)  (struct task_struct *task);
+/* Called when a real-time task exits or changes to a different scheduling
+ * class.
+ * Free any allocated resources.
+ */
+typedef void (*task_exit_t)    (struct task_struct *);
+
+/* Called when the new_owner is released from the wait queue.
+ * It should now inherit the priority from sem, _before_ it gets re-added
+ * to any queue.
+ */
+typedef long (*inherit_priority_t) (struct pi_semaphore *sem,
+				    struct task_struct *new_owner);
+
+/* Called when the current task releases a semaphore where it might have
+ * inherited a priority from.
+ */
+typedef long (*return_priority_t) (struct pi_semaphore *sem);
+
+/* Called when a task tries to acquire a semaphore and fails. Check if its
+ * priority is higher than that of the current holder.
+ */
+typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t);
+
+
+
+
+/********************* sys call backends ********************/
+/* This function causes the caller to sleep until the next release */
+typedef long (*complete_job_t) (void);
+
+typedef long (*admit_task_t)(struct task_struct* tsk);
+
+typedef void (*release_at_t)(struct task_struct *t, lt_t start);
+
+struct sched_plugin {
+	struct list_head	list;
+	/* basic info */
+	char			*plugin_name;
+
+	/* setup */
+	activate_plugin_t	activate_plugin;
+	deactivate_plugin_t	deactivate_plugin;
+
+#ifdef CONFIG_SRP
+	unsigned int		srp_active;
+#endif
+
+	/* scheduler invocation */
+	scheduler_tick_t	tick;
+	schedule_t		schedule;
+	finish_switch_t		finish_switch;
+
+	/* syscall backend */
+	complete_job_t		complete_job;
+	release_at_t		release_at;
+
+	/* task state changes */
+	admit_task_t		admit_task;
+
+	task_new_t		task_new;
+	task_wake_up_t		task_wake_up;
+	task_block_t		task_block;
+	task_exit_t		task_exit;
+
+#ifdef CONFIG_FMLP
+	/* priority inheritance */
+	unsigned int		fmlp_active;
+	inherit_priority_t	inherit_priority;
+	return_priority_t	return_priority;
+	pi_block_t		pi_block;
+#endif
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+
+extern struct sched_plugin *litmus;
+
+int register_sched_plugin(struct sched_plugin* plugin);
+struct sched_plugin* find_sched_plugin(const char* name);
+int print_sched_plugins(char* buf, int max);
+
+static inline int srp_active(void)
+{
+#ifdef CONFIG_SRP
+	return litmus->srp_active;
+#else
+	return 0;
+#endif
+}
+static inline int fmlp_active(void)
+{
+#ifdef CONFIG_FMLP
+	return litmus->fmlp_active;
+#else
+	return 0;
+#endif
+}
+
+extern struct sched_plugin linux_sched_plugin;
+
+#endif
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
new file mode 100644
index 000000000000..afd0391d127b
--- /dev/null
+++ b/include/litmus/sched_trace.h
@@ -0,0 +1,191 @@
+/* sched_trace.h -- record scheduler events to a byte stream for offline analysis.
+ */
+#ifndef _LINUX_SCHED_TRACE_H_
+#define _LINUX_SCHED_TRACE_H_
+
+/* all times in nanoseconds */
+
+struct st_trace_header {
+	u8	type;	/* Of what type is this record?  */
+	u8	cpu;	/* On which CPU was it recorded? */
+	u16	pid;	/* PID of the task.              */
+	u32	job;	/* The job sequence number.      */
+};
+
+#define ST_NAME_LEN 16
+struct st_name_data {
+	char	cmd[ST_NAME_LEN];/* The name of the executable of this process. */
+};
+
+struct st_param_data {		/* regular params */
+	u32	wcet;
+	u32	period;
+	u32	phase;
+	u8	partition;
+	u8	__unused[3];
+};
+
+struct st_release_data {	/* A job was or is going to be released. */
+	u64	release;	/* What's the release time?              */
+	u64	deadline;	/* By when must it finish?               */
+};
+
+struct st_assigned_data {	/* A job was assigned to a CPU. */
+	u64	when;
+	u8	target;		/* Where should it execute?
*/ + u8 __unused[3]; +}; + +struct st_switch_to_data { /* A process was switched to on a given CPU. */ + u64 when; /* When did this occur? */ + u32 exec_time; /* Time the current job has executed. */ + +}; + +struct st_switch_away_data { /* A process was switched away from on a given CPU. */ + u64 when; + u64 exec_time; +}; + +struct st_completion_data { /* A job completed. */ + u64 when; + u8 forced:1; /* Set to 1 if job overran and kernel advanced to the + * next task automatically; set to 0 otherwise. + */ + u8 __uflags:7; + u8 __unused[3]; +}; + +struct st_block_data { /* A task blocks. */ + u64 when; + u64 __unused; +}; + +struct st_resume_data { /* A task resumes. */ + u64 when; + u64 __unused; +}; + +struct st_sys_release_data { + u64 when; + u64 release; +}; + +#define DATA(x) struct st_ ## x ## _data x; + +typedef enum { + ST_NAME = 1, /* Start at one, so that we can spot + * uninitialized records. */ + ST_PARAM, + ST_RELEASE, + ST_ASSIGNED, + ST_SWITCH_TO, + ST_SWITCH_AWAY, + ST_COMPLETION, + ST_BLOCK, + ST_RESUME, + ST_SYS_RELEASE, +} st_event_record_type_t; + +struct st_event_record { + struct st_trace_header hdr; + union { + u64 raw[2]; + + DATA(name); + DATA(param); + DATA(release); + DATA(assigned); + DATA(switch_to); + DATA(switch_away); + DATA(completion); + DATA(block); + DATA(resume); + DATA(sys_release); + + } data; +}; + +#undef DATA + +#ifdef __KERNEL__ + +#include +#include + +#ifdef CONFIG_SCHED_TASK_TRACE + +#define SCHED_TRACE(id, callback, task) \ + ft_event1(id, callback, task) +#define SCHED_TRACE2(id, callback, task, xtra) \ + ft_event2(id, callback, task, xtra) + +/* provide prototypes; needed on sparc64 */ +#ifndef NO_TASK_TRACE_DECLS +feather_callback void do_sched_trace_task_name(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_task_param(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_task_release(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_task_switch_to(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_task_switch_away(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_task_completion(unsigned long id, + struct task_struct* task, + unsigned long forced); +feather_callback void do_sched_trace_task_block(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_task_resume(unsigned long id, + struct task_struct* task); +feather_callback void do_sched_trace_sys_release(unsigned long id, + lt_t* start); +#endif + +#else + +#define SCHED_TRACE(id, callback, task) /* no tracing */ +#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ + +#endif + + +#define SCHED_TRACE_BASE_ID 500 + + +#define sched_trace_task_name(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t) +#define sched_trace_task_param(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t) +#define sched_trace_task_release(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t) +#define sched_trace_task_switch_to(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t) +#define sched_trace_task_switch_away(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t) +#define sched_trace_task_completion(t, forced) \ + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ + forced) +#define sched_trace_task_block(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, 
do_sched_trace_task_block, t) +#define sched_trace_task_resume(t) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) + +#define sched_trace_sys_release(when) \ + SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when) + +#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ + +#ifdef CONFIG_SCHED_DEBUG_TRACE +void sched_trace_log_message(const char* fmt, ...); +void dump_trace_buffer(int max); +#else + +#define sched_trace_log_message(fmt, ...) + +#endif + +#endif /* __KERNEL__ */ + +#endif diff --git a/include/litmus/trace.h b/include/litmus/trace.h new file mode 100644 index 000000000000..e8e0c7b6cc6a --- /dev/null +++ b/include/litmus/trace.h @@ -0,0 +1,113 @@ +#ifndef _SYS_TRACE_H_ +#define _SYS_TRACE_H_ + +#ifdef CONFIG_SCHED_OVERHEAD_TRACE + +#include +#include + + +/*********************** TIMESTAMPS ************************/ + +enum task_type_marker { + TSK_BE, + TSK_RT, + TSK_UNKNOWN +}; + +struct timestamp { + uint64_t timestamp; + uint32_t seq_no; + uint8_t cpu; + uint8_t event; + uint8_t task_type; +}; + +/* tracing callbacks */ +feather_callback void save_timestamp(unsigned long event); +feather_callback void save_timestamp_def(unsigned long event, unsigned long type); +feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); +feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu); + + +#define TIMESTAMP(id) ft_event0(id, save_timestamp) + +#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, def) + +#define TTIMESTAMP(id, task) \ + ft_event1(id, save_timestamp_task, (unsigned long) task) + +#define CTIMESTAMP(id, cpu) \ + ft_event1(id, save_timestamp_cpu, cpu) + +#else /* !CONFIG_SCHED_OVERHEAD_TRACE */ + +#define TIMESTAMP(id) /* no tracing */ + +#define DTIMESTAMP(id, def) /* no tracing */ + +#define TTIMESTAMP(id, task) /* no tracing */ + +#define CTIMESTAMP(id, cpu) /* no tracing */ + +#endif + + +/* Convention for timestamps + * ========================= + * + * In order to process the trace files with a common tool, we use the following + * convention to measure execution times: The end time id of a code segment is + * always the next number after the start time event id. 
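+ *
+ * For example, TS_SCHED_START below records event id 100 and
+ * TS_SCHED_END records id 101, so an offline tool can pair the two
+ * samples and compute scheduling overhead as the timestamp difference.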
+ */ + +#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only + * care + * about + * next */ +#define TS_SCHED_END(t) TTIMESTAMP(101, t) +#define TS_SCHED2_START(t) TTIMESTAMP(102, t) +#define TS_SCHED2_END(t) TTIMESTAMP(103, t) + +#define TS_CXS_START(t) TTIMESTAMP(104, t) +#define TS_CXS_END(t) TTIMESTAMP(105, t) + +#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT) +#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT) + +#define TS_TICK_START(t) TTIMESTAMP(110, t) +#define TS_TICK_END(t) TTIMESTAMP(111, t) + + +#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */ +#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */ + +#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */ +#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */ + +#define TS_ENTER_NP_START TIMESTAMP(140) +#define TS_ENTER_NP_END TIMESTAMP(141) + +#define TS_EXIT_NP_START TIMESTAMP(150) +#define TS_EXIT_NP_END TIMESTAMP(151) + +#define TS_SRP_UP_START TIMESTAMP(160) +#define TS_SRP_UP_END TIMESTAMP(161) +#define TS_SRP_DOWN_START TIMESTAMP(162) +#define TS_SRP_DOWN_END TIMESTAMP(163) + +#define TS_PI_UP_START TIMESTAMP(170) +#define TS_PI_UP_END TIMESTAMP(171) +#define TS_PI_DOWN_START TIMESTAMP(172) +#define TS_PI_DOWN_END TIMESTAMP(173) + +#define TS_FIFO_UP_START TIMESTAMP(180) +#define TS_FIFO_UP_END TIMESTAMP(181) +#define TS_FIFO_DOWN_START TIMESTAMP(182) +#define TS_FIFO_DOWN_END TIMESTAMP(183) + +#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) +#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) + + +#endif /* !_SYS_TRACE_H_ */ -- cgit v1.2.2 From cf3f4bd8db320f3f487d66bdec924e926f004787 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:24:47 -0500 Subject: [ported from 2008.3] Add Feather-Trace device file support --- include/litmus/ftdev.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 include/litmus/ftdev.h (limited to 'include') diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h new file mode 100644 index 000000000000..7697b4616699 --- /dev/null +++ b/include/litmus/ftdev.h @@ -0,0 +1,49 @@ +#ifndef _LITMUS_FTDEV_H_ +#define _LITMUS_FTDEV_H_ + +#include +#include +#include +#include + +#define MAX_FTDEV_MINORS NR_CPUS + +#define FTDEV_ENABLE_CMD 0 +#define FTDEV_DISABLE_CMD 1 + +struct ftdev; + +/* return 0 if buffer can be opened, otherwise -$REASON */ +typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no); +/* return 0 on success, otherwise -$REASON */ +typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no); +typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no); + + +struct ftdev_event; + +struct ftdev_minor { + struct ft_buffer* buf; + unsigned int readers; + struct mutex lock; + /* FIXME: filter for authorized events */ + struct ftdev_event* events; +}; + +struct ftdev { + struct cdev cdev; + /* FIXME: don't waste memory, allocate dynamically */ + struct ftdev_minor minor[MAX_FTDEV_MINORS]; + unsigned int minor_cnt; + ftdev_alloc_t alloc; + ftdev_free_t free; + ftdev_can_open_t can_open; +}; + +struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size); +void free_ft_buffer(struct ft_buffer* buf); + +void ftdev_init(struct ftdev* ftdev, struct module* owner); +int register_ftdev(struct ftdev* ftdev, const char* name, int major); + +#endif -- cgit v1.2.2 From 96979188007a0671d3f067d7edf144742d7433ee Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:26:50 -0500 Subject: [ported from 2008.3] Add tracing support and hook up 
Litmus KConfig for x86 - fix requesting more than 2^11 pages (MAX_ORDER) to system allocator Still to be merged: - feather-trace generic implementation --- include/litmus/feather_trace.h | 14 ++++++++++++++ include/litmus/sched_trace.h | 3 ++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h index 3ac1ee5e0277..eef8af7a414e 100644 --- a/include/litmus/feather_trace.h +++ b/include/litmus/feather_trace.h @@ -1,6 +1,7 @@ #ifndef _FEATHER_TRACE_H_ #define _FEATHER_TRACE_H_ +#include int ft_enable_event(unsigned long id); int ft_disable_event(unsigned long id); @@ -30,6 +31,19 @@ extern int ft_events[MAX_EVENTS]; #define ft_event3(id, callback, p, p2, p3) \ if (ft_events[id]) callback(id, p, p2, p3); + +#include + +static inline int fetch_and_inc(int *val) +{ + return atomic_add_return(1, (atomic_t*) val) - 1; +} + +static inline int fetch_and_dec(int *val) +{ + return atomic_sub_return(1, (atomic_t*) val) + 1; +} + #endif diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index afd0391d127b..aae6ac27fe1b 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h @@ -1,4 +1,5 @@ -/* sched_trace.h -- record scheduler events to a byte stream for offline analysis. +/* + * sched_trace.h -- record scheduler events to a byte stream for offline analysis. */ #ifndef _LINUX_SCHED_TRACE_H_ #define _LINUX_SCHED_TRACE_H_ -- cgit v1.2.2 From 59d8d4c53f1e9f6408b87fc22e319e78f664276f Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:29:31 -0500 Subject: [ported from 2008.3] Add complete_n() call --- include/linux/completion.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/completion.h b/include/linux/completion.h index 4a6b604ef7e4..258bec13d424 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -88,6 +88,7 @@ extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); +extern void complete_n(struct completion *, int n); /** * INIT_COMPLETION: - reinitialize a completion structure -- cgit v1.2.2 From 4e593e7105dec02e62ea7a1812dccb35a0d56d01 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:30:47 -0500 Subject: [ported from 2008.3] Add support for quantum alignment --- include/linux/tick.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/tick.h b/include/linux/tick.h index 0482229c07db..4f9ba058abdb 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -71,6 +71,11 @@ extern int tick_is_oneshot_available(void); extern struct tick_device *tick_get_device(int cpu); # ifdef CONFIG_HIGH_RES_TIMERS +/* LITMUS^RT tick alignment */ +#define LINUX_DEFAULT_TICKS 0 +#define LITMUS_ALIGNED_TICKS 1 +#define LITMUS_STAGGERED_TICKS 2 + extern int tick_init_highres(void); extern int tick_program_event(ktime_t expires, int force); extern void tick_setup_sched_timer(void); -- cgit v1.2.2 From 53696c1fe6a6ada66f2a47c078d62aee40ad8ebe Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:31:46 -0500 Subject: [ported from 2008.3] Add rt_domain_t support Still to be merged: - arm_release_timer() with no rq locking --- include/litmus/rt_domain.h | 158 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 include/litmus/rt_domain.h (limited to 'include') diff --git a/include/litmus/rt_domain.h 
b/include/litmus/rt_domain.h new file mode 100644 index 000000000000..bde1e5a54812 --- /dev/null +++ b/include/litmus/rt_domain.h @@ -0,0 +1,158 @@ +/* CLEANUP: Add comments and make it less messy. + * + */ + +#ifndef __UNC_RT_DOMAIN_H__ +#define __UNC_RT_DOMAIN_H__ + +#include + +#define RELEASE_QUEUE_SLOTS 127 /* prime */ + +struct _rt_domain; + +typedef int (*check_resched_needed_t)(struct _rt_domain *rt); +typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks); + +struct release_queue { + /* each slot maintains a list of release heaps sorted + * by release time */ + struct list_head slot[RELEASE_QUEUE_SLOTS]; +}; + +typedef struct _rt_domain { + /* runnable rt tasks are in here */ + spinlock_t ready_lock; + struct heap ready_queue; + + /* real-time tasks waiting for release are in here */ + spinlock_t release_lock; + struct release_queue release_queue; + + /* for moving tasks to the release queue */ + spinlock_t tobe_lock; + struct list_head tobe_released; + + /* how do we check if we need to kick another CPU? */ + check_resched_needed_t check_resched; + + /* how do we release jobs? */ + release_jobs_t release_jobs; + + /* how are tasks ordered in the ready queue? */ + heap_prio_t order; +} rt_domain_t; + +struct release_heap { + /* list_head for per-time-slot list */ + struct list_head list; + lt_t release_time; + /* all tasks to be released at release_time */ + struct heap heap; + /* used to trigger the release */ + struct hrtimer timer; + /* required for the timer callback */ + rt_domain_t* dom; +}; + + +static inline struct task_struct* __next_ready(rt_domain_t* rt) +{ + struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue); + if (hn) + return heap2task(hn); + else + return NULL; +} + +void rt_domain_init(rt_domain_t *rt, heap_prio_t order, + check_resched_needed_t check, + release_jobs_t relase); + +void __add_ready(rt_domain_t* rt, struct task_struct *new); +void __merge_ready(rt_domain_t* rt, struct heap *tasks); +void __add_release(rt_domain_t* rt, struct task_struct *task); + +static inline struct task_struct* __take_ready(rt_domain_t* rt) +{ + struct heap_node* hn = heap_take(rt->order, &rt->ready_queue); + if (hn) + return heap2task(hn); + else + return NULL; +} + +static inline struct task_struct* __peek_ready(rt_domain_t* rt) +{ + struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue); + if (hn) + return heap2task(hn); + else + return NULL; +} + +static inline int is_queued(struct task_struct *t) +{ + return heap_node_in_heap(tsk_rt(t)->heap_node); +} + +static inline void remove(rt_domain_t* rt, struct task_struct *t) +{ + heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node); +} + +static inline void add_ready(rt_domain_t* rt, struct task_struct *new) +{ + unsigned long flags; + /* first we need the write lock for rt_ready_queue */ + spin_lock_irqsave(&rt->ready_lock, flags); + __add_ready(rt, new); + spin_unlock_irqrestore(&rt->ready_lock, flags); +} + +static inline void merge_ready(rt_domain_t* rt, struct heap* tasks) +{ + unsigned long flags; + spin_lock_irqsave(&rt->ready_lock, flags); + __merge_ready(rt, tasks); + spin_unlock_irqrestore(&rt->ready_lock, flags); +} + +static inline struct task_struct* take_ready(rt_domain_t* rt) +{ + unsigned long flags; + struct task_struct* ret; + /* first we need the write lock for rt_ready_queue */ + spin_lock_irqsave(&rt->ready_lock, flags); + ret = __take_ready(rt); + spin_unlock_irqrestore(&rt->ready_lock, flags); + return ret; +} + + +static inline void add_release(rt_domain_t* rt, 
struct task_struct *task) +{ + unsigned long flags; + /* first we need the write lock for rt_ready_queue */ + spin_lock_irqsave(&rt->tobe_lock, flags); + __add_release(rt, task); + spin_unlock_irqrestore(&rt->tobe_lock, flags); +} + +static inline int __jobs_pending(rt_domain_t* rt) +{ + return !heap_empty(&rt->ready_queue); +} + +static inline int jobs_pending(rt_domain_t* rt) +{ + unsigned long flags; + int ret; + /* first we need the write lock for rt_ready_queue */ + spin_lock_irqsave(&rt->ready_lock, flags); + ret = !heap_empty(&rt->ready_queue); + spin_unlock_irqrestore(&rt->ready_lock, flags); + return ret; +} + +#endif -- cgit v1.2.2 From f5936ecf0cff0b94419b6768efba3e15622beeb6 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:32:31 -0500 Subject: [ported from 2008.3] Add common EDF functions --- include/litmus/edf_common.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 include/litmus/edf_common.h (limited to 'include') diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h new file mode 100644 index 000000000000..166cdac6bcab --- /dev/null +++ b/include/litmus/edf_common.h @@ -0,0 +1,27 @@ +/* + * EDF common data structures and utility functions shared by all EDF + * based scheduler plugins + */ + +/* CLEANUP: Add comments and make it less messy. + * + */ + +#ifndef __UNC_EDF_COMMON_H__ +#define __UNC_EDF_COMMON_H__ + +#include + +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, + release_jobs_t release); + +int edf_higher_prio(struct task_struct* first, + struct task_struct* second); + +int edf_ready_order(struct heap_node* a, struct heap_node* b); + +int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); + +int edf_set_hp_task(struct pi_semaphore *sem); +int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); +#endif -- cgit v1.2.2 From fa3c94fc9cd1619fe0dd6081a1a980c09ef3e119 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:33:26 -0500 Subject: [ported from 2008.3] Add File Descriptor Attached Shared Objects (FDSO) infrastructure --- include/linux/fs.h | 21 +++++++++------- include/linux/sched.h | 10 +++++--- include/litmus/fdso.h | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 12 deletions(-) create mode 100644 include/litmus/fdso.h (limited to 'include') diff --git a/include/linux/fs.h b/include/linux/fs.h index 2620a8c63571..5c7e0ff370ba 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -15,8 +15,8 @@ * nr_file rlimit, so it's safe to set up a ridiculously high absolute * upper limit on files-per-process. * - * Some programs (notably those using select()) may have to be - * recompiled to take full advantage of the new limits.. + * Some programs (notably those using select()) may have to be + * recompiled to take full advantage of the new limits.. */ /* Fixed constants first: */ @@ -169,7 +169,7 @@ struct inodes_stat_t { #define SEL_EX 4 /* public flags for file_system_type */ -#define FS_REQUIRES_DEV 1 +#define FS_REQUIRES_DEV 1 #define FS_BINARY_MOUNTDATA 2 #define FS_HAS_SUBTYPE 4 #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ @@ -466,7 +466,7 @@ struct iattr { */ #include -/** +/** * enum positive_aop_returns - aop return codes with specific semantics * * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has @@ -476,7 +476,7 @@ struct iattr { * be a candidate for writeback again in the near * future. 
Other callers must be careful to unlock * the page if they get this return. Returned by - * writepage(); + * writepage(); * * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has * unlocked it and the page might have been truncated. @@ -715,6 +715,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) +struct inode_obj_id_table; struct inode { struct hlist_node i_hash; @@ -783,6 +784,8 @@ struct inode { struct posix_acl *i_acl; struct posix_acl *i_default_acl; #endif + struct list_head i_obj_list; + struct mutex i_obj_mutex; void *i_private; /* fs or device private pointer */ }; @@ -995,10 +998,10 @@ static inline int file_check_writeable(struct file *filp) #define MAX_NON_LFS ((1UL<<31) - 1) -/* Page cache limit. The filesystems should put that into their s_maxbytes - limits, otherwise bad things can happen in VM. */ +/* Page cache limit. The filesystems should put that into their s_maxbytes + limits, otherwise bad things can happen in VM. */ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL #endif @@ -2139,7 +2142,7 @@ extern int may_open(struct path *, int, int); extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern struct file * open_exec(const char *); - + /* fs/dcache.c -- generic fs support functions */ extern int is_subdir(struct dentry *, struct dentry *); extern ino_t find_inode_number(struct dentry *, struct qstr *); diff --git a/include/linux/sched.h b/include/linux/sched.h index bb046c0adf99..724814191fe9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1214,6 +1214,7 @@ struct sched_rt_entity { }; struct rcu_node; +struct od_table_entry; struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ @@ -1296,9 +1297,9 @@ struct task_struct { unsigned long stack_canary; #endif - /* + /* * pointers to (original) parent process, youngest child, younger sibling, - * older sibling, respectively. (p->father can be replaced with + * older sibling, respectively. (p->father can be replaced with * p->real_parent->pid) */ struct task_struct *real_parent; /* real parent process */ @@ -1512,6 +1513,9 @@ struct task_struct { /* LITMUS RT parameters and state */ struct rt_param rt_param; + /* references to PI semaphores, etc. */ + struct od_table_entry *od_table; + #ifdef CONFIG_LATENCYTOP int latency_record_count; struct latency_record latency_record[LT_SAVECOUNT]; @@ -2051,7 +2055,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s spin_unlock_irqrestore(&tsk->sighand->siglock, flags); return ret; -} +} extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h new file mode 100644 index 000000000000..286e10f86de0 --- /dev/null +++ b/include/litmus/fdso.h @@ -0,0 +1,69 @@ +/* fdso.h - file descriptor attached shared objects + * + * (c) 2007 B. 
Brandenburg, LITMUS^RT project + */ + +#ifndef _LINUX_FDSO_H_ +#define _LINUX_FDSO_H_ + +#include +#include + +#include + +#define MAX_OBJECT_DESCRIPTORS 32 + +typedef enum { + MIN_OBJ_TYPE = 0, + + FMLP_SEM = 0, + SRP_SEM = 1, + + MAX_OBJ_TYPE = 1 +} obj_type_t; + +struct inode_obj_id { + struct list_head list; + atomic_t count; + struct inode* inode; + + obj_type_t type; + void* obj; + unsigned int id; +}; + + +struct od_table_entry { + unsigned int used; + + struct inode_obj_id* obj; + void* extra; +}; + +struct fdso_ops { + void* (*create) (void); + void (*destroy)(void*); + int (*open) (struct od_table_entry*, void* __user); + int (*close) (struct od_table_entry*); +}; + +/* translate a userspace supplied od into the raw table entry + * returns NULL if od is invalid + */ +struct od_table_entry* __od_lookup(int od); + +/* translate a userspace supplied od into the associated object + * returns NULL if od is invalid + */ +static inline void* od_lookup(int od, obj_type_t type) +{ + struct od_table_entry* e = __od_lookup(od); + return e && e->obj->type == type ? e->obj->obj : NULL; +} + +#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) +#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) +#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) + + +#endif -- cgit v1.2.2 From 2a94c7bf9869a13e32de7a1fe94596de7b4789a8 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Fri, 28 May 2010 10:03:24 -0400 Subject: [ported from 2008.3] Add LITRMUS^RT syscalls to x86_32 --- include/litmus/unistd.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 include/litmus/unistd.h (limited to 'include') diff --git a/include/litmus/unistd.h b/include/litmus/unistd.h new file mode 100644 index 000000000000..6fec5364adaf --- /dev/null +++ b/include/litmus/unistd.h @@ -0,0 +1,23 @@ +/* + * included from arch/x86/include/asm/unistd_32.h + * + * LITMUS^RT syscalls with "relative" numbers + */ +#define __LSC(x) (__NR_LITMUS + x) + +#define __NR_set_rt_task_param __LSC(0) +#define __NR_get_rt_task_param __LSC(1) +#define __NR_sleep_next_period __LSC(2) +#define __NR_od_open __LSC(3) +#define __NR_od_close __LSC(4) +#define __NR_fmlp_down __LSC(5) +#define __NR_fmlp_up __LSC(6) +#define __NR_srp_down __LSC(7) +#define __NR_srp_up __LSC(8) +#define __NR_query_job_no __LSC(9) +#define __NR_wait_for_job_release __LSC(10) +#define __NR_wait_for_ts_release __LSC(11) +#define __NR_release_ts __LSC(12) +#define __NR_null_call __LSC(13) + +#define NR_litmus_syscalls 14 -- cgit v1.2.2 From b085cafc43bc395e255626204169e20a587f28ba Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:44:47 -0500 Subject: [ported from 2008.3] Add send_pull_timers() support for x86_32 arch --- include/linux/smp.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/linux/smp.h b/include/linux/smp.h index 39c64bae776d..76bb3e45351f 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -76,6 +76,11 @@ void smp_call_function_many(const struct cpumask *mask, void __smp_call_function_single(int cpuid, struct call_single_data *data, int wait); +/* + * sends a 'pull timer' event to a remote CPU + */ +extern void smp_send_pull_timers(int cpu); + /* * Generic and arch helpers */ -- cgit v1.2.2 From c15be843778236e9f2fdbc207ab36ba996b2bb1b Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:45:38 -0500 Subject: [ported from 2008.3] Add hrtimer_start_on() API --- include/linux/hrtimer.h | 25 
+++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'include') diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index ff037f0b1b4e..b984b947f5db 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -166,6 +166,7 @@ struct hrtimer_clock_base { * event devices whether high resolution mode can be * activated. * @nr_events: Total number of timer interrupt events + * @to_pull: LITMUS^RT list of timers to be pulled on this cpu */ struct hrtimer_cpu_base { spinlock_t lock; @@ -175,6 +176,26 @@ struct hrtimer_cpu_base { int hres_active; unsigned long nr_events; #endif + struct list_head to_pull; +}; + +#define HRTIMER_START_ON_INACTIVE 0 +#define HRTIMER_START_ON_QUEUED 1 + +/* + * struct hrtimer_start_on_info - save timer info on remote cpu + * @list: list of hrtimer_start_on_info on remote cpu (to_pull) + * @timer: timer to be triggered on remote cpu + * @time: time event + * @mode: timer mode + * @state: activity flag + */ +struct hrtimer_start_on_info { + struct list_head list; + struct hrtimer *timer; + ktime_t time; + enum hrtimer_mode mode; + atomic_t state; }; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) @@ -343,6 +364,10 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns, const enum hrtimer_mode mode, int wakeup); +extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info, + struct hrtimer *timer, ktime_t time, + const enum hrtimer_mode mode); + extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); -- cgit v1.2.2 From 0b28a3122d6917784701377e15a863489aee1c6c Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Thu, 17 Dec 2009 21:47:19 -0500 Subject: [ported from 2008.3] Add release-master support --- include/litmus/rt_domain.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index bde1e5a54812..c7c55bef3e42 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h @@ -28,6 +28,7 @@ typedef struct _rt_domain { /* real-time tasks waiting for release are in here */ spinlock_t release_lock; struct release_queue release_queue; + int release_master; /* for moving tasks to the release queue */ spinlock_t tobe_lock; @@ -51,6 +52,8 @@ struct release_heap { struct heap heap; /* used to trigger the release */ struct hrtimer timer; + /* used to delegate releases */ + struct hrtimer_start_on_info info; /* required for the timer callback */ rt_domain_t* dom; }; -- cgit v1.2.2 From ee09f78d8faa0b988088d93142e6f5f8a6e75394 Mon Sep 17 00:00:00 2001 From: Andrea Bastoni Date: Mon, 21 Dec 2009 12:23:57 -0500 Subject: Refactor binomial heap names: heap -> bheap - Binomial heap "heap" names conflicted with priority heap of cgroup in kernel - This patch change binomial heap "heap" names in "bheap" --- include/litmus/bheap.h | 77 +++++++++++++++++++++++++++++++++++++++++++++ include/litmus/edf_common.h | 2 +- include/litmus/heap.h | 77 --------------------------------------------- include/litmus/litmus.h | 2 +- include/litmus/rt_domain.h | 36 ++++++++++----------- include/litmus/rt_param.h | 4 +-- 6 files changed, 99 insertions(+), 99 deletions(-) create mode 100644 include/litmus/bheap.h delete mode 100644 include/litmus/heap.h (limited to 'include') diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h new file mode 100644 index 000000000000..cf4864a498d8 --- /dev/null +++ b/include/litmus/bheap.h @@ -0,0 +1,77 @@ +/* 
bheaps.h -- Binomial Heaps + * + * (c) 2008, 2009 Bjoern Brandenburg + */ + +#ifndef BHEAP_H +#define BHEAP_H + +#define NOT_IN_HEAP UINT_MAX + +struct bheap_node { + struct bheap_node* parent; + struct bheap_node* next; + struct bheap_node* child; + + unsigned int degree; + void* value; + struct bheap_node** ref; +}; + +struct bheap { + struct bheap_node* head; + /* We cache the minimum of the heap. + * This speeds up repeated peek operations. + */ + struct bheap_node* min; +}; + +typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); + +void bheap_init(struct bheap* heap); +void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); + +static inline int bheap_node_in_heap(struct bheap_node* h) +{ + return h->degree != NOT_IN_HEAP; +} + +static inline int bheap_empty(struct bheap* heap) +{ + return heap->head == NULL && heap->min == NULL; +} + +/* insert (and reinitialize) a node into the heap */ +void bheap_insert(bheap_prio_t higher_prio, + struct bheap* heap, + struct bheap_node* node); + +/* merge addition into target */ +void bheap_union(bheap_prio_t higher_prio, + struct bheap* target, + struct bheap* addition); + +struct bheap_node* bheap_peek(bheap_prio_t higher_prio, + struct bheap* heap); + +struct bheap_node* bheap_take(bheap_prio_t higher_prio, + struct bheap* heap); + +void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); +int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); + +void bheap_delete(bheap_prio_t higher_prio, + struct bheap* heap, + struct bheap_node* node); + +/* allocate from memcache */ +struct bheap_node* bheap_node_alloc(int gfp_flags); +void bheap_node_free(struct bheap_node* hn); + +/* allocate a heap node for value and insert into the heap */ +int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, + void* value, int gfp_flags); + +void* bheap_take_del(bheap_prio_t higher_prio, + struct bheap* heap); +#endif diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h index 166cdac6bcab..80d4321cc87e 100644 --- a/include/litmus/edf_common.h +++ b/include/litmus/edf_common.h @@ -18,7 +18,7 @@ void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, int edf_higher_prio(struct task_struct* first, struct task_struct* second); -int edf_ready_order(struct heap_node* a, struct heap_node* b); +int edf_ready_order(struct bheap_node* a, struct bheap_node* b); int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); diff --git a/include/litmus/heap.h b/include/litmus/heap.h deleted file mode 100644 index da959b0bec9c..000000000000 --- a/include/litmus/heap.h +++ /dev/null @@ -1,77 +0,0 @@ -/* heaps.h -- Binomial Heaps - * - * (c) 2008, 2009 Bjoern Brandenburg - */ - -#ifndef HEAP_H -#define HEAP_H - -#define NOT_IN_HEAP UINT_MAX - -struct heap_node { - struct heap_node* parent; - struct heap_node* next; - struct heap_node* child; - - unsigned int degree; - void* value; - struct heap_node** ref; -}; - -struct heap { - struct heap_node* head; - /* We cache the minimum of the heap. - * This speeds up repeated peek operations. 
- */ - struct heap_node* min; -}; - -typedef int (*heap_prio_t)(struct heap_node* a, struct heap_node* b); - -void heap_init(struct heap* heap); -void heap_node_init(struct heap_node** ref_to_heap_node_ptr, void* value); - -static inline int heap_node_in_heap(struct heap_node* h) -{ - return h->degree != NOT_IN_HEAP; -} - -static inline int heap_empty(struct heap* heap) -{ - return heap->head == NULL && heap->min == NULL; -} - -/* insert (and reinitialize) a node into the heap */ -void heap_insert(heap_prio_t higher_prio, - struct heap* heap, - struct heap_node* node); - -/* merge addition into target */ -void heap_union(heap_prio_t higher_prio, - struct heap* target, - struct heap* addition); - -struct heap_node* heap_peek(heap_prio_t higher_prio, - struct heap* heap); - -struct heap_node* heap_take(heap_prio_t higher_prio, - struct heap* heap); - -void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap); -int heap_decrease(heap_prio_t higher_prio, struct heap_node* node); - -void heap_delete(heap_prio_t higher_prio, - struct heap* heap, - struct heap_node* node); - -/* allocate from memcache */ -struct heap_node* heap_node_alloc(int gfp_flags); -void heap_node_free(struct heap_node* hn); - -/* allocate a heap node for value and insert into the heap */ -int heap_add(heap_prio_t higher_prio, struct heap* heap, - void* value, int gfp_flags); - -void* heap_take_del(heap_prio_t higher_prio, - struct heap* heap); -#endif diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 380fcb8acb33..a03580bc707c 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h @@ -133,7 +133,7 @@ void srp_ceiling_block(void); #define srp_ceiling_block() /* nothing */ #endif -#define heap2task(hn) ((struct task_struct*) hn->value) +#define bheap2task(hn) ((struct task_struct*) hn->value) static inline int is_np(struct task_struct *t) { diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index c7c55bef3e42..c780fdfcccae 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h @@ -5,14 +5,14 @@ #ifndef __UNC_RT_DOMAIN_H__ #define __UNC_RT_DOMAIN_H__ -#include +#include #define RELEASE_QUEUE_SLOTS 127 /* prime */ struct _rt_domain; typedef int (*check_resched_needed_t)(struct _rt_domain *rt); -typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks); +typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks); struct release_queue { /* each slot maintains a list of release heaps sorted @@ -23,7 +23,7 @@ struct release_queue { typedef struct _rt_domain { /* runnable rt tasks are in here */ spinlock_t ready_lock; - struct heap ready_queue; + struct bheap ready_queue; /* real-time tasks waiting for release are in here */ spinlock_t release_lock; @@ -41,7 +41,7 @@ typedef struct _rt_domain { release_jobs_t release_jobs; /* how are tasks ordered in the ready queue? 
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index c7c55bef3e42..c780fdfcccae 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -5,14 +5,14 @@
 #ifndef __UNC_RT_DOMAIN_H__
 #define __UNC_RT_DOMAIN_H__
 
-#include <litmus/heap.h>
+#include <litmus/bheap.h>
 
 #define RELEASE_QUEUE_SLOTS 127 /* prime */
 
 struct _rt_domain;
 
 typedef int (*check_resched_needed_t)(struct _rt_domain *rt);
-typedef void (*release_jobs_t)(struct _rt_domain *rt, struct heap* tasks);
+typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks);
 
 struct release_queue {
 	/* each slot maintains a list of release heaps sorted
@@ -23,7 +23,7 @@ struct release_queue {
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
 	spinlock_t			ready_lock;
-	struct heap			ready_queue;
+	struct bheap			ready_queue;
 
 	/* real-time tasks waiting for release are in here */
 	spinlock_t			release_lock;
@@ -41,7 +41,7 @@ typedef struct _rt_domain {
 	release_jobs_t			release_jobs;
 
 	/* how are tasks ordered in the ready queue? */
-	heap_prio_t			order;
+	bheap_prio_t			order;
 } rt_domain_t;
 
 struct release_heap {
@@ -49,7 +49,7 @@ struct release_heap {
 	struct list_head		list;
 	lt_t				release_time;
 	/* all tasks to be released at release_time */
-	struct heap			heap;
+	struct bheap			heap;
 	/* used to trigger the release */
 	struct hrtimer			timer;
 	/* used to delegate releases */
@@ -61,47 +61,47 @@ struct release_heap {
 
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
-	struct heap_node *hn = heap_peek(rt->order, &rt->ready_queue);
+	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
-void rt_domain_init(rt_domain_t *rt, heap_prio_t order,
+void rt_domain_init(rt_domain_t *rt, bheap_prio_t order,
 		    check_resched_needed_t check,
 		    release_jobs_t relase);
 
 void __add_ready(rt_domain_t* rt, struct task_struct *new);
-void __merge_ready(rt_domain_t* rt, struct heap *tasks);
+void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
 void __add_release(rt_domain_t* rt, struct task_struct *task);
 
 static inline struct task_struct* __take_ready(rt_domain_t* rt)
 {
-	struct heap_node* hn = heap_take(rt->order, &rt->ready_queue);
+	struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
 static inline struct task_struct* __peek_ready(rt_domain_t* rt)
 {
-	struct heap_node* hn = heap_peek(rt->order, &rt->ready_queue);
+	struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue);
 	if (hn)
-		return heap2task(hn);
+		return bheap2task(hn);
 	else
 		return NULL;
 }
 
 static inline int is_queued(struct task_struct *t)
 {
-	return heap_node_in_heap(tsk_rt(t)->heap_node);
+	return bheap_node_in_heap(tsk_rt(t)->heap_node);
 }
 
 static inline void remove(rt_domain_t* rt, struct task_struct *t)
 {
-	heap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
+	bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node);
 }
 
 static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
@@ -113,7 +113,7 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
-static inline void merge_ready(rt_domain_t* rt, struct heap* tasks)
+static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&rt->ready_lock, flags);
@@ -144,7 +144,7 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 
 static inline int __jobs_pending(rt_domain_t* rt)
 {
-	return !heap_empty(&rt->ready_queue);
+	return !bheap_empty(&rt->ready_queue);
 }
 
 static inline int jobs_pending(rt_domain_t* rt)
@@ -153,7 +153,7 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
 	spin_lock_irqsave(&rt->ready_lock, flags);
-	ret = !heap_empty(&rt->ready_queue);
+	ret = !bheap_empty(&rt->ready_queue);
 	spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
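For orientation: a plugin first binds an ordering and its callbacks with rt_domain_init(), then pulls work through the locked wrappers. A minimal sketch (the function name is illustrative; error paths omitted):

	/* Assumes 'edf' was set up with
	 * rt_domain_init(edf, edf_ready_order, my_check_resched, my_release_jobs); */
	static struct task_struct* grab_next_job(rt_domain_t* edf)
	{
		unsigned long flags;
		struct task_struct* next;

		spin_lock_irqsave(&edf->ready_lock, flags);
		next = __take_ready(edf);	/* highest-priority ready job or NULL */
		spin_unlock_irqrestore(&edf->ready_lock, flags);
		return next;
	}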
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index c599f848d1ed..e20427846273 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -39,7 +39,7 @@ struct rt_task {
 #ifdef __KERNEL__
 
 struct _rt_domain;
-struct heap_node;
+struct bheap_node;
 struct release_heap;
 
 struct rt_job {
@@ -157,7 +157,7 @@ struct rt_param {
 	 * other than this pointer (which is updated by the heap
	 * implementation).
	 */
-	struct heap_node*	heap_node;
+	struct bheap_node*	heap_node;
 	struct release_heap*	rel_heap;
 
 	/* Used by rt_domain to queue task in release list.
--
cgit v1.2.2


From 07ae7efcb81f95eb8e870cad21c7ba72573af7e8 Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Thu, 17 Dec 2009 21:48:38 -0500
Subject: Add support for x86_64 architecture

- Add syscall on x86_64
- Refactor __NR_sleep_next_period -> __NR_complete_job
  for both x86_32 and x86_64
---
 include/litmus/unistd.h    | 23 -----------------------
 include/litmus/unistd_32.h | 23 +++++++++++++++++++++++
 include/litmus/unistd_64.h | 37 +++++++++++++++++++++++++++++++++++++
 3 files changed, 60 insertions(+), 23 deletions(-)
 delete mode 100644 include/litmus/unistd.h
 create mode 100644 include/litmus/unistd_32.h
 create mode 100644 include/litmus/unistd_64.h
(limited to 'include')

diff --git a/include/litmus/unistd.h b/include/litmus/unistd.h
deleted file mode 100644
index 6fec5364adaf..000000000000
--- a/include/litmus/unistd.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * included from arch/x86/include/asm/unistd_32.h
- *
- * LITMUS^RT syscalls with "relative" numbers
- */
-#define __LSC(x) (__NR_LITMUS + x)
-
-#define __NR_set_rt_task_param	__LSC(0)
-#define __NR_get_rt_task_param	__LSC(1)
-#define __NR_sleep_next_period	__LSC(2)
-#define __NR_od_open		__LSC(3)
-#define __NR_od_close		__LSC(4)
-#define __NR_fmlp_down		__LSC(5)
-#define __NR_fmlp_up		__LSC(6)
-#define __NR_srp_down		__LSC(7)
-#define __NR_srp_up		__LSC(8)
-#define __NR_query_job_no	__LSC(9)
-#define __NR_wait_for_job_release __LSC(10)
-#define __NR_wait_for_ts_release __LSC(11)
-#define __NR_release_ts		__LSC(12)
-#define __NR_null_call		__LSC(13)
-
-#define NR_litmus_syscalls 14
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
new file mode 100644
index 000000000000..dbddc6523f8e
--- /dev/null
+++ b/include/litmus/unistd_32.h
@@ -0,0 +1,23 @@
+/*
+ * included from arch/x86/include/asm/unistd_32.h
+ *
+ * LITMUS^RT syscalls with "relative" numbers
+ */
+#define __LSC(x) (__NR_LITMUS + x)
+
+#define __NR_set_rt_task_param	__LSC(0)
+#define __NR_get_rt_task_param	__LSC(1)
+#define __NR_complete_job	__LSC(2)
+#define __NR_od_open		__LSC(3)
+#define __NR_od_close		__LSC(4)
+#define __NR_fmlp_down		__LSC(5)
+#define __NR_fmlp_up		__LSC(6)
+#define __NR_srp_down		__LSC(7)
+#define __NR_srp_up		__LSC(8)
+#define __NR_query_job_no	__LSC(9)
+#define __NR_wait_for_job_release __LSC(10)
+#define __NR_wait_for_ts_release __LSC(11)
+#define __NR_release_ts		__LSC(12)
+#define __NR_null_call		__LSC(13)
+
+#define NR_litmus_syscalls 14
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
new file mode 100644
index 000000000000..f0618e75348d
--- /dev/null
+++ b/include/litmus/unistd_64.h
@@ -0,0 +1,37 @@
+/*
+ * included from arch/x86/include/asm/unistd_64.h
+ *
+ * LITMUS^RT syscalls with "relative" numbers
+ */
+#define __LSC(x) (__NR_LITMUS + x)
+
+#define __NR_set_rt_task_param			__LSC(0)
+__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param)
+#define __NR_get_rt_task_param			__LSC(1)
+__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param)
+#define __NR_complete_job			__LSC(2)
+__SYSCALL(__NR_complete_job, sys_complete_job)
+#define __NR_od_open				__LSC(3)
+__SYSCALL(__NR_od_open, sys_od_open)
+#define __NR_od_close				__LSC(4)
+__SYSCALL(__NR_od_close, sys_od_close)
+#define __NR_fmlp_down				__LSC(5)
+__SYSCALL(__NR_fmlp_down, sys_fmlp_down)
+#define __NR_fmlp_up				__LSC(6)
+__SYSCALL(__NR_fmlp_up, sys_fmlp_up)
+#define __NR_srp_down				__LSC(7)
+__SYSCALL(__NR_srp_down, sys_srp_down)
+#define __NR_srp_up				__LSC(8)
+__SYSCALL(__NR_srp_up, sys_srp_up)
+#define __NR_query_job_no			__LSC(9)
+__SYSCALL(__NR_query_job_no, sys_query_job_no)
+#define __NR_wait_for_job_release		__LSC(10)
+__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release)
+#define __NR_wait_for_ts_release		__LSC(11)
+__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
+#define __NR_release_ts				__LSC(12)
+__SYSCALL(__NR_release_ts, sys_release_ts)
+#define __NR_null_call				__LSC(13)
+__SYSCALL(__NR_null_call, sys_null_call)
+
+#define NR_litmus_syscalls 14
--
cgit v1.2.2
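These headers only assign syscall numbers; user space reaches the kernel through syscall(2). A hedged sketch of the kind of wrapper a library such as liblitmus might provide (the wrapper name is illustrative, and the sketch assumes __NR_complete_job resolves to the kernel's assignment, __NR_LITMUS + 2, for the compiled architecture):

	#include <unistd.h>
	#include <sys/syscall.h>

	/* Signal the end of the current job and sleep until the next release. */
	static inline int complete_job(void)
	{
		return syscall(__NR_complete_job);
	}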
From d1a840d7194fdd09c1bd9977e30fd391ef2a7526 Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Tue, 19 Jan 2010 19:38:14 -0500
Subject: [ported from 2008.3] Add Feather-Trace x86_32 architecture dependent code

- [ported from 2008.3] Add x86_32 architecture dependent code.
- Add the infrastructure for x86_32 - x86_64 integration.
---
 include/litmus/feather_trace.h | 25 ++++++++++++-------------
 include/litmus/sched_trace.h   |  4 ++--
 include/litmus/trace.h         |  4 ++--
 3 files changed, 16 insertions(+), 17 deletions(-)
(limited to 'include')

diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
index eef8af7a414e..7d27e763406f 100644
--- a/include/litmus/feather_trace.h
+++ b/include/litmus/feather_trace.h
@@ -1,6 +1,7 @@
 #ifndef _FEATHER_TRACE_H_
 #define _FEATHER_TRACE_H_
 
+#include
 #include
 
 int ft_enable_event(unsigned long id);
@@ -8,6 +9,17 @@ int ft_disable_event(unsigned long id);
 int ft_is_event_enabled(unsigned long id);
 int ft_disable_all_events(void);
 
+/* atomic_* functions are inline anyway */
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
 #ifndef __ARCH_HAS_FEATHER_TRACE
 /* provide default implementation */
 
@@ -32,19 +44,6 @@ extern int ft_events[MAX_EVENTS];
 
 #define ft_event3(id, callback, p, p2, p3) \
 	if (ft_events[id]) callback(id, p, p2, p3);
 
-#include
-
-static inline int fetch_and_inc(int *val)
-{
-	return atomic_add_return(1, (atomic_t*) val) - 1;
-}
-
-static inline int fetch_and_dec(int *val)
-{
-	return atomic_sub_return(1, (atomic_t*) val) + 1;
-}
-
 #endif
-
 #endif
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index aae6ac27fe1b..e1b0c9712b5f 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -167,12 +167,12 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
 #define sched_trace_task_completion(t, forced) \
 	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
-		     forced)
+		     (unsigned long) forced)
 #define sched_trace_task_block(t) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
 #define sched_trace_task_resume(t) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
-
+/* 'when' is a pointer; it does not need an explicit cast to unsigned long */
 #define sched_trace_sys_release(when) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when)
 
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e8e0c7b6cc6a..b32c71180774 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -32,13 +32,13 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 
 #define TIMESTAMP(id) ft_event0(id, save_timestamp)
 
-#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, def)
+#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def)
 
 #define TTIMESTAMP(id, task) \
 	ft_event1(id, save_timestamp_task, (unsigned long) task)
 
 #define CTIMESTAMP(id, cpu) \
-	ft_event1(id, save_timestamp_cpu, cpu)
+	ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
 
 #else /* !CONFIG_SCHED_OVERHEAD_TRACE */
 
--
cgit v1.2.2
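fetch_and_inc()/fetch_and_dec() emulate atomic fetch-and-add on a plain int via the kernel's atomic ops; the value returned is the counter's value *before* the update. That property is what makes lock-free slot claiming work. An illustrative sketch (not taken from the patch):

	/* Every concurrent writer obtains a distinct slot index because
	 * fetch_and_inc() returns the pre-increment value. */
	static unsigned int claim_slot(int* next_slot, unsigned int slot_count)
	{
		return ((unsigned int) fetch_and_inc(next_slot)) % slot_count;
	}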
From 37b840336a1663a5ce62d663a702d9afefd56d23 Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Mon, 1 Feb 2010 23:07:54 -0500
Subject: Add Feather-Trace x86_64 architecture dependent code
---
 include/litmus/rt_domain.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include')

diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index c780fdfcccae..b452be1d2256 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -96,6 +96,7 @@ static inline struct task_struct* __peek_ready(rt_domain_t* rt)
 
 static inline int is_queued(struct task_struct *t)
 {
+	BUG_ON(!tsk_rt(t)->heap_node);
 	return bheap_node_in_heap(tsk_rt(t)->heap_node);
 }
 
--
cgit v1.2.2


From b973c95c86e6710c913c01a67013605f68a3c2c3 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Wed, 3 Feb 2010 19:35:20 -0500
Subject: Add virtual LITMUS^RT control device.

This device only supports mmap()'ing a single page. This page is shared
RW between the kernel and userspace. It is intended to allow
near-zero-overhead communication between the kernel and userspace. Its
first use will be a proper implementation of user-signaled
non-preemptable section support.
---
 include/litmus/rt_param.h | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
(limited to 'include')

diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index e20427846273..9353251fb30e 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -35,6 +35,30 @@ struct rt_task {
 	task_class_t	cls;
 };
 
+/* The definition of the data that is shared between the kernel and real-time
+ * tasks via a shared page (see litmus/ctrldev.c).
+ *
+ * WARNING: User space can write to this, so don't trust
+ * the correctness of the fields!
+ *
+ * This serves two purposes: to enable efficient signaling
+ * of non-preemptive sections (user->kernel) and
+ * delayed preemptions (kernel->user), and to export
+ * some real-time relevant statistics such as preemption and
+ * migration data to user space. We can't use a device to export
+ * statistics because we want to avoid system call overhead when
+ * determining preemption/migration overheads.
+ */
+struct control_page {
+	/* Is the task currently in a non-preemptive section? */
+	int np_flag;
+	/* Should the task call into the kernel when it leaves
+	 * its non-preemptive section? */
+	int delayed_preemption;
+
+	/* to be extended */
+};
+
 /* don't export internal data structures to user space (liblitmus) */
 #ifdef __KERNEL__
 
@@ -163,6 +187,9 @@ struct rt_param {
 	/* Used by rt_domain to queue task in release list.
 	 */
 	struct list_head list;
+
+	/* Pointer to the page shared between userspace and kernel. */
+	struct control_page * ctrl_page;
 };
 
 /* Possible RT flags */
--
cgit v1.2.2
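Taken together with the NP-section rework in the next patch, the intended user-space protocol looks roughly as follows. This is a hedged sketch, not the liblitmus implementation; in particular the device path is hypothetical:

	#include <fcntl.h>
	#include <sched.h>
	#include <sys/mman.h>

	static volatile struct control_page* ctrl;	/* mapped control page */

	static int map_ctrl_page(void)
	{
		int fd = open("/dev/litmus/ctrl", O_RDWR);	/* hypothetical path */
		if (fd < 0)
			return -1;
		ctrl = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		return ctrl == MAP_FAILED ? -1 : 0;
	}

	static void enter_np(void)
	{
		ctrl->np_flag = 1;	/* kernel: please postpone preemptions */
	}

	static void exit_np(void)
	{
		ctrl->np_flag = 0;
		/* honor a preemption the kernel postponed on our behalf */
		if (ctrl->delayed_preemption)
			sched_yield();	/* repurposed to call into the scheduler */
	}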
From fb95c290fe461de794c984bc4130741f04f9142d Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Wed, 3 Feb 2010 19:40:01 -0500
Subject: Re-implement non-preemptive section support.

Re-introduce NP sections in the configuration and in litmus.h. Remove
the old np_flag from rt_param.

If CONFIG_NP_SECTION is disabled, then all non-preemptive section
checks are constant expressions, which should get removed by dead-code
elimination during optimization.

Instead of re-implementing sys_exit_np(), we simply repurposed
sched_yield() for calling into the scheduler to trigger delayed
preemptions.
---
 include/litmus/litmus.h   | 86 +++++++++++++++++++++++++++++++++++++++++++----
 include/litmus/rt_param.h | 27 ++++-----------
 2 files changed, 87 insertions(+), 26 deletions(-)
(limited to 'include')

diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index a03580bc707c..faaf83961dfa 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -27,11 +27,11 @@ extern atomic_t __log_seq_no;
 	do { if (cond) TRACE("BUG_ON(%s) at %s:%d "		\
 			     "called from %p current=%s/%d state=%d "	\
 			     "flags=%x partition=%d cpu=%d rtflags=%d"	\
-			     " job=%u knp=%d timeslice=%u\n",	\
+			     " job=%u timeslice=%u\n",	\
 	#cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
 	current->pid, current->state, current->flags,  \
 	get_partition(current), smp_processor_id(), get_rt_flags(current), \
-	current->rt_param.job_params.job_no, current->rt_param.kernel_np, \
+	current->rt_param.job_params.job_no, \
 	current->rt.time_slice\
 	); } while(0);
 
@@ -124,8 +124,6 @@ static inline lt_t litmus_clock(void)
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
 
-#define make_np(t) do {t->rt_param.kernel_np++;} while(0);
-#define take_np(t) do {t->rt_param.kernel_np--;} while(0);
 
 #ifdef CONFIG_SRP
 void srp_ceiling_block(void);
@@ -135,12 +133,88 @@ void srp_ceiling_block(void);
 
 #define bheap2task(hn) ((struct task_struct*) hn->value)
 
-static inline int is_np(struct task_struct *t)
+#ifdef CONFIG_NP_SECTION
+
+static inline int is_kernel_np(struct task_struct *t)
 {
 	return tsk_rt(t)->kernel_np;
 }
 
-#define request_exit_np(t)
+static inline int is_user_np(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	if (is_user_np(t)) {
+		/* Set the flag that tells user space to call
+		 * into the kernel at the end of a critical section. */
+		if (likely(tsk_rt(t)->ctrl_page)) {
+			TRACE_TASK(t, "setting delayed_preemption flag\n");
+			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+		}
+	}
+}
+
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
+}
+
+static inline void make_np(struct task_struct *t)
+{
+	tsk_rt(t)->kernel_np++;
+}
+
+/* Caller should check if preemption is necessary when
+ * the function returns 0.
+ */
+static inline int take_np(struct task_struct *t)
+{
+	return --tsk_rt(t)->kernel_np;
+}
+
+#else
+
+static inline int is_kernel_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline int is_user_np(struct task_struct* t)
+{
+	return 0;
+}
+
+static inline void request_exit_np(struct task_struct *t)
+{
+	/* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */
+	BUG();
+}
+
+static inline void clear_exit_np(struct task_struct* t)
+{
+}
+
+#endif
+
+static inline int is_np(struct task_struct *t)
+{
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	int kernel, user;
+	kernel = is_kernel_np(t);
+	user   = is_user_np(t);
+	if (kernel || user)
+		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
+			   kernel, user);
+	return kernel || user;
+#else
+	return unlikely(is_kernel_np(t) || is_user_np(t));
+#endif
+}
 
 static inline int is_present(struct task_struct* t)
 {
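The primitives above split the preemption decision that every NP-aware plugin has to make. A simplified sketch of that decision (single-CPU case only; the generic cross-CPU version is factored out as preempt_if_preemptable() two patches later):

	static void check_preempt(struct task_struct* scheduled)
	{
		if (!is_np(scheduled))
			set_tsk_need_resched(scheduled);	/* preempt now */
		else
			request_exit_np(scheduled);	/* preempt on NP-section exit */
	}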
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 9353251fb30e..5b94d1a8eea7 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -85,7 +85,6 @@ struct rt_job {
 	unsigned int	job_no;
 };
 
-
 struct pfair_param;
 
 /* RT task parameters for scheduling extensions
@@ -116,26 +115,14 @@ struct rt_param {
 	 */
 	struct task_struct*	inh_task;
 
-	/* Don't just dereference this pointer in kernel space!
-	 * It might very well point to junk or nothing at all.
-	 * NULL indicates that the task has not requested any non-preemptable
-	 * section support.
-	 * Not inherited upon fork.
+#ifdef CONFIG_NP_SECTION
+	/* For the FMLP under PSN-EDF, it is required to make the task
+	 * non-preemptive from kernel space. In order not to interfere with
+	 * user space, this counter indicates the kernel space np setting.
+	 * kernel_np > 0 => task is non-preemptive
 	 */
-	short* np_flag;
-
-	/* re-use unused counter in plugins that don't need it */
-	union {
-		/* For the FMLP under PSN-EDF, it is required to make the task
-		 * non-preemptive from kernel space. In order not to interfere with
-		 * user space, this counter indicates the kernel space np setting.
-		 * kernel_np > 0 => task is non-preemptive
-		 */
-		unsigned int kernel_np;
-
-		/* Used by GQ-EDF */
-		unsigned int last_cpu;
-	};
+	unsigned int kernel_np;
+#endif
 
 	/* This field can be used by plugins to store where the task
 	 * is currently scheduled. It is the responsibility of the
--
cgit v1.2.2
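For the kernel-side counter, make_np()/take_np() nest, and only the outermost take_np() (return value 0) may have to honor a postponed preemption. An illustrative bracket, assuming 't' is current and with a schematic resched check:

	static void np_bracket(struct task_struct* t)
	{
		make_np(t);
		/* ... kernel critical section, e.g. FMLP under PSN-EDF ... */
		if (take_np(t) == 0 && test_tsk_need_resched(t)) {
			/* left the outermost NP section with a pending
			 * preemption: reschedule now */
			schedule();
		}
	}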
From f3a6cb9af5cdb01f29ad32b01aa56a14f0da144e Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Wed, 3 Feb 2010 19:42:02 -0500
Subject: Introduce generic NP-section aware preemption function

Dealing with preemptions across CPUs in the presence of non-preemptive
sections can be tricky and should not be replicated across
(event-driven) plugins. This patch introduces a generic preemption
function that handles non-preemptive sections (hopefully) correctly.
---
 include/litmus/litmus.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include')

diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index faaf83961dfa..62107e659c12 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -124,6 +124,7 @@ static inline lt_t litmus_clock(void)
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
 
+void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 
 #ifdef CONFIG_SRP
 void srp_ceiling_block(void);
--
cgit v1.2.2


From 944f051fda9551483399bed556870b0895df1efa Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Fri, 28 May 2010 10:39:56 -0400
Subject: Bugfix: 1) incorrect FMLP high prio task tracking and 2) race in print
 statement

1) The high-priority task tied to an FMLP semaphore under P-EDF
scheduling is tracked incorrectly for tasks that acquire the lock
without contention. (HP is always set to CPU 0 instead of the proper
CPU.)

2) A race in a print statement in P-EDF's pi_block() causes a NULL
pointer dereference.
---
 include/litmus/sched_plugin.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 94952f6ccbfa..2d856d587041 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -12,7 +12,7 @@ struct pi_semaphore {
 	atomic_t count;
 	int sleepers;
 	wait_queue_head_t wait;
-	union {
+	struct {
 		/* highest-prio holder/waiter */
 		struct task_struct *task;
 		struct task_struct* cpu_task[NR_CPUS];
--
cgit v1.2.2


From 7c1ff4c544dd650cceff3cd69a04bcba60856678 Mon Sep 17 00:00:00 2001
From: Andrea Bastoni
Date: Fri, 28 May 2010 10:51:01 -0400
Subject: Add C-EDF Plugin

Improved C-EDF plugin. C-EDF now supports different cluster sizes
(based on L2 and L3 cache sharing) and dynamic changes of the cluster
size (changing the size requires reloading the plugin).
---
 include/litmus/sched_plugin.h | 3 +++
 1 file changed, 3 insertions(+)
(limited to 'include')

diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 2d856d587041..9c1c9f28ba79 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -133,6 +133,9 @@ struct sched_plugin {
 
 extern struct sched_plugin *litmus;
 
+/* cluster size: cache_index = 2 L2, cache_index = 3 L3 */
+extern int cluster_cache_index;
+
 int register_sched_plugin(struct sched_plugin* plugin);
 struct sched_plugin* find_sched_plugin(const char* name);
 int print_sched_plugins(char* buf, int max);
--
cgit v1.2.2
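The cluster_cache_index knob decides which cache level defines a C-EDF cluster. Conceptually, two CPUs belong to the same cluster iff they share the selected cache level; the helper below is hypothetical (the plugin's actual CPU-mapping code is not part of this diff):

	/* Two CPUs fall into the same C-EDF cluster iff they share the cache
	 * level selected by cluster_cache_index (2 = L2, 3 = L3). */
	static int same_cluster(int cpu_a, int cpu_b)
	{
		/* shared_cache_id() is a hypothetical topology helper */
		return shared_cache_id(cpu_a, cluster_cache_index) ==
		       shared_cache_id(cpu_b, cluster_cache_index);
	}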