diff options
Diffstat (limited to 'include/litmus')
| -rw-r--r-- | include/litmus/affinity.h | 80 | ||||
| -rw-r--r-- | include/litmus/budget.h | 12 | ||||
| -rw-r--r-- | include/litmus/color.h | 51 | ||||
| -rw-r--r-- | include/litmus/dgl.h | 65 | ||||
| -rw-r--r-- | include/litmus/fifo_common.h | 25 | ||||
| -rw-r--r-- | include/litmus/ftdev.h | 5 | ||||
| -rw-r--r-- | include/litmus/litmus.h | 73 | ||||
| -rw-r--r-- | include/litmus/preempt.h | 2 | ||||
| -rw-r--r-- | include/litmus/rm_common.h | 25 | ||||
| -rw-r--r-- | include/litmus/rt_domain.h | 2 | ||||
| -rw-r--r-- | include/litmus/rt_param.h | 58 | ||||
| -rw-r--r-- | include/litmus/rt_server.h | 31 | ||||
| -rw-r--r-- | include/litmus/sched_trace.h | 51 | ||||
| -rw-r--r-- | include/litmus/trace.h | 37 | ||||
| -rw-r--r-- | include/litmus/trace_irq.h | 21 |
15 files changed, 481 insertions, 57 deletions
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h new file mode 100644 index 000000000000..ca2e442eb547 --- /dev/null +++ b/include/litmus/affinity.h | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | #ifndef __LITMUS_AFFINITY_H | ||
| 2 | #define __LITMUS_AFFINITY_H | ||
| 3 | |||
| 4 | #include <linux/cpumask.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | L1 (instr) = depth 0 | ||
| 8 | L1 (data) = depth 1 | ||
| 9 | L2 = depth 2 | ||
| 10 | L3 = depth 3 | ||
| 11 | */ | ||
| 12 | #define NUM_CACHE_LEVELS 4 | ||
| 13 | |||
| 14 | struct neighborhood | ||
| 15 | { | ||
| 16 | unsigned int size[NUM_CACHE_LEVELS]; | ||
| 17 | cpumask_var_t neighbors[NUM_CACHE_LEVELS]; | ||
| 18 | }; | ||
| 19 | |||
| 20 | /* topology info is stored redundantly in a big array for fast lookups */ | ||
| 21 | extern struct neighborhood neigh_info[NR_CPUS]; | ||
| 22 | |||
| 23 | void init_topology(void); /* called by Litmus module's _init_litmus() */ | ||
| 24 | |||
| 25 | /* Works like: | ||
| 26 | void get_nearest_available_cpu( | ||
| 27 | cpu_entry_t **nearest, | ||
| 28 | cpu_entry_t *start, | ||
| 29 | cpu_entry_t *entries, | ||
| 30 | int release_master) | ||
| 31 | |||
| 32 | Set release_master = NO_CPU for no Release Master. | ||
| 33 | |||
| 34 | We use a macro here to exploit the fact that C-EDF and G-EDF | ||
| 35 | have similar structures for their cpu_entry_t structs, even though | ||
| 36 | they do not share a common base-struct. The macro allows us to | ||
| 37 | avoid code duplication. | ||
| 38 | |||
| 39 | TODO: Factor out the job-to-processor linking from C/G-EDF into | ||
| 40 | a reusable "processor mapping". (See B.B.'s RTSS'09 paper & | ||
| 41 | dissertation.) | ||
| 42 | */ | ||
| 43 | #define get_nearest_available_cpu(nearest, start, entries, release_master) \ | ||
| 44 | { \ | ||
| 45 | (nearest) = NULL; \ | ||
| 46 | if (!(start)->linked) { \ | ||
| 47 | (nearest) = (start); \ | ||
| 48 | } else { \ | ||
| 49 | int __level; \ | ||
| 50 | int __cpu; \ | ||
| 51 | int __release_master = ((release_master) == NO_CPU) ? -1 : (release_master); \ | ||
| 52 | struct neighborhood *__neighbors = &neigh_info[(start)->cpu]; \ | ||
| 53 | \ | ||
| 54 | for (__level = 0; (__level < NUM_CACHE_LEVELS) && !(nearest); ++__level) { \ | ||
| 55 | if (__neighbors->size[__level] > 1) { \ | ||
| 56 | for_each_cpu(__cpu, __neighbors->neighbors[__level]) { \ | ||
| 57 | if (__cpu != __release_master) { \ | ||
| 58 | cpu_entry_t *__entry = &per_cpu((entries), __cpu); \ | ||
| 59 | if (!__entry->linked) { \ | ||
| 60 | (nearest) = __entry; \ | ||
| 61 | break; \ | ||
| 62 | } \ | ||
| 63 | } \ | ||
| 64 | } \ | ||
| 65 | } else if (__neighbors->size[__level] == 0) { \ | ||
| 66 | break; \ | ||
| 67 | } \ | ||
| 68 | } \ | ||
| 69 | } \ | ||
| 70 | \ | ||
| 71 | if ((nearest)) { \ | ||
| 72 | TRACE("P%d is closest available CPU to P%d\n", \ | ||
| 73 | (nearest)->cpu, (start)->cpu); \ | ||
| 74 | } else { \ | ||
| 75 | TRACE("Could not find an available CPU close to P%d\n", \ | ||
| 76 | (start)->cpu); \ | ||
| 77 | } \ | ||
| 78 | } | ||
| 79 | |||
| 80 | #endif | ||
diff --git a/include/litmus/budget.h b/include/litmus/budget.h index ff18d89e8630..265f2b1e62b8 100644 --- a/include/litmus/budget.h +++ b/include/litmus/budget.h | |||
| @@ -1,6 +1,11 @@ | |||
| 1 | #ifndef _LITMUS_BUDGET_H_ | 1 | #ifndef _LITMUS_BUDGET_H_ |
| 2 | #define _LITMUS_BUDGET_H_ | 2 | #define _LITMUS_BUDGET_H_ |
| 3 | 3 | ||
| 4 | struct enforcement_timer { | ||
| 5 | struct hrtimer timer; | ||
| 6 | int armed; | ||
| 7 | }; | ||
| 8 | |||
| 4 | /** | 9 | /** |
| 5 | * update_enforcement_timer() - Update per-processor enforcement timer for | 10 | * update_enforcement_timer() - Update per-processor enforcement timer for |
| 6 | * the next scheduled task. | 11 | * the next scheduled task. |
| @@ -11,6 +16,12 @@ | |||
| 11 | */ | 16 | */ |
| 12 | void update_enforcement_timer(struct task_struct* t); | 17 | void update_enforcement_timer(struct task_struct* t); |
| 13 | 18 | ||
| 19 | void init_enforcement_timer(struct enforcement_timer *et); | ||
| 20 | |||
| 21 | void arm_enforcement_timer(struct enforcement_timer* et, struct task_struct* t); | ||
| 22 | |||
| 23 | void cancel_enforcement_timer(struct enforcement_timer* et); | ||
| 24 | |||
| 14 | /* True if a task's server has progressed farther than the task | 25 | /* True if a task's server has progressed farther than the task |
| 15 | * itself. This happens when budget enforcement has caused a task to be | 26 | * itself. This happens when budget enforcement has caused a task to be |
| 16 | * booted off until the next period. | 27 | * booted off until the next period. |
| @@ -32,4 +43,5 @@ void server_release(struct task_struct *t); | |||
| 32 | * so that we can write them to feather trace. | 43 | * so that we can write them to feather trace. |
| 33 | */ | 44 | */ |
| 34 | void task_release(struct task_struct *t); | 45 | void task_release(struct task_struct *t); |
| 46 | |||
| 35 | #endif | 47 | #endif |
diff --git a/include/litmus/color.h b/include/litmus/color.h new file mode 100644 index 000000000000..eefb6c6dddf5 --- /dev/null +++ b/include/litmus/color.h | |||
| @@ -0,0 +1,51 @@ | |||
| 1 | #ifndef LITMUS_COLOR_H | ||
| 2 | #define LITMUS_COLOR_H | ||
| 3 | |||
| 4 | #ifdef __KERNEL__ | ||
| 5 | |||
| 6 | #define ONE_COLOR_LEN 11 | ||
| 7 | #define ONE_COLOR_FMT "%4lu: %4d\n" | ||
| 8 | |||
| 9 | struct color_cache_info { | ||
| 10 | unsigned long size; | ||
| 11 | unsigned long line_size; | ||
| 12 | unsigned long ways; | ||
| 13 | unsigned long sets; | ||
| 14 | unsigned long nr_colors; | ||
| 15 | }; | ||
| 16 | |||
| 17 | /* defined in litmus/color.c */ | ||
| 18 | extern struct color_cache_info color_cache_info; | ||
| 19 | extern unsigned long color_chunk; | ||
| 20 | |||
| 21 | struct page* get_colored_page(unsigned long); | ||
| 22 | void add_page_to_color_list(struct page*); | ||
| 23 | void add_page_to_alloced_list(struct page*, struct vm_area_struct*); | ||
| 24 | void reclaim_pages(struct vm_area_struct*); | ||
| 25 | |||
| 26 | int color_server_params(int cpu, unsigned long *wcet, unsigned long *period); | ||
| 27 | |||
| 28 | int color_add_pages_handler(struct ctl_table *, int, void __user *, | ||
| 29 | size_t *, loff_t *); | ||
| 30 | int color_nr_pages_handler(struct ctl_table *, int, void __user *, | ||
| 31 | size_t *, loff_t *); | ||
| 32 | int color_reclaim_pages_handler(struct ctl_table *, int, void __user *, | ||
| 33 | size_t *, loff_t *); | ||
| 34 | |||
| 35 | #ifdef CONFIG_LOCKDEP | ||
| 36 | #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 | ||
| 37 | #define LOCKDEP_DYNAMIC_ALLOC(lock, key, name_buf, fmt, args...) \ | ||
| 38 | do { \ | ||
| 39 | snprintf(name_buf, LITMUS_LOCKDEP_NAME_MAX_LEN, \ | ||
| 40 | fmt, ## args); \ | ||
| 41 | lockdep_set_class_and_name(lock, key, name_buf); \ | ||
| 42 | } while (0) | ||
| 43 | #else | ||
| 44 | #define LITMUS_LOCKDEP_NAME_MAX_LEN 0 | ||
| 45 | #define LOCKDEP_DYNAMIC_ALLOC(lock, key, name_buf, fmt, args) \ | ||
| 46 | do { (void)(key); } while (0) | ||
| 47 | #endif | ||
| 48 | |||
| 49 | #endif | ||
| 50 | |||
| 51 | #endif | ||
diff --git a/include/litmus/dgl.h b/include/litmus/dgl.h new file mode 100644 index 000000000000..acd58f80b58b --- /dev/null +++ b/include/litmus/dgl.h | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | #ifndef __DGL_H_ | ||
| 2 | #define __DGL_H_ | ||
| 3 | |||
| 4 | #include <litmus/color.h> | ||
| 5 | #include <linux/list.h> | ||
| 6 | |||
| 7 | /* | ||
| 8 | * A request for @replicas amount of a single resource. | ||
| 9 | */ | ||
| 10 | struct dgl_req { | ||
| 11 | unsigned short replicas; | ||
| 12 | struct list_head list; | ||
| 13 | struct dgl_group_req *greq; | ||
| 14 | }; | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Simultaneous @requests for multiple resources. | ||
| 18 | */ | ||
| 19 | struct dgl_group_req { | ||
| 20 | int cpu; | ||
| 21 | unsigned long *requested; | ||
| 22 | unsigned long *waiting; | ||
| 23 | |||
| 24 | struct dgl_req *requests; | ||
| 25 | |||
| 26 | unsigned long long ts; | ||
| 27 | }; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * A single resource. | ||
| 31 | */ | ||
| 32 | struct dgl_resource { | ||
| 33 | unsigned long free_replicas; | ||
| 34 | struct list_head waiting; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* | ||
| 38 | * A group of resources. | ||
| 39 | */ | ||
| 40 | struct dgl { | ||
| 41 | struct dgl_resource *resources; | ||
| 42 | struct dgl_group_req* *acquired; | ||
| 43 | |||
| 44 | char requests; | ||
| 45 | char running; | ||
| 46 | unsigned long long ts; | ||
| 47 | |||
| 48 | unsigned long num_resources; | ||
| 49 | unsigned long num_replicas; | ||
| 50 | }; | ||
| 51 | |||
| 52 | void dgl_init(struct dgl *dgl, unsigned long num_resources, | ||
| 53 | unsigned long num_replicas); | ||
| 54 | void dgl_free(struct dgl *dgl); | ||
| 55 | |||
| 56 | void dgl_group_req_init(struct dgl *dgl, struct dgl_group_req *greq); | ||
| 57 | void dgl_group_req_free(struct dgl_group_req *greq); | ||
| 58 | |||
| 59 | void set_req(struct dgl *dgl, struct dgl_group_req *greq, | ||
| 60 | int resource, int replicas); | ||
| 61 | |||
| 62 | void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu); | ||
| 63 | void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq); | ||
| 64 | |||
| 65 | #endif | ||
diff --git a/include/litmus/fifo_common.h b/include/litmus/fifo_common.h new file mode 100644 index 000000000000..4756f77bd511 --- /dev/null +++ b/include/litmus/fifo_common.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* | ||
| 2 | * FIFO common data structures and utility functions shared by all FIFO | ||
| 3 | * based scheduler plugins | ||
| 4 | */ | ||
| 5 | |||
| 6 | /* CLEANUP: Add comments and make it less messy. | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __FIFO_COMMON_H__ | ||
| 11 | #define __FIFO_COMMON_H__ | ||
| 12 | |||
| 13 | #include <litmus/rt_domain.h> | ||
| 14 | |||
| 15 | void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
| 16 | release_jobs_t release); | ||
| 17 | |||
| 18 | int fifo_higher_prio(struct task_struct* first, | ||
| 19 | struct task_struct* second); | ||
| 20 | |||
| 21 | int fifo_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
| 22 | |||
| 23 | int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
| 24 | |||
| 25 | #endif | ||
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h index 348387e9adf9..0b959874dd70 100644 --- a/include/litmus/ftdev.h +++ b/include/litmus/ftdev.h | |||
| @@ -16,7 +16,8 @@ typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no); | |||
| 16 | /* return 0 on success, otherwise -$REASON */ | 16 | /* return 0 on success, otherwise -$REASON */ |
| 17 | typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no); | 17 | typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no); |
| 18 | typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no); | 18 | typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no); |
| 19 | 19 | /* Let devices handle writes from userspace. No synchronization provided. */ | |
| 20 | typedef ssize_t (*ftdev_write_t)(struct ft_buffer* buf, size_t len, const char __user *from); | ||
| 20 | 21 | ||
| 21 | struct ftdev_event; | 22 | struct ftdev_event; |
| 22 | 23 | ||
| @@ -27,6 +28,7 @@ struct ftdev_minor { | |||
| 27 | /* FIXME: filter for authorized events */ | 28 | /* FIXME: filter for authorized events */ |
| 28 | struct ftdev_event* events; | 29 | struct ftdev_event* events; |
| 29 | struct device* device; | 30 | struct device* device; |
| 31 | struct ftdev* ftdev; | ||
| 30 | }; | 32 | }; |
| 31 | 33 | ||
| 32 | struct ftdev { | 34 | struct ftdev { |
| @@ -39,6 +41,7 @@ struct ftdev { | |||
| 39 | ftdev_alloc_t alloc; | 41 | ftdev_alloc_t alloc; |
| 40 | ftdev_free_t free; | 42 | ftdev_free_t free; |
| 41 | ftdev_can_open_t can_open; | 43 | ftdev_can_open_t can_open; |
| 44 | ftdev_write_t write; | ||
| 42 | }; | 45 | }; |
| 43 | 46 | ||
| 44 | struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size); | 47 | struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size); |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index d5dbc82f0dfc..1455f249c1fb 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
| @@ -26,7 +26,7 @@ static inline int in_list(struct list_head* list) | |||
| 26 | ); | 26 | ); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | struct task_struct* waitqueue_first(wait_queue_head_t *wq); | 29 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); |
| 30 | 30 | ||
| 31 | #define NO_CPU 0xffffffff | 31 | #define NO_CPU 0xffffffff |
| 32 | 32 | ||
| @@ -53,11 +53,15 @@ void litmus_exit_task(struct task_struct *tsk); | |||
| 53 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | 53 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) |
| 54 | #define get_rt_period(t) (tsk_rt(t)->task_params.period) | 54 | #define get_rt_period(t) (tsk_rt(t)->task_params.period) |
| 55 | #define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | 55 | #define get_rt_phase(t) (tsk_rt(t)->task_params.phase) |
| 56 | #define get_rt_job(t) (tsk_rt(t)->job_params.job_no) | ||
| 56 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) | 57 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) |
| 57 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | 58 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) |
| 58 | #define get_release(t) (tsk_rt(t)->job_params.release) | 59 | #define get_release(t) (tsk_rt(t)->job_params.release) |
| 59 | #define get_class(t) (tsk_rt(t)->task_params.cls) | 60 | #define get_class(t) (tsk_rt(t)->task_params.cls) |
| 61 | |||
| 60 | #define get_task_domain(t) (tsk_rt(t)->_domain) | 62 | #define get_task_domain(t) (tsk_rt(t)->_domain) |
| 63 | #define is_server(t) (tsk_rt(t)->is_server) | ||
| 64 | #define get_task_server(task) (tsk_rt(task)->server) | ||
| 61 | 65 | ||
| 62 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) | 66 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) |
| 63 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) | 67 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) |
| @@ -150,7 +154,7 @@ static inline int is_kernel_np(struct task_struct *t) | |||
| 150 | 154 | ||
| 151 | static inline int is_user_np(struct task_struct *t) | 155 | static inline int is_user_np(struct task_struct *t) |
| 152 | { | 156 | { |
| 153 | return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0; | 157 | return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0; |
| 154 | } | 158 | } |
| 155 | 159 | ||
| 156 | static inline void request_exit_np(struct task_struct *t) | 160 | static inline void request_exit_np(struct task_struct *t) |
| @@ -160,20 +164,58 @@ static inline void request_exit_np(struct task_struct *t) | |||
| 160 | * into the kernel at the end of a critical section. */ | 164 | * into the kernel at the end of a critical section. */ |
| 161 | if (likely(tsk_rt(t)->ctrl_page)) { | 165 | if (likely(tsk_rt(t)->ctrl_page)) { |
| 162 | TRACE_TASK(t, "setting delayed_preemption flag\n"); | 166 | TRACE_TASK(t, "setting delayed_preemption flag\n"); |
| 163 | tsk_rt(t)->ctrl_page->delayed_preemption = 1; | 167 | tsk_rt(t)->ctrl_page->sched.np.preempt = 1; |
| 164 | } | 168 | } |
| 165 | } | 169 | } |
| 166 | } | 170 | } |
| 167 | 171 | ||
| 168 | static inline void clear_exit_np(struct task_struct *t) | 172 | static inline void make_np(struct task_struct *t) |
| 169 | { | 173 | { |
| 170 | if (likely(tsk_rt(t)->ctrl_page)) | 174 | tsk_rt(t)->kernel_np = 1; |
| 171 | tsk_rt(t)->ctrl_page->delayed_preemption = 0; | 175 | } |
| 176 | |||
| 177 | /* Caller should check if preemption is necessary when | ||
| 178 | * the function returns 0. | ||
| 179 | */ | ||
| 180 | static inline int take_np(struct task_struct *t) | ||
| 181 | { | ||
| 182 | return tsk_rt(t)->kernel_np = 0; | ||
| 183 | } | ||
| 184 | |||
| 185 | /* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */ | ||
| 186 | static inline int request_exit_np_atomic(struct task_struct *t) | ||
| 187 | { | ||
| 188 | union np_flag old, new; | ||
| 189 | |||
| 190 | if (tsk_rt(t)->ctrl_page) { | ||
| 191 | old.raw = tsk_rt(t)->ctrl_page->sched.raw; | ||
| 192 | if (old.np.flag == 0) { | ||
| 193 | /* no longer non-preemptive */ | ||
| 194 | return 0; | ||
| 195 | } else if (old.np.preempt) { | ||
| 196 | /* already set, nothing for us to do */ | ||
| 197 | return 1; | ||
| 198 | } else { | ||
| 199 | /* non preemptive and flag not set */ | ||
| 200 | new.raw = old.raw; | ||
| 201 | new.np.preempt = 1; | ||
| 202 | /* if we get old back, then we atomically set the flag */ | ||
| 203 | return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw; | ||
| 204 | /* If we raced with a concurrent change, then so be | ||
| 205 | * it. Deliver it by IPI. We don't want an unbounded | ||
| 206 | * retry loop here since tasks might exploit that to | ||
| 207 | * keep the kernel busy indefinitely. */ | ||
| 208 | } | ||
| 209 | } else | ||
| 210 | return 0; | ||
| 172 | } | 211 | } |
| 173 | 212 | ||
| 213 | #else | ||
| 214 | |||
| 215 | |||
| 174 | static inline void make_np(struct task_struct *t) | 216 | static inline void make_np(struct task_struct *t) |
| 175 | { | 217 | { |
| 176 | tsk_rt(t)->kernel_np++; | 218 | |
| 177 | } | 219 | } |
| 178 | 220 | ||
| 179 | /* Caller should check if preemption is necessary when | 221 | /* Caller should check if preemption is necessary when |
| @@ -181,11 +223,9 @@ static inline void make_np(struct task_struct *t) | |||
| 181 | */ | 223 | */ |
| 182 | static inline int take_np(struct task_struct *t) | 224 | static inline int take_np(struct task_struct *t) |
| 183 | { | 225 | { |
| 184 | return --tsk_rt(t)->kernel_np; | 226 | return 0; |
| 185 | } | 227 | } |
| 186 | 228 | ||
| 187 | #else | ||
| 188 | |||
| 189 | static inline int is_kernel_np(struct task_struct* t) | 229 | static inline int is_kernel_np(struct task_struct* t) |
| 190 | { | 230 | { |
| 191 | return 0; | 231 | return 0; |
| @@ -202,22 +242,25 @@ static inline void request_exit_np(struct task_struct *t) | |||
| 202 | BUG(); | 242 | BUG(); |
| 203 | } | 243 | } |
| 204 | 244 | ||
| 205 | static inline void clear_exit_np(struct task_struct* t) | 245 | static inline int request_exit_np_atomic(struct task_struct *t) |
| 206 | { | 246 | { |
| 247 | return 0; | ||
| 207 | } | 248 | } |
| 208 | 249 | ||
| 209 | #endif | 250 | #endif |
| 210 | 251 | ||
| 252 | static inline void clear_exit_np(struct task_struct *t) | ||
| 253 | { | ||
| 254 | if (likely(tsk_rt(t)->ctrl_page)) | ||
| 255 | tsk_rt(t)->ctrl_page->sched.np.preempt = 0; | ||
| 256 | } | ||
| 257 | |||
| 211 | static inline int is_np(struct task_struct *t) | 258 | static inline int is_np(struct task_struct *t) |
| 212 | { | 259 | { |
| 213 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 260 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
| 214 | int kernel, user; | 261 | int kernel, user; |
| 215 | kernel = is_kernel_np(t); | 262 | kernel = is_kernel_np(t); |
| 216 | user = is_user_np(t); | 263 | user = is_user_np(t); |
| 217 | if (kernel || user) | ||
| 218 | TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n", | ||
| 219 | |||
| 220 | kernel, user); | ||
| 221 | return kernel || user; | 264 | return kernel || user; |
| 222 | #else | 265 | #else |
| 223 | return unlikely(is_kernel_np(t) || is_user_np(t)); | 266 | return unlikely(is_kernel_np(t) || is_user_np(t)); |
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h index 29bf9bc47daf..5b69e26fc57d 100644 --- a/include/litmus/preempt.h +++ b/include/litmus/preempt.h | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); | 12 | extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); |
| 13 | 13 | ||
| 14 | #ifdef CONFIG_DEBUG_KERNEL | 14 | #ifdef CONFIG_PREEMPT_STATE_TRACE |
| 15 | const char* sched_state_name(int s); | 15 | const char* sched_state_name(int s); |
| 16 | //#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args) | 16 | //#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args) |
| 17 | #define TRACE_STATE(fmt, args...) /* ignore */ | 17 | #define TRACE_STATE(fmt, args...) /* ignore */ |
diff --git a/include/litmus/rm_common.h b/include/litmus/rm_common.h new file mode 100644 index 000000000000..3e03d9b5d140 --- /dev/null +++ b/include/litmus/rm_common.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* | ||
| 2 | * RM common data structures and utility functions shared by all RM | ||
| 3 | * based scheduler plugins | ||
| 4 | */ | ||
| 5 | |||
| 6 | /* CLEANUP: Add comments and make it less messy. | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __UNC_RM_COMMON_H__ | ||
| 11 | #define __UNC_RM_COMMON_H__ | ||
| 12 | |||
| 13 | #include <litmus/rt_domain.h> | ||
| 14 | |||
| 15 | void rm_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
| 16 | release_jobs_t release); | ||
| 17 | |||
| 18 | int rm_higher_prio(struct task_struct* first, | ||
| 19 | struct task_struct* second); | ||
| 20 | |||
| 21 | int rm_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
| 22 | |||
| 23 | int rm_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
| 24 | |||
| 25 | #endif | ||
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h index 3a0861c1d700..03826228dbd9 100644 --- a/include/litmus/rt_domain.h +++ b/include/litmus/rt_domain.h | |||
| @@ -104,6 +104,8 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new); | |||
| 104 | void __merge_ready(rt_domain_t* rt, struct bheap *tasks); | 104 | void __merge_ready(rt_domain_t* rt, struct bheap *tasks); |
| 105 | void __add_release(rt_domain_t* rt, struct task_struct *task); | 105 | void __add_release(rt_domain_t* rt, struct task_struct *task); |
| 106 | 106 | ||
| 107 | struct release_heap* release_heap_alloc(int gfp_flags); | ||
| 108 | |||
| 107 | static inline struct task_struct* __take_ready(rt_domain_t* rt) | 109 | static inline struct task_struct* __take_ready(rt_domain_t* rt) |
| 108 | { | 110 | { |
| 109 | struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); | 111 | struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 4293575d3472..2ced0dba067d 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
| @@ -30,7 +30,7 @@ typedef enum { | |||
| 30 | typedef enum { | 30 | typedef enum { |
| 31 | NO_ENFORCEMENT, /* job may overrun unhindered */ | 31 | NO_ENFORCEMENT, /* job may overrun unhindered */ |
| 32 | QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */ | 32 | QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */ |
| 33 | PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */ | 33 | PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */ |
| 34 | } budget_policy_t; | 34 | } budget_policy_t; |
| 35 | 35 | ||
| 36 | struct rt_task { | 36 | struct rt_task { |
| @@ -42,6 +42,16 @@ struct rt_task { | |||
| 42 | budget_policy_t budget_policy; /* ignored by pfair */ | 42 | budget_policy_t budget_policy; /* ignored by pfair */ |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | union np_flag { | ||
| 46 | uint32_t raw; | ||
| 47 | struct { | ||
| 48 | /* Is the task currently in a non-preemptive section? */ | ||
| 49 | uint32_t flag:31; | ||
| 50 | /* Should the task call into the scheduler? */ | ||
| 51 | uint32_t preempt:1; | ||
| 52 | } np; | ||
| 53 | }; | ||
| 54 | |||
| 45 | /* The definition of the data that is shared between the kernel and real-time | 55 | /* The definition of the data that is shared between the kernel and real-time |
| 46 | * tasks via a shared page (see litmus/ctrldev.c). | 56 | * tasks via a shared page (see litmus/ctrldev.c). |
| 47 | * | 57 | * |
| @@ -57,16 +67,33 @@ struct rt_task { | |||
| 57 | * determining preemption/migration overheads). | 67 | * determining preemption/migration overheads). |
| 58 | */ | 68 | */ |
| 59 | struct control_page { | 69 | struct control_page { |
| 60 | /* Is the task currently in a non-preemptive section? */ | 70 | volatile union np_flag sched; |
| 61 | int np_flag; | ||
| 62 | /* Should the task call into the kernel when it leaves | ||
| 63 | * its non-preemptive section? */ | ||
| 64 | int delayed_preemption; | ||
| 65 | 71 | ||
| 66 | /* locking overhead tracing: time stamp prior to system call */ | 72 | /* locking overhead tracing: time stamp prior to system call */ |
| 67 | uint64_t ts_syscall_start; /* Feather-Trace cycles */ | 73 | uint64_t ts_syscall_start; /* Feather-Trace cycles */ |
| 68 | 74 | ||
| 69 | /* to be extended */ | 75 | int colors_updated:8; |
| 76 | }; | ||
| 77 | |||
| 78 | #ifndef __KERNEL__ | ||
| 79 | /* | ||
| 80 | * XXX This is a terrible hack so liblitmus can use the PAGE_SIZE macro. | ||
| 81 | * We should fix liblitmus to set up the page size at runtime. | ||
| 82 | */ | ||
| 83 | #define CACHE_LINE_SIZE 64 | ||
| 84 | #if (ARCH == x86_64) | ||
| 85 | #define PAGE_SIZE 4096 | ||
| 86 | #elif (ARCH == sparc64) | ||
| 87 | #define PAGE_SIZE 8192 | ||
| 88 | #endif | ||
| 89 | #endif /* ifndef __KERNEL__ */ | ||
| 90 | |||
| 91 | typedef uint8_t color_t; | ||
| 92 | #define COLORS_PER_CONTROL_PAGE (PAGE_SIZE / (2 * sizeof(color_t))) | ||
| 93 | struct color_ctrl_page { | ||
| 94 | color_t colors[COLORS_PER_CONTROL_PAGE]; | ||
| 95 | /* must be same type to guarantee equal array sizes */ | ||
| 96 | color_t pages[COLORS_PER_CONTROL_PAGE]; | ||
| 70 | }; | 97 | }; |
| 71 | 98 | ||
| 72 | /* don't export internal data structures to user space (liblitmus) */ | 99 | /* don't export internal data structures to user space (liblitmus) */ |
| @@ -76,6 +103,8 @@ struct _rt_domain; | |||
| 76 | struct bheap_node; | 103 | struct bheap_node; |
| 77 | struct release_heap; | 104 | struct release_heap; |
| 78 | struct domain; | 105 | struct domain; |
| 106 | struct rt_server; | ||
| 107 | struct dgl_group_req; | ||
| 79 | 108 | ||
| 80 | struct rt_job { | 109 | struct rt_job { |
| 81 | /* Time instant the the job was or will be released. */ | 110 | /* Time instant the the job was or will be released. */ |
| @@ -119,6 +148,8 @@ struct rt_param { | |||
| 119 | /* is the task present? (true if it can be scheduled) */ | 148 | /* is the task present? (true if it can be scheduled) */ |
| 120 | unsigned int present:1; | 149 | unsigned int present:1; |
| 121 | 150 | ||
| 151 | unsigned int is_server:1; | ||
| 152 | |||
| 122 | #ifdef CONFIG_LITMUS_LOCKING | 153 | #ifdef CONFIG_LITMUS_LOCKING |
| 123 | /* Is the task being priority-boosted by a locking protocol? */ | 154 | /* Is the task being priority-boosted by a locking protocol? */ |
| 124 | unsigned int priority_boosted:1; | 155 | unsigned int priority_boosted:1; |
| @@ -134,6 +165,9 @@ struct rt_param { | |||
| 134 | struct rt_event *event; | 165 | struct rt_event *event; |
| 135 | #endif | 166 | #endif |
| 136 | 167 | ||
| 168 | struct rt_server *server; | ||
| 169 | |||
| 170 | |||
| 137 | /* user controlled parameters */ | 171 | /* user controlled parameters */ |
| 138 | struct rt_task task_params; | 172 | struct rt_task task_params; |
| 139 | 173 | ||
| @@ -213,6 +247,16 @@ struct rt_param { | |||
| 213 | 247 | ||
| 214 | /* Pointer to the page shared between userspace and kernel. */ | 248 | /* Pointer to the page shared between userspace and kernel. */ |
| 215 | struct control_page * ctrl_page; | 249 | struct control_page * ctrl_page; |
| 250 | |||
| 251 | lt_t total_tardy; | ||
| 252 | lt_t max_tardy; | ||
| 253 | unsigned int missed; | ||
| 254 | |||
| 255 | lt_t max_exec_time; | ||
| 256 | lt_t tot_exec_time; | ||
| 257 | lt_t last_exec_time; | ||
| 258 | struct color_ctrl_page *color_ctrl_page; | ||
| 259 | struct dgl_group_req *req; | ||
| 216 | }; | 260 | }; |
| 217 | 261 | ||
| 218 | /* Possible RT flags */ | 262 | /* Possible RT flags */ |
diff --git a/include/litmus/rt_server.h b/include/litmus/rt_server.h new file mode 100644 index 000000000000..0e2feb6c6b0e --- /dev/null +++ b/include/litmus/rt_server.h | |||
| @@ -0,0 +1,31 @@ | |||
| 1 | #ifndef __RT_SERVER_H | ||
| 2 | #define __RT_SERVER_H | ||
| 3 | |||
| 4 | #include <linux/sched.h> | ||
| 5 | #include <litmus/litmus.h> | ||
| 6 | #include <litmus/rt_domain.h> | ||
| 7 | |||
| 8 | struct rt_server; | ||
| 9 | |||
| 10 | typedef int (*need_preempt_t)(rt_domain_t *rt, struct task_struct *t); | ||
| 11 | typedef void (*server_update_t)(struct rt_server *srv); | ||
| 12 | |||
| 13 | struct rt_server { | ||
| 14 | int sid; | ||
| 15 | int cpu; | ||
| 16 | struct task_struct* linked; | ||
| 17 | rt_domain_t* domain; | ||
| 18 | int running; | ||
| 19 | |||
| 20 | /* Does this server have a higher-priority task? */ | ||
| 21 | need_preempt_t need_preempt; | ||
| 22 | /* System state has changed, so should server */ | ||
| 23 | server_update_t update; | ||
| 24 | }; | ||
| 25 | |||
| 26 | void init_rt_server(struct rt_server *server, | ||
| 27 | int sid, int cpu, rt_domain_t *domain, | ||
| 28 | need_preempt_t need_preempt, | ||
| 29 | server_update_t update); | ||
| 30 | |||
| 31 | #endif | ||
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index e193267a321f..0370cdc091aa 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h | |||
| @@ -81,6 +81,17 @@ struct st_sys_release_data { | |||
| 81 | u64 release; | 81 | u64 release; |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| 84 | struct st_task_exit_data { | ||
| 85 | u64 avg_exec_time; | ||
| 86 | u64 max_exec_time; | ||
| 87 | }; | ||
| 88 | |||
| 89 | struct st_task_tardy_data { | ||
| 90 | u64 total_tardy; | ||
| 91 | u32 max_tardy; | ||
| 92 | u32 missed; | ||
| 93 | }; | ||
| 94 | |||
| 84 | #define DATA(x) struct st_ ## x ## _data x; | 95 | #define DATA(x) struct st_ ## x ## _data x; |
| 85 | 96 | ||
| 86 | typedef enum { | 97 | typedef enum { |
| @@ -95,7 +106,9 @@ typedef enum { | |||
| 95 | ST_BLOCK, | 106 | ST_BLOCK, |
| 96 | ST_RESUME, | 107 | ST_RESUME, |
| 97 | ST_ACTION, | 108 | ST_ACTION, |
| 98 | ST_SYS_RELEASE | 109 | ST_SYS_RELEASE, |
| 110 | ST_TASK_EXIT, | ||
| 111 | ST_TASK_TARDY, | ||
| 99 | } st_event_record_type_t; | 112 | } st_event_record_type_t; |
| 100 | 113 | ||
| 101 | struct st_event_record { | 114 | struct st_event_record { |
| @@ -114,6 +127,8 @@ struct st_event_record { | |||
| 114 | DATA(resume); | 127 | DATA(resume); |
| 115 | DATA(action); | 128 | DATA(action); |
| 116 | DATA(sys_release); | 129 | DATA(sys_release); |
| 130 | DATA(task_exit); | ||
| 131 | DATA(task_tardy); | ||
| 117 | } data; | 132 | } data; |
| 118 | }; | 133 | }; |
| 119 | 134 | ||
| @@ -155,6 +170,10 @@ feather_callback void do_sched_trace_action(unsigned long id, | |||
| 155 | unsigned long action); | 170 | unsigned long action); |
| 156 | feather_callback void do_sched_trace_sys_release(unsigned long id, | 171 | feather_callback void do_sched_trace_sys_release(unsigned long id, |
| 157 | lt_t* start); | 172 | lt_t* start); |
| 173 | feather_callback void do_sched_trace_task_exit(unsigned long id, | ||
| 174 | struct task_struct* task); | ||
| 175 | feather_callback void do_sched_trace_task_tardy(unsigned long id, | ||
| 176 | struct task_struct* task); | ||
| 158 | 177 | ||
| 159 | #endif | 178 | #endif |
| 160 | 179 | ||
| @@ -181,14 +200,17 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
| 181 | #define trace_litmus_task_resume(t, i) | 200 | #define trace_litmus_task_resume(t, i) |
| 182 | #define trace_litmus_sys_release(start) | 201 | #define trace_litmus_sys_release(start) |
| 183 | 202 | ||
| 203 | #define trace_litmus_task_exit(t) | ||
| 204 | #define trace_litmus_task_tardy(t) | ||
| 205 | |||
| 184 | #define trace_litmus_resource_acquire(t, i); | 206 | #define trace_litmus_resource_acquire(t, i); |
| 185 | #define trace_litmus_resource_release(t, i); | 207 | #define trace_litmus_resource_release(t, i); |
| 186 | #define trace_litmus_priority_donate(t, d, i) | 208 | #define trace_litmus_priority_donate(t, d, i) |
| 187 | 209 | ||
| 188 | #define trace_litmus_container_param(cid, name) | 210 | #define trace_litmus_container_param(cid, name) |
| 189 | #define trace_litmus_server_param(sid, cid, wcet, time) | 211 | #define trace_litmus_server_param(sid, cid, wcet, time) |
| 190 | #define trace_litmus_server_switch_to(sid, job, tid) | 212 | #define trace_litmus_server_switch_to(sid, job, tid, tjob) |
| 191 | #define trace_litmus_server_switch_away(sid, job, tid) | 213 | #define trace_litmus_server_switch_away(sid, job, tid, tjob) |
| 192 | #define trace_litmus_server_release(sid, job, release, deadline) | 214 | #define trace_litmus_server_release(sid, job, release, deadline) |
| 193 | #define trace_litmus_server_completion(sid, job) | 215 | #define trace_litmus_server_completion(sid, job) |
| 194 | 216 | ||
| @@ -264,6 +286,21 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
| 264 | trace_litmus_sys_release(when); \ | 286 | trace_litmus_sys_release(when); \ |
| 265 | } while (0) | 287 | } while (0) |
| 266 | 288 | ||
| 289 | #define sched_trace_task_exit(t) \ | ||
| 290 | do { \ | ||
| 291 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, \ | ||
| 292 | do_sched_trace_task_exit, t); \ | ||
| 293 | trace_litmus_task_exit(t); \ | ||
| 294 | } while (0) | ||
| 295 | |||
| 296 | |||
| 297 | #define sched_trace_task_tardy(t) \ | ||
| 298 | do { \ | ||
| 299 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, \ | ||
| 300 | do_sched_trace_task_tardy, t); \ | ||
| 301 | trace_litmus_task_tardy(t); \ | ||
| 302 | } while (0) | ||
| 303 | |||
| 267 | #define QT_START lt_t _qt_start = litmus_clock() | 304 | #define QT_START lt_t _qt_start = litmus_clock() |
| 268 | #define QT_END \ | 305 | #define QT_END \ |
| 269 | sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n", \ | 306 | sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n", \ |
| @@ -294,14 +331,14 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
| 294 | trace_litmus_server_param(sid, cid, wcet, period); \ | 331 | trace_litmus_server_param(sid, cid, wcet, period); \ |
| 295 | } while(0) | 332 | } while(0) |
| 296 | 333 | ||
| 297 | #define sched_trace_server_switch_to(sid, job, tid) \ | 334 | #define sched_trace_server_switch_to(sid, job, tid, tjob) \ |
| 298 | do { \ | 335 | do { \ |
| 299 | trace_litmus_server_switch_to(sid, job, tid); \ | 336 | trace_litmus_server_switch_to(sid, job, tid, tjob); \ |
| 300 | } while(0) | 337 | } while(0) |
| 301 | 338 | ||
| 302 | #define sched_trace_server_switch_away(sid, job, tid) \ | 339 | #define sched_trace_server_switch_away(sid, job, tid, tjob) \ |
| 303 | do { \ | 340 | do { \ |
| 304 | trace_litmus_server_switch_away(sid, job, tid); \ | 341 | trace_litmus_server_switch_away(sid, job, tid, tjob); \ |
| 305 | } while (0) | 342 | } while (0) |
| 306 | 343 | ||
| 307 | #define sched_trace_server_release(sid, job, rel, dead) \ | 344 | #define sched_trace_server_release(sid, job, rel, dead) \ |
diff --git a/include/litmus/trace.h b/include/litmus/trace.h index c1bbf168e6d4..d868144f6928 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h | |||
| @@ -23,7 +23,9 @@ struct timestamp { | |||
| 23 | uint32_t seq_no; | 23 | uint32_t seq_no; |
| 24 | uint8_t cpu; | 24 | uint8_t cpu; |
| 25 | uint8_t event; | 25 | uint8_t event; |
| 26 | uint8_t task_type; | 26 | uint8_t task_type:2; |
| 27 | uint8_t irq_flag:1; | ||
| 28 | uint8_t irq_count:5; | ||
| 27 | }; | 29 | }; |
| 28 | 30 | ||
| 29 | /* tracing callbacks */ | 31 | /* tracing callbacks */ |
| @@ -32,7 +34,6 @@ feather_callback void save_timestamp_def(unsigned long event, unsigned long type | |||
| 32 | feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); | 34 | feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); |
| 33 | feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu); | 35 | feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu); |
| 34 | feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr); | 36 | feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr); |
| 35 | feather_callback void save_timestamp_time(unsigned long event, unsigned long time_ptr); | ||
| 36 | 37 | ||
| 37 | #define TIMESTAMP(id) ft_event0(id, save_timestamp) | 38 | #define TIMESTAMP(id) ft_event0(id, save_timestamp) |
| 38 | 39 | ||
| @@ -47,11 +48,6 @@ feather_callback void save_timestamp_time(unsigned long event, unsigned long tim | |||
| 47 | #define LTIMESTAMP(id, task) \ | 48 | #define LTIMESTAMP(id, task) \ |
| 48 | ft_event1(id, save_task_latency, (unsigned long) task) | 49 | ft_event1(id, save_task_latency, (unsigned long) task) |
| 49 | 50 | ||
| 50 | #define TIMESTAMP_TIME(id, time_ptr) \ | ||
| 51 | ft_event1(id, save_timestamp_time, (unsigned long) time_ptr) | ||
| 52 | |||
| 53 | #define TIMESTAMP_PID(id) ft_event0(id, save_timestamp_pid) | ||
| 54 | |||
| 55 | #else /* !CONFIG_SCHED_OVERHEAD_TRACE */ | 51 | #else /* !CONFIG_SCHED_OVERHEAD_TRACE */ |
| 56 | 52 | ||
| 57 | #define TIMESTAMP(id) /* no tracing */ | 53 | #define TIMESTAMP(id) /* no tracing */ |
| @@ -64,10 +60,6 @@ feather_callback void save_timestamp_time(unsigned long event, unsigned long tim | |||
| 64 | 60 | ||
| 65 | #define LTIMESTAMP(id, when_ptr) /* no tracing */ | 61 | #define LTIMESTAMP(id, when_ptr) /* no tracing */ |
| 66 | 62 | ||
| 67 | #define TIMESTAMP_TIME(id, time_ptr) /* no tracing */ | ||
| 68 | |||
| 69 | #define TIMESTAMP_PID(id) /* no tracing */ | ||
| 70 | |||
| 71 | #endif | 63 | #endif |
| 72 | 64 | ||
| 73 | 65 | ||
| @@ -79,21 +71,6 @@ feather_callback void save_timestamp_time(unsigned long event, unsigned long tim | |||
| 79 | * always the next number after the start time event id. | 71 | * always the next number after the start time event id. |
| 80 | */ | 72 | */ |
| 81 | 73 | ||
| 82 | #define __TS_SYSCALL_IN_START(p) TIMESTAMP_TIME(10, p) | ||
| 83 | #define TS_SYSCALL_IN_END TIMESTAMP_PID(11) | ||
| 84 | |||
| 85 | #define TS_SYSCALL_OUT_START TIMESTAMP_PID(20) | ||
| 86 | #define TS_SYSCALL_OUT_END TIMESTAMP_PID(21) | ||
| 87 | |||
| 88 | #define TS_LOCK_START TIMESTAMP_PID(30) | ||
| 89 | #define TS_LOCK_END TIMESTAMP_PID(31) | ||
| 90 | |||
| 91 | #define TS_LOCK_SUSPEND TIMESTAMP_PID(38) | ||
| 92 | #define TS_LOCK_RESUME TIMESTAMP_PID(39) | ||
| 93 | |||
| 94 | #define TS_UNLOCK_START TIMESTAMP_PID(40) | ||
| 95 | #define TS_UNLOCK_END TIMESTAMP_PID(41) | ||
| 96 | |||
| 97 | #define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only | 74 | #define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only |
| 98 | * care | 75 | * care |
| 99 | * about | 76 | * about |
| @@ -144,6 +121,14 @@ feather_callback void save_timestamp_time(unsigned long event, unsigned long tim | |||
| 144 | #define TS_EXIT_NP_START TIMESTAMP(150) | 121 | #define TS_EXIT_NP_START TIMESTAMP(150) |
| 145 | #define TS_EXIT_NP_END TIMESTAMP(151) | 122 | #define TS_EXIT_NP_END TIMESTAMP(151) |
| 146 | 123 | ||
| 124 | #define TS_LOCK_START TIMESTAMP(170) | ||
| 125 | #define TS_LOCK_SUSPEND TIMESTAMP(171) | ||
| 126 | #define TS_LOCK_RESUME TIMESTAMP(172) | ||
| 127 | #define TS_LOCK_END TIMESTAMP(173) | ||
| 128 | |||
| 129 | #define TS_UNLOCK_START TIMESTAMP(180) | ||
| 130 | #define TS_UNLOCK_END TIMESTAMP(181) | ||
| 131 | |||
| 147 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) | 132 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) |
| 148 | #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) | 133 | #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) |
| 149 | 134 | ||
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h new file mode 100644 index 000000000000..f18b127a089d --- /dev/null +++ b/include/litmus/trace_irq.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef _LITMUS_TRACE_IRQ_H_ | ||
| 2 | #define _LITMUS_TRACE_IRQ_H_ | ||
| 3 | |||
| 4 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
| 5 | |||
| 6 | extern DEFINE_PER_CPU(atomic_t, irq_fired_count); | ||
| 7 | |||
| 8 | static inline void ft_irq_fired(void) | ||
| 9 | { | ||
| 10 | /* Only called with preemptions disabled. */ | ||
| 11 | atomic_inc(&__get_cpu_var(irq_fired_count)); | ||
| 12 | } | ||
| 13 | |||
| 14 | |||
| 15 | #else | ||
| 16 | |||
| 17 | #define ft_irq_fired() /* nothing to do */ | ||
| 18 | |||
| 19 | #endif | ||
| 20 | |||
| 21 | #endif | ||
