Diffstat (limited to 'include')
36 files changed, 3353 insertions, 13 deletions
diff --git a/include/linux/fs.h b/include/linux/fs.h index cf7bc25928c..78987e9a384 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -17,8 +17,8 @@ | |||
| 17 | * nr_file rlimit, so it's safe to set up a ridiculously high absolute | 17 | * nr_file rlimit, so it's safe to set up a ridiculously high absolute |
| 18 | * upper limit on files-per-process. | 18 | * upper limit on files-per-process. |
| 19 | * | 19 | * |
| 20 | * Some programs (notably those using select()) may have to be | 20 | * Some programs (notably those using select()) may have to be |
| 21 | * recompiled to take full advantage of the new limits.. | 21 | * recompiled to take full advantage of the new limits.. |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | /* Fixed constants first: */ | 24 | /* Fixed constants first: */ |
| @@ -173,7 +173,7 @@ struct inodes_stat_t { | |||
| 173 | #define SEL_EX 4 | 173 | #define SEL_EX 4 |
| 174 | 174 | ||
| 175 | /* public flags for file_system_type */ | 175 | /* public flags for file_system_type */ |
| 176 | #define FS_REQUIRES_DEV 1 | 176 | #define FS_REQUIRES_DEV 1 |
| 177 | #define FS_BINARY_MOUNTDATA 2 | 177 | #define FS_BINARY_MOUNTDATA 2 |
| 178 | #define FS_HAS_SUBTYPE 4 | 178 | #define FS_HAS_SUBTYPE 4 |
| 179 | #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ | 179 | #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ |
| @@ -481,7 +481,7 @@ struct iattr { | |||
| 481 | */ | 481 | */ |
| 482 | #include <linux/quota.h> | 482 | #include <linux/quota.h> |
| 483 | 483 | ||
| 484 | /** | 484 | /** |
| 485 | * enum positive_aop_returns - aop return codes with specific semantics | 485 | * enum positive_aop_returns - aop return codes with specific semantics |
| 486 | * | 486 | * |
| 487 | * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has | 487 | * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has |
| @@ -491,7 +491,7 @@ struct iattr { | |||
| 491 | * be a candidate for writeback again in the near | 491 | * be a candidate for writeback again in the near |
| 492 | * future. Other callers must be careful to unlock | 492 | * future. Other callers must be careful to unlock |
| 493 | * the page if they get this return. Returned by | 493 | * the page if they get this return. Returned by |
| 494 | * writepage(); | 494 | * writepage(); |
| 495 | * | 495 | * |
| 496 | * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has | 496 | * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has |
| 497 | * unlocked it and the page might have been truncated. | 497 | * unlocked it and the page might have been truncated. |
| @@ -735,6 +735,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping) | |||
| 735 | 735 | ||
| 736 | struct posix_acl; | 736 | struct posix_acl; |
| 737 | #define ACL_NOT_CACHED ((void *)(-1)) | 737 | #define ACL_NOT_CACHED ((void *)(-1)) |
| 738 | struct inode_obj_id_table; | ||
| 738 | 739 | ||
| 739 | #define IOP_FASTPERM 0x0001 | 740 | #define IOP_FASTPERM 0x0001 |
| 740 | #define IOP_LOOKUP 0x0002 | 741 | #define IOP_LOOKUP 0x0002 |
| @@ -1048,10 +1049,10 @@ static inline int file_check_writeable(struct file *filp) | |||
| 1048 | 1049 | ||
| 1049 | #define MAX_NON_LFS ((1UL<<31) - 1) | 1050 | #define MAX_NON_LFS ((1UL<<31) - 1) |
| 1050 | 1051 | ||
| 1051 | /* Page cache limit. The filesystems should put that into their s_maxbytes | 1052 | /* Page cache limit. The filesystems should put that into their s_maxbytes |
| 1052 | limits, otherwise bad things can happen in VM. */ | 1053 | limits, otherwise bad things can happen in VM. */ |
| 1053 | #if BITS_PER_LONG==32 | 1054 | #if BITS_PER_LONG==32 |
| 1054 | #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) | 1055 | #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) |
| 1055 | #elif BITS_PER_LONG==64 | 1056 | #elif BITS_PER_LONG==64 |
| 1056 | #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL | 1057 | #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL |
| 1057 | #endif | 1058 | #endif |
| @@ -2282,7 +2283,7 @@ extern void free_write_pipe(struct file *); | |||
| 2282 | 2283 | ||
| 2283 | extern int kernel_read(struct file *, loff_t, char *, unsigned long); | 2284 | extern int kernel_read(struct file *, loff_t, char *, unsigned long); |
| 2284 | extern struct file * open_exec(const char *); | 2285 | extern struct file * open_exec(const char *); |
| 2285 | 2286 | ||
| 2286 | /* fs/dcache.c -- generic fs support functions */ | 2287 | /* fs/dcache.c -- generic fs support functions */ |
| 2287 | extern int is_subdir(struct dentry *, struct dentry *); | 2288 | extern int is_subdir(struct dentry *, struct dentry *); |
| 2288 | extern int path_is_under(struct path *, struct path *); | 2289 | extern int path_is_under(struct path *, struct path *); |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f743883f769..ef18786a7b4 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | #include <linux/ftrace_irq.h> | 6 | #include <linux/ftrace_irq.h> |
| 7 | #include <asm/hardirq.h> | 7 | #include <asm/hardirq.h> |
| 8 | 8 | ||
| 9 | #include <litmus/trace_irq.h> | ||
| 10 | |||
| 9 | /* | 11 | /* |
| 10 | * We put the hardirq and softirq counter into the preemption | 12 | * We put the hardirq and softirq counter into the preemption |
| 11 | * counter. The bitmask has the following meaning: | 13 | * counter. The bitmask has the following meaning: |
| @@ -186,6 +188,7 @@ extern void rcu_nmi_exit(void); | |||
| 186 | account_system_vtime(current); \ | 188 | account_system_vtime(current); \ |
| 187 | add_preempt_count(HARDIRQ_OFFSET); \ | 189 | add_preempt_count(HARDIRQ_OFFSET); \ |
| 188 | trace_hardirq_enter(); \ | 190 | trace_hardirq_enter(); \ |
| 191 | ft_irq_fired(); \ | ||
| 189 | } while (0) | 192 | } while (0) |
| 190 | 193 | ||
| 191 | /* | 194 | /* |
| @@ -216,6 +219,7 @@ extern void irq_exit(void); | |||
| 216 | lockdep_off(); \ | 219 | lockdep_off(); \ |
| 217 | rcu_nmi_enter(); \ | 220 | rcu_nmi_enter(); \ |
| 218 | trace_hardirq_enter(); \ | 221 | trace_hardirq_enter(); \ |
| 222 | ft_irq_fired(); \ | ||
| 219 | } while (0) | 223 | } while (0) |
| 220 | 224 | ||
| 221 | #define nmi_exit() \ | 225 | #define nmi_exit() \ |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index fd0dc30c9f1..d91bba539ca 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -174,6 +174,7 @@ enum hrtimer_base_type { | |||
| 174 | * @nr_hangs: Total number of hrtimer interrupt hangs | 174 | * @nr_hangs: Total number of hrtimer interrupt hangs |
| 175 | * @max_hang_time: Maximum time spent in hrtimer_interrupt | 175 | * @max_hang_time: Maximum time spent in hrtimer_interrupt |
| 176 | * @clock_base: array of clock bases for this cpu | 176 | * @clock_base: array of clock bases for this cpu |
| 177 | * @to_pull: LITMUS^RT list of timers to be pulled on this cpu | ||
| 177 | */ | 178 | */ |
| 178 | struct hrtimer_cpu_base { | 179 | struct hrtimer_cpu_base { |
| 179 | raw_spinlock_t lock; | 180 | raw_spinlock_t lock; |
| @@ -188,8 +189,32 @@ struct hrtimer_cpu_base { | |||
| 188 | ktime_t max_hang_time; | 189 | ktime_t max_hang_time; |
| 189 | #endif | 190 | #endif |
| 190 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 191 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
| 192 | struct list_head to_pull; | ||
| 191 | }; | 193 | }; |
| 192 | 194 | ||
| 195 | #ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS | ||
| 196 | |||
| 197 | #define HRTIMER_START_ON_INACTIVE 0 | ||
| 198 | #define HRTIMER_START_ON_QUEUED 1 | ||
| 199 | |||
| 200 | /* | ||
| 201 | * struct hrtimer_start_on_info - save timer info on remote cpu | ||
| 202 | * @list: list of hrtimer_start_on_info on remote cpu (to_pull) | ||
| 203 | * @timer: timer to be triggered on remote cpu | ||
| 204 | * @time: expiration time of the timer | ||
| 205 | * @mode: timer mode | ||
| 206 | * @state: activity flag | ||
| 207 | */ | ||
| 208 | struct hrtimer_start_on_info { | ||
| 209 | struct list_head list; | ||
| 210 | struct hrtimer *timer; | ||
| 211 | ktime_t time; | ||
| 212 | enum hrtimer_mode mode; | ||
| 213 | atomic_t state; | ||
| 214 | }; | ||
| 215 | |||
| 216 | #endif | ||
| 217 | |||
| 193 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | 218 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) |
| 194 | { | 219 | { |
| 195 | timer->node.expires = time; | 220 | timer->node.expires = time; |
| @@ -355,6 +380,13 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |||
| 355 | unsigned long delta_ns, | 380 | unsigned long delta_ns, |
| 356 | const enum hrtimer_mode mode, int wakeup); | 381 | const enum hrtimer_mode mode, int wakeup); |
| 357 | 382 | ||
| 383 | #ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS | ||
| 384 | extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info); | ||
| 385 | extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info, | ||
| 386 | struct hrtimer *timer, ktime_t time, | ||
| 387 | const enum hrtimer_mode mode); | ||
| 388 | #endif | ||
| 389 | |||
| 358 | extern int hrtimer_cancel(struct hrtimer *timer); | 390 | extern int hrtimer_cancel(struct hrtimer *timer); |
| 359 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | 391 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); |
| 360 | 392 | ||
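Usage sketch (not part of the patch): arming an hrtimer on a remote CPU through the pull-timer mechanism declared above. It assumes CONFIG_ARCH_HAS_SEND_PULL_TIMERS and a timer that was already set up with hrtimer_init() and a callback; demo_timer, demo_info, and demo_arm_on are hypothetical names.

    #include <linux/hrtimer.h>

    static struct hrtimer demo_timer;                /* assumed initialized elsewhere */
    static struct hrtimer_start_on_info demo_info;   /* bookkeeping for the remote arm */

    /* Queue the request on the target CPU's to_pull list; a pull-timers IPI is
     * sent if that CPU has to arm the timer itself. */
    static void demo_arm_on(int cpu, ktime_t when)
    {
            hrtimer_start_on_info_init(&demo_info);
            hrtimer_start_on(cpu, &demo_info, &demo_timer, when, HRTIMER_MODE_ABS);
    }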
diff --git a/include/linux/sched.h b/include/linux/sched.h index 5bb4dd2e4c5..096834c7c63 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #define SCHED_BATCH 3 | 39 | #define SCHED_BATCH 3 |
| 40 | /* SCHED_ISO: reserved but not implemented yet */ | 40 | /* SCHED_ISO: reserved but not implemented yet */ |
| 41 | #define SCHED_IDLE 5 | 41 | #define SCHED_IDLE 5 |
| 42 | #define SCHED_LITMUS 6 | ||
| 42 | /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ | 43 | /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ |
| 43 | #define SCHED_RESET_ON_FORK 0x40000000 | 44 | #define SCHED_RESET_ON_FORK 0x40000000 |
| 44 | 45 | ||
| @@ -93,6 +94,10 @@ struct sched_param { | |||
| 93 | 94 | ||
| 94 | #include <asm/processor.h> | 95 | #include <asm/processor.h> |
| 95 | 96 | ||
| 97 | #include <litmus/rt_param.h> | ||
| 98 | #include <litmus/preempt.h> | ||
| 99 | #include <litmus/fdso.h> | ||
| 100 | |||
| 96 | struct exec_domain; | 101 | struct exec_domain; |
| 97 | struct futex_pi_state; | 102 | struct futex_pi_state; |
| 98 | struct robust_list_head; | 103 | struct robust_list_head; |
| @@ -1209,6 +1214,7 @@ struct sched_rt_entity { | |||
| 1209 | }; | 1214 | }; |
| 1210 | 1215 | ||
| 1211 | struct rcu_node; | 1216 | struct rcu_node; |
| 1217 | struct od_table_entry; | ||
| 1212 | 1218 | ||
| 1213 | enum perf_event_task_context { | 1219 | enum perf_event_task_context { |
| 1214 | perf_invalid_context = -1, | 1220 | perf_invalid_context = -1, |
| @@ -1313,9 +1319,9 @@ struct task_struct { | |||
| 1313 | unsigned long stack_canary; | 1319 | unsigned long stack_canary; |
| 1314 | #endif | 1320 | #endif |
| 1315 | 1321 | ||
| 1316 | /* | 1322 | /* |
| 1317 | * pointers to (original) parent process, youngest child, younger sibling, | 1323 | * pointers to (original) parent process, youngest child, younger sibling, |
| 1318 | * older sibling, respectively. (p->father can be replaced with | 1324 | * older sibling, respectively. (p->father can be replaced with |
| 1319 | * p->real_parent->pid) | 1325 | * p->real_parent->pid) |
| 1320 | */ | 1326 | */ |
| 1321 | struct task_struct *real_parent; /* real parent process */ | 1327 | struct task_struct *real_parent; /* real parent process */ |
| @@ -1525,6 +1531,15 @@ struct task_struct { | |||
| 1525 | int make_it_fail; | 1531 | int make_it_fail; |
| 1526 | #endif | 1532 | #endif |
| 1527 | struct prop_local_single dirties; | 1533 | struct prop_local_single dirties; |
| 1534 | |||
| 1535 | /* LITMUS RT parameters and state */ | ||
| 1536 | struct rt_param rt_param; | ||
| 1537 | |||
| 1538 | /* references to PI semaphores, etc. */ | ||
| 1539 | struct od_table_entry *od_table; | ||
| 1540 | |||
| 1541 | resource_mask_t resources; | ||
| 1542 | |||
| 1528 | #ifdef CONFIG_LATENCYTOP | 1543 | #ifdef CONFIG_LATENCYTOP |
| 1529 | int latency_record_count; | 1544 | int latency_record_count; |
| 1530 | struct latency_record latency_record[LT_SAVECOUNT]; | 1545 | struct latency_record latency_record[LT_SAVECOUNT]; |
| @@ -2464,6 +2479,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) | |||
| 2464 | static inline void set_tsk_need_resched(struct task_struct *tsk) | 2479 | static inline void set_tsk_need_resched(struct task_struct *tsk) |
| 2465 | { | 2480 | { |
| 2466 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); | 2481 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); |
| 2482 | sched_state_will_schedule(tsk); | ||
| 2467 | } | 2483 | } |
| 2468 | 2484 | ||
| 2469 | static inline void clear_tsk_need_resched(struct task_struct *tsk) | 2485 | static inline void clear_tsk_need_resched(struct task_struct *tsk) |
diff --git a/include/linux/smp.h b/include/linux/smp.h index 8cc38d3bab0..53b1beef27a 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -82,6 +82,11 @@ int smp_call_function_any(const struct cpumask *mask, | |||
| 82 | smp_call_func_t func, void *info, int wait); | 82 | smp_call_func_t func, void *info, int wait); |
| 83 | 83 | ||
| 84 | /* | 84 | /* |
| 85 | * sends a 'pull timer' event to a remote CPU | ||
| 86 | */ | ||
| 87 | extern void smp_send_pull_timers(int cpu); | ||
| 88 | |||
| 89 | /* | ||
| 85 | * Generic and arch helpers | 90 | * Generic and arch helpers |
| 86 | */ | 91 | */ |
| 87 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | 92 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS |
diff --git a/include/linux/tick.h b/include/linux/tick.h index b232ccc0ee2..1e29bd5b18a 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -74,6 +74,11 @@ extern int tick_is_oneshot_available(void); | |||
| 74 | extern struct tick_device *tick_get_device(int cpu); | 74 | extern struct tick_device *tick_get_device(int cpu); |
| 75 | 75 | ||
| 76 | # ifdef CONFIG_HIGH_RES_TIMERS | 76 | # ifdef CONFIG_HIGH_RES_TIMERS |
| 77 | /* LITMUS^RT tick alignment */ | ||
| 78 | #define LINUX_DEFAULT_TICKS 0 | ||
| 79 | #define LITMUS_ALIGNED_TICKS 1 | ||
| 80 | #define LITMUS_STAGGERED_TICKS 2 | ||
| 81 | |||
| 77 | extern int tick_init_highres(void); | 82 | extern int tick_init_highres(void); |
| 78 | extern int tick_program_event(ktime_t expires, int force); | 83 | extern int tick_program_event(ktime_t expires, int force); |
| 79 | extern void tick_setup_sched_timer(void); | 84 | extern void tick_setup_sched_timer(void); |
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h new file mode 100644 index 00000000000..ca2e442eb54 --- /dev/null +++ b/include/litmus/affinity.h | |||
| @@ -0,0 +1,80 @@ | |||
| 1 | #ifndef __LITMUS_AFFINITY_H | ||
| 2 | #define __LITMUS_AFFINITY_H | ||
| 3 | |||
| 4 | #include <linux/cpumask.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | L1 (instr) = depth 0 | ||
| 8 | L1 (data) = depth 1 | ||
| 9 | L2 = depth 2 | ||
| 10 | L3 = depth 3 | ||
| 11 | */ | ||
| 12 | #define NUM_CACHE_LEVELS 4 | ||
| 13 | |||
| 14 | struct neighborhood | ||
| 15 | { | ||
| 16 | unsigned int size[NUM_CACHE_LEVELS]; | ||
| 17 | cpumask_var_t neighbors[NUM_CACHE_LEVELS]; | ||
| 18 | }; | ||
| 19 | |||
| 20 | /* topology info is stored redundantly in a big array for fast lookups */ | ||
| 21 | extern struct neighborhood neigh_info[NR_CPUS]; | ||
| 22 | |||
| 23 | void init_topology(void); /* called by Litmus module's _init_litmus() */ | ||
| 24 | |||
| 25 | /* Works like: | ||
| 26 | void get_nearest_available_cpu( | ||
| 27 | cpu_entry_t **nearest, | ||
| 28 | cpu_entry_t *start, | ||
| 29 | cpu_entry_t *entries, | ||
| 30 | int release_master) | ||
| 31 | |||
| 32 | Set release_master = NO_CPU for no Release Master. | ||
| 33 | |||
| 34 | We use a macro here to exploit the fact that C-EDF and G-EDF | ||
| 35 | have similar structures for their cpu_entry_t structs, even though | ||
| 36 | they do not share a common base-struct. The macro allows us to | ||
| 37 | avoid code duplication. | ||
| 38 | |||
| 39 | TODO: Factor out the job-to-processor linking from C/G-EDF into | ||
| 40 | a reusable "processor mapping". (See B.B.'s RTSS'09 paper & | ||
| 41 | dissertation.) | ||
| 42 | */ | ||
| 43 | #define get_nearest_available_cpu(nearest, start, entries, release_master) \ | ||
| 44 | { \ | ||
| 45 | (nearest) = NULL; \ | ||
| 46 | if (!(start)->linked) { \ | ||
| 47 | (nearest) = (start); \ | ||
| 48 | } else { \ | ||
| 49 | int __level; \ | ||
| 50 | int __cpu; \ | ||
| 51 | int __release_master = ((release_master) == NO_CPU) ? -1 : (release_master); \ | ||
| 52 | struct neighborhood *__neighbors = &neigh_info[(start)->cpu]; \ | ||
| 53 | \ | ||
| 54 | for (__level = 0; (__level < NUM_CACHE_LEVELS) && !(nearest); ++__level) { \ | ||
| 55 | if (__neighbors->size[__level] > 1) { \ | ||
| 56 | for_each_cpu(__cpu, __neighbors->neighbors[__level]) { \ | ||
| 57 | if (__cpu != __release_master) { \ | ||
| 58 | cpu_entry_t *__entry = &per_cpu((entries), __cpu); \ | ||
| 59 | if (!__entry->linked) { \ | ||
| 60 | (nearest) = __entry; \ | ||
| 61 | break; \ | ||
| 62 | } \ | ||
| 63 | } \ | ||
| 64 | } \ | ||
| 65 | } else if (__neighbors->size[__level] == 0) { \ | ||
| 66 | break; \ | ||
| 67 | } \ | ||
| 68 | } \ | ||
| 69 | } \ | ||
| 70 | \ | ||
| 71 | if ((nearest)) { \ | ||
| 72 | TRACE("P%d is closest available CPU to P%d\n", \ | ||
| 73 | (nearest)->cpu, (start)->cpu); \ | ||
| 74 | } else { \ | ||
| 75 | TRACE("Could not find an available CPU close to P%d\n", \ | ||
| 76 | (start)->cpu); \ | ||
| 77 | } \ | ||
| 78 | } | ||
| 79 | |||
| 80 | #endif | ||
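Usage sketch (not part of the patch): how a global plugin could invoke get_nearest_available_cpu() when placing a job. cpu_entries (a per-CPU cpu_entry_t variable), link_task_to_cpu(), and requeue() are hypothetical plugin-side names; the macro itself initializes 'nearest'.

    cpu_entry_t *nearest;
    cpu_entry_t *preferred = &per_cpu(cpu_entries, task_cpu(t));

    /* Walk outward through shared cache levels from the preferred CPU and pick
     * the first CPU without a linked job; NO_CPU disables release-master filtering. */
    get_nearest_available_cpu(nearest, preferred, cpu_entries, NO_CPU);
    if (nearest)
            link_task_to_cpu(t, nearest);
    else
            requeue(t);     /* no idle neighbor found; leave the job queued */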
diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h new file mode 100644 index 00000000000..cf4864a498d --- /dev/null +++ b/include/litmus/bheap.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | /* bheap.h -- Binomial Heaps | ||
| 2 | * | ||
| 3 | * (c) 2008, 2009 Bjoern Brandenburg | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef BHEAP_H | ||
| 7 | #define BHEAP_H | ||
| 8 | |||
| 9 | #define NOT_IN_HEAP UINT_MAX | ||
| 10 | |||
| 11 | struct bheap_node { | ||
| 12 | struct bheap_node* parent; | ||
| 13 | struct bheap_node* next; | ||
| 14 | struct bheap_node* child; | ||
| 15 | |||
| 16 | unsigned int degree; | ||
| 17 | void* value; | ||
| 18 | struct bheap_node** ref; | ||
| 19 | }; | ||
| 20 | |||
| 21 | struct bheap { | ||
| 22 | struct bheap_node* head; | ||
| 23 | /* We cache the minimum of the heap. | ||
| 24 | * This speeds up repeated peek operations. | ||
| 25 | */ | ||
| 26 | struct bheap_node* min; | ||
| 27 | }; | ||
| 28 | |||
| 29 | typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); | ||
| 30 | |||
| 31 | void bheap_init(struct bheap* heap); | ||
| 32 | void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); | ||
| 33 | |||
| 34 | static inline int bheap_node_in_heap(struct bheap_node* h) | ||
| 35 | { | ||
| 36 | return h->degree != NOT_IN_HEAP; | ||
| 37 | } | ||
| 38 | |||
| 39 | static inline int bheap_empty(struct bheap* heap) | ||
| 40 | { | ||
| 41 | return heap->head == NULL && heap->min == NULL; | ||
| 42 | } | ||
| 43 | |||
| 44 | /* insert (and reinitialize) a node into the heap */ | ||
| 45 | void bheap_insert(bheap_prio_t higher_prio, | ||
| 46 | struct bheap* heap, | ||
| 47 | struct bheap_node* node); | ||
| 48 | |||
| 49 | /* merge addition into target */ | ||
| 50 | void bheap_union(bheap_prio_t higher_prio, | ||
| 51 | struct bheap* target, | ||
| 52 | struct bheap* addition); | ||
| 53 | |||
| 54 | struct bheap_node* bheap_peek(bheap_prio_t higher_prio, | ||
| 55 | struct bheap* heap); | ||
| 56 | |||
| 57 | struct bheap_node* bheap_take(bheap_prio_t higher_prio, | ||
| 58 | struct bheap* heap); | ||
| 59 | |||
| 60 | void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); | ||
| 61 | int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); | ||
| 62 | |||
| 63 | void bheap_delete(bheap_prio_t higher_prio, | ||
| 64 | struct bheap* heap, | ||
| 65 | struct bheap_node* node); | ||
| 66 | |||
| 67 | /* allocate from memcache */ | ||
| 68 | struct bheap_node* bheap_node_alloc(int gfp_flags); | ||
| 69 | void bheap_node_free(struct bheap_node* hn); | ||
| 70 | |||
| 71 | /* allocate a heap node for value and insert into the heap */ | ||
| 72 | int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, | ||
| 73 | void* value, int gfp_flags); | ||
| 74 | |||
| 75 | void* bheap_take_del(bheap_prio_t higher_prio, | ||
| 76 | struct bheap* heap); | ||
| 77 | #endif | ||
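Usage sketch (not part of the patch): a min-ordered binomial heap of integers built with the API above. It assumes the bheap_node slab cache has already been set up by the LITMUS^RT core, since bheap_add()/bheap_node_alloc() draw from it; the keys and function names are illustrative.

    static int int_higher_prio(struct bheap_node *a, struct bheap_node *b)
    {
            /* nonzero if a has higher priority, i.e. a smaller key, than b */
            return *(int *) a->value < *(int *) b->value;
    }

    static void bheap_demo(void)
    {
            static int keys[] = { 30, 10, 20 };
            struct bheap heap;
            int i;

            bheap_init(&heap);
            for (i = 0; i < 3; i++)
                    bheap_add(int_higher_prio, &heap, &keys[i], GFP_ATOMIC);

            /* bheap_take_del() removes the minimum and frees its node:
             * yields 10, then 20, then 30. */
            while (!bheap_empty(&heap))
                    printk(KERN_INFO "min = %d\n",
                           *(int *) bheap_take_del(int_higher_prio, &heap));
    }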
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h new file mode 100644 index 00000000000..901a30a3e29 --- /dev/null +++ b/include/litmus/binheap.h | |||
| @@ -0,0 +1,206 @@ | |||
| 1 | #ifndef LITMUS_BINARY_HEAP_H | ||
| 2 | #define LITMUS_BINARY_HEAP_H | ||
| 3 | |||
| 4 | #include <linux/kernel.h> | ||
| 5 | |||
| 6 | /** | ||
| 7 | * Simple binary heap with add, arbitrary delete, delete_root, and top | ||
| 8 | * operations. | ||
| 9 | * | ||
| 10 | * Style meant to conform with list.h. | ||
| 11 | * | ||
| 12 | * Motivation: Linux's prio_heap.h is of fixed size. Litmus's binomial | ||
| 13 | * heap may be overkill (and perhaps not general enough) for some applications. | ||
| 14 | * | ||
| 15 | * Note: In order to make node swaps fast, a node inserted with a data pointer | ||
| 16 | * may not always hold said data pointer. This is similar to the binomial heap | ||
| 17 | * implementation. This does make node deletion tricky since we have to | ||
| 18 | * (1) locate the node that currently holds the data pointer to delete, and | ||
| 19 | * (2) find the node that was originally inserted with said data pointer. These have to be | ||
| 20 | * coalesced into a single node before removal (see usage of | ||
| 21 | * __binheap_safe_swap()). We have to track node references to accomplish this. | ||
| 22 | */ | ||
| 23 | |||
| 24 | struct binheap_node { | ||
| 25 | void *data; | ||
| 26 | struct binheap_node *parent; | ||
| 27 | struct binheap_node *left; | ||
| 28 | struct binheap_node *right; | ||
| 29 | |||
| 30 | /* pointer to binheap_node that holds *data for which this binheap_node | ||
| 31 | * was originally inserted. (*data "owns" this node) | ||
| 32 | */ | ||
| 33 | struct binheap_node *ref; | ||
| 34 | struct binheap_node **ref_ptr; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /** | ||
| 38 | * Signature of comparator function. Assumed 'less-than' (min-heap). | ||
| 39 | * Pass in 'greater-than' for max-heap. | ||
| 40 | * | ||
| 41 | * TODO: Consider macro-based implementation that allows comparator to be | ||
| 42 | * inlined (similar to Linux red/black tree) for greater efficiency. | ||
| 43 | */ | ||
| 44 | typedef int (*binheap_order_t)(struct binheap_node *a, | ||
| 45 | struct binheap_node *b); | ||
| 46 | |||
| 47 | |||
| 48 | struct binheap { | ||
| 49 | struct binheap_node *root; | ||
| 50 | |||
| 51 | /* pointer to node to take next inserted child */ | ||
| 52 | struct binheap_node *next; | ||
| 53 | |||
| 54 | /* pointer to last node in complete binary tree */ | ||
| 55 | struct binheap_node *last; | ||
| 56 | |||
| 57 | /* comparator function pointer */ | ||
| 58 | binheap_order_t compare; | ||
| 59 | }; | ||
| 60 | |||
| 61 | |||
| 62 | /* Initialized heap nodes not in a heap have parent | ||
| 63 | * set to BINHEAP_POISON. | ||
| 64 | */ | ||
| 65 | #define BINHEAP_POISON ((void*)(0xdeadbeef)) | ||
| 66 | |||
| 67 | |||
| 68 | /** | ||
| 69 | * binheap_entry - get the struct for this heap node. | ||
| 70 | * Only valid when called upon heap nodes other than the root handle. | ||
| 71 | * @ptr: the heap node. | ||
| 72 | * @type: the type of struct pointed to by binheap_node::data. | ||
| 73 | * @member: unused. | ||
| 74 | */ | ||
| 75 | #define binheap_entry(ptr, type, member) \ | ||
| 76 | ((type *)((ptr)->data)) | ||
| 77 | |||
| 78 | /** | ||
| 79 | * binheap_node_container - get the struct that contains this node. | ||
| 80 | * Only valid when called upon heap nodes other than the root handle. | ||
| 81 | * @ptr: the heap node. | ||
| 82 | * @type: the type of struct the node is embedded in. | ||
| 83 | * @member: the name of the binheap_struct within the (type) struct. | ||
| 84 | */ | ||
| 85 | #define binheap_node_container(ptr, type, member) \ | ||
| 86 | container_of((ptr), type, member) | ||
| 87 | |||
| 88 | /** | ||
| 89 | * binheap_top_entry - get the struct for the node at the top of the heap. | ||
| 90 | * Only valid when called upon the heap handle node. | ||
| 91 | * @ptr: the special heap-handle node. | ||
| 92 | * @type: the type of the struct the head is embedded in. | ||
| 93 | * @member: the name of the binheap_struct within the (type) struct. | ||
| 94 | */ | ||
| 95 | #define binheap_top_entry(ptr, type, member) \ | ||
| 96 | binheap_entry((ptr)->root, type, member) | ||
| 97 | |||
| 98 | /** | ||
| 99 | * binheap_delete_root - remove the root element from the heap. | ||
| 100 | * @handle: handle to the heap. | ||
| 101 | * @type: the type of the struct the head is embedded in. | ||
| 102 | * @member: the name of the binheap_struct within the (type) struct. | ||
| 103 | */ | ||
| 104 | #define binheap_delete_root(handle, type, member) \ | ||
| 105 | __binheap_delete_root((handle), &((type *)((handle)->root->data))->member) | ||
| 106 | |||
| 107 | /** | ||
| 108 | * binheap_delete - remove an arbitrary element from the heap. | ||
| 109 | * @to_delete: pointer to node to be removed. | ||
| 110 | * @handle: handle to the heap. | ||
| 111 | */ | ||
| 112 | #define binheap_delete(to_delete, handle) \ | ||
| 113 | __binheap_delete((to_delete), (handle)) | ||
| 114 | |||
| 115 | /** | ||
| 116 | * binheap_add - insert an element to the heap | ||
| 117 | * new_node: node to add. | ||
| 118 | * @handle: handle to the heap. | ||
| 119 | * @type: the type of the struct the head is embedded in. | ||
| 120 | * @member: the name of the binheap_struct within the (type) struct. | ||
| 121 | */ | ||
| 122 | #define binheap_add(new_node, handle, type, member) \ | ||
| 123 | __binheap_add((new_node), (handle), container_of((new_node), type, member)) | ||
| 124 | |||
| 125 | /** | ||
| 126 | * binheap_decrease - re-eval the position of a node (based upon its | ||
| 127 | * original data pointer). | ||
| 128 | * @handle: handle to the heap. | ||
| 129 | * @orig_node: node that was associated with the data pointer | ||
| 130 | * (whose value has changed) when said pointer was | ||
| 131 | * added to the heap. | ||
| 132 | */ | ||
| 133 | #define binheap_decrease(orig_node, handle) \ | ||
| 134 | __binheap_decrease((orig_node), (handle)) | ||
| 135 | |||
| 136 | #define BINHEAP_NODE_INIT() { NULL, BINHEAP_POISON, NULL, NULL , NULL, NULL} | ||
| 137 | |||
| 138 | #define BINHEAP_NODE(name) \ | ||
| 139 | struct binheap_node name = BINHEAP_NODE_INIT() | ||
| 140 | |||
| 141 | |||
| 142 | static inline void INIT_BINHEAP_NODE(struct binheap_node *n) | ||
| 143 | { | ||
| 144 | n->data = NULL; | ||
| 145 | n->parent = BINHEAP_POISON; | ||
| 146 | n->left = NULL; | ||
| 147 | n->right = NULL; | ||
| 148 | n->ref = NULL; | ||
| 149 | n->ref_ptr = NULL; | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline void INIT_BINHEAP_HANDLE(struct binheap *handle, | ||
| 153 | binheap_order_t compare) | ||
| 154 | { | ||
| 155 | handle->root = NULL; | ||
| 156 | handle->next = NULL; | ||
| 157 | handle->last = NULL; | ||
| 158 | handle->compare = compare; | ||
| 159 | } | ||
| 160 | |||
| 161 | /* Returns true if binheap is empty. */ | ||
| 162 | static inline int binheap_empty(struct binheap *handle) | ||
| 163 | { | ||
| 164 | return(handle->root == NULL); | ||
| 165 | } | ||
| 166 | |||
| 167 | /* Returns true if binheap node is in a heap. */ | ||
| 168 | static inline int binheap_is_in_heap(struct binheap_node *node) | ||
| 169 | { | ||
| 170 | return (node->parent != BINHEAP_POISON); | ||
| 171 | } | ||
| 172 | |||
| 173 | /* Returns true if binheap node is in given heap. */ | ||
| 174 | int binheap_is_in_this_heap(struct binheap_node *node, struct binheap* heap); | ||
| 175 | |||
| 176 | /* Add a node to a heap */ | ||
| 177 | void __binheap_add(struct binheap_node *new_node, | ||
| 178 | struct binheap *handle, | ||
| 179 | void *data); | ||
| 180 | |||
| 181 | /** | ||
| 182 | * Removes the root node from the heap. The node is removed after coalescing | ||
| 183 | * the binheap_node with its original data pointer at the root of the tree. | ||
| 184 | * | ||
| 185 | * The 'last' node in the tree is then swapped up to the root and bubbled | ||
| 186 | * down. | ||
| 187 | */ | ||
| 188 | void __binheap_delete_root(struct binheap *handle, | ||
| 189 | struct binheap_node *container); | ||
| 190 | |||
| 191 | /** | ||
| 192 | * Delete an arbitrary node. Bubble node to delete up to the root, | ||
| 193 | * and then delete the root. | ||
| 194 | */ | ||
| 195 | void __binheap_delete(struct binheap_node *node_to_delete, | ||
| 196 | struct binheap *handle); | ||
| 197 | |||
| 198 | /** | ||
| 199 | * Bubble up a node whose pointer has decreased in value. | ||
| 200 | */ | ||
| 201 | void __binheap_decrease(struct binheap_node *orig_node, | ||
| 202 | struct binheap *handle); | ||
| 203 | |||
| 204 | |||
| 205 | #endif | ||
| 206 | |||
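Usage sketch (not part of the patch): the intended embedding pattern for the binary heap, with a hypothetical demo_item struct. The comparator receives binheap_nodes and recovers the payload via binheap_entry(), exactly because nodes may not hold the data pointer they were inserted with after swaps.

    struct demo_item {
            int key;
            struct binheap_node node;
    };

    static int demo_less(struct binheap_node *a, struct binheap_node *b)
    {
            return binheap_entry(a, struct demo_item, node)->key <
                   binheap_entry(b, struct demo_item, node)->key;
    }

    static void binheap_demo(void)
    {
            static struct demo_item items[] = { { .key = 7 }, { .key = 3 } };
            struct binheap handle;
            struct demo_item *top;
            int i;

            INIT_BINHEAP_HANDLE(&handle, demo_less);
            for (i = 0; i < 2; i++) {
                    INIT_BINHEAP_NODE(&items[i].node);
                    binheap_add(&items[i].node, &handle, struct demo_item, node);
            }

            top = binheap_top_entry(&handle, struct demo_item, node);  /* key == 3 */
            binheap_delete_root(&handle, struct demo_item, node);
    }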
diff --git a/include/litmus/budget.h b/include/litmus/budget.h new file mode 100644 index 00000000000..33344ee8d5f --- /dev/null +++ b/include/litmus/budget.h | |||
| @@ -0,0 +1,35 @@ | |||
| 1 | #ifndef _LITMUS_BUDGET_H_ | ||
| 2 | #define _LITMUS_BUDGET_H_ | ||
| 3 | |||
| 4 | /* Update the per-processor enforcement timer (arm/reprogram/cancel) for | ||
| 5 | * the next task. */ | ||
| 6 | void update_enforcement_timer(struct task_struct* t); | ||
| 7 | |||
| 8 | inline static int budget_exhausted(struct task_struct* t) | ||
| 9 | { | ||
| 10 | return get_exec_time(t) >= get_exec_cost(t); | ||
| 11 | } | ||
| 12 | |||
| 13 | inline static lt_t budget_remaining(struct task_struct* t) | ||
| 14 | { | ||
| 15 | if (!budget_exhausted(t)) | ||
| 16 | return get_exec_cost(t) - get_exec_time(t); | ||
| 17 | else | ||
| 18 | /* avoid overflow */ | ||
| 19 | return 0; | ||
| 20 | } | ||
| 21 | |||
| 22 | #define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
| 23 | |||
| 24 | #define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \ | ||
| 25 | == PRECISE_ENFORCEMENT) | ||
| 26 | |||
| 27 | static inline int requeue_preempted_job(struct task_struct* t) | ||
| 28 | { | ||
| 29 | /* Add task to ready queue only if not subject to budget enforcement or | ||
| 30 | * if the job has budget remaining. t may be NULL. | ||
| 31 | */ | ||
| 32 | return t && (!budget_exhausted(t) || !budget_enforced(t)); | ||
| 33 | } | ||
| 34 | |||
| 35 | #endif | ||
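Usage sketch (not part of the patch): how a plugin's scheduling path might combine these budget checks; job_completion() is a hypothetical plugin helper, and the get_exec_time()/get_exec_cost() accessors behind budget_exhausted() come from litmus.h later in this diff.

    /* inside a plugin's schedule() callback; 't' is the previously scheduled
     * real-time task and 'next' is the job picked to run next */
    if (t && budget_enforced(t) && budget_exhausted(t))
            job_completion(t);              /* hypothetical: treat as (early) completion */

    update_enforcement_timer(next);         /* arm/reprogram/cancel the per-CPU timer */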
diff --git a/include/litmus/clustered.h b/include/litmus/clustered.h new file mode 100644 index 00000000000..0c18dcb15e6 --- /dev/null +++ b/include/litmus/clustered.h | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | #ifndef CLUSTERED_H | ||
| 2 | #define CLUSTERED_H | ||
| 3 | |||
| 4 | /* Which cache level should be used to group CPUs into clusters? | ||
| 5 | * GLOBAL_CLUSTER means that all CPUs form a single cluster (just like under | ||
| 6 | * global scheduling). | ||
| 7 | */ | ||
| 8 | enum cache_level { | ||
| 9 | GLOBAL_CLUSTER = 0, | ||
| 10 | L1_CLUSTER = 1, | ||
| 11 | L2_CLUSTER = 2, | ||
| 12 | L3_CLUSTER = 3 | ||
| 13 | }; | ||
| 14 | |||
| 15 | int parse_cache_level(const char *str, enum cache_level *level); | ||
| 16 | const char* cache_level_name(enum cache_level level); | ||
| 17 | |||
| 18 | /* expose a cache level in a /proc dir */ | ||
| 19 | struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent, | ||
| 20 | enum cache_level* level); | ||
| 21 | |||
| 22 | |||
| 23 | |||
| 24 | struct scheduling_cluster { | ||
| 25 | unsigned int id; | ||
| 26 | /* list of CPUs that are part of this cluster */ | ||
| 27 | struct list_head cpus; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct cluster_cpu { | ||
| 31 | unsigned int id; /* which CPU is this? */ | ||
| 32 | struct list_head cluster_list; /* List of the CPUs in this cluster. */ | ||
| 33 | struct scheduling_cluster* cluster; /* The cluster that this CPU belongs to. */ | ||
| 34 | }; | ||
| 35 | |||
| 36 | int get_cluster_size(enum cache_level level); | ||
| 37 | |||
| 38 | int assign_cpus_to_clusters(enum cache_level level, | ||
| 39 | struct scheduling_cluster* clusters[], | ||
| 40 | unsigned int num_clusters, | ||
| 41 | struct cluster_cpu* cpus[], | ||
| 42 | unsigned int num_cpus); | ||
| 43 | |||
| 44 | #endif | ||
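Usage sketch (not part of the patch): mapping a /proc string to a cluster size with the helpers above, assuming parse_cache_level() returns 0 on success (the exact return convention is not shown in this header).

    enum cache_level level;

    if (parse_cache_level("L2", &level) == 0)
            printk(KERN_INFO "clustering at %s: %d CPUs per cluster\n",
                   cache_level_name(level), get_cluster_size(level));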
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h new file mode 100644 index 00000000000..928b1dfd1db --- /dev/null +++ b/include/litmus/debug_trace.h | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | #ifndef LITMUS_DEBUG_TRACE_H | ||
| 2 | #define LITMUS_DEBUG_TRACE_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_SCHED_DEBUG_TRACE | ||
| 5 | void sched_trace_log_message(const char* fmt, ...); | ||
| 6 | void dump_trace_buffer(int max); | ||
| 7 | #else | ||
| 8 | |||
| 9 | #define sched_trace_log_message(fmt, ...) | ||
| 10 | |||
| 11 | #endif | ||
| 12 | |||
| 13 | extern atomic_t __log_seq_no; | ||
| 14 | |||
| 15 | #ifdef CONFIG_SCHED_DEBUG_TRACE_CALLER | ||
| 16 | #define TRACE_PREFIX "%d P%d [%s@%s:%d]: " | ||
| 17 | #define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ | ||
| 18 | raw_smp_processor_id(), \ | ||
| 19 | __FUNCTION__, __FILE__, __LINE__ | ||
| 20 | #define STRACE(fmt, args...) \ | ||
| 21 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \ | ||
| 22 | TRACE_ARGS, ## args) | ||
| 23 | #define STRACE2(fmt, args...) \ | ||
| 24 | sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \ | ||
| 25 | TRACE_ARGS, ## args) | ||
| 26 | #else | ||
| 27 | #define TRACE_PREFIX "%d P%d: " | ||
| 28 | #define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ | ||
| 29 | raw_smp_processor_id() | ||
| 30 | #define STRACE(fmt, args...) \ | ||
| 31 | sched_trace_log_message("%d P%d : " fmt, \ | ||
| 32 | TRACE_ARGS, ## args) | ||
| 33 | #define STRACE2(fmt, args...) \ | ||
| 34 | sched_trace_log_message("%d P%d : " fmt, \ | ||
| 35 | TRACE_ARGS, ## args) | ||
| 36 | #endif | ||
| 37 | |||
| 38 | #define TRACE(fmt, args...) \ | ||
| 39 | sched_trace_log_message(TRACE_PREFIX fmt, \ | ||
| 40 | TRACE_ARGS, ## args) | ||
| 41 | |||
| 42 | #define TRACE_TASK(t, fmt, args...) \ | ||
| 43 | TRACE("(%s/%d:%d) " fmt, \ | ||
| 44 | t ? (t)->comm : "null", \ | ||
| 45 | t ? (t)->pid : 0, \ | ||
| 46 | t ? (t)->rt_param.job_params.job_no : 0, \ | ||
| 47 | ##args) | ||
| 48 | |||
| 49 | #define STRACE_TASK(t, fmt, args...) \ | ||
| 50 | STRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ | ||
| 51 | (t)->rt_param.job_params.job_no, ##args) | ||
| 52 | |||
| 53 | #define TRACE_CUR(fmt, args...) \ | ||
| 54 | TRACE_TASK(current, fmt, ## args) | ||
| 55 | |||
| 56 | |||
| 57 | |||
| 58 | #endif | ||
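Usage sketch (not part of the patch): typical debug-trace calls from plugin code, with 't' a real-time task. Output only reaches the log when CONFIG_SCHED_DEBUG_TRACE is enabled; get_deadline() is the litmus.h accessor shown later in this diff.

    TRACE("plugin activated on %d online CPUs\n", num_online_cpus());
    TRACE_TASK(t, "job released, deadline=%llu\n", get_deadline(t));
    TRACE_CUR("descheduling\n");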
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h new file mode 100644 index 00000000000..bbaf22ea7f1 --- /dev/null +++ b/include/litmus/edf_common.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* | ||
| 2 | * EDF common data structures and utility functions shared by all EDF | ||
| 3 | * based scheduler plugins | ||
| 4 | */ | ||
| 5 | |||
| 6 | /* CLEANUP: Add comments and make it less messy. | ||
| 7 | * | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef __UNC_EDF_COMMON_H__ | ||
| 11 | #define __UNC_EDF_COMMON_H__ | ||
| 12 | |||
| 13 | #include <litmus/rt_domain.h> | ||
| 14 | |||
| 15 | void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
| 16 | release_jobs_t release); | ||
| 17 | |||
| 18 | int edf_higher_prio(struct task_struct* first, | ||
| 19 | struct task_struct* second); | ||
| 20 | |||
| 21 | int edf_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
| 22 | |||
| 23 | int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
| 24 | |||
| 25 | #endif | ||
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h new file mode 100644 index 00000000000..85a649e2722 --- /dev/null +++ b/include/litmus/fdso.h | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | /* fdso.h - file descriptor attached shared objects | ||
| 2 | * | ||
| 3 | * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef _LINUX_FDSO_H_ | ||
| 7 | #define _LINUX_FDSO_H_ | ||
| 8 | |||
| 9 | #include <linux/list.h> | ||
| 10 | #include <asm/atomic.h> | ||
| 11 | |||
| 12 | #include <linux/fs.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | |||
| 15 | #define MAX_OBJECT_DESCRIPTORS 32 | ||
| 16 | |||
| 17 | typedef unsigned int resource_mask_t; | ||
| 18 | |||
| 19 | typedef enum { | ||
| 20 | MIN_OBJ_TYPE = 0, | ||
| 21 | |||
| 22 | FMLP_SEM = 0, | ||
| 23 | SRP_SEM = 1, | ||
| 24 | |||
| 25 | MPCP_SEM = 2, | ||
| 26 | MPCP_VS_SEM = 3, | ||
| 27 | DPCP_SEM = 4, | ||
| 28 | |||
| 29 | PCP_SEM = 5, | ||
| 30 | |||
| 31 | DGL_SEM = 6, | ||
| 32 | |||
| 33 | MAX_OBJ_TYPE = 6 | ||
| 34 | } obj_type_t; | ||
| 35 | |||
| 36 | struct inode_obj_id { | ||
| 37 | struct list_head list; | ||
| 38 | atomic_t count; | ||
| 39 | struct inode* inode; | ||
| 40 | |||
| 41 | obj_type_t type; | ||
| 42 | void* obj; | ||
| 43 | unsigned int id; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct fdso_ops; | ||
| 47 | |||
| 48 | struct od_table_entry { | ||
| 49 | unsigned int used; | ||
| 50 | |||
| 51 | struct inode_obj_id* obj; | ||
| 52 | const struct fdso_ops* class; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct fdso_ops { | ||
| 56 | int (*create)(void** obj_ref, obj_type_t type, void* __user); | ||
| 57 | void (*destroy)(obj_type_t type, void*); | ||
| 58 | int (*open) (struct od_table_entry*, void* __user); | ||
| 59 | int (*close) (struct od_table_entry*); | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* translate a userspace supplied od into the raw table entry | ||
| 63 | * returns NULL if od is invalid | ||
| 64 | */ | ||
| 65 | struct od_table_entry* get_entry_for_od(int od); | ||
| 66 | |||
| 67 | /* translate a userspace supplied od into the associated object | ||
| 68 | * returns NULL if od is invalid | ||
| 69 | */ | ||
| 70 | static inline void* od_lookup(int od, obj_type_t type) | ||
| 71 | { | ||
| 72 | struct od_table_entry* e = get_entry_for_od(od); | ||
| 73 | return e && e->obj->type == type ? e->obj->obj : NULL; | ||
| 74 | } | ||
| 75 | |||
| 76 | #define lookup_fmlp_sem(od)((struct fmlp_semaphore*) od_lookup(od, FMLP_SEM)) | ||
| 77 | #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | ||
| 78 | #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | ||
| 79 | |||
| 80 | |||
| 81 | #endif | ||
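Usage sketch (not part of the patch): resolving a userspace-supplied object descriptor inside a hypothetical locking syscall, using only the fields declared above.

    static long demo_lock(int od)
    {
            struct od_table_entry *entry = get_entry_for_od(od);

            if (!entry || !entry->used || entry->obj->type != FMLP_SEM)
                    return -EINVAL;         /* not an open FMLP semaphore descriptor */

            /* entry->obj->obj is the semaphore instance created by the
             * fdso_ops->create() callback registered for FMLP_SEM */
            return 0;
    }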
diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h new file mode 100644 index 00000000000..6c18277fdfc --- /dev/null +++ b/include/litmus/feather_buffer.h | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | #ifndef _FEATHER_BUFFER_H_ | ||
| 2 | #define _FEATHER_BUFFER_H_ | ||
| 3 | |||
| 4 | /* requires UINT_MAX and memcpy */ | ||
| 5 | |||
| 6 | #define SLOT_FREE 0 | ||
| 7 | #define SLOT_BUSY 1 | ||
| 8 | #define SLOT_READY 2 | ||
| 9 | |||
| 10 | struct ft_buffer { | ||
| 11 | unsigned int slot_count; | ||
| 12 | unsigned int slot_size; | ||
| 13 | |||
| 14 | int free_count; | ||
| 15 | unsigned int write_idx; | ||
| 16 | unsigned int read_idx; | ||
| 17 | |||
| 18 | char* slots; | ||
| 19 | void* buffer_mem; | ||
| 20 | unsigned int failed_writes; | ||
| 21 | }; | ||
| 22 | |||
| 23 | static inline int init_ft_buffer(struct ft_buffer* buf, | ||
| 24 | unsigned int slot_count, | ||
| 25 | unsigned int slot_size, | ||
| 26 | char* slots, | ||
| 27 | void* buffer_mem) | ||
| 28 | { | ||
| 29 | int i = 0; | ||
| 30 | if (!slot_count || UINT_MAX % slot_count != slot_count - 1) { | ||
| 31 | /* The slot count must divide UINT_MAX + 1 so that when the | ||
| 32 | * index wraps around it correctly points to 0. | ||
| 33 | */ | ||
| 34 | return 0; | ||
| 35 | } else { | ||
| 36 | buf->slot_count = slot_count; | ||
| 37 | buf->slot_size = slot_size; | ||
| 38 | buf->slots = slots; | ||
| 39 | buf->buffer_mem = buffer_mem; | ||
| 40 | buf->free_count = slot_count; | ||
| 41 | buf->write_idx = 0; | ||
| 42 | buf->read_idx = 0; | ||
| 43 | buf->failed_writes = 0; | ||
| 44 | for (i = 0; i < slot_count; i++) | ||
| 45 | buf->slots[i] = SLOT_FREE; | ||
| 46 | return 1; | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr) | ||
| 51 | { | ||
| 52 | int free = fetch_and_dec(&buf->free_count); | ||
| 53 | unsigned int idx; | ||
| 54 | if (free <= 0) { | ||
| 55 | fetch_and_inc(&buf->free_count); | ||
| 56 | *ptr = 0; | ||
| 57 | fetch_and_inc(&buf->failed_writes); | ||
| 58 | return 0; | ||
| 59 | } else { | ||
| 60 | idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count; | ||
| 61 | buf->slots[idx] = SLOT_BUSY; | ||
| 62 | *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size; | ||
| 63 | return 1; | ||
| 64 | } | ||
| 65 | } | ||
| 66 | |||
| 67 | static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr) | ||
| 68 | { | ||
| 69 | unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size; | ||
| 70 | buf->slots[idx] = SLOT_READY; | ||
| 71 | } | ||
| 72 | |||
| 73 | |||
| 74 | /* exclusive reader access is assumed */ | ||
| 75 | static inline int ft_buffer_read(struct ft_buffer* buf, void* dest) | ||
| 76 | { | ||
| 77 | unsigned int idx; | ||
| 78 | if (buf->free_count == buf->slot_count) | ||
| 79 | /* nothing available */ | ||
| 80 | return 0; | ||
| 81 | idx = buf->read_idx % buf->slot_count; | ||
| 82 | if (buf->slots[idx] == SLOT_READY) { | ||
| 83 | memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size, | ||
| 84 | buf->slot_size); | ||
| 85 | buf->slots[idx] = SLOT_FREE; | ||
| 86 | buf->read_idx++; | ||
| 87 | fetch_and_inc(&buf->free_count); | ||
| 88 | return 1; | ||
| 89 | } else | ||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | |||
| 94 | #endif | ||
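Usage sketch (not part of the patch): a wait-free producer and a single consumer on an ft_buffer. The record type, sizes, and names are illustrative; ft_timestamp() is the helper from feather_trace.h.

    #define DEMO_SLOTS 128                  /* power of two, so it divides UINT_MAX + 1 */

    struct demo_rec { unsigned long long when; int event; };

    static char demo_flags[DEMO_SLOTS];
    static struct demo_rec demo_mem[DEMO_SLOTS];
    static struct ft_buffer demo_buf;

    static void demo_init(void)
    {
            init_ft_buffer(&demo_buf, DEMO_SLOTS, sizeof(struct demo_rec),
                           demo_flags, demo_mem);
    }

    static void demo_record(int event)      /* safe from any context */
    {
            struct demo_rec *rec;

            if (ft_buffer_start_write(&demo_buf, (void **) &rec)) {
                    rec->when = ft_timestamp();
                    rec->event = event;
                    ft_buffer_finish_write(&demo_buf, rec);
            }       /* else: buffer full; failed_writes was incremented */
    }

    static int demo_drain_one(struct demo_rec *dest)    /* single reader only */
    {
            return ft_buffer_read(&demo_buf, dest);     /* 1 if a record was copied */
    }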
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h new file mode 100644 index 00000000000..028dfb206fb --- /dev/null +++ b/include/litmus/feather_trace.h | |||
| @@ -0,0 +1,65 @@ | |||
| 1 | #ifndef _FEATHER_TRACE_H_ | ||
| 2 | #define _FEATHER_TRACE_H_ | ||
| 3 | |||
| 4 | #include <asm/atomic.h> | ||
| 5 | |||
| 6 | int ft_enable_event(unsigned long id); | ||
| 7 | int ft_disable_event(unsigned long id); | ||
| 8 | int ft_is_event_enabled(unsigned long id); | ||
| 9 | int ft_disable_all_events(void); | ||
| 10 | |||
| 11 | /* atomic_* functions are inline anyway */ | ||
| 12 | static inline int fetch_and_inc(int *val) | ||
| 13 | { | ||
| 14 | return atomic_add_return(1, (atomic_t*) val) - 1; | ||
| 15 | } | ||
| 16 | |||
| 17 | static inline int fetch_and_dec(int *val) | ||
| 18 | { | ||
| 19 | return atomic_sub_return(1, (atomic_t*) val) + 1; | ||
| 20 | } | ||
| 21 | |||
| 22 | /* Don't use the rewriting implementation if kernel text pages are read-only. | ||
| 23 | * Ftrace gets around this by using the identity mapping, but that's more | ||
| 24 | * effort than is warranted right now for Feather-Trace. | ||
| 25 | * Eventually, it may make sense to replace Feather-Trace with ftrace. | ||
| 26 | */ | ||
| 27 | #if defined(CONFIG_ARCH_HAS_FEATHER_TRACE) && !defined(CONFIG_DEBUG_RODATA) | ||
| 28 | |||
| 29 | #include <asm/feather_trace.h> | ||
| 30 | |||
| 31 | #else /* !__ARCH_HAS_FEATHER_TRACE */ | ||
| 32 | |||
| 33 | /* provide default implementation */ | ||
| 34 | |||
| 35 | #include <asm/timex.h> /* for get_cycles() */ | ||
| 36 | |||
| 37 | static inline unsigned long long ft_timestamp(void) | ||
| 38 | { | ||
| 39 | return get_cycles(); | ||
| 40 | } | ||
| 41 | |||
| 42 | #define feather_callback | ||
| 43 | |||
| 44 | #define MAX_EVENTS 1024 | ||
| 45 | |||
| 46 | extern int ft_events[MAX_EVENTS]; | ||
| 47 | |||
| 48 | #define ft_event(id, callback) \ | ||
| 49 | if (ft_events[id]) callback(); | ||
| 50 | |||
| 51 | #define ft_event0(id, callback) \ | ||
| 52 | if (ft_events[id]) callback(id); | ||
| 53 | |||
| 54 | #define ft_event1(id, callback, param) \ | ||
| 55 | if (ft_events[id]) callback(id, param); | ||
| 56 | |||
| 57 | #define ft_event2(id, callback, param, param2) \ | ||
| 58 | if (ft_events[id]) callback(id, param, param2); | ||
| 59 | |||
| 60 | #define ft_event3(id, callback, p, p2, p3) \ | ||
| 61 | if (ft_events[id]) callback(id, p, p2, p3); | ||
| 62 | |||
| 63 | #endif /* __ARCH_HAS_FEATHER_TRACE */ | ||
| 64 | |||
| 65 | #endif | ||
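Usage sketch (not part of the patch): how the fallback ft_event macros are meant to be used. DEMO_EVENT and the probe are hypothetical; the probe only runs while the event id has been enabled via ft_enable_event().

    #define DEMO_EVENT 42                   /* must be < MAX_EVENTS */

    feather_callback void demo_probe(unsigned long id, unsigned long cpu)
    {
            /* record a timestamp, bump a counter, ... */
    }

    static void hot_path(void)
    {
            /* expands to a table lookup plus conditional call in the fallback
             * implementation; the rewriting implementation patches the call site */
            ft_event1(DEMO_EVENT, demo_probe, raw_smp_processor_id());
    }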
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h new file mode 100644 index 00000000000..19356c0fa6c --- /dev/null +++ b/include/litmus/fp_common.h | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | /* Fixed-priority scheduler support. | ||
| 2 | */ | ||
| 3 | |||
| 4 | #ifndef __FP_COMMON_H__ | ||
| 5 | #define __FP_COMMON_H__ | ||
| 6 | |||
| 7 | #include <litmus/rt_domain.h> | ||
| 8 | |||
| 9 | #include <asm/bitops.h> | ||
| 10 | |||
| 11 | |||
| 12 | void fp_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
| 13 | release_jobs_t release); | ||
| 14 | |||
| 15 | int fp_higher_prio(struct task_struct* first, | ||
| 16 | struct task_struct* second); | ||
| 17 | |||
| 18 | int fp_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
| 19 | |||
| 20 | #define FP_PRIO_BIT_WORDS (LITMUS_MAX_PRIORITY / BITS_PER_LONG) | ||
| 21 | |||
| 22 | #if (LITMUS_MAX_PRIORITY % BITS_PER_LONG) | ||
| 23 | #error LITMUS_MAX_PRIORITY must be a multiple of BITS_PER_LONG | ||
| 24 | #endif | ||
| 25 | |||
| 26 | /* bitmask-indexed priority queue */ | ||
| 27 | struct fp_prio_queue { | ||
| 28 | unsigned long bitmask[FP_PRIO_BIT_WORDS]; | ||
| 29 | struct bheap queue[LITMUS_MAX_PRIORITY]; | ||
| 30 | }; | ||
| 31 | |||
| 32 | void fp_prio_queue_init(struct fp_prio_queue* q); | ||
| 33 | |||
| 34 | static inline void fpq_set(struct fp_prio_queue* q, unsigned int index) | ||
| 35 | { | ||
| 36 | unsigned long *word = q->bitmask + (index / BITS_PER_LONG); | ||
| 37 | __set_bit(index % BITS_PER_LONG, word); | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline void fpq_clear(struct fp_prio_queue* q, unsigned int index) | ||
| 41 | { | ||
| 42 | unsigned long *word = q->bitmask + (index / BITS_PER_LONG); | ||
| 43 | __clear_bit(index % BITS_PER_LONG, word); | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline unsigned int fpq_find(struct fp_prio_queue* q) | ||
| 47 | { | ||
| 48 | int i; | ||
| 49 | |||
| 50 | /* loop optimizer should unroll this */ | ||
| 51 | for (i = 0; i < FP_PRIO_BIT_WORDS; i++) | ||
| 52 | if (q->bitmask[i]) | ||
| 53 | return __ffs(q->bitmask[i]) + i * BITS_PER_LONG; | ||
| 54 | |||
| 55 | return LITMUS_MAX_PRIORITY; /* nothing found */ | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index) | ||
| 59 | { | ||
| 60 | BUG_ON(index >= LITMUS_MAX_PRIORITY); | ||
| 61 | BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node)); | ||
| 62 | |||
| 63 | fpq_set(q, index); | ||
| 64 | bheap_insert(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node); | ||
| 65 | } | ||
| 66 | |||
| 67 | static inline void fp_prio_remove(struct fp_prio_queue* q, struct task_struct* t, unsigned int index) | ||
| 68 | { | ||
| 69 | BUG_ON(!is_queued(t)); | ||
| 70 | |||
| 71 | bheap_delete(fp_ready_order, &q->queue[index], tsk_rt(t)->heap_node); | ||
| 72 | if (likely(bheap_empty(&q->queue[index]))) | ||
| 73 | fpq_clear(q, index); | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline struct task_struct* fp_prio_peek(struct fp_prio_queue* q) | ||
| 77 | { | ||
| 78 | unsigned int idx = fpq_find(q); | ||
| 79 | struct bheap_node* hn; | ||
| 80 | |||
| 81 | if (idx < LITMUS_MAX_PRIORITY) { | ||
| 82 | hn = bheap_peek(fp_ready_order, &q->queue[idx]); | ||
| 83 | return bheap2task(hn); | ||
| 84 | } else | ||
| 85 | return NULL; | ||
| 86 | } | ||
| 87 | |||
| 88 | static inline struct task_struct* fp_prio_take(struct fp_prio_queue* q) | ||
| 89 | { | ||
| 90 | unsigned int idx = fpq_find(q); | ||
| 91 | struct bheap_node* hn; | ||
| 92 | |||
| 93 | if (idx < LITMUS_MAX_PRIORITY) { | ||
| 94 | hn = bheap_take(fp_ready_order, &q->queue[idx]); | ||
| 95 | if (likely(bheap_empty(&q->queue[idx]))) | ||
| 96 | fpq_clear(q, idx); | ||
| 97 | return bheap2task(hn); | ||
| 98 | } else | ||
| 99 | return NULL; | ||
| 100 | } | ||
| 101 | |||
| 102 | int fp_preemption_needed(struct fp_prio_queue* q, struct task_struct *t); | ||
| 103 | |||
| 104 | |||
| 105 | #endif | ||
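Usage sketch (not part of the patch): the fixed-priority queue in action, assuming task t's heap_node has already been allocated by the LITMUS^RT core (as done at admission time) and using the get_priority() accessor from litmus.h.

    struct fp_prio_queue q;
    struct task_struct *next;

    fp_prio_queue_init(&q);
    fp_prio_add(&q, t, get_priority(t));    /* enqueue t in its priority bucket */

    /* fpq_find() scans the bitmask one word at a time, so the highest-priority
     * non-empty bucket is located with at most FP_PRIO_BIT_WORDS __ffs() calls */
    next = fp_prio_take(&q);                /* t again here, or NULL if empty */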
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h new file mode 100644 index 00000000000..642de98542c --- /dev/null +++ b/include/litmus/fpmath.h | |||
| @@ -0,0 +1,147 @@ | |||
| 1 | #ifndef __FP_MATH_H__ | ||
| 2 | #define __FP_MATH_H__ | ||
| 3 | |||
| 4 | #include <linux/math64.h> | ||
| 5 | |||
| 6 | #ifndef __KERNEL__ | ||
| 7 | #include <stdint.h> | ||
| 8 | #define abs(x) (((x) < 0) ? -(x) : x) | ||
| 9 | #endif | ||
| 10 | |||
| 11 | // Use 64-bit because we want to track things at the nanosecond scale. | ||
| 12 | // This can lead to very large numbers. | ||
| 13 | typedef int64_t fpbuf_t; | ||
| 14 | typedef struct | ||
| 15 | { | ||
| 16 | fpbuf_t val; | ||
| 17 | } fp_t; | ||
| 18 | |||
| 19 | #define FP_SHIFT 10 | ||
| 20 | #define ROUND_BIT (FP_SHIFT - 1) | ||
| 21 | |||
| 22 | #define _fp(x) ((fp_t) {x}) | ||
| 23 | |||
| 24 | #ifdef __KERNEL__ | ||
| 25 | static const fp_t LITMUS_FP_ZERO = {.val = 0}; | ||
| 26 | static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)}; | ||
| 27 | #endif | ||
| 28 | |||
| 29 | static inline fp_t FP(fpbuf_t x) | ||
| 30 | { | ||
| 31 | return _fp(((fpbuf_t) x) << FP_SHIFT); | ||
| 32 | } | ||
| 33 | |||
| 34 | /* divide two integers to obtain a fixed point value */ | ||
| 35 | static inline fp_t _frac(fpbuf_t a, fpbuf_t b) | ||
| 36 | { | ||
| 37 | return _fp(div64_s64(FP(a).val, (b))); | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline fpbuf_t _point(fp_t x) | ||
| 41 | { | ||
| 42 | return (x.val % (1 << FP_SHIFT)); | ||
| 43 | |||
| 44 | } | ||
| 45 | |||
| 46 | #define fp2str(x) x.val | ||
| 47 | /*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */ | ||
| 48 | #define _FP_ "%ld/1024" | ||
| 49 | |||
| 50 | static inline fpbuf_t _floor(fp_t x) | ||
| 51 | { | ||
| 52 | return x.val >> FP_SHIFT; | ||
| 53 | } | ||
| 54 | |||
| 55 | /* FIXME: negative rounding */ | ||
| 56 | static inline fpbuf_t _round(fp_t x) | ||
| 57 | { | ||
| 58 | return _floor(x) + ((x.val >> ROUND_BIT) & 1); | ||
| 59 | } | ||
| 60 | |||
| 61 | /* multiply two fixed point values */ | ||
| 62 | static inline fp_t _mul(fp_t a, fp_t b) | ||
| 63 | { | ||
| 64 | return _fp((a.val * b.val) >> FP_SHIFT); | ||
| 65 | } | ||
| 66 | |||
| 67 | static inline fp_t _div(fp_t a, fp_t b) | ||
| 68 | { | ||
| 69 | #if !defined(__KERNEL__) && !defined(unlikely) | ||
| 70 | #define unlikely(x) (x) | ||
| 71 | #define DO_UNDEF_UNLIKELY | ||
| 72 | #endif | ||
| 73 | /* try not to overflow */ | ||
| 74 | if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) )) | ||
| 75 | return _fp((a.val / b.val) << FP_SHIFT); | ||
| 76 | else | ||
| 77 | return _fp((a.val << FP_SHIFT) / b.val); | ||
| 78 | #ifdef DO_UNDEF_UNLIKELY | ||
| 79 | #undef unlikely | ||
| 80 | #undef DO_UNDEF_UNLIKELY | ||
| 81 | #endif | ||
| 82 | } | ||
| 83 | |||
| 84 | static inline fp_t _add(fp_t a, fp_t b) | ||
| 85 | { | ||
| 86 | return _fp(a.val + b.val); | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline fp_t _sub(fp_t a, fp_t b) | ||
| 90 | { | ||
| 91 | return _fp(a.val - b.val); | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline fp_t _neg(fp_t x) | ||
| 95 | { | ||
| 96 | return _fp(-x.val); | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline fp_t _abs(fp_t x) | ||
| 100 | { | ||
| 101 | return _fp(abs(x.val)); | ||
| 102 | } | ||
| 103 | |||
| 104 | /* works the same as casting float/double to integer */ | ||
| 105 | static inline fpbuf_t _fp_to_integer(fp_t x) | ||
| 106 | { | ||
| 107 | return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1); | ||
| 108 | } | ||
| 109 | |||
| 110 | static inline fp_t _integer_to_fp(fpbuf_t x) | ||
| 111 | { | ||
| 112 | return _frac(x,1); | ||
| 113 | } | ||
| 114 | |||
| 115 | static inline int _leq(fp_t a, fp_t b) | ||
| 116 | { | ||
| 117 | return a.val <= b.val; | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline int _geq(fp_t a, fp_t b) | ||
| 121 | { | ||
| 122 | return a.val >= b.val; | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline int _lt(fp_t a, fp_t b) | ||
| 126 | { | ||
| 127 | return a.val < b.val; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline int _gt(fp_t a, fp_t b) | ||
| 131 | { | ||
| 132 | return a.val > b.val; | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline int _eq(fp_t a, fp_t b) | ||
| 136 | { | ||
| 137 | return a.val == b.val; | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline fp_t _max(fp_t a, fp_t b) | ||
| 141 | { | ||
| 142 | if (a.val < b.val) | ||
| 143 | return b; | ||
| 144 | else | ||
| 145 | return a; | ||
| 146 | } | ||
| 147 | #endif | ||
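Worked example (not part of the patch) of the 10-bit fixed-point helpers: with FP_SHIFT = 10, values are stored in units of 1/1024.

    fp_t util  = _frac(3, 4);               /* 3/4        -> val ==  768 */
    fp_t twice = _mul(util, FP(2));         /* 3/4 * 2    -> val == 1536 */
    fpbuf_t n  = _fp_to_integer(twice);     /* truncates toward zero: 1 */
    fpbuf_t r  = _round(twice);             /* rounds to nearest: 2 */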
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h new file mode 100644 index 00000000000..0b959874dd7 --- /dev/null +++ b/include/litmus/ftdev.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | #ifndef _LITMUS_FTDEV_H_ | ||
| 2 | #define _LITMUS_FTDEV_H_ | ||
| 3 | |||
| 4 | #include <litmus/feather_trace.h> | ||
| 5 | #include <litmus/feather_buffer.h> | ||
| 6 | #include <linux/mutex.h> | ||
| 7 | #include <linux/cdev.h> | ||
| 8 | |||
| 9 | #define FTDEV_ENABLE_CMD 0 | ||
| 10 | #define FTDEV_DISABLE_CMD 1 | ||
| 11 | |||
| 12 | struct ftdev; | ||
| 13 | |||
| 14 | /* return 0 if buffer can be opened, otherwise -$REASON */ | ||
| 15 | typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no); | ||
| 16 | /* return 0 on success, otherwise -$REASON */ | ||
| 17 | typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no); | ||
| 18 | typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no); | ||
| 19 | /* Let devices handle writes from userspace. No synchronization provided. */ | ||
| 20 | typedef ssize_t (*ftdev_write_t)(struct ft_buffer* buf, size_t len, const char __user *from); | ||
| 21 | |||
| 22 | struct ftdev_event; | ||
| 23 | |||
| 24 | struct ftdev_minor { | ||
| 25 | struct ft_buffer* buf; | ||
| 26 | unsigned int readers; | ||
| 27 | struct mutex lock; | ||
| 28 | /* FIXME: filter for authorized events */ | ||
| 29 | struct ftdev_event* events; | ||
| 30 | struct device* device; | ||
| 31 | struct ftdev* ftdev; | ||
| 32 | }; | ||
| 33 | |||
| 34 | struct ftdev { | ||
| 35 | dev_t major; | ||
| 36 | struct cdev cdev; | ||
| 37 | struct class* class; | ||
| 38 | const char* name; | ||
| 39 | struct ftdev_minor* minor; | ||
| 40 | unsigned int minor_cnt; | ||
| 41 | ftdev_alloc_t alloc; | ||
| 42 | ftdev_free_t free; | ||
| 43 | ftdev_can_open_t can_open; | ||
| 44 | ftdev_write_t write; | ||
| 45 | }; | ||
| 46 | |||
| 47 | struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size); | ||
| 48 | void free_ft_buffer(struct ft_buffer* buf); | ||
| 49 | |||
| 50 | int ftdev_init( struct ftdev* ftdev, struct module* owner, | ||
| 51 | const int minor_cnt, const char* name); | ||
| 52 | void ftdev_exit(struct ftdev* ftdev); | ||
| 53 | int register_ftdev(struct ftdev* ftdev); | ||
| 54 | |||
| 55 | #endif | ||
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h new file mode 100644 index 00000000000..9bd361ef394 --- /dev/null +++ b/include/litmus/jobs.h | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | #ifndef __LITMUS_JOBS_H__ | ||
| 2 | #define __LITMUS_JOBS_H__ | ||
| 3 | |||
| 4 | void prepare_for_next_period(struct task_struct *t); | ||
| 5 | void release_at(struct task_struct *t, lt_t start); | ||
| 6 | long complete_job(void); | ||
| 7 | |||
| 8 | #endif | ||
| 9 | |||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h new file mode 100644 index 00000000000..6a1a59da6b5 --- /dev/null +++ b/include/litmus/litmus.h | |||
| @@ -0,0 +1,312 @@ | |||
| 1 | /* | ||
| 2 | * Constant definitions related to | ||
| 3 | * scheduling policy. | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef _LINUX_LITMUS_H_ | ||
| 7 | #define _LINUX_LITMUS_H_ | ||
| 8 | |||
| 9 | #include <litmus/debug_trace.h> | ||
| 10 | |||
| 11 | #ifdef CONFIG_RELEASE_MASTER | ||
| 12 | extern atomic_t release_master_cpu; | ||
| 13 | #endif | ||
| 14 | |||
| 15 | /* in_list - is a given list_head queued on some list? | ||
| 16 | */ | ||
| 17 | static inline int in_list(struct list_head* list) | ||
| 18 | { | ||
| 19 | return !( /* case 1: deleted */ | ||
| 20 | (list->next == LIST_POISON1 && | ||
| 21 | list->prev == LIST_POISON2) | ||
| 22 | || | ||
| 23 | /* case 2: initialized */ | ||
| 24 | (list->next == list && | ||
| 25 | list->prev == list) | ||
| 26 | ); | ||
| 27 | } | ||
| 28 | |||
| 29 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | ||
| 30 | struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq); | ||
| 31 | |||
| 32 | #define NO_CPU 0xffffffff | ||
| 33 | |||
| 34 | void litmus_fork(struct task_struct *tsk); | ||
| 35 | void litmus_exec(void); | ||
| 36 | /* clean up real-time state of a task */ | ||
| 37 | void exit_litmus(struct task_struct *dead_tsk); | ||
| 38 | |||
| 39 | long litmus_admit_task(struct task_struct *tsk); | ||
| 40 | void litmus_exit_task(struct task_struct *tsk); | ||
| 41 | |||
| 42 | #define is_realtime(t) ((t)->policy == SCHED_LITMUS) | ||
| 43 | #define rt_transition_pending(t) \ | ||
| 44 | ((t)->rt_param.transition_pending) | ||
| 45 | |||
| 46 | #define tsk_rt(t) (&(t)->rt_param) | ||
| 47 | |||
| 48 | /* Realtime utility macros */ | ||
| 49 | #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted) | ||
| 50 | #define get_boost_start(t) (tsk_rt(t)->boost_start_time) | ||
| 51 | |||
| 52 | /* task_params macros */ | ||
| 53 | #define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost) | ||
| 54 | #define get_rt_period(t) (tsk_rt(t)->task_params.period) | ||
| 55 | #define get_rt_relative_deadline(t) (tsk_rt(t)->task_params.relative_deadline) | ||
| 56 | #define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | ||
| 57 | #define get_partition(t) (tsk_rt(t)->task_params.cpu) | ||
| 58 | #define get_priority(t) (tsk_rt(t)->task_params.priority) | ||
| 59 | #define get_class(t) (tsk_rt(t)->task_params.cls) | ||
| 60 | #define get_release_policy(t) (tsk_rt(t)->task_params.release_policy) | ||
| 61 | |||
| 62 | /* job_param macros */ | ||
| 63 | #define get_job_no(t) (tsk_rt(t)->job_params.job_no) | ||
| 64 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | ||
| 65 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | ||
| 66 | #define get_release(t) (tsk_rt(t)->job_params.release) | ||
| 67 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) | ||
| 68 | |||
| 69 | /* release policy macros */ | ||
| 70 | #define is_periodic(t) (get_release_policy(t) == PERIODIC) | ||
| 71 | #define is_sporadic(t) (get_release_policy(t) == SPORADIC) | ||
| 72 | #ifdef CONFIG_ALLOW_EARLY_RELEASE | ||
| 73 | #define is_early_releasing(t) (get_release_policy(t) == EARLY) | ||
| 74 | #else | ||
| 75 | #define is_early_releasing(t) (0) | ||
| 76 | #endif | ||
| 77 | |||
| 78 | #define is_hrt(t) \ | ||
| 79 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) | ||
| 80 | #define is_srt(t) \ | ||
| 81 | (tsk_rt(t)->task_params.cls == RT_CLASS_SOFT) | ||
| 82 | #define is_be(t) \ | ||
| 83 | (tsk_rt(t)->task_params.cls == RT_CLASS_BEST_EFFORT) | ||
| 84 | |||
| 85 | /* Our notion of time within LITMUS: kernel monotonic time. */ | ||
| 86 | static inline lt_t litmus_clock(void) | ||
| 87 | { | ||
| 88 | return ktime_to_ns(ktime_get()); | ||
| 89 | } | ||
| 90 | |||
| 91 | /* A macro to convert from nanoseconds to ktime_t. */ | ||
| 92 | #define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | ||
| 93 | |||
| 94 | #define get_domain(t) (tsk_rt(t)->domain) | ||
| 95 | |||
| 96 | /* Honor the flag in the preempt_count variable that is set | ||
| 97 | * when scheduling is in progress. | ||
| 98 | */ | ||
| 99 | #define is_running(t) \ | ||
| 100 | ((t)->state == TASK_RUNNING || \ | ||
| 101 | task_thread_info(t)->preempt_count & PREEMPT_ACTIVE) | ||
| 102 | |||
| 103 | #define is_blocked(t) \ | ||
| 104 | (!is_running(t)) | ||
| 105 | #define is_released(t, now) \ | ||
| 106 | (lt_before_eq(get_release(t), now)) | ||
| 107 | #define is_tardy(t, now) \ | ||
| 108 | (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) | ||
| 109 | |||
| 110 | /* real-time comparison macros */ | ||
| 111 | #define earlier_deadline(a, b) (lt_before(\ | ||
| 112 | (a)->rt_param.job_params.deadline,\ | ||
| 113 | (b)->rt_param.job_params.deadline)) | ||
| 114 | #define earlier_release(a, b) (lt_before(\ | ||
| 115 | (a)->rt_param.job_params.release,\ | ||
| 116 | (b)->rt_param.job_params.release)) | ||
| 117 | |||
| 118 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); | ||
| 119 | |||
| 120 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 121 | void srp_ceiling_block(void); | ||
| 122 | #else | ||
| 123 | #define srp_ceiling_block() /* nothing */ | ||
| 124 | #endif | ||
| 125 | |||
| 126 | #define bheap2task(hn) ((struct task_struct*) hn->value) | ||
| 127 | |||
| 128 | #ifdef CONFIG_NP_SECTION | ||
| 129 | |||
| 130 | static inline int is_kernel_np(struct task_struct *t) | ||
| 131 | { | ||
| 132 | return tsk_rt(t)->kernel_np; | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline int is_user_np(struct task_struct *t) | ||
| 136 | { | ||
| 137 | return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0; | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline void request_exit_np(struct task_struct *t) | ||
| 141 | { | ||
| 142 | if (is_user_np(t)) { | ||
| 143 | /* Set the flag that tells user space to call | ||
| 144 | * into the kernel at the end of a critical section. */ | ||
| 145 | if (likely(tsk_rt(t)->ctrl_page)) { | ||
| 146 | TRACE_TASK(t, "setting delayed_preemption flag\n"); | ||
| 147 | tsk_rt(t)->ctrl_page->sched.np.preempt = 1; | ||
| 148 | } | ||
| 149 | } | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline void make_np(struct task_struct *t) | ||
| 153 | { | ||
| 154 | tsk_rt(t)->kernel_np++; | ||
| 155 | } | ||
| 156 | |||
| 157 | /* Caller should check if preemption is necessary when | ||
| 158 | * the function returns 0. | ||
| 159 | */ | ||
| 160 | static inline int take_np(struct task_struct *t) | ||
| 161 | { | ||
| 162 | return --tsk_rt(t)->kernel_np; | ||
| 163 | } | ||
| 164 | |||
| 165 | /* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */ | ||
| 166 | static inline int request_exit_np_atomic(struct task_struct *t) | ||
| 167 | { | ||
| 168 | union np_flag old, new; | ||
| 169 | |||
| 170 | if (tsk_rt(t)->ctrl_page) { | ||
| 171 | old.raw = tsk_rt(t)->ctrl_page->sched.raw; | ||
| 172 | if (old.np.flag == 0) { | ||
| 173 | /* no longer non-preemptive */ | ||
| 174 | return 0; | ||
| 175 | } else if (old.np.preempt) { | ||
| 176 | /* already set, nothing for us to do */ | ||
| 177 | return 1; | ||
| 178 | } else { | ||
| 179 | /* non-preemptive and flag not set */ | ||
| 180 | new.raw = old.raw; | ||
| 181 | new.np.preempt = 1; | ||
| 182 | /* if we get old back, then we atomically set the flag */ | ||
| 183 | return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw; | ||
| 184 | /* If we raced with a concurrent change, then so be | ||
| 185 | * it. Deliver it by IPI. We don't want an unbounded | ||
| 186 | * retry loop here since tasks might exploit that to | ||
| 187 | * keep the kernel busy indefinitely. */ | ||
| 188 | } | ||
| 189 | } else | ||
| 190 | return 0; | ||
| 191 | } | ||
| 192 | |||
| 193 | #else | ||
| 194 | |||
| 195 | static inline int is_kernel_np(struct task_struct* t) | ||
| 196 | { | ||
| 197 | return 0; | ||
| 198 | } | ||
| 199 | |||
| 200 | static inline int is_user_np(struct task_struct* t) | ||
| 201 | { | ||
| 202 | return 0; | ||
| 203 | } | ||
| 204 | |||
| 205 | static inline void request_exit_np(struct task_struct *t) | ||
| 206 | { | ||
| 207 | /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */ | ||
| 208 | BUG(); | ||
| 209 | } | ||
| 210 | |||
| 211 | static inline int request_exit_np_atomic(struct task_struct *t) | ||
| 212 | { | ||
| 213 | return 0; | ||
| 214 | } | ||
| 215 | |||
| 216 | #endif | ||
| 217 | |||
| 218 | static inline void clear_exit_np(struct task_struct *t) | ||
| 219 | { | ||
| 220 | if (likely(tsk_rt(t)->ctrl_page)) | ||
| 221 | tsk_rt(t)->ctrl_page->sched.np.preempt = 0; | ||
| 222 | } | ||
| 223 | |||
| 224 | static inline int is_np(struct task_struct *t) | ||
| 225 | { | ||
| 226 | #ifdef CONFIG_SCHED_DEBUG_TRACE | ||
| 227 | int kernel, user; | ||
| 228 | kernel = is_kernel_np(t); | ||
| 229 | user = is_user_np(t); | ||
| 230 | if (kernel || user) | ||
| 231 | TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n", | ||
| 232 | kernel, user); | ||
| 233 | |||
| 234 | return kernel || user; | ||
| 235 | #else | ||
| 236 | return unlikely(is_kernel_np(t) || is_user_np(t)); | ||
| 237 | #endif | ||
| 238 | } | ||
| 239 | |||
| 240 | static inline int is_present(struct task_struct* t) | ||
| 241 | { | ||
| 242 | return t && tsk_rt(t)->present; | ||
| 243 | } | ||
| 244 | |||
| 245 | static inline int is_completed(struct task_struct* t) | ||
| 246 | { | ||
| 247 | return t && tsk_rt(t)->completed; | ||
| 248 | } | ||
| 249 | |||
| 250 | |||
| 251 | /* make the unit explicit */ | ||
| 252 | typedef unsigned long quanta_t; | ||
| 253 | |||
| 254 | enum round { | ||
| 255 | FLOOR, | ||
| 256 | CEIL | ||
| 257 | }; | ||
| 258 | |||
| 259 | |||
| 260 | /* Tick period is used to convert ns-specified execution | ||
| 261 | * costs and periods into tick-based equivalents. | ||
| 262 | */ | ||
| 263 | extern ktime_t tick_period; | ||
| 264 | |||
| 265 | static inline quanta_t time2quanta(lt_t time, enum round round) | ||
| 266 | { | ||
| 267 | s64 quantum_length = ktime_to_ns(tick_period); | ||
| 268 | |||
| 269 | if (do_div(time, quantum_length) && round == CEIL) | ||
| 270 | time++; | ||
| 271 | return (quanta_t) time; | ||
| 272 | } | ||
| 273 | |||
| 274 | /* By how much is cpu staggered behind CPU 0? */ | ||
| 275 | u64 cpu_stagger_offset(int cpu); | ||
| 276 | |||
| 277 | static inline struct control_page* get_control_page(struct task_struct *t) | ||
| 278 | { | ||
| 279 | return tsk_rt(t)->ctrl_page; | ||
| 280 | } | ||
| 281 | |||
| 282 | static inline int has_control_page(struct task_struct* t) | ||
| 283 | { | ||
| 284 | return tsk_rt(t)->ctrl_page != NULL; | ||
| 285 | } | ||
| 286 | |||
| 287 | |||
| 288 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
| 289 | |||
| 290 | #define TS_SYSCALL_IN_START \ | ||
| 291 | if (has_control_page(current)) { \ | ||
| 292 | __TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \ | ||
| 293 | } | ||
| 294 | |||
| 295 | #define TS_SYSCALL_IN_END \ | ||
| 296 | if (has_control_page(current)) { \ | ||
| 297 | uint64_t irqs; \ | ||
| 298 | local_irq_disable(); \ | ||
| 299 | irqs = get_control_page(current)->irq_count - \ | ||
| 300 | get_control_page(current)->irq_syscall_start; \ | ||
| 301 | __TS_SYSCALL_IN_END(&irqs); \ | ||
| 302 | local_irq_enable(); \ | ||
| 303 | } | ||
| 304 | |||
| 305 | #else | ||
| 306 | |||
| 307 | #define TS_SYSCALL_IN_START | ||
| 308 | #define TS_SYSCALL_IN_END | ||
| 309 | |||
| 310 | #endif | ||
| 311 | |||
| 312 | #endif | ||
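Two illustrative helpers built only from the macros and inline functions declared above (the helpers themselves are not part of LITMUS^RT). As a worked example: with a 1 ms tick, time2quanta() maps a 2.5 ms execution cost to 2 quanta under FLOOR and 3 under CEIL.

#include <litmus/litmus.h>

/* Convert a task's execution cost into whole quanta, rounding up so
 * the budget is never under-provisioned. */
static inline quanta_t exec_cost_in_quanta(struct task_struct *t)
{
	return time2quanta(get_exec_cost(t), CEIL);
}

/* Has the current job of t been released already, and is it also past
 * its deadline right now? */
static inline int job_released_and_tardy(struct task_struct *t)
{
	lt_t now = litmus_clock();
	return is_released(t, now) && is_tardy(t, now);
}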
diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h new file mode 100644 index 00000000000..6800e725d48 --- /dev/null +++ b/include/litmus/litmus_proc.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | #include <litmus/sched_plugin.h> | ||
| 2 | #include <linux/proc_fs.h> | ||
| 3 | |||
| 4 | int __init init_litmus_proc(void); | ||
| 5 | void exit_litmus_proc(void); | ||
| 6 | |||
| 7 | /* | ||
| 8 | * On success, returns 0 and sets the pointer to the location of the new | ||
| 9 | * proc dir entry, otherwise returns an error code and sets pde to NULL. | ||
| 10 | */ | ||
| 11 | long make_plugin_proc_dir(struct sched_plugin* plugin, | ||
| 12 | struct proc_dir_entry** pde); | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Plugins should deallocate all child proc directory entries before | ||
| 16 | * calling this, to avoid memory leaks. | ||
| 17 | */ | ||
| 18 | void remove_plugin_proc_dir(struct sched_plugin* plugin); | ||
| 19 | |||
| 20 | |||
| 21 | /* Copy at most ksize-1 bytes from ubuf into kbuf, null-terminate kbuf, and | ||
| 22 | * remove a '\n' if present. Returns the number of bytes that were read or | ||
| 23 | * -EFAULT. */ | ||
| 24 | int copy_and_chomp(char *kbuf, unsigned long ksize, | ||
| 25 | __user const char* ubuf, unsigned long ulength); | ||
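A hedged sketch of a proc write handler built around copy_and_chomp(); the buffer size, the "enable" keyword, and the surrounding file_operations wiring are assumptions for illustration.

#include <litmus/litmus_proc.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>

static ssize_t demo_proc_write(struct file *file, const char __user *ubuf,
			       size_t len, loff_t *ppos)
{
	char kbuf[64];
	int copied;

	/* Copies at most sizeof(kbuf)-1 bytes, NUL-terminates, and strips
	 * a trailing newline; returns -EFAULT on a failed user copy. */
	copied = copy_and_chomp(kbuf, sizeof(kbuf), ubuf, len);
	if (copied < 0)
		return copied;

	if (!strcmp(kbuf, "enable"))
		pr_info("demo: enabled via /proc write\n");

	return len; /* consume the entire write */
}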
diff --git a/include/litmus/locking.h b/include/litmus/locking.h new file mode 100644 index 00000000000..968ba6fa828 --- /dev/null +++ b/include/litmus/locking.h | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | #ifndef LITMUS_LOCKING_H | ||
| 2 | #define LITMUS_LOCKING_H | ||
| 3 | |||
| 4 | #include <litmus/fdso.h> | ||
| 5 | |||
| 6 | struct litmus_lock_ops; | ||
| 7 | |||
| 8 | extern struct fdso_ops generic_lock_ops; | ||
| 9 | |||
| 10 | /* Generic base struct for LITMUS^RT userspace semaphores. | ||
| 11 | * This structure should be embedded in protocol-specific semaphores. | ||
| 12 | */ | ||
| 13 | struct litmus_lock { | ||
| 14 | struct litmus_lock_ops *ops; | ||
| 15 | int type; | ||
| 16 | }; | ||
| 17 | |||
| 18 | struct litmus_lock_ops { | ||
| 19 | /* Current task tries to obtain / drop a reference to a lock. | ||
| 20 | * Optional methods, allowed by default. */ | ||
| 21 | int (*open)(struct litmus_lock*, void* __user); | ||
| 22 | int (*close)(struct litmus_lock*); | ||
| 23 | |||
| 24 | /* Current task tries to lock/unlock this lock (mandatory methods). */ | ||
| 25 | int (*lock)(struct litmus_lock*); | ||
| 26 | int (*unlock)(struct litmus_lock*); | ||
| 27 | |||
| 28 | int (*dynamic_group_lock)(struct litmus_lock*, resource_mask_t); | ||
| 29 | int (*dynamic_group_unlock)(struct litmus_lock*, resource_mask_t); | ||
| 30 | |||
| 31 | /* The lock is no longer being referenced (mandatory method). */ | ||
| 32 | void (*deallocate)(struct litmus_lock*); | ||
| 33 | }; | ||
| 34 | |||
| 35 | static inline bool is_lock(struct od_table_entry* entry) | ||
| 36 | { | ||
| 37 | return entry->class == &generic_lock_ops; | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline struct litmus_lock* get_lock(struct od_table_entry* entry) | ||
| 41 | { | ||
| 42 | BUG_ON(!is_lock(entry)); | ||
| 43 | return (struct litmus_lock*) entry->obj->obj; | ||
| 44 | } | ||
| 45 | |||
| 46 | #endif | ||
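A minimal sketch of a protocol-specific semaphore that embeds struct litmus_lock, using the usual container_of() pattern. The locking logic itself is omitted and all demo_* names are hypothetical.

#include <litmus/locking.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct demo_semaphore {
	struct litmus_lock litmus_lock;	/* generic part */
	struct task_struct *owner;	/* current holder, NULL if free */
	wait_queue_head_t wait;		/* blocked waiters */
};

static inline struct demo_semaphore *demo_from_lock(struct litmus_lock *l)
{
	return container_of(l, struct demo_semaphore, litmus_lock);
}

static void demo_deallocate(struct litmus_lock *l)
{
	kfree(demo_from_lock(l));
}

/* lock/unlock are mandatory in a real protocol; they would suspend the
 * caller on ->wait and apply the protocol's priority rules. */
static struct litmus_lock_ops demo_sem_ops = {
	.deallocate = demo_deallocate,
};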
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h new file mode 100644 index 00000000000..380b886d78f --- /dev/null +++ b/include/litmus/preempt.h | |||
| @@ -0,0 +1,164 @@ | |||
| 1 | #ifndef LITMUS_PREEMPT_H | ||
| 2 | #define LITMUS_PREEMPT_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | #include <linux/cache.h> | ||
| 6 | #include <linux/percpu.h> | ||
| 7 | #include <asm/atomic.h> | ||
| 8 | |||
| 9 | #include <litmus/debug_trace.h> | ||
| 10 | |||
| 11 | extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); | ||
| 12 | |||
| 13 | #ifdef CONFIG_PREEMPT_STATE_TRACE | ||
| 14 | const char* sched_state_name(int s); | ||
| 15 | #define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args) | ||
| 16 | #else | ||
| 17 | #define TRACE_STATE(fmt, args...) /* ignore */ | ||
| 18 | #endif | ||
| 19 | |||
| 20 | #define VERIFY_SCHED_STATE(x) \ | ||
| 21 | do { int __s = get_sched_state(); \ | ||
| 22 | if ((__s & (x)) == 0) \ | ||
| 23 | TRACE_STATE("INVALID s=0x%x (%s) not " \ | ||
| 24 | "in 0x%x (%s) [%s]\n", \ | ||
| 25 | __s, sched_state_name(__s), \ | ||
| 26 | (x), #x, __FUNCTION__); \ | ||
| 27 | } while (0); | ||
| 28 | |||
| 29 | #define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ | ||
| 30 | TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ | ||
| 31 | cpu, (x), sched_state_name(x), \ | ||
| 32 | (y), sched_state_name(y)) | ||
| 33 | |||
| 34 | |||
| 35 | typedef enum scheduling_state { | ||
| 36 | TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that | ||
| 37 | * should be scheduled, and the processor does not | ||
| 38 | * plan to invoke schedule(). */ | ||
| 39 | SHOULD_SCHEDULE = (1 << 1), /* A remote processor has determined that the | ||
| 40 | * processor should reschedule, but this has not | ||
| 41 | * been communicated yet (IPI still pending). */ | ||
| 42 | WILL_SCHEDULE = (1 << 2), /* The processor has noticed that it has to | ||
| 43 | * reschedule and will do so shortly. */ | ||
| 44 | TASK_PICKED = (1 << 3), /* The processor is currently executing schedule(), | ||
| 45 | * has selected a new task to schedule, but has not | ||
| 46 | * yet performed the actual context switch. */ | ||
| 47 | PICKED_WRONG_TASK = (1 << 4), /* The processor has not yet performed the context | ||
| 48 | * switch, but a remote processor has already | ||
| 49 | * determined that a higher-priority task became | ||
| 50 | * eligible after the task was picked. */ | ||
| 51 | } sched_state_t; | ||
| 52 | |||
| 53 | static inline sched_state_t get_sched_state_on(int cpu) | ||
| 54 | { | ||
| 55 | return atomic_read(&per_cpu(resched_state, cpu)); | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline sched_state_t get_sched_state(void) | ||
| 59 | { | ||
| 60 | return atomic_read(&__get_cpu_var(resched_state)); | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline int is_in_sched_state(int possible_states) | ||
| 64 | { | ||
| 65 | return get_sched_state() & possible_states; | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline int cpu_is_in_sched_state(int cpu, int possible_states) | ||
| 69 | { | ||
| 70 | return get_sched_state_on(cpu) & possible_states; | ||
| 71 | } | ||
| 72 | |||
| 73 | static inline void set_sched_state(sched_state_t s) | ||
| 74 | { | ||
| 75 | TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id()); | ||
| 76 | atomic_set(&__get_cpu_var(resched_state), s); | ||
| 77 | } | ||
| 78 | |||
| 79 | static inline int sched_state_transition(sched_state_t from, sched_state_t to) | ||
| 80 | { | ||
| 81 | sched_state_t old_state; | ||
| 82 | |||
| 83 | old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to); | ||
| 84 | if (old_state == from) { | ||
| 85 | TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id()); | ||
| 86 | return 1; | ||
| 87 | } else | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline int sched_state_transition_on(int cpu, | ||
| 92 | sched_state_t from, | ||
| 93 | sched_state_t to) | ||
| 94 | { | ||
| 95 | sched_state_t old_state; | ||
| 96 | |||
| 97 | old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to); | ||
| 98 | if (old_state == from) { | ||
| 99 | TRACE_SCHED_STATE_CHANGE(from, to, cpu); | ||
| 100 | return 1; | ||
| 101 | } else | ||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | |||
| 105 | /* Plugins must call this function after they have decided which job to | ||
| 106 | * schedule next. IMPORTANT: this function must be called while still holding | ||
| 107 | * the lock that is used to serialize scheduling decisions. | ||
| 108 | * | ||
| 109 | * (Ideally, we would like to use runqueue locks for this purpose, but that | ||
| 110 | * would lead to deadlocks with the migration code.) | ||
| 111 | */ | ||
| 112 | static inline void sched_state_task_picked(void) | ||
| 113 | { | ||
| 114 | VERIFY_SCHED_STATE(WILL_SCHEDULE); | ||
| 115 | |||
| 116 | /* WILL_SCHEDULE has only a local transition => simple store is ok */ | ||
| 117 | set_sched_state(TASK_PICKED); | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline void sched_state_entered_schedule(void) | ||
| 121 | { | ||
| 122 | /* Update state for the case that we entered schedule() not due to | ||
| 123 | * set_tsk_need_resched() */ | ||
| 124 | set_sched_state(WILL_SCHEDULE); | ||
| 125 | } | ||
| 126 | |||
| 127 | /* Called by schedule() to check if the scheduling decision is still valid | ||
| 128 | * after a context switch. Returns 1 if the CPU needs to reschedule. */ | ||
| 129 | static inline int sched_state_validate_switch(void) | ||
| 130 | { | ||
| 131 | int left_state_ok = 0; | ||
| 132 | |||
| 133 | VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED); | ||
| 134 | |||
| 135 | if (is_in_sched_state(TASK_PICKED)) { | ||
| 136 | /* Might be good; let's try to transition out of this | ||
| 137 | * state. This must be done atomically since remote processors | ||
| 138 | * may try to change the state, too. */ | ||
| 139 | left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED); | ||
| 140 | } | ||
| 141 | |||
| 142 | if (!left_state_ok) { | ||
| 143 | /* We raced with a higher-priority task arrival => not | ||
| 144 | * valid. The CPU needs to reschedule. */ | ||
| 145 | set_sched_state(WILL_SCHEDULE); | ||
| 146 | return 1; | ||
| 147 | } else | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | /* State transition events. See litmus/preempt.c for details. */ | ||
| 152 | void sched_state_will_schedule(struct task_struct* tsk); | ||
| 153 | void sched_state_ipi(void); | ||
| 154 | /* Cause a CPU (remote or local) to reschedule. */ | ||
| 155 | void litmus_reschedule(int cpu); | ||
| 156 | void litmus_reschedule_local(void); | ||
| 157 | |||
| 158 | #ifdef CONFIG_DEBUG_KERNEL | ||
| 159 | void sched_state_plugin_check(void); | ||
| 160 | #else | ||
| 161 | #define sched_state_plugin_check() /* no check */ | ||
| 162 | #endif | ||
| 163 | |||
| 164 | #endif | ||
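A hedged sketch of how a plugin's schedule() callback is expected to interact with this state machine: the decision is reported via sched_state_task_picked() while the plugin's own scheduling lock is still held. demo_lock and demo_take_highest_prio() are hypothetical plugin internals.

#include <litmus/preempt.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static struct task_struct *demo_take_highest_prio(void); /* hypothetical */

static struct task_struct *demo_schedule(struct task_struct *prev)
{
	struct task_struct *next;

	raw_spin_lock(&demo_lock);
	next = demo_take_highest_prio();	/* may be NULL */
	/* Report the decision before dropping the lock, as required by
	 * sched_state_task_picked(); a racing remote wake-up then moves
	 * the state to PICKED_WRONG_TASK and the core reschedules when
	 * sched_state_validate_switch() reports the pick as stale. */
	sched_state_task_picked();
	raw_spin_unlock(&demo_lock);

	return next;
}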
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h new file mode 100644 index 00000000000..ac249292e86 --- /dev/null +++ b/include/litmus/rt_domain.h | |||
| @@ -0,0 +1,182 @@ | |||
| 1 | /* CLEANUP: Add comments and make it less messy. | ||
| 2 | * | ||
| 3 | */ | ||
| 4 | |||
| 5 | #ifndef __UNC_RT_DOMAIN_H__ | ||
| 6 | #define __UNC_RT_DOMAIN_H__ | ||
| 7 | |||
| 8 | #include <litmus/bheap.h> | ||
| 9 | |||
| 10 | #define RELEASE_QUEUE_SLOTS 127 /* prime */ | ||
| 11 | |||
| 12 | struct _rt_domain; | ||
| 13 | |||
| 14 | typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | ||
| 15 | typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks); | ||
| 16 | |||
| 17 | struct release_queue { | ||
| 18 | /* each slot maintains a list of release heaps sorted | ||
| 19 | * by release time */ | ||
| 20 | struct list_head slot[RELEASE_QUEUE_SLOTS]; | ||
| 21 | }; | ||
| 22 | |||
| 23 | typedef struct _rt_domain { | ||
| 24 | /* runnable rt tasks are in here */ | ||
| 25 | raw_spinlock_t ready_lock; | ||
| 26 | struct bheap ready_queue; | ||
| 27 | |||
| 28 | /* real-time tasks waiting for release are in here */ | ||
| 29 | raw_spinlock_t release_lock; | ||
| 30 | struct release_queue release_queue; | ||
| 31 | |||
| 32 | #ifdef CONFIG_RELEASE_MASTER | ||
| 33 | int release_master; | ||
| 34 | #endif | ||
| 35 | |||
| 36 | /* for moving tasks to the release queue */ | ||
| 37 | raw_spinlock_t tobe_lock; | ||
| 38 | struct list_head tobe_released; | ||
| 39 | |||
| 40 | /* how do we check if we need to kick another CPU? */ | ||
| 41 | check_resched_needed_t check_resched; | ||
| 42 | |||
| 43 | /* how do we release jobs? */ | ||
| 44 | release_jobs_t release_jobs; | ||
| 45 | |||
| 46 | /* how are tasks ordered in the ready queue? */ | ||
| 47 | bheap_prio_t order; | ||
| 48 | } rt_domain_t; | ||
| 49 | |||
| 50 | struct release_heap { | ||
| 51 | /* list_head for per-time-slot list */ | ||
| 52 | struct list_head list; | ||
| 53 | lt_t release_time; | ||
| 54 | /* all tasks to be released at release_time */ | ||
| 55 | struct bheap heap; | ||
| 56 | /* used to trigger the release */ | ||
| 57 | struct hrtimer timer; | ||
| 58 | |||
| 59 | #ifdef CONFIG_RELEASE_MASTER | ||
| 60 | /* used to delegate releases */ | ||
| 61 | struct hrtimer_start_on_info info; | ||
| 62 | #endif | ||
| 63 | /* required for the timer callback */ | ||
| 64 | rt_domain_t* dom; | ||
| 65 | }; | ||
| 66 | |||
| 67 | |||
| 68 | static inline struct task_struct* __next_ready(rt_domain_t* rt) | ||
| 69 | { | ||
| 70 | struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue); | ||
| 71 | if (hn) | ||
| 72 | return bheap2task(hn); | ||
| 73 | else | ||
| 74 | return NULL; | ||
| 75 | } | ||
| 76 | |||
| 77 | void rt_domain_init(rt_domain_t *rt, bheap_prio_t order, | ||
| 78 | check_resched_needed_t check, | ||
| 79 | release_jobs_t release); | ||
| 80 | |||
| 81 | void __add_ready(rt_domain_t* rt, struct task_struct *new); | ||
| 82 | void __merge_ready(rt_domain_t* rt, struct bheap *tasks); | ||
| 83 | void __add_release(rt_domain_t* rt, struct task_struct *task); | ||
| 84 | |||
| 85 | static inline struct task_struct* __take_ready(rt_domain_t* rt) | ||
| 86 | { | ||
| 87 | struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); | ||
| 88 | if (hn) | ||
| 89 | return bheap2task(hn); | ||
| 90 | else | ||
| 91 | return NULL; | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline struct task_struct* __peek_ready(rt_domain_t* rt) | ||
| 95 | { | ||
| 96 | struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue); | ||
| 97 | if (hn) | ||
| 98 | return bheap2task(hn); | ||
| 99 | else | ||
| 100 | return NULL; | ||
| 101 | } | ||
| 102 | |||
| 103 | static inline int is_queued(struct task_struct *t) | ||
| 104 | { | ||
| 105 | BUG_ON(!tsk_rt(t)->heap_node); | ||
| 106 | return bheap_node_in_heap(tsk_rt(t)->heap_node); | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline void remove(rt_domain_t* rt, struct task_struct *t) | ||
| 110 | { | ||
| 111 | bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node); | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | ||
| 115 | { | ||
| 116 | unsigned long flags; | ||
| 117 | /* first we need the write lock for rt_ready_queue */ | ||
| 118 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
| 119 | __add_ready(rt, new); | ||
| 120 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks) | ||
| 124 | { | ||
| 125 | unsigned long flags; | ||
| 126 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
| 127 | __merge_ready(rt, tasks); | ||
| 128 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline struct task_struct* take_ready(rt_domain_t* rt) | ||
| 132 | { | ||
| 133 | unsigned long flags; | ||
| 134 | struct task_struct* ret; | ||
| 135 | /* first we need the write lock for rt_ready_queue */ | ||
| 136 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
| 137 | ret = __take_ready(rt); | ||
| 138 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
| 139 | return ret; | ||
| 140 | } | ||
| 141 | |||
| 142 | |||
| 143 | static inline void add_release(rt_domain_t* rt, struct task_struct *task) | ||
| 144 | { | ||
| 145 | unsigned long flags; | ||
| 146 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); | ||
| 147 | __add_release(rt, task); | ||
| 148 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
| 149 | } | ||
| 150 | |||
| 151 | #ifdef CONFIG_RELEASE_MASTER | ||
| 152 | void __add_release_on(rt_domain_t* rt, struct task_struct *task, | ||
| 153 | int target_cpu); | ||
| 154 | |||
| 155 | static inline void add_release_on(rt_domain_t* rt, | ||
| 156 | struct task_struct *task, | ||
| 157 | int target_cpu) | ||
| 158 | { | ||
| 159 | unsigned long flags; | ||
| 160 | raw_spin_lock_irqsave(&rt->tobe_lock, flags); | ||
| 161 | __add_release_on(rt, task, target_cpu); | ||
| 162 | raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
| 163 | } | ||
| 164 | #endif | ||
| 165 | |||
| 166 | static inline int __jobs_pending(rt_domain_t* rt) | ||
| 167 | { | ||
| 168 | return !bheap_empty(&rt->ready_queue); | ||
| 169 | } | ||
| 170 | |||
| 171 | static inline int jobs_pending(rt_domain_t* rt) | ||
| 172 | { | ||
| 173 | unsigned long flags; | ||
| 174 | int ret; | ||
| 175 | /* first we need the write lock for rt_ready_queue */ | ||
| 176 | raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
| 177 | ret = !bheap_empty(&rt->ready_queue); | ||
| 178 | raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
| 179 | return ret; | ||
| 180 | } | ||
| 181 | |||
| 182 | #endif | ||
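A hedged sketch of setting up an rt_domain with an earliest-deadline-first ordering and the locked wrappers above. The bheap comparator signature is assumed from litmus/bheap.h, and the release-jobs callback is assumed to be invoked with ready_lock already held (hence the unlocked __merge_ready()).

#include <litmus/rt_domain.h>
#include <litmus/litmus.h>
#include <litmus/preempt.h>

/* Assumed comparator: nonzero if a has higher priority (earlier deadline). */
static int demo_edf_order(struct bheap_node *a, struct bheap_node *b)
{
	return earlier_deadline(bheap2task(a), bheap2task(b));
}

/* Ask the local CPU to reschedule whenever the ready queue changed. */
static int demo_check_resched(rt_domain_t *rt)
{
	litmus_reschedule_local();
	return 1;
}

/* Assumption: invoked by the release timer with rt->ready_lock held. */
static void demo_release_jobs(rt_domain_t *rt, struct bheap *tasks)
{
	__merge_ready(rt, tasks);
}

static rt_domain_t demo_domain;

static void demo_domain_setup(void)
{
	rt_domain_init(&demo_domain, demo_edf_order,
		       demo_check_resched, demo_release_jobs);
}

/* Elsewhere, e.g. in the plugin's schedule() path: */
static struct task_struct *demo_pick_next(void)
{
	return take_ready(&demo_domain);	/* NULL if the queue is empty */
}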
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h new file mode 100644 index 00000000000..70c09acbeb2 --- /dev/null +++ b/include/litmus/rt_param.h | |||
| @@ -0,0 +1,283 @@ | |||
| 1 | /* | ||
| 2 | * Definition of the scheduler plugin interface. | ||
| 3 | * | ||
| 4 | */ | ||
| 5 | #ifndef _LINUX_RT_PARAM_H_ | ||
| 6 | #define _LINUX_RT_PARAM_H_ | ||
| 7 | |||
| 8 | /* Litmus time type. */ | ||
| 9 | typedef unsigned long long lt_t; | ||
| 10 | |||
| 11 | static inline int lt_after(lt_t a, lt_t b) | ||
| 12 | { | ||
| 13 | return ((long long) b) - ((long long) a) < 0; | ||
| 14 | } | ||
| 15 | #define lt_before(a, b) lt_after(b, a) | ||
| 16 | |||
| 17 | static inline int lt_after_eq(lt_t a, lt_t b) | ||
| 18 | { | ||
| 19 | return ((long long) a) - ((long long) b) >= 0; | ||
| 20 | } | ||
| 21 | #define lt_before_eq(a, b) lt_after_eq(b, a) | ||
| 22 | |||
| 23 | /* different types of clients */ | ||
| 24 | typedef enum { | ||
| 25 | RT_CLASS_HARD, | ||
| 26 | RT_CLASS_SOFT, | ||
| 27 | RT_CLASS_BEST_EFFORT | ||
| 28 | } task_class_t; | ||
| 29 | |||
| 30 | typedef enum { | ||
| 31 | NO_ENFORCEMENT, /* job may overrun unhindered */ | ||
| 32 | QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */ | ||
| 33 | PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */ | ||
| 34 | } budget_policy_t; | ||
| 35 | |||
| 36 | /* Release behaviors for jobs. PERIODIC and EARLY jobs | ||
| 37 | must end by calling sys_complete_job() (or equivalent) | ||
| 38 | to set up their next release and deadline. */ | ||
| 39 | typedef enum { | ||
| 40 | /* Jobs are released sporadically (provided job precedence | ||
| 41 | constraints are met). */ | ||
| 42 | SPORADIC, | ||
| 43 | |||
| 44 | /* Jobs are released periodically (provided job precedence | ||
| 45 | constraints are met). */ | ||
| 46 | PERIODIC, | ||
| 47 | |||
| 48 | /* Jobs are released immediately after meeting precedence | ||
| 49 | constraints. Beware this can peg your CPUs if used in | ||
| 50 | the wrong applications. Only supported by EDF schedulers. */ | ||
| 51 | EARLY | ||
| 52 | } release_policy_t; | ||
| 53 | |||
| 54 | /* We use the common priority interpretation "lower index == higher priority", | ||
| 55 | * which is commonly used in fixed-priority schedulability analysis papers. | ||
| 56 | * So, a numerically lower priority value implies higher scheduling priority, | ||
| 57 | * with priority 1 being the highest priority. Priority 0 is reserved for | ||
| 58 | * priority boosting. LITMUS_MAX_PRIORITY bounds the range of valid | ||
| 59 | * priority values. | ||
| 60 | */ | ||
| 61 | |||
| 62 | #define LITMUS_MAX_PRIORITY 512 | ||
| 63 | #define LITMUS_HIGHEST_PRIORITY 1 | ||
| 64 | #define LITMUS_LOWEST_PRIORITY (LITMUS_MAX_PRIORITY - 1) | ||
| 65 | |||
| 66 | /* Provide generic comparison macros for userspace, | ||
| 67 | * in case that we change this later. */ | ||
| 68 | #define litmus_higher_fixed_prio(a, b) (a < b) | ||
| 69 | #define litmus_lower_fixed_prio(a, b) (a > b) | ||
| 70 | #define litmus_is_valid_fixed_prio(p) \ | ||
| 71 | ((p) >= LITMUS_HIGHEST_PRIORITY && \ | ||
| 72 | (p) <= LITMUS_LOWEST_PRIORITY) | ||
| 73 | |||
| 74 | struct rt_task { | ||
| 75 | lt_t exec_cost; | ||
| 76 | lt_t period; | ||
| 77 | lt_t relative_deadline; | ||
| 78 | lt_t phase; | ||
| 79 | unsigned int cpu; | ||
| 80 | unsigned int priority; | ||
| 81 | task_class_t cls; | ||
| 82 | budget_policy_t budget_policy; /* ignored by pfair */ | ||
| 83 | release_policy_t release_policy; | ||
| 84 | }; | ||
| 85 | |||
| 86 | union np_flag { | ||
| 87 | uint64_t raw; | ||
| 88 | struct { | ||
| 89 | /* Is the task currently in a non-preemptive section? */ | ||
| 90 | uint64_t flag:31; | ||
| 91 | /* Should the task call into the scheduler? */ | ||
| 92 | uint64_t preempt:1; | ||
| 93 | } np; | ||
| 94 | }; | ||
| 95 | |||
| 96 | /* The definition of the data that is shared between the kernel and real-time | ||
| 97 | * tasks via a shared page (see litmus/ctrldev.c). | ||
| 98 | * | ||
| 99 | * WARNING: User space can write to this, so don't trust | ||
| 100 | * the correctness of the fields! | ||
| 101 | * | ||
| 102 | * This serves two purposes: to enable efficient signaling | ||
| 103 | * of non-preemptive sections (user->kernel) and | ||
| 104 | * delayed preemptions (kernel->user), and to export | ||
| 105 | * some real-time relevant statistics such as preemption and | ||
| 106 | * migration data to user space. We can't use a device to export | ||
| 107 | * statistics because we want to avoid system call overhead when | ||
| 108 | * determining preemption/migration overheads. | ||
| 109 | */ | ||
| 110 | struct control_page { | ||
| 111 | /* This flag is used by userspace to communicate non-preemptive | ||
| 112 | * sections. */ | ||
| 113 | volatile union np_flag sched; | ||
| 114 | |||
| 115 | volatile uint64_t irq_count; /* Incremented by the kernel each time an IRQ is | ||
| 116 | * handled. */ | ||
| 117 | |||
| 118 | /* Locking overhead tracing: userspace records here the time stamp | ||
| 119 | * and IRQ counter prior to starting the system call. */ | ||
| 120 | uint64_t ts_syscall_start; /* Feather-Trace cycles */ | ||
| 121 | uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall | ||
| 122 | * started. */ | ||
| 123 | |||
| 124 | /* to be extended */ | ||
| 125 | }; | ||
| 126 | |||
| 127 | /* Expected offsets within the control page. */ | ||
| 128 | |||
| 129 | #define LITMUS_CP_OFFSET_SCHED 0 | ||
| 130 | #define LITMUS_CP_OFFSET_IRQ_COUNT 8 | ||
| 131 | #define LITMUS_CP_OFFSET_TS_SC_START 16 | ||
| 132 | #define LITMUS_CP_OFFSET_IRQ_SC_START 24 | ||
| 133 | |||
| 134 | /* don't export internal data structures to user space (liblitmus) */ | ||
| 135 | #ifdef __KERNEL__ | ||
| 136 | |||
| 137 | struct _rt_domain; | ||
| 138 | struct bheap_node; | ||
| 139 | struct release_heap; | ||
| 140 | |||
| 141 | struct rt_job { | ||
| 142 | /* Time instant the job was or will be released. */ | ||
| 143 | lt_t release; | ||
| 144 | /* What is the current deadline? */ | ||
| 145 | lt_t deadline; | ||
| 146 | |||
| 147 | /* How much service has this job received so far? */ | ||
| 148 | lt_t exec_time; | ||
| 149 | |||
| 150 | /* By how much did the prior job miss its deadline? | ||
| 151 | * Value differs from tardiness in that lateness may | ||
| 152 | * be negative (when job finishes before its deadline). | ||
| 153 | */ | ||
| 154 | long long lateness; | ||
| 155 | |||
| 156 | /* Which job is this? This is used to let user space | ||
| 157 | * specify which job to wait for, which is important if jobs | ||
| 158 | * overrun. If we just call sys_sleep_next_period() then we | ||
| 159 | * will unintentionally miss jobs after an overrun. | ||
| 160 | * | ||
| 161 | * Increase this sequence number when a job is released. | ||
| 162 | */ | ||
| 163 | unsigned int job_no; | ||
| 164 | }; | ||
| 165 | |||
| 166 | struct pfair_param; | ||
| 167 | |||
| 168 | /* RT task parameters for scheduling extensions | ||
| 169 | * These parameters are inherited during clone and therefore must | ||
| 170 | * be explicitly set up before the task set is launched. | ||
| 171 | */ | ||
| 172 | struct rt_param { | ||
| 173 | /* is the task sleeping? */ | ||
| 174 | unsigned int flags:8; | ||
| 175 | |||
| 176 | /* do we need to check for srp blocking? */ | ||
| 177 | unsigned int srp_non_recurse:1; | ||
| 178 | |||
| 179 | /* is the task present? (true if it can be scheduled) */ | ||
| 180 | unsigned int present:1; | ||
| 181 | |||
| 182 | /* has the task completed? */ | ||
| 183 | unsigned int completed:1; | ||
| 184 | |||
| 185 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 186 | /* Is the task being priority-boosted by a locking protocol? */ | ||
| 187 | unsigned int priority_boosted:1; | ||
| 188 | /* If so, when did this start? */ | ||
| 189 | lt_t boost_start_time; | ||
| 190 | |||
| 191 | /* How many LITMUS^RT locks does the task currently hold/wait for? */ | ||
| 192 | unsigned int num_locks_held; | ||
| 193 | /* How many PCP/SRP locks does the task currently hold/wait for? */ | ||
| 194 | unsigned int num_local_locks_held; | ||
| 195 | #endif | ||
| 196 | |||
| 197 | /* user controlled parameters */ | ||
| 198 | struct rt_task task_params; | ||
| 199 | |||
| 200 | /* timing parameters */ | ||
| 201 | struct rt_job job_params; | ||
| 202 | |||
| 203 | /* task representing the current "inherited" task | ||
| 204 | * priority, assigned by inherit_priority and | ||
| 205 | * return priority in the scheduler plugins. | ||
| 206 | * May point to self if priority inheritance does not | ||
| 207 | * result in an increased task priority. | ||
| 208 | */ | ||
| 209 | struct task_struct* inh_task; | ||
| 210 | |||
| 211 | #ifdef CONFIG_NP_SECTION | ||
| 212 | /* For the FMLP under PSN-EDF, it is required to make the task | ||
| 213 | * non-preemptive from kernel space. In order not to interfere with | ||
| 214 | * user space, this counter indicates the kernel space np setting. | ||
| 215 | * kernel_np > 0 => task is non-preemptive | ||
| 216 | */ | ||
| 217 | unsigned int kernel_np; | ||
| 218 | #endif | ||
| 219 | |||
| 220 | /* This field can be used by plugins to store where the task | ||
| 221 | * is currently scheduled. It is the responsibility of the | ||
| 222 | * plugin to avoid race conditions. | ||
| 223 | * | ||
| 224 | * This is used by GSN-EDF and PFAIR. | ||
| 225 | */ | ||
| 226 | volatile int scheduled_on; | ||
| 227 | |||
| 228 | /* Is the stack of the task currently in use? This is updated by | ||
| 229 | * the LITMUS core. | ||
| 230 | * | ||
| 231 | * Be careful to avoid deadlocks! | ||
| 232 | */ | ||
| 233 | volatile int stack_in_use; | ||
| 234 | |||
| 235 | /* This field can be used by plugins to store where the task | ||
| 236 | * is currently linked. It is the responsibility of the plugin | ||
| 237 | * to avoid race conditions. | ||
| 238 | * | ||
| 239 | * Used by GSN-EDF. | ||
| 240 | */ | ||
| 241 | volatile int linked_on; | ||
| 242 | |||
| 243 | /* PFAIR/PD^2 state. Allocated on demand. */ | ||
| 244 | struct pfair_param* pfair; | ||
| 245 | |||
| 246 | /* Fields saved before BE->RT transition. | ||
| 247 | */ | ||
| 248 | int old_policy; | ||
| 249 | int old_prio; | ||
| 250 | |||
| 251 | /* ready queue for this task */ | ||
| 252 | struct _rt_domain* domain; | ||
| 253 | |||
| 254 | /* heap element for this task | ||
| 255 | * | ||
| 256 | * Warning: Don't statically allocate this node. The heap | ||
| 257 | * implementation swaps these between tasks, thus after | ||
| 258 | * dequeuing from a heap you may end up with a different node | ||
| 259 | * than the one you had when enqueuing the task. For the same | ||
| 260 | * reason, don't obtain and store references to this node | ||
| 261 | * other than this pointer (which is updated by the heap | ||
| 262 | * implementation). | ||
| 263 | */ | ||
| 264 | struct bheap_node* heap_node; | ||
| 265 | struct release_heap* rel_heap; | ||
| 266 | |||
| 267 | /* Used by rt_domain to queue task in release list. | ||
| 268 | */ | ||
| 269 | struct list_head list; | ||
| 270 | |||
| 271 | /* Pointer to the page shared between userspace and kernel. */ | ||
| 272 | struct control_page * ctrl_page; | ||
| 273 | |||
| 274 | lt_t total_tardy; | ||
| 275 | lt_t max_tardy; | ||
| 276 | unsigned int missed; | ||
| 277 | lt_t max_exec_time; | ||
| 278 | lt_t tot_exec_time; | ||
| 279 | }; | ||
| 280 | |||
| 281 | #endif | ||
| 282 | |||
| 283 | #endif | ||
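A worked example of filling in struct rt_task for an implicit-deadline task; the millisecond values and the MS_TO_NS helper are illustrative, not defaults. Note also that lt_before/lt_after compare via the sign of the difference, so the comparison stays correct across clock wraparound as long as the two instants are within 2^63 ns of each other.

#include <litmus/rt_param.h>

#define MS_TO_NS(x) ((lt_t)(x) * 1000000ULL)	/* illustrative helper */

/* 10 ms worst-case execution cost every 100 ms, implicit deadline,
 * no phase, partition 0, precise budget enforcement, sporadic releases. */
static const struct rt_task demo_params = {
	.exec_cost	   = MS_TO_NS(10),
	.period		   = MS_TO_NS(100),
	.relative_deadline = MS_TO_NS(100),
	.phase		   = 0,
	.cpu		   = 0,
	.priority	   = LITMUS_HIGHEST_PRIORITY,
	.cls		   = RT_CLASS_HARD,
	.budget_policy	   = PRECISE_ENFORCEMENT,
	.release_policy	   = SPORADIC,
};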
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h new file mode 100644 index 00000000000..1546ab7f1d6 --- /dev/null +++ b/include/litmus/sched_plugin.h | |||
| @@ -0,0 +1,113 @@ | |||
| 1 | /* | ||
| 2 | * Definition of the scheduler plugin interface. | ||
| 3 | * | ||
| 4 | */ | ||
| 5 | #ifndef _LINUX_SCHED_PLUGIN_H_ | ||
| 6 | #define _LINUX_SCHED_PLUGIN_H_ | ||
| 7 | |||
| 8 | #include <linux/sched.h> | ||
| 9 | |||
| 10 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 11 | #include <litmus/locking.h> | ||
| 12 | #endif | ||
| 13 | |||
| 14 | /************************ setup/tear down ********************/ | ||
| 15 | |||
| 16 | typedef long (*activate_plugin_t) (void); | ||
| 17 | typedef long (*deactivate_plugin_t) (void); | ||
| 18 | |||
| 19 | |||
| 20 | |||
| 21 | /********************* scheduler invocation ******************/ | ||
| 22 | |||
| 23 | /* Plugin-specific realtime tick handler */ | ||
| 24 | typedef void (*scheduler_tick_t) (struct task_struct *cur); | ||
| 25 | /* The plugin's main scheduling decision function. */ | ||
| 26 | typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | ||
| 27 | /* Clean up after the task switch has occurred. | ||
| 28 | * This function is called after every (even non-rt) task switch. | ||
| 29 | */ | ||
| 30 | typedef void (*finish_switch_t)(struct task_struct *prev); | ||
| 31 | |||
| 32 | |||
| 33 | /********************* task state changes ********************/ | ||
| 34 | |||
| 35 | /* Called to set up a new real-time task. | ||
| 36 | * Release the first job, enqueue, etc. | ||
| 37 | * Task may already be running. | ||
| 38 | */ | ||
| 39 | typedef void (*task_new_t) (struct task_struct *task, | ||
| 40 | int on_rq, | ||
| 41 | int running); | ||
| 42 | |||
| 43 | /* Called to re-introduce a task after blocking. | ||
| 44 | * Can potentially be called multiple times. | ||
| 45 | */ | ||
| 46 | typedef void (*task_wake_up_t) (struct task_struct *task); | ||
| 47 | /* Called to notify the plugin of a blocking real-time task. | ||
| 48 | * It is only called for real-time tasks and before schedule() is invoked. */ | ||
| 49 | typedef void (*task_block_t) (struct task_struct *task); | ||
| 50 | /* Called when a real-time task exits or changes to a different scheduling | ||
| 51 | * class. | ||
| 52 | * Free any allocated resources | ||
| 53 | */ | ||
| 54 | typedef void (*task_exit_t) (struct task_struct *); | ||
| 55 | |||
| 56 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 57 | /* Called when the current task attempts to create a new lock of a given | ||
| 58 | * protocol type. */ | ||
| 59 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, | ||
| 60 | void* __user config); | ||
| 61 | #endif | ||
| 62 | |||
| 63 | |||
| 64 | /********************* sys call backends ********************/ | ||
| 65 | /* This function causes the caller to sleep until the next release */ | ||
| 66 | typedef long (*complete_job_t) (void); | ||
| 67 | |||
| 68 | typedef long (*admit_task_t)(struct task_struct* tsk); | ||
| 69 | |||
| 70 | typedef void (*release_at_t)(struct task_struct *t, lt_t start); | ||
| 71 | |||
| 72 | struct sched_plugin { | ||
| 73 | struct list_head list; | ||
| 74 | /* basic info */ | ||
| 75 | char *plugin_name; | ||
| 76 | |||
| 77 | /* setup */ | ||
| 78 | activate_plugin_t activate_plugin; | ||
| 79 | deactivate_plugin_t deactivate_plugin; | ||
| 80 | |||
| 81 | /* scheduler invocation */ | ||
| 82 | scheduler_tick_t tick; | ||
| 83 | schedule_t schedule; | ||
| 84 | finish_switch_t finish_switch; | ||
| 85 | |||
| 86 | /* syscall backend */ | ||
| 87 | complete_job_t complete_job; | ||
| 88 | release_at_t release_at; | ||
| 89 | |||
| 90 | /* task state changes */ | ||
| 91 | admit_task_t admit_task; | ||
| 92 | |||
| 93 | task_new_t task_new; | ||
| 94 | task_wake_up_t task_wake_up; | ||
| 95 | task_block_t task_block; | ||
| 96 | task_exit_t task_exit; | ||
| 97 | |||
| 98 | #ifdef CONFIG_LITMUS_LOCKING | ||
| 99 | /* locking protocols */ | ||
| 100 | allocate_lock_t allocate_lock; | ||
| 101 | #endif | ||
| 102 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
| 103 | |||
| 104 | |||
| 105 | extern struct sched_plugin *litmus; | ||
| 106 | |||
| 107 | int register_sched_plugin(struct sched_plugin* plugin); | ||
| 108 | struct sched_plugin* find_sched_plugin(const char* name); | ||
| 109 | int print_sched_plugins(char* buf, int max); | ||
| 110 | |||
| 111 | extern struct sched_plugin linux_sched_plugin; | ||
| 112 | |||
| 113 | #endif | ||
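A skeleton of the registration path implied by the declarations above; every callback is a stub and the policy name "DEMO" is made up. complete_job() is the generic helper declared in litmus/jobs.h. Registration would typically happen from an initcall.

#include <litmus/sched_plugin.h>
#include <litmus/jobs.h>
#include <linux/init.h>

static long demo_activate(void)
{
	return 0;	/* nothing to set up in this sketch */
}

static long demo_admit(struct task_struct *tsk)
{
	return 0;	/* accept every task (a real plugin validates params) */
}

static struct task_struct *demo_pick(struct task_struct *prev)
{
	return NULL;	/* never schedules anything: fall back to Linux */
}

static struct sched_plugin demo_plugin = {
	.plugin_name	 = "DEMO",
	.activate_plugin = demo_activate,
	.schedule	 = demo_pick,
	.admit_task	 = demo_admit,
	.complete_job	 = complete_job,
};

static int __init init_demo_plugin(void)
{
	return register_sched_plugin(&demo_plugin);
}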
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h new file mode 100644 index 00000000000..fa7042f744c --- /dev/null +++ b/include/litmus/sched_trace.h | |||
| @@ -0,0 +1,342 @@ | |||
| 1 | /* | ||
| 2 | * sched_trace.h -- record scheduler events to a byte stream for offline analysis. | ||
| 3 | */ | ||
| 4 | #ifndef _LINUX_SCHED_TRACE_H_ | ||
| 5 | #define _LINUX_SCHED_TRACE_H_ | ||
| 6 | |||
| 7 | /* all times in nanoseconds */ | ||
| 8 | |||
| 9 | struct st_trace_header { | ||
| 10 | u8 type; /* Of what type is this record? */ | ||
| 11 | u8 cpu; /* On which CPU was it recorded? */ | ||
| 12 | u16 pid; /* PID of the task. */ | ||
| 13 | u32 job; /* The job sequence number. */ | ||
| 14 | }; | ||
| 15 | |||
| 16 | #define ST_NAME_LEN 16 | ||
| 17 | struct st_name_data { | ||
| 18 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ | ||
| 19 | }; | ||
| 20 | |||
| 21 | struct st_param_data { /* regular params */ | ||
| 22 | u32 wcet; | ||
| 23 | u32 period; | ||
| 24 | u32 phase; | ||
| 25 | u8 partition; | ||
| 26 | u8 class; | ||
| 27 | u8 __unused[2]; | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct st_release_data { /* A job was/is going to be released. */ | ||
| 31 | u64 release; /* What's the release time? */ | ||
| 32 | u64 deadline; /* By when must it finish? */ | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct st_assigned_data { /* A job was assigned to a CPU. */ | ||
| 36 | u64 when; | ||
| 37 | u8 target; /* Where should it execute? */ | ||
| 38 | u8 __unused[7]; | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ | ||
| 42 | u64 when; /* When did this occur? */ | ||
| 43 | u32 exec_time; /* Time the current job has executed. */ | ||
| 44 | u8 __unused[4]; | ||
| 45 | |||
| 46 | }; | ||
| 47 | |||
| 48 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | ||
| 49 | u64 when; | ||
| 50 | u64 exec_time; | ||
| 51 | }; | ||
| 52 | |||
| 53 | struct st_completion_data { /* A job completed. */ | ||
| 54 | u64 when; | ||
| 55 | u8 forced:1; /* Set to 1 if job overran and kernel advanced to the | ||
| 56 | * next task automatically; set to 0 otherwise. | ||
| 57 | */ | ||
| 58 | u8 __uflags:7; | ||
| 59 | u8 __unused[7]; | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct st_block_data { /* A task blocks. */ | ||
| 63 | u64 when; | ||
| 64 | u64 __unused; | ||
| 65 | }; | ||
| 66 | |||
| 67 | struct st_resume_data { /* A task resumes. */ | ||
| 68 | u64 when; | ||
| 69 | u64 __unused; | ||
| 70 | }; | ||
| 71 | |||
| 72 | struct st_action_data { | ||
| 73 | u64 when; | ||
| 74 | u8 action; | ||
| 75 | u8 __unused[7]; | ||
| 76 | }; | ||
| 77 | |||
| 78 | struct st_sys_release_data { | ||
| 79 | u64 when; | ||
| 80 | u64 release; | ||
| 81 | }; | ||
| 82 | |||
| 83 | struct st_task_exit_data { | ||
| 84 | u64 avg_exec_time; | ||
| 85 | u64 max_exec_time; | ||
| 86 | }; | ||
| 87 | |||
| 88 | struct st_task_tardy_data { | ||
| 89 | u64 total_tardy; | ||
| 90 | u32 max_tardy; | ||
| 91 | u32 missed; | ||
| 92 | }; | ||
| 93 | |||
| 94 | #define DATA(x) struct st_ ## x ## _data x; | ||
| 95 | |||
| 96 | typedef enum { | ||
| 97 | ST_NAME = 1, /* Start at one, so that we can spot | ||
| 98 | * uninitialized records. */ | ||
| 99 | ST_PARAM, | ||
| 100 | ST_RELEASE, | ||
| 101 | ST_ASSIGNED, | ||
| 102 | ST_SWITCH_TO, | ||
| 103 | ST_SWITCH_AWAY, | ||
| 104 | ST_COMPLETION, | ||
| 105 | ST_BLOCK, | ||
| 106 | ST_RESUME, | ||
| 107 | ST_ACTION, | ||
| 108 | ST_SYS_RELEASE, | ||
| 109 | ST_TASK_EXIT, | ||
| 110 | ST_TASK_TARDY, | ||
| 111 | } st_event_record_type_t; | ||
| 112 | |||
| 113 | struct st_event_record { | ||
| 114 | struct st_trace_header hdr; | ||
| 115 | union { | ||
| 116 | u64 raw[2]; | ||
| 117 | |||
| 118 | DATA(name); | ||
| 119 | DATA(param); | ||
| 120 | DATA(release); | ||
| 121 | DATA(assigned); | ||
| 122 | DATA(switch_to); | ||
| 123 | DATA(switch_away); | ||
| 124 | DATA(completion); | ||
| 125 | DATA(block); | ||
| 126 | DATA(resume); | ||
| 127 | DATA(action); | ||
| 128 | DATA(sys_release); | ||
| 129 | DATA(task_exit); | ||
| 130 | DATA(task_tardy); | ||
| 131 | } data; | ||
| 132 | }; | ||
| 133 | |||
| 134 | #undef DATA | ||
| 135 | |||
| 136 | #ifdef __KERNEL__ | ||
| 137 | |||
| 138 | #include <linux/sched.h> | ||
| 139 | #include <litmus/feather_trace.h> | ||
| 140 | |||
| 141 | #ifdef CONFIG_SCHED_TASK_TRACE | ||
| 142 | |||
| 143 | #define SCHED_TRACE(id, callback, task) \ | ||
| 144 | ft_event1(id, callback, task) | ||
| 145 | #define SCHED_TRACE2(id, callback, task, xtra) \ | ||
| 146 | ft_event2(id, callback, task, xtra) | ||
| 147 | |||
| 148 | /* provide prototypes; needed on sparc64 */ | ||
| 149 | #ifndef NO_TASK_TRACE_DECLS | ||
| 150 | feather_callback void do_sched_trace_task_name(unsigned long id, | ||
| 151 | struct task_struct* task); | ||
| 152 | feather_callback void do_sched_trace_task_param(unsigned long id, | ||
| 153 | struct task_struct* task); | ||
| 154 | feather_callback void do_sched_trace_task_release(unsigned long id, | ||
| 155 | struct task_struct* task); | ||
| 156 | feather_callback void do_sched_trace_task_switch_to(unsigned long id, | ||
| 157 | struct task_struct* task); | ||
| 158 | feather_callback void do_sched_trace_task_switch_away(unsigned long id, | ||
| 159 | struct task_struct* task); | ||
| 160 | feather_callback void do_sched_trace_task_completion(unsigned long id, | ||
| 161 | struct task_struct* task, | ||
| 162 | unsigned long forced); | ||
| 163 | feather_callback void do_sched_trace_task_block(unsigned long id, | ||
| 164 | struct task_struct* task); | ||
| 165 | feather_callback void do_sched_trace_task_resume(unsigned long id, | ||
| 166 | struct task_struct* task); | ||
| 167 | feather_callback void do_sched_trace_action(unsigned long id, | ||
| 168 | struct task_struct* task, | ||
| 169 | unsigned long action); | ||
| 170 | feather_callback void do_sched_trace_sys_release(unsigned long id, | ||
| 171 | lt_t* start); | ||
| 172 | feather_callback void do_sched_trace_task_exit(unsigned long id, | ||
| 173 | struct task_struct* task); | ||
| 174 | feather_callback void do_sched_trace_task_tardy(unsigned long id, | ||
| 175 | struct task_struct* task); | ||
| 176 | |||
| 177 | #endif | ||
| 178 | |||
| 179 | #else | ||
| 180 | |||
| 181 | #define SCHED_TRACE(id, callback, task) /* no tracing */ | ||
| 182 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | ||
| 183 | |||
| 184 | #endif | ||
| 185 | |||
| 186 | #ifdef CONFIG_SCHED_LITMUS_TRACEPOINT | ||
| 187 | |||
| 188 | #include <trace/events/litmus.h> | ||
| 189 | |||
| 190 | #else | ||
| 191 | |||
| 192 | /* Override trace macros to actually do nothing */ | ||
| 193 | #define trace_litmus_task_param(t) | ||
| 194 | #define trace_litmus_task_release(t) | ||
| 195 | #define trace_litmus_switch_to(t) | ||
| 196 | #define trace_litmus_switch_away(prev) | ||
| 197 | #define trace_litmus_task_completion(t, forced) | ||
| 198 | #define trace_litmus_task_block(t) | ||
| 199 | #define trace_litmus_task_resume(t) | ||
| 200 | #define trace_litmus_sys_release(start) | ||
| 201 | #define trace_litmus_task_exit(t) | ||
| 202 | #define trace_litmus_task_tardy(t) | ||
| 203 | |||
| 204 | #define trace_litmus_container_param(cid, name) | ||
| 205 | #define trace_litmus_server_param(sid, cid, wcet, time) | ||
| 206 | #define trace_litmus_server_switch_to(sid, job, tid) | ||
| 207 | #define trace_litmus_server_switch_away(sid, job, tid) | ||
| 208 | #define trace_litmus_server_release(sid, job, release, deadline) | ||
| 209 | #define trace_litmus_server_completion(sid, job) | ||
| 210 | |||
| 211 | #define trace_litmus_container_param(cid, name) | ||
| 212 | #define trace_litmus_server_param(sid, cid, wcet, time) | ||
| 213 | #define trace_litmus_server_switch_to(sid, job, tid, tjob, cpu) | ||
| 214 | #define trace_litmus_server_switch_away(sid, job, tid, tjob, cpu) | ||
| 215 | #define trace_litmus_server_release(sid, job, release, deadline) | ||
| 216 | #define trace_litmus_server_completion(sid, job) | ||
| 217 | #define trace_litmus_server_block(sid) | ||
| 218 | #define trace_litmus_server_resume(sid) | ||
| 219 | |||
| 220 | #endif | ||
| 221 | |||
| 222 | |||
| 223 | #define SCHED_TRACE_BASE_ID 500 | ||
| 224 | |||
| 225 | |||
| 226 | #define sched_trace_task_name(t) \ | ||
| 227 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, \ | ||
| 228 | do_sched_trace_task_name, t) | ||
| 229 | |||
| 230 | #define sched_trace_task_param(t) \ | ||
| 231 | do { \ | ||
| 232 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, \ | ||
| 233 | do_sched_trace_task_param, t); \ | ||
| 234 | trace_litmus_task_param(t); \ | ||
| 235 | } while (0) | ||
| 236 | |||
| 237 | #define sched_trace_task_release(t) \ | ||
| 238 | do { \ | ||
| 239 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \ | ||
| 240 | do_sched_trace_task_release, t); \ | ||
| 241 | trace_litmus_task_release(t); \ | ||
| 242 | } while (0) | ||
| 243 | |||
| 244 | #define sched_trace_task_switch_to(t) \ | ||
| 245 | do { \ | ||
| 246 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \ | ||
| 247 | do_sched_trace_task_switch_to, t); \ | ||
| 248 | trace_litmus_switch_to(t); \ | ||
| 249 | } while (0) | ||
| 250 | |||
| 251 | #define sched_trace_task_switch_away(t) \ | ||
| 252 | do { \ | ||
| 253 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \ | ||
| 254 | do_sched_trace_task_switch_away, t); \ | ||
| 255 | trace_litmus_switch_away(t); \ | ||
| 256 | } while (0) | ||
| 257 | |||
| 258 | #define sched_trace_task_completion(t, forced) \ | ||
| 259 | do { \ | ||
| 260 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, \ | ||
| 261 | do_sched_trace_task_completion, t, \ | ||
| 262 | (unsigned long) forced); \ | ||
| 263 | trace_litmus_task_completion(t, forced); \ | ||
| 264 | } while (0) | ||
| 265 | |||
| 266 | #define sched_trace_task_block_on(t, i) \ | ||
| 267 | do { \ | ||
| 268 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \ | ||
| 269 | do_sched_trace_task_block, t); \ | ||
| 270 | trace_litmus_task_block(t, i); \ | ||
| 271 | } while (0) | ||
| 272 | |||
| 273 | #define sched_trace_task_block(t) \ | ||
| 274 | sched_trace_task_block_on(t, 0) | ||
| 275 | |||
| 276 | #define sched_trace_task_resume_on(t, i) \ | ||
| 277 | do { \ | ||
| 278 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \ | ||
| 279 | do_sched_trace_task_resume, t); \ | ||
| 280 | trace_litmus_task_resume(t, i); \ | ||
| 281 | } while (0) | ||
| 282 | |||
| 283 | #define sched_trace_task_resume(t) \ | ||
| 284 | sched_trace_task_resume_on(t, 0) | ||
| 285 | |||
| 286 | #define sched_trace_resource_acquire(t, i) \ | ||
| 287 | do { \ | ||
| 288 | trace_litmus_resource_acquire(t, i); \ | ||
| 289 | } while (0) | ||
| 290 | |||
| 291 | #define sched_trace_resource_released(t, i) \ | ||
| 292 | do { \ | ||
| 293 | trace_litmus_resource_released(t, i); \ | ||
| 294 | } while (0) | ||
| 295 | |||
| 296 | #define sched_trace_action(t, action) \ | ||
| 297 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, \ | ||
| 298 | do_sched_trace_action, t, (unsigned long) action); | ||
| 299 | |||
| 300 | /* when is a pointer, it does not need an explicit cast to unsigned long */ | ||
| 301 | #define sched_trace_sys_release(when) \ | ||
| 302 | do { \ | ||
| 303 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, \ | ||
| 304 | do_sched_trace_sys_release, when); \ | ||
| 305 | trace_litmus_sys_release(when); \ | ||
| 306 | } while (0) | ||
| 307 | |||
| 308 | #define sched_trace_container_param(cid, name) \ | ||
| 309 | do { \ | ||
| 310 | trace_litmus_container_param(cid, name); \ | ||
| 311 | } while (0) | ||
| 312 | |||
| 313 | #define sched_trace_server_param(sid, cid, wcet, period) \ | ||
| 314 | do { \ | ||
| 315 | trace_litmus_server_param(sid, cid, wcet, period); \ | ||
| 316 | } while(0) | ||
| 317 | |||
| 318 | #define sched_trace_server_switch_to(sid, job, tid, tjob, cpu) \ | ||
| 319 | do { \ | ||
| 320 | trace_litmus_server_switch_to(sid, job, tid, tjob, cpu);\ | ||
| 321 | } while(0) | ||
| 322 | |||
| 323 | #define sched_trace_server_switch_away(sid, job, tid, tjob, cpu) \ | ||
| 324 | do { \ | ||
| 325 | trace_litmus_server_switch_away(sid, job, tid, tjob, cpu);\ | ||
| 326 | } while (0) | ||
| 327 | |||
| 328 | #define sched_trace_server_release(sid, job, release, deadline) \ | ||
| 329 | do { \ | ||
| 330 | trace_litmus_server_release(sid, job, release, deadline); \ | ||
| 331 | } while (0) | ||
| 332 | |||
| 333 | #define sched_trace_server_completion(sid, job) \ | ||
| 334 | do { \ | ||
| 335 | trace_litmus_server_completion(sid, job); \ | ||
| 336 | } while (0) | ||
| 337 | |||
| 338 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | ||
| 339 | |||
| 340 | #endif /* __KERNEL__ */ | ||
| 341 | |||
| 342 | #endif | ||
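An illustrative pairing of these hooks with the job lifecycle; exactly where a plugin emits each record is policy-specific, so the placement shown here is an assumption.

#include <litmus/sched_trace.h>
#include <litmus/jobs.h>

/* Record the parameters once at admission time, then one release record
 * per job and one completion record when the job (possibly forcibly) ends. */
static void demo_on_admit(struct task_struct *t)
{
	sched_trace_task_name(t);
	sched_trace_task_param(t);
}

static void demo_on_release(struct task_struct *t)
{
	prepare_for_next_period(t);
	sched_trace_task_release(t);
}

static void demo_on_completion(struct task_struct *t, int forced)
{
	sched_trace_task_completion(t, forced);
}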
diff --git a/include/litmus/srp.h b/include/litmus/srp.h new file mode 100644 index 00000000000..c9a4552b2bf --- /dev/null +++ b/include/litmus/srp.h | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | #ifndef LITMUS_SRP_H | ||
| 2 | #define LITMUS_SRP_H | ||
| 3 | |||
| 4 | struct srp_semaphore; | ||
| 5 | |||
| 6 | struct srp_priority { | ||
| 7 | struct list_head list; | ||
| 8 | unsigned int priority; | ||
| 9 | pid_t pid; | ||
| 10 | }; | ||
| 11 | #define list2prio(l) list_entry(l, struct srp_priority, list) | ||
| 12 | |||
| 13 | /* struct for uniprocessor SRP "semaphore" */ | ||
| 14 | struct srp_semaphore { | ||
| 15 | struct litmus_lock litmus_lock; | ||
| 16 | struct srp_priority ceiling; | ||
| 17 | struct task_struct* owner; | ||
| 18 | int cpu; /* cpu associated with this "semaphore" and resource */ | ||
| 19 | }; | ||
| 20 | |||
| 21 | /* map a task to its SRP preemption level priority */ | ||
| 22 | typedef unsigned int (*srp_prioritization_t)(struct task_struct* t); | ||
| 23 | /* Must be updated by each plugin that uses SRP.*/ | ||
| 24 | extern srp_prioritization_t get_srp_prio; | ||
| 25 | |||
| 26 | struct srp_semaphore* allocate_srp_semaphore(void); | ||
| 27 | |||
| 28 | #endif | ||
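get_srp_prio is a function pointer that every SRP-aware plugin must point at its own task-to-preemption-level mapping before SRP semaphores are used. A minimal sketch, assuming a period-based mapping; the mapping function and its numeric encoding of levels are assumptions, not the actual LITMUS^RT policy:

/* Sketch: wiring the SRP preemption-level hook from a plugin's activation
 * path.  demo_srp_prio() and its encoding are illustrative only. */
#include <litmus/litmus.h>
#include <litmus/srp.h>

static unsigned int demo_srp_prio(struct task_struct *t)
{
	/* purely illustrative: derive the preemption level from the period */
	return (unsigned int) get_rt_period(t);
}

static long demo_plugin_activate(void)
{
	get_srp_prio = demo_srp_prio;
	return 0;
}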
diff --git a/include/litmus/trace.h b/include/litmus/trace.h new file mode 100644 index 00000000000..8ad4966c602 --- /dev/null +++ b/include/litmus/trace.h | |||
| @@ -0,0 +1,145 @@ | |||
| 1 | #ifndef _SYS_TRACE_H_ | ||
| 2 | #define _SYS_TRACE_H_ | ||
| 3 | |||
| 4 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
| 5 | |||
| 6 | |||
| 7 | #include <litmus/feather_trace.h> | ||
| 8 | #include <litmus/feather_buffer.h> | ||
| 9 | |||
| 10 | |||
| 11 | /*********************** TIMESTAMPS ************************/ | ||
| 12 | |||
| 13 | enum task_type_marker { | ||
| 14 | TSK_BE, | ||
| 15 | TSK_RT, | ||
| 16 | TSK_UNKNOWN | ||
| 17 | }; | ||
| 18 | |||
| 19 | struct timestamp { | ||
| 20 | uint64_t timestamp:48; | ||
| 21 | uint64_t pid:16; | ||
| 22 | uint32_t seq_no; | ||
| 23 | uint8_t cpu; | ||
| 24 | uint8_t event; | ||
| 25 | uint8_t task_type:2; | ||
| 26 | uint8_t irq_flag:1; | ||
| 27 | uint8_t irq_count:5; | ||
| 28 | }; | ||
| 29 | |||
| 30 | /* tracing callbacks */ | ||
| 31 | feather_callback void save_timestamp(unsigned long event); | ||
| 32 | feather_callback void save_timestamp_def(unsigned long event, unsigned long type); | ||
| 33 | feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); | ||
| 34 | feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu); | ||
| 35 | feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr); | ||
| 36 | feather_callback void save_timestamp_time(unsigned long event, unsigned long time_ptr); | ||
| 37 | feather_callback void save_timestamp_irq(unsigned long event, unsigned long irq_count_ptr); | ||
| 38 | feather_callback void save_timestamp_hide_irq(unsigned long event); | ||
| 39 | |||
| 40 | #define TIMESTAMP(id) ft_event0(id, save_timestamp) | ||
| 41 | |||
| 42 | #define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def) | ||
| 43 | |||
| 44 | #define TIMESTAMP_CUR(id) DTIMESTAMP(id, is_realtime(current) ? TSK_RT : TSK_BE) | ||
| 45 | |||
| 46 | #define TTIMESTAMP(id, task) \ | ||
| 47 | ft_event1(id, save_timestamp_task, (unsigned long) task) | ||
| 48 | |||
| 49 | #define CTIMESTAMP(id, cpu) \ | ||
| 50 | ft_event1(id, save_timestamp_cpu, (unsigned long) cpu) | ||
| 51 | |||
| 52 | #define LTIMESTAMP(id, when_ptr) \ | ||
| 53 | ft_event1(id, save_task_latency, (unsigned long) when_ptr) | ||
| 54 | |||
| 55 | #define TIMESTAMP_TIME(id, time_ptr) \ | ||
| 56 | ft_event1(id, save_timestamp_time, (unsigned long) time_ptr) | ||
| 57 | |||
| 58 | #define TIMESTAMP_IRQ(id, irq_count_ptr) \ | ||
| 59 | ft_event1(id, save_timestamp_irq, (unsigned long) irq_count_ptr) | ||
| 60 | |||
| 61 | #define TIMESTAMP_IN_IRQ(id) \ | ||
| 62 | ft_event0(id, save_timestamp_hide_irq) | ||
| 63 | |||
| 64 | #else /* !CONFIG_SCHED_OVERHEAD_TRACE */ | ||
| 65 | |||
| 66 | #define TIMESTAMP(id) /* no tracing */ | ||
| 67 | |||
| 68 | #define DTIMESTAMP(id, def) /* no tracing */ | ||
| 69 | |||
| 70 | #define TIMESTAMP_CUR(id) /* no tracing */ | ||
| 71 | |||
| 72 | #define TTIMESTAMP(id, task) /* no tracing */ | ||
| 73 | |||
| 74 | #define CTIMESTAMP(id, cpu) /* no tracing */ | ||
| 75 | |||
| 76 | #define LTIMESTAMP(id, when_ptr) /* no tracing */ | ||
| 77 | |||
| 78 | #define TIMESTAMP_TIME(id, time_ptr) /* no tracing */ | ||
| 79 | |||
| 80 | #define TIMESTAMP_IRQ(id, irq_count_ptr) /* no tracing */ | ||
| 81 | |||
| 82 | #define TIMESTAMP_IN_IRQ(id) /* no tracing */ | ||
| 83 | |||
| 84 | #endif | ||
| 85 | |||
| 86 | |||
| 87 | /* Convention for timestamps | ||
| 88 | * ========================= | ||
| 89 | * | ||
| 90 | * In order to process the trace files with a common tool, we use the following | ||
| 91 | * convention to measure execution times: the event id that marks the end of | ||
| 92 | * a code segment is always the start event id plus one. | ||
| 93 | */ | ||
| 94 | |||
| 95 | #define __TS_SYSCALL_IN_START(p) TIMESTAMP_TIME(10, p) | ||
| 96 | #define __TS_SYSCALL_IN_END(p) TIMESTAMP_IRQ(11, p) | ||
| 97 | |||
| 98 | #define TS_SYSCALL_OUT_START TIMESTAMP_CUR(20) | ||
| 99 | #define TS_SYSCALL_OUT_END TIMESTAMP_CUR(21) | ||
| 100 | |||
| 101 | #define TS_LOCK_START TIMESTAMP_CUR(30) | ||
| 102 | #define TS_LOCK_END TIMESTAMP_CUR(31) | ||
| 103 | |||
| 104 | #define TS_LOCK_SUSPEND TIMESTAMP_CUR(38) | ||
| 105 | #define TS_LOCK_RESUME TIMESTAMP_CUR(39) | ||
| 106 | |||
| 107 | #define TS_UNLOCK_START TIMESTAMP_CUR(40) | ||
| 108 | #define TS_UNLOCK_END TIMESTAMP_CUR(41) | ||
| 109 | |||
| 110 | #define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only | ||
| 111 | * care | ||
| 112 | * about | ||
| 113 | * next */ | ||
| 114 | #define TS_SCHED_END(t) TTIMESTAMP(101, t) | ||
| 115 | #define TS_SCHED2_START(t) TTIMESTAMP(102, t) | ||
| 116 | #define TS_SCHED2_END(t) TTIMESTAMP(103, t) | ||
| 117 | |||
| 118 | #define TS_CXS_START(t) TTIMESTAMP(104, t) | ||
| 119 | #define TS_CXS_END(t) TTIMESTAMP(105, t) | ||
| 120 | |||
| 121 | #define TS_RELEASE_START DTIMESTAMP(106, TSK_RT) | ||
| 122 | #define TS_RELEASE_END DTIMESTAMP(107, TSK_RT) | ||
| 123 | |||
| 124 | #define TS_TICK_START(t) TTIMESTAMP(110, t) | ||
| 125 | #define TS_TICK_END(t) TTIMESTAMP(111, t) | ||
| 126 | |||
| 127 | |||
| 128 | #define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */ | ||
| 129 | #define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */ | ||
| 130 | |||
| 131 | #define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */ | ||
| 132 | #define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */ | ||
| 133 | |||
| 134 | #define TS_ENTER_NP_START TIMESTAMP(140) | ||
| 135 | #define TS_ENTER_NP_END TIMESTAMP(141) | ||
| 136 | |||
| 137 | #define TS_EXIT_NP_START TIMESTAMP(150) | ||
| 138 | #define TS_EXIT_NP_END TIMESTAMP(151) | ||
| 139 | |||
| 140 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) | ||
| 141 | #define TS_SEND_RESCHED_END TIMESTAMP_IN_IRQ(191) | ||
| 142 | |||
| 143 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) | ||
| 144 | |||
| 145 | #endif /* !_SYS_TRACE_H_ */ | ||
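Per the convention above, every overhead probe comes as a start/end pair whose event ids differ by one (e.g. TS_LOCK_START is id 30 and TS_LOCK_END is id 31), which is what allows the offline tools to pair the samples. A small sketch of how a hypothetical code path might be bracketed:

/* Sketch: bracketing a code path with a start/end timestamp pair so the
 * offline tools can compute its duration.  The function is hypothetical;
 * the TS_LOCK_* macros come from this header. */
#include <litmus/trace.h>

static long demo_lock_path(void)
{
	long err;

	TS_LOCK_START;			/* TIMESTAMP_CUR(30) */
	err = 0;			/* ... the actual locking work ... */
	TS_LOCK_END;			/* TIMESTAMP_CUR(31) == 30 + 1 */

	return err;
}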
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h new file mode 100644 index 00000000000..0d0c042ba9c --- /dev/null +++ b/include/litmus/trace_irq.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | #ifndef _LITMUS_TRACE_IRQ_H_ | ||
| 2 | #define _LITMUS_TRACE_IRQ_H_ | ||
| 3 | |||
| 4 | #ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
| 5 | |||
| 6 | void ft_irq_fired(void); | ||
| 7 | |||
| 8 | #else | ||
| 9 | |||
| 10 | #define ft_irq_fired() /* nothing to do */ | ||
| 11 | |||
| 12 | #endif | ||
| 13 | |||
| 14 | #endif | ||
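ft_irq_fired() is the hook that interrupt entry code calls so that overhead samples can note how many interrupts preempted the measured code path (cf. the irq_flag/irq_count bits of struct timestamp in litmus/trace.h). A sketch of one plausible backing implementation, assuming a per-CPU counter; the actual definition lives in the LITMUS^RT tracing code and may differ:

/* Sketch only: one way ft_irq_fired() could be backed by a per-CPU counter
 * that the save_timestamp_*() callbacks later fold into the 5-bit
 * irq_count field of struct timestamp.  Not the actual implementation. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_irq_fired_count);

void ft_irq_fired(void)
{
	/* called from interrupt entry with interrupts disabled locally,
	 * so a plain per-CPU increment suffices in this sketch */
	__get_cpu_var(demo_irq_fired_count)++;
}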
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h new file mode 100644 index 00000000000..04e453e8991 --- /dev/null +++ b/include/litmus/unistd_32.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | /* | ||
| 2 | * included from arch/x86/include/asm/unistd_32.h | ||
| 3 | * | ||
| 4 | * LITMUS^RT syscalls with "relative" numbers | ||
| 5 | */ | ||
| 6 | #define __LSC(x) (__NR_LITMUS + x) | ||
| 7 | |||
| 8 | #define __NR_set_rt_task_param __LSC(0) | ||
| 9 | #define __NR_get_rt_task_param __LSC(1) | ||
| 10 | #define __NR_complete_job __LSC(2) | ||
| 11 | #define __NR_od_open __LSC(3) | ||
| 12 | #define __NR_od_close __LSC(4) | ||
| 13 | #define __NR_litmus_lock __LSC(5) | ||
| 14 | #define __NR_litmus_unlock __LSC(6) | ||
| 15 | #define __NR_query_job_no __LSC(7) | ||
| 16 | #define __NR_wait_for_job_release __LSC(8) | ||
| 17 | #define __NR_wait_for_ts_release __LSC(9) | ||
| 18 | #define __NR_release_ts __LSC(10) | ||
| 19 | #define __NR_null_call __LSC(11) | ||
| 20 | #define __NR_dynamic_group_lock __LSC(12) | ||
| 21 | #define __NR_dynamic_group_unlock __LSC(13) | ||
| 22 | |||
| 23 | #define NR_litmus_syscalls 14 | ||
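Since the numbers are defined relative to __NR_LITMUS, user space sees ordinary absolute syscall numbers once the exported x86-32 unistd header is installed, and the calls can be reached through syscall(2). liblitmus normally provides proper wrappers; the snippet below is only an illustration and assumes installed LITMUS^RT kernel headers:

/* User-space sketch (assumes installed LITMUS^RT headers that define
 * __NR_null_call via the x86-32 unistd header).  liblitmus wraps these
 * calls properly; this only shows the relative numbering in use. */
#include <unistd.h>
#include <sys/syscall.h>

int demo_null_call(void)
{
	/* __NR_null_call == __NR_LITMUS + 11; a NULL argument is assumed to
	 * be accepted here for a bare round-trip measurement */
	return syscall(__NR_null_call, (void *) 0);
}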
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h new file mode 100644 index 00000000000..ae55b488466 --- /dev/null +++ b/include/litmus/unistd_64.h | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | /* | ||
| 2 | * included from arch/x86/include/asm/unistd_64.h | ||
| 3 | * | ||
| 4 | * LITMUS^RT syscalls with "relative" numbers | ||
| 5 | */ | ||
| 6 | #define __LSC(x) (__NR_LITMUS + x) | ||
| 7 | |||
| 8 | #define __NR_set_rt_task_param __LSC(0) | ||
| 9 | __SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param) | ||
| 10 | #define __NR_get_rt_task_param __LSC(1) | ||
| 11 | __SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param) | ||
| 12 | #define __NR_complete_job __LSC(2) | ||
| 13 | __SYSCALL(__NR_complete_job, sys_complete_job) | ||
| 14 | #define __NR_od_open __LSC(3) | ||
| 15 | __SYSCALL(__NR_od_open, sys_od_open) | ||
| 16 | #define __NR_od_close __LSC(4) | ||
| 17 | __SYSCALL(__NR_od_close, sys_od_close) | ||
| 18 | #define __NR_litmus_lock __LSC(5) | ||
| 19 | __SYSCALL(__NR_litmus_lock, sys_litmus_lock) | ||
| 20 | #define __NR_litmus_unlock __LSC(6) | ||
| 21 | __SYSCALL(__NR_litmus_unlock, sys_litmus_unlock) | ||
| 22 | #define __NR_query_job_no __LSC(7) | ||
| 23 | __SYSCALL(__NR_query_job_no, sys_query_job_no) | ||
| 24 | #define __NR_wait_for_job_release __LSC(8) | ||
| 25 | __SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release) | ||
| 26 | #define __NR_wait_for_ts_release __LSC(9) | ||
| 27 | __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | ||
| 28 | #define __NR_release_ts __LSC(10) | ||
| 29 | __SYSCALL(__NR_release_ts, sys_release_ts) | ||
| 30 | #define __NR_null_call __LSC(11) | ||
| 31 | __SYSCALL(__NR_null_call, sys_null_call) | ||
| 32 | #define __NR_dynamic_group_lock __LSC(12) | ||
| 33 | __SYSCALL(__NR_dynamic_group_lock, sys_dynamic_group_lock) | ||
| 34 | #define __NR_dynamic_group_unlock __LSC(13) | ||
| 35 | __SYSCALL(__NR_dynamic_group_unlock, sys_dynamic_group_unlock) | ||
| 36 | |||
| 37 | #define NR_litmus_syscalls 14 | ||
diff --git a/include/litmus/wait.h b/include/litmus/wait.h new file mode 100644 index 00000000000..7e20c0a4a1f --- /dev/null +++ b/include/litmus/wait.h | |||
| @@ -0,0 +1,58 @@ | |||
| 1 | #ifndef _LITMUS_WAIT_H_ | ||
| 2 | #define _LITMUS_WAIT_H_ | ||
| 3 | |||
| 4 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); | ||
| 5 | struct task_struct* __waitqueue_peek_first(wait_queue_head_t *wq); | ||
| 6 | |||
| 7 | /* wraps a regular wait_queue_t entry */ | ||
| 8 | struct __prio_wait_queue { | ||
| 9 | wait_queue_t wq; | ||
| 10 | |||
| 11 | /* some priority point */ | ||
| 12 | lt_t priority; | ||
| 13 | /* break ties in priority by lower tie_breaker */ | ||
| 14 | unsigned int tie_breaker; | ||
| 15 | }; | ||
| 16 | |||
| 17 | typedef struct __prio_wait_queue prio_wait_queue_t; | ||
| 18 | |||
| 19 | static inline void init_prio_waitqueue_entry(prio_wait_queue_t *pwq, | ||
| 20 | struct task_struct* t, | ||
| 21 | lt_t priority) | ||
| 22 | { | ||
| 23 | init_waitqueue_entry(&pwq->wq, t); | ||
| 24 | pwq->priority = priority; | ||
| 25 | pwq->tie_breaker = 0; | ||
| 26 | } | ||
| 27 | |||
| 28 | static inline void init_prio_waitqueue_entry_tie(prio_wait_queue_t *pwq, | ||
| 29 | struct task_struct* t, | ||
| 30 | lt_t priority, | ||
| 31 | unsigned int tie_breaker) | ||
| 32 | { | ||
| 33 | init_waitqueue_entry(&pwq->wq, t); | ||
| 34 | pwq->priority = priority; | ||
| 35 | pwq->tie_breaker = tie_breaker; | ||
| 36 | } | ||
| 37 | |||
| 38 | unsigned int __add_wait_queue_prio_exclusive( | ||
| 39 | wait_queue_head_t* head, | ||
| 40 | prio_wait_queue_t *new); | ||
| 41 | |||
| 42 | static inline unsigned int add_wait_queue_prio_exclusive( | ||
| 43 | wait_queue_head_t* head, | ||
| 44 | prio_wait_queue_t *new) | ||
| 45 | { | ||
| 46 | unsigned long flags; | ||
| 47 | unsigned int passed; | ||
| 48 | |||
| 49 | spin_lock_irqsave(&head->lock, flags); | ||
| 50 | passed = __add_wait_queue_prio_exclusive(head, new); | ||
| 51 | |||
| 52 | spin_unlock_irqrestore(&head->lock, flags); | ||
| 53 | |||
| 54 | return passed; | ||
| 55 | } | ||
| 56 | |||
| 57 | |||
| 58 | #endif | ||
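The prio_wait_queue_t entries let a suspension-based lock queue blocked tasks in priority order (with an explicit tie breaker) rather than FIFO order; add_wait_queue_prio_exclusive() and __waitqueue_remove_first() are the enqueue/dequeue halves of that protocol. A sketch of the blocking side, assuming the unlock path dequeues and wakes the head waiter (the surrounding lock logic is hypothetical):

/* Sketch: block the current task on a priority-ordered wait queue, using
 * its deadline as the priority point.  The helpers are from litmus/wait.h;
 * the calling context and wake-up protocol are assumptions. */
#include <linux/sched.h>
#include <linux/wait.h>
#include <litmus/litmus.h>
#include <litmus/wait.h>

static void demo_block_in_prio_order(wait_queue_head_t *wq)
{
	prio_wait_queue_t entry;

	/* earlier priority point first; tie_breaker 0 for simplicity */
	init_prio_waitqueue_entry(&entry, current, get_deadline(current));

	set_task_state(current, TASK_UNINTERRUPTIBLE);
	add_wait_queue_prio_exclusive(wq, &entry);

	/* the resource owner is assumed to dequeue the head waiter with
	 * __waitqueue_remove_first() and wake it when releasing the lock */
	schedule();
}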
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h new file mode 100644 index 00000000000..ed50bc809e8 --- /dev/null +++ b/include/trace/events/litmus.h | |||
| @@ -0,0 +1,423 @@ | |||
| 1 | /* | ||
| 2 | * LITMUS^RT kernel style scheduling tracepoints | ||
| 3 | */ | ||
| 4 | #undef TRACE_SYSTEM | ||
| 5 | #define TRACE_SYSTEM litmus | ||
| 6 | |||
| 7 | #if !defined(_SCHED_TASK_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ) | ||
| 8 | #define _SCHED_TASK_TRACEPOINT_H | ||
| 9 | |||
| 10 | #include <linux/tracepoint.h> | ||
| 11 | |||
| 12 | #include <litmus/litmus.h> | ||
| 13 | #include <litmus/rt_param.h> | ||
| 14 | TRACE_EVENT(litmus_task_param, | ||
| 15 | |||
| 16 | TP_PROTO(struct task_struct *t), | ||
| 17 | |||
| 18 | TP_ARGS(t), | ||
| 19 | |||
| 20 | TP_STRUCT__entry( | ||
| 21 | __field( pid_t, pid ) | ||
| 22 | __field( unsigned int, job ) | ||
| 23 | __field( unsigned long long, wcet ) | ||
| 24 | __field( unsigned long long, period ) | ||
| 25 | __field( unsigned long long, phase ) | ||
| 26 | __field( int, partition ) | ||
| 27 | ), | ||
| 28 | |||
| 29 | TP_fast_assign( | ||
| 30 | __entry->pid = t ? t->pid : 0; | ||
| 31 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | ||
| 32 | __entry->wcet = get_exec_cost(t); | ||
| 33 | __entry->period = get_rt_period(t); | ||
| 34 | __entry->phase = get_rt_phase(t); | ||
| 35 | __entry->partition = get_partition(t); | ||
| 36 | ), | ||
| 37 | |||
| 38 | TP_printk("period(%d, %Lu).\nwcet(%d, %Lu).\n", | ||
| 39 | __entry->pid, __entry->period, | ||
| 40 | __entry->pid, __entry->wcet) | ||
| 41 | ); | ||
| 42 | |||
| 43 | /* | ||
| 44 | * Tracing jobs release | ||
| 45 | */ | ||
| 46 | TRACE_EVENT(litmus_task_release, | ||
| 47 | |||
| 48 | TP_PROTO(struct task_struct *t), | ||
| 49 | |||
| 50 | TP_ARGS(t), | ||
| 51 | |||
| 52 | TP_STRUCT__entry( | ||
| 53 | __field( pid_t, pid ) | ||
| 54 | __field( unsigned int, job ) | ||
| 55 | __field( unsigned long long, release ) | ||
| 56 | __field( unsigned long long, deadline ) | ||
| 57 | ), | ||
| 58 | |||
| 59 | TP_fast_assign( | ||
| 60 | __entry->pid = t ? t->pid : 0; | ||
| 61 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | ||
| 62 | __entry->release = get_release(t); | ||
| 63 | __entry->deadline = get_deadline(t); | ||
| 64 | ), | ||
| 65 | |||
| 66 | TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n", | ||
| 67 | __entry->pid, __entry->job, __entry->release, | ||
| 68 | __entry->pid, __entry->job, __entry->deadline) | ||
| 69 | ); | ||
| 70 | |||
| 71 | /* | ||
| 72 | * Tracepoint for switching to new task | ||
| 73 | */ | ||
| 74 | TRACE_EVENT(litmus_switch_to, | ||
| 75 | |||
| 76 | TP_PROTO(struct task_struct *t), | ||
| 77 | |||
| 78 | TP_ARGS(t), | ||
| 79 | |||
| 80 | TP_STRUCT__entry( | ||
| 81 | __field( pid_t, pid ) | ||
| 82 | __field( unsigned int, job ) | ||
| 83 | __field( unsigned long long, when ) | ||
| 84 | __field( unsigned long long, exec_time ) | ||
| 85 | ), | ||
| 86 | |||
| 87 | TP_fast_assign( | ||
| 88 | __entry->pid = is_realtime(t) ? t->pid : 0; | ||
| 89 | __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0; | ||
| 90 | __entry->when = litmus_clock(); | ||
| 91 | __entry->exec_time = get_exec_time(t); | ||
| 92 | ), | ||
| 93 | |||
| 94 | TP_printk("switch_to(job(%u, %u)): %Lu (exec: %Lu)\n", | ||
| 95 | __entry->pid, __entry->job, | ||
| 96 | __entry->when, __entry->exec_time) | ||
| 97 | ); | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Tracepoint for switching away previous task | ||
| 101 | */ | ||
| 102 | TRACE_EVENT(litmus_switch_away, | ||
| 103 | |||
| 104 | TP_PROTO(struct task_struct *t), | ||
| 105 | |||
| 106 | TP_ARGS(t), | ||
| 107 | |||
| 108 | TP_STRUCT__entry( | ||
| 109 | __field( pid_t, pid ) | ||
| 110 | __field( unsigned int, job ) | ||
| 111 | __field( unsigned long long, when ) | ||
| 112 | __field( unsigned long long, exec_time ) | ||
| 113 | ), | ||
| 114 | |||
| 115 | TP_fast_assign( | ||
| 116 | __entry->pid = is_realtime(t) ? t->pid : 0; | ||
| 117 | __entry->job = is_realtime(t) ? t->rt_param.job_params.job_no : 0; | ||
| 118 | __entry->when = litmus_clock(); | ||
| 119 | __entry->exec_time = get_exec_time(t); | ||
| 120 | ), | ||
| 121 | |||
| 122 | TP_printk("switch_away(job(%u, %u)): %Lu (exec: %Lu)\n", | ||
| 123 | __entry->pid, __entry->job, | ||
| 124 | __entry->when, __entry->exec_time) | ||
| 125 | ); | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Tracing jobs completion | ||
| 129 | */ | ||
| 130 | TRACE_EVENT(litmus_task_completion, | ||
| 131 | |||
| 132 | TP_PROTO(struct task_struct *t, unsigned long forced), | ||
| 133 | |||
| 134 | TP_ARGS(t, forced), | ||
| 135 | |||
| 136 | TP_STRUCT__entry( | ||
| 137 | __field( pid_t, pid ) | ||
| 138 | __field( unsigned int, job ) | ||
| 139 | __field( unsigned long long, when ) | ||
| 140 | __field( unsigned long, forced ) | ||
| 141 | ), | ||
| 142 | |||
| 143 | TP_fast_assign( | ||
| 144 | __entry->pid = t ? t->pid : 0; | ||
| 145 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | ||
| 146 | __entry->when = litmus_clock(); | ||
| 147 | __entry->forced = forced; | ||
| 148 | ), | ||
| 149 | |||
| 150 | TP_printk("completed(job(%u, %u)): %Lu (forced: %lu)\n", | ||
| 151 | __entry->pid, __entry->job, | ||
| 152 | __entry->when, __entry->forced) | ||
| 153 | ); | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Trace blocking tasks. | ||
| 157 | */ | ||
| 158 | TRACE_EVENT(litmus_task_block, | ||
| 159 | |||
| 160 | TP_PROTO(struct task_struct *t, int lid), | ||
| 161 | |||
| 162 | TP_ARGS(t, lid), | ||
| 163 | |||
| 164 | TP_STRUCT__entry( | ||
| 165 | __field( pid_t, pid ) | ||
| 166 | __field( int, lid ) | ||
| 167 | __field( unsigned long long, when ) | ||
| 168 | ), | ||
| 169 | |||
| 170 | TP_fast_assign( | ||
| 171 | __entry->pid = t ? t->pid : 0; | ||
| 172 | __entry->lid = lid; | ||
| 173 | __entry->when = litmus_clock(); | ||
| 174 | ), | ||
| 175 | |||
| 176 | TP_printk("(%u) blocks on %d: %Lu\n", __entry->pid, | ||
| 177 | __entry->lid, __entry->when) | ||
| 178 | ); | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Lock events | ||
| 182 | */ | ||
| 183 | TRACE_EVENT(litmus_resource_acquire, | ||
| 184 | |||
| 185 | TP_PROTO(struct task_struct *t, int lid), | ||
| 186 | |||
| 187 | TP_ARGS(t, lid), | ||
| 188 | |||
| 189 | TP_STRUCT__entry( | ||
| 190 | __field( pid_t, pid ) | ||
| 191 | __field( int, lid ) | ||
| 192 | __field( unsigned long long, when ) | ||
| 193 | ), | ||
| 194 | |||
| 195 | TP_fast_assign( | ||
| 196 | __entry->pid = t ? t->pid : 0; | ||
| 197 | __entry->lid = lid; | ||
| 198 | __entry->when = litmus_clock(); | ||
| 199 | ), | ||
| 200 | |||
| 201 | TP_printk("(%u) acquires %d: %Lu\n", __entry->pid, | ||
| 202 | __entry->lid, __entry->when) | ||
| 203 | ); | ||
| 204 | |||
| 205 | TRACE_EVENT(litmus_resource_release, | ||
| 206 | |||
| 207 | TP_PROTO(struct task_struct *t, int lid), | ||
| 208 | |||
| 209 | TP_ARGS(t, lid), | ||
| 210 | |||
| 211 | TP_STRUCT__entry( | ||
| 212 | __field( pid_t, pid ) | ||
| 213 | __field( int, lid ) | ||
| 214 | __field( unsigned long long, when ) | ||
| 215 | ), | ||
| 216 | |||
| 217 | TP_fast_assign( | ||
| 218 | __entry->pid = t ? t->pid : 0; | ||
| 219 | __entry->lid = lid; | ||
| 220 | __entry->when = litmus_clock(); | ||
| 221 | ), | ||
| 222 | |||
| 223 | TP_printk("(%u) releases %d: %Lu\n", __entry->pid, | ||
| 224 | __entry->lid, __entry->when) | ||
| 225 | ); | ||
| 226 | |||
| 227 | /* | ||
| 228 | * Tracing jobs resume | ||
| 229 | */ | ||
| 230 | TRACE_EVENT(litmus_task_resume, | ||
| 231 | |||
| 232 | TP_PROTO(struct task_struct *t, int lid), | ||
| 233 | |||
| 234 | TP_ARGS(t, lid), | ||
| 235 | |||
| 236 | TP_STRUCT__entry( | ||
| 237 | __field( pid_t, pid ) | ||
| 238 | __field( int, lid ) | ||
| 239 | __field( unsigned int, job ) | ||
| 240 | __field( unsigned long long, when ) | ||
| 241 | ), | ||
| 242 | |||
| 243 | TP_fast_assign( | ||
| 244 | __entry->pid = t ? t->pid : 0; | ||
| 245 | __entry->job = t ? t->rt_param.job_params.job_no : 0; | ||
| 246 | __entry->when = litmus_clock(); | ||
| 247 | __entry->lid = lid; | ||
| 248 | ), | ||
| 249 | |||
| 250 | TP_printk("resume(job(%u, %u)) on %d: %Lu\n", | ||
| 251 | __entry->pid, __entry->job, | ||
| 252 | __entry->lid, __entry->when) | ||
| 253 | ); | ||
| 254 | |||
| 255 | /* | ||
| 256 | * Trace synchronous release | ||
| 257 | */ | ||
| 258 | TRACE_EVENT(litmus_sys_release, | ||
| 259 | |||
| 260 | TP_PROTO(unsigned long long *start), | ||
| 261 | |||
| 262 | TP_ARGS(start), | ||
| 263 | |||
| 264 | TP_STRUCT__entry( | ||
| 265 | __field( unsigned long long, rel ) | ||
| 266 | __field( unsigned long long, when ) | ||
| 267 | ), | ||
| 268 | |||
| 269 | TP_fast_assign( | ||
| 270 | __entry->rel = *start; | ||
| 271 | __entry->when = litmus_clock(); | ||
| 272 | ), | ||
| 273 | |||
| 274 | TP_printk("SynRelease(%Lu) at %Lu\n", __entry->rel, __entry->when) | ||
| 275 | ); | ||
| 276 | |||
| 277 | /* | ||
| 278 | * Containers | ||
| 279 | */ | ||
| 280 | TRACE_EVENT(litmus_container_param, | ||
| 281 | |||
| 282 | TP_PROTO(int cid, const char *name), | ||
| 283 | |||
| 284 | TP_ARGS(cid, name), | ||
| 285 | |||
| 286 | TP_STRUCT__entry( | ||
| 287 | __field( int, cid ) | ||
| 288 | __array( char, name, TASK_COMM_LEN ) | ||
| 289 | ), | ||
| 290 | |||
| 291 | TP_fast_assign( | ||
| 292 | memcpy(__entry->name, name, TASK_COMM_LEN); | ||
| 293 | __entry->cid = cid; | ||
| 294 | ), | ||
| 295 | |||
| 296 | TP_printk("container, name: %s, id: %d\n", __entry->name, __entry->cid) | ||
| 297 | ); | ||
| 298 | |||
| 299 | TRACE_EVENT(litmus_server_param, | ||
| 300 | |||
| 301 | TP_PROTO(int sid, int cid, unsigned long long wcet, unsigned long long period), | ||
| 302 | |||
| 303 | TP_ARGS(sid, cid, wcet, period), | ||
| 304 | |||
| 305 | TP_STRUCT__entry( | ||
| 306 | __field( int, sid ) | ||
| 307 | __field( int, cid ) | ||
| 308 | __field( unsigned long long, wcet ) | ||
| 309 | __field( unsigned long long, period ) | ||
| 310 | ), | ||
| 311 | |||
| 312 | TP_fast_assign( | ||
| 313 | __entry->cid = cid; | ||
| 314 | __entry->sid = sid; | ||
| 315 | __entry->wcet = wcet; | ||
| 316 | __entry->period = period; | ||
| 317 | ), | ||
| 318 | |||
| 319 | TP_printk("server(%llu, %llu), sid: %llu, cont: %llu\n", | ||
| 320 | __entry->wcet, __entry->period, __entry->sid, __entry->cid) | ||
| 321 | ); | ||
| 322 | |||
| 323 | TRACE_EVENT(litmus_server_switch_to, | ||
| 324 | |||
| 325 | TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob, int cpu), | ||
| 326 | |||
| 327 | TP_ARGS(sid, job, tid, tjob, cpu), | ||
| 328 | |||
| 329 | TP_STRUCT__entry( | ||
| 330 | __field( int, sid) | ||
| 331 | __field( unsigned int, job) | ||
| 332 | __field( int, tid) | ||
| 333 | __field( unsigned int, tjob) | ||
| 334 | __field( int, cpu) | ||
| 335 | ), | ||
| 336 | |||
| 337 | TP_fast_assign( | ||
| 338 | __entry->sid = sid; | ||
| 339 | __entry->tid = tid; | ||
| 340 | __entry->job = job; | ||
| 341 | __entry->tjob = tjob; | ||
| 342 | __entry->cpu = cpu; | ||
| 343 | ), | ||
| 344 | |||
| 345 | TP_printk("switch_to(server(%d, %u)): (%d, %d) on %d\n", | ||
| 346 | __entry->sid, __entry->job, __entry->tid, __entry->tjob, __entry->cpu) | ||
| 347 | ); | ||
| 348 | |||
| 349 | TRACE_EVENT(litmus_server_switch_away, | ||
| 350 | |||
| 351 | TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob, int cpu), | ||
| 352 | |||
| 353 | TP_ARGS(sid, job, tid, tjob, cpu), | ||
| 354 | |||
| 355 | TP_STRUCT__entry( | ||
| 356 | __field( int, sid) | ||
| 357 | __field( unsigned int, job) | ||
| 358 | __field( int, tid) | ||
| 359 | __field( unsigned int, tjob) | ||
| 360 | __field( int, cpu) | ||
| 361 | ), | ||
| 362 | |||
| 363 | TP_fast_assign( | ||
| 364 | __entry->sid = sid; | ||
| 365 | __entry->tid = tid; | ||
| 366 | __entry->job = job; | ||
| 367 | __entry->tjob = tjob; | ||
| 368 | __entry->cpu = cpu; | ||
| 369 | ), | ||
| 370 | |||
| 371 | TP_printk("switch_away(server(%d, %u)): (%d, %d) on %d\n", | ||
| 372 | __entry->sid, __entry->job, __entry->tid, __entry->tjob, __entry->cpu) | ||
| 373 | ); | ||
| 374 | |||
| 375 | TRACE_EVENT(litmus_server_release, | ||
| 376 | |||
| 377 | TP_PROTO(int sid, unsigned int job, | ||
| 378 | unsigned long long release, | ||
| 379 | unsigned long long deadline), | ||
| 380 | |||
| 381 | TP_ARGS(sid, job, release, deadline), | ||
| 382 | |||
| 383 | TP_STRUCT__entry( | ||
| 384 | __field( int, sid) | ||
| 385 | __field( unsigned int, job) | ||
| 386 | __field( unsigned long long, release) | ||
| 387 | __field( unsigned long long, deadline) | ||
| 388 | ), | ||
| 389 | |||
| 390 | TP_fast_assign( | ||
| 391 | __entry->sid = sid; | ||
| 392 | __entry->job = job; | ||
| 393 | __entry->release = release; | ||
| 394 | __entry->deadline = deadline; | ||
| 395 | ), | ||
| 396 | |||
| 397 | TP_printk("release(server(%d, %u)), release: %llu, deadline: %llu\n", | ||
| 398 | __entry->sid, __entry->job, __entry->release, __entry->deadline) | ||
| 399 | ); | ||
| 400 | |||
| 401 | TRACE_EVENT(litmus_server_completion, | ||
| 402 | |||
| 403 | TP_PROTO(int sid, unsigned int job), | ||
| 404 | |||
| 405 | TP_ARGS(sid, job), | ||
| 406 | |||
| 407 | TP_STRUCT__entry( | ||
| 408 | __field( int, sid) | ||
| 409 | __field( unsigned int, job) | ||
| 410 | ), | ||
| 411 | |||
| 412 | TP_fast_assign( | ||
| 413 | __entry->sid = sid; | ||
| 414 | __entry->job = job; | ||
| 415 | ), | ||
| 416 | |||
| 417 | TP_printk("completion(server(%d, %d))\n", __entry->sid, __entry->job) | ||
| 418 | ); | ||
| 419 | |||
| 420 | #endif /* _SCHED_TASK_TRACEPOINT_H */ | ||
| 421 | |||
| 422 | /* Must stay outside the protection */ | ||
| 423 | #include <trace/define_trace.h> | ||
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 533c49f4804..4d6f3474e8f 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/ftrace_event.h> | 19 | #include <linux/ftrace_event.h> |
| 20 | #include <litmus/litmus.h> | ||
| 20 | 21 | ||
| 21 | /* | 22 | /* |
| 22 | * DECLARE_EVENT_CLASS can be used to add a generic function | 23 | * DECLARE_EVENT_CLASS can be used to add a generic function |
| @@ -54,7 +55,7 @@ | |||
| 54 | #define __string(item, src) __dynamic_array(char, item, -1) | 55 | #define __string(item, src) __dynamic_array(char, item, -1) |
| 55 | 56 | ||
| 56 | #undef TP_STRUCT__entry | 57 | #undef TP_STRUCT__entry |
| 57 | #define TP_STRUCT__entry(args...) args | 58 | #define TP_STRUCT__entry(args...) args __field( unsigned long long, __rt_ts ) |
| 58 | 59 | ||
| 59 | #undef DECLARE_EVENT_CLASS | 60 | #undef DECLARE_EVENT_CLASS |
| 60 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ | 61 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ |
| @@ -507,7 +508,7 @@ static inline notrace int ftrace_get_offsets_##call( \ | |||
| 507 | strcpy(__get_str(dst), src); | 508 | strcpy(__get_str(dst), src); |
| 508 | 509 | ||
| 509 | #undef TP_fast_assign | 510 | #undef TP_fast_assign |
| 510 | #define TP_fast_assign(args...) args | 511 | #define TP_fast_assign(args...) args; __entry->__rt_ts = litmus_clock(); |
| 511 | 512 | ||
| 512 | #undef TP_perf_assign | 513 | #undef TP_perf_assign |
| 513 | #define TP_perf_assign(args...) | 514 | #define TP_perf_assign(args...) |
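The two overrides above mean that every ftrace event built after this header carries an extra trailing __rt_ts field, stamped with litmus_clock() right after the event's own assignments. Roughly, the generated record for an arbitrary event ends up equivalent to the hand-expanded sketch below (illustrative only, not the literal macro output):

/* Hand-expanded sketch of what the overrides do to an arbitrary event
 * "foo" with one int field; not the literal code the ftrace macros emit. */
struct ftrace_raw_foo {
	struct trace_entry	ent;
	int			bar;
	unsigned long long	__rt_ts;  /* appended by the TP_STRUCT__entry override */
};

static void ftrace_raw_event_foo_assign_sketch(struct ftrace_raw_foo *__entry, int bar)
{
	__entry->bar = bar;			/* the event's own TP_fast_assign body */
	__entry->__rt_ts = litmus_clock();	/* appended by the TP_fast_assign override */
}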
