diff options
author | Zelin Tong <ztong@cs.unc.edu> | 2021-10-21 23:49:10 -0400 |
---|---|---|
committer | Zelin Tong <ztong@cs.unc.edu> | 2021-10-21 23:49:10 -0400 |
commit | 8119553094cd2d3f6985bfbd97b79525b03a3b20 (patch) | |
tree | 1c6ef46528b55237ae13e0f5dc1656cd81febbfd | |
parent | ee55e03db8da32a865cfcf292d06c9ca0a588439 (diff) |
Added support for Budgetable OMLP and Budgetable short FMLP to GSN-EDF
-rw-r--r-- | include/litmus/ctrlpage.h | 8 | ||||
-rw-r--r-- | include/litmus/fdso.h | 4 | ||||
-rw-r--r-- | include/litmus/locking.h | 4 | ||||
-rw-r--r-- | include/litmus/trace.h | 6 | ||||
-rw-r--r-- | litmus/ctrldev.c | 16 | ||||
-rw-r--r-- | litmus/fdso.c | 1 | ||||
-rw-r--r-- | litmus/locking.c | 80 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 390 |
8 files changed, 502 insertions, 7 deletions
diff --git a/include/litmus/ctrlpage.h b/include/litmus/ctrlpage.h index f7b03e1aedd6..f523347fafd7 100644 --- a/include/litmus/ctrlpage.h +++ b/include/litmus/ctrlpage.h | |||
@@ -69,7 +69,10 @@ typedef enum { | |||
69 | LRT_od_open, | 69 | LRT_od_open, |
70 | LRT_od_close, | 70 | LRT_od_close, |
71 | LRT_litmus_lock, | 71 | LRT_litmus_lock, |
72 | LRT_litmus_budgeted_lock, | ||
72 | LRT_litmus_unlock, | 73 | LRT_litmus_unlock, |
74 | LRT_litmus_cs_timer_start, | ||
75 | LRT_litmus_cs_timer_stop, | ||
73 | LRT_wait_for_job_release, | 76 | LRT_wait_for_job_release, |
74 | LRT_wait_for_ts_release, | 77 | LRT_wait_for_ts_release, |
75 | LRT_release_ts, | 78 | LRT_release_ts, |
@@ -88,6 +91,11 @@ union litmus_syscall_args { | |||
88 | } reservation_create; | 91 | } reservation_create; |
89 | 92 | ||
90 | struct { | 93 | struct { |
94 | uint32_t sem_od; | ||
95 | lt_t value; | ||
96 | } lock_budgeting; | ||
97 | |||
98 | struct { | ||
91 | uint32_t fd; | 99 | uint32_t fd; |
92 | uint32_t obj_type; | 100 | uint32_t obj_type; |
93 | uint32_t obj_id; | 101 | uint32_t obj_id; |
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h index fd9b30dbfb34..49513497766f 100644 --- a/include/litmus/fdso.h +++ b/include/litmus/fdso.h | |||
@@ -27,7 +27,9 @@ typedef enum { | |||
27 | 27 | ||
28 | DFLP_SEM = 6, | 28 | DFLP_SEM = 6, |
29 | 29 | ||
30 | MAX_OBJ_TYPE = 6 | 30 | OMLP_SEM = 7, |
31 | |||
32 | MAX_OBJ_TYPE = 7 | ||
31 | } obj_type_t; | 33 | } obj_type_t; |
32 | 34 | ||
33 | struct inode_obj_id { | 35 | struct inode_obj_id { |
diff --git a/include/litmus/locking.h b/include/litmus/locking.h index 4d7b870cb443..95893c99664c 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h | |||
@@ -19,8 +19,12 @@ struct litmus_lock_ops { | |||
19 | 19 | ||
20 | /* Current tries to lock/unlock this lock (mandatory methods). */ | 20 | /* Current tries to lock/unlock this lock (mandatory methods). */ |
21 | int (*lock)(struct litmus_lock*); | 21 | int (*lock)(struct litmus_lock*); |
22 | int (*budgeted_lock)(struct litmus_lock*, lt_t fz_len); | ||
22 | int (*unlock)(struct litmus_lock*); | 23 | int (*unlock)(struct litmus_lock*); |
23 | 24 | ||
25 | int (*timer_start)(struct litmus_lock*, lt_t budget); | ||
26 | int (*timer_stop)(struct litmus_lock*); | ||
27 | |||
24 | /* The lock is no longer being referenced (mandatory method). */ | 28 | /* The lock is no longer being referenced (mandatory method). */ |
25 | void (*deallocate)(struct litmus_lock*); | 29 | void (*deallocate)(struct litmus_lock*); |
26 | }; | 30 | }; |
diff --git a/include/litmus/trace.h b/include/litmus/trace.h index 2646136e3881..4275f9f8f44b 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h | |||
@@ -110,6 +110,12 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w | |||
110 | #define TS_UNLOCK_START CPU_TIMESTAMP_CUR(40) | 110 | #define TS_UNLOCK_START CPU_TIMESTAMP_CUR(40) |
111 | #define TS_UNLOCK_END CPU_TIMESTAMP_CUR(41) | 111 | #define TS_UNLOCK_END CPU_TIMESTAMP_CUR(41) |
112 | 112 | ||
113 | #define TS_START_TIMER_START CPU_TIMESTAMP_CUR(50) | ||
114 | #define TS_START_TIMER_END CPU_TIMESTAMP_CUR(51) | ||
115 | |||
116 | #define TS_STOP_TIMER_START CPU_TIMESTAMP_CUR(60) | ||
117 | #define TS_STOP_TIMER_END CPU_TIMESTAMP_CUR(61) | ||
118 | |||
113 | #define TS_SCHED_START CPU_DTIMESTAMP(100, TSK_UNKNOWN) /* we only | 119 | #define TS_SCHED_START CPU_DTIMESTAMP(100, TSK_UNKNOWN) /* we only |
114 | * care | 120 | * care |
115 | * about | 121 | * about |
diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c index b649a27f5041..639003dc1d72 100644 --- a/litmus/ctrldev.c +++ b/litmus/ctrldev.c | |||
@@ -129,7 +129,10 @@ asmlinkage long sys_od_open(int fd, int type, int obj_id, void* __user config); | |||
129 | asmlinkage long sys_od_close(int od); | 129 | asmlinkage long sys_od_close(int od); |
130 | asmlinkage long sys_complete_job(void); | 130 | asmlinkage long sys_complete_job(void); |
131 | asmlinkage long sys_litmus_lock(int lock_od); | 131 | asmlinkage long sys_litmus_lock(int lock_od); |
132 | asmlinkage long sys_litmus_budgeted_lock(int lock_od, lt_t fz_len); | ||
132 | asmlinkage long sys_litmus_unlock(int lock_od); | 133 | asmlinkage long sys_litmus_unlock(int lock_od); |
134 | asmlinkage long sys_litmus_cs_timer_start(int lock_od, lt_t budget); | ||
135 | asmlinkage long sys_litmus_cs_timer_stop(int lock_od); | ||
133 | asmlinkage long sys_wait_for_job_release(unsigned int job); | 136 | asmlinkage long sys_wait_for_job_release(unsigned int job); |
134 | asmlinkage long sys_wait_for_ts_release(void); | 137 | asmlinkage long sys_wait_for_ts_release(void); |
135 | asmlinkage long sys_release_ts(lt_t __user *__when); | 138 | asmlinkage long sys_release_ts(lt_t __user *__when); |
@@ -154,6 +157,8 @@ static long litmus_ctrl_ioctl(struct file *filp, | |||
154 | case LRT_reservation_create: | 157 | case LRT_reservation_create: |
155 | case LRT_get_current_budget: | 158 | case LRT_get_current_budget: |
156 | case LRT_od_open: | 159 | case LRT_od_open: |
160 | case LRT_litmus_budgeted_lock: | ||
161 | case LRT_litmus_cs_timer_start: | ||
157 | /* multiple arguments => need to get args via pointer */ | 162 | /* multiple arguments => need to get args via pointer */ |
158 | /* get syscall parameters */ | 163 | /* get syscall parameters */ |
159 | if (copy_from_user(&syscall_args, (void*) arg, | 164 | if (copy_from_user(&syscall_args, (void*) arg, |
@@ -184,6 +189,14 @@ static long litmus_ctrl_ioctl(struct file *filp, | |||
184 | syscall_args.od_open.obj_type, | 189 | syscall_args.od_open.obj_type, |
185 | syscall_args.od_open.obj_id, | 190 | syscall_args.od_open.obj_id, |
186 | syscall_args.od_open.config); | 191 | syscall_args.od_open.config); |
192 | case LRT_litmus_budgeted_lock: | ||
193 | return sys_litmus_budgeted_lock( | ||
194 | syscall_args.lock_budgeting.sem_od, | ||
195 | syscall_args.lock_budgeting.value); | ||
196 | case LRT_litmus_cs_timer_start: | ||
197 | return sys_litmus_cs_timer_start( | ||
198 | syscall_args.lock_budgeting.sem_od, | ||
199 | syscall_args.lock_budgeting.value); | ||
187 | default: | 200 | default: |
188 | printk(KERN_DEBUG "ctrldev: strange od_open cmd: %d\n", cmd); | 201 | printk(KERN_DEBUG "ctrldev: strange od_open cmd: %d\n", cmd); |
189 | return -EINVAL; | 202 | return -EINVAL; |
@@ -205,6 +218,9 @@ static long litmus_ctrl_ioctl(struct file *filp, | |||
205 | case LRT_litmus_unlock: | 218 | case LRT_litmus_unlock: |
206 | return sys_litmus_unlock(arg); | 219 | return sys_litmus_unlock(arg); |
207 | 220 | ||
221 | case LRT_litmus_cs_timer_stop: | ||
222 | return sys_litmus_cs_timer_stop(arg); | ||
223 | |||
208 | case LRT_wait_for_job_release: | 224 | case LRT_wait_for_job_release: |
209 | return sys_wait_for_job_release(arg); | 225 | return sys_wait_for_job_release(arg); |
210 | 226 | ||
diff --git a/litmus/fdso.c b/litmus/fdso.c index 0ff54e41839c..e96e575d912e 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c | |||
@@ -28,6 +28,7 @@ static const struct fdso_ops* fdso_ops[] = { | |||
28 | &generic_lock_ops, /* DPCP_SEM */ | 28 | &generic_lock_ops, /* DPCP_SEM */ |
29 | &generic_lock_ops, /* PCP_SEM */ | 29 | &generic_lock_ops, /* PCP_SEM */ |
30 | &generic_lock_ops, /* DFLP_SEM */ | 30 | &generic_lock_ops, /* DFLP_SEM */ |
31 | &generic_lock_ops, /* OMLP_SEM */ | ||
31 | }; | 32 | }; |
32 | 33 | ||
33 | static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) | 34 | static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) |
diff --git a/litmus/locking.c b/litmus/locking.c index 183d4ac68fe7..64ed8026a510 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -97,6 +97,34 @@ asmlinkage long sys_litmus_lock(int lock_od) | |||
97 | return err; | 97 | return err; |
98 | } | 98 | } |
99 | 99 | ||
100 | asmlinkage long sys_litmus_budgeted_lock(int lock_od, lt_t fz_len) | ||
101 | { | ||
102 | long err = -EINVAL; | ||
103 | struct od_table_entry* entry; | ||
104 | struct litmus_lock* l; | ||
105 | |||
106 | TS_SYSCALL_IN_START; | ||
107 | |||
108 | TS_SYSCALL_IN_END; | ||
109 | |||
110 | TS_LOCK_START; | ||
111 | |||
112 | entry = get_entry_for_od(lock_od); | ||
113 | if (entry && is_lock(entry)) { | ||
114 | l = get_lock(entry); | ||
115 | TRACE_CUR("attempts to lock 0x%p\n", l); | ||
116 | err = l->ops->budgeted_lock(l, fz_len); | ||
117 | } | ||
118 | |||
119 | /* Note: task may have been suspended or preempted in between! Take | ||
120 | * this into account when computing overheads. */ | ||
121 | TS_LOCK_END; | ||
122 | |||
123 | TS_SYSCALL_OUT_START; | ||
124 | |||
125 | return err; | ||
126 | } | ||
127 | |||
100 | asmlinkage long sys_litmus_unlock(int lock_od) | 128 | asmlinkage long sys_litmus_unlock(int lock_od) |
101 | { | 129 | { |
102 | long err = -EINVAL; | 130 | long err = -EINVAL; |
@@ -125,6 +153,58 @@ asmlinkage long sys_litmus_unlock(int lock_od) | |||
125 | return err; | 153 | return err; |
126 | } | 154 | } |
127 | 155 | ||
156 | asmlinkage long sys_litmus_cs_timer_start(int lock_od, lt_t budget) | ||
157 | { | ||
158 | long err = -EINVAL; | ||
159 | struct od_table_entry* entry; | ||
160 | struct litmus_lock* l; | ||
161 | |||
162 | TS_SYSCALL_IN_START; | ||
163 | |||
164 | TS_SYSCALL_IN_END; | ||
165 | |||
166 | TS_START_TIMER_START; | ||
167 | |||
168 | entry = get_entry_for_od(lock_od); | ||
169 | if (entry && is_lock(entry)) { | ||
170 | l = get_lock(entry); | ||
171 | TRACE_CUR("attempts to start timer of 0x%p\n", l); | ||
172 | err = l->ops->timer_start(l, budget); | ||
173 | } | ||
174 | |||
175 | TS_START_TIMER_END; | ||
176 | |||
177 | TS_SYSCALL_OUT_START; | ||
178 | |||
179 | return err; | ||
180 | } | ||
181 | |||
182 | asmlinkage long sys_litmus_cs_timer_stop(int lock_od) | ||
183 | { | ||
184 | long err = -EINVAL; | ||
185 | struct od_table_entry* entry; | ||
186 | struct litmus_lock* l; | ||
187 | |||
188 | TS_SYSCALL_IN_START; | ||
189 | |||
190 | TS_SYSCALL_IN_END; | ||
191 | |||
192 | TS_STOP_TIMER_START; | ||
193 | |||
194 | entry = get_entry_for_od(lock_od); | ||
195 | if (entry && is_lock(entry)) { | ||
196 | l = get_lock(entry); | ||
197 | TRACE_CUR("attempts to stop timer of 0x%p\n", l); | ||
198 | err = l->ops->timer_stop(l); | ||
199 | } | ||
200 | |||
201 | TS_STOP_TIMER_END; | ||
202 | |||
203 | TS_SYSCALL_OUT_START; | ||
204 | |||
205 | return err; | ||
206 | } | ||
207 | |||
128 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) | 208 | struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) |
129 | { | 209 | { |
130 | wait_queue_entry_t* q; | 210 | wait_queue_entry_t* q; |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index b76e46372c6a..d4635c74df1c 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/sched/signal.h> | 14 | #include <linux/sched/signal.h> |
15 | #include <linux/sched/topology.h> | 15 | #include <linux/sched/topology.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/sched/signal.h> | ||
17 | 18 | ||
18 | #include <litmus/debug_trace.h> | 19 | #include <litmus/debug_trace.h> |
19 | #include <litmus/litmus.h> | 20 | #include <litmus/litmus.h> |
@@ -22,6 +23,7 @@ | |||
22 | #include <litmus/edf_common.h> | 23 | #include <litmus/edf_common.h> |
23 | #include <litmus/sched_trace.h> | 24 | #include <litmus/sched_trace.h> |
24 | #include <litmus/trace.h> | 25 | #include <litmus/trace.h> |
26 | #include <litmus/wait.h> | ||
25 | 27 | ||
26 | #include <litmus/preempt.h> | 28 | #include <litmus/preempt.h> |
27 | #include <litmus/budget.h> | 29 | #include <litmus/budget.h> |
@@ -757,6 +759,9 @@ struct fmlp_semaphore { | |||
757 | 759 | ||
758 | /* FIFO queue of waiting tasks */ | 760 | /* FIFO queue of waiting tasks */ |
759 | wait_queue_head_t wait; | 761 | wait_queue_head_t wait; |
762 | |||
763 | struct hrtimer budgeting_timer; | ||
764 | lt_t enforce_time; | ||
760 | }; | 765 | }; |
761 | 766 | ||
762 | static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) | 767 | static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) |
@@ -764,6 +769,27 @@ static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) | |||
764 | return container_of(lock, struct fmlp_semaphore, litmus_lock); | 769 | return container_of(lock, struct fmlp_semaphore, litmus_lock); |
765 | } | 770 | } |
766 | 771 | ||
772 | static enum hrtimer_restart bfmlp_budget_enforce(struct hrtimer *timer) | ||
773 | { | ||
774 | struct fmlp_semaphore* sem = | ||
775 | container_of(timer, struct fmlp_semaphore, budgeting_timer); | ||
776 | struct task_struct* t = sem->owner; | ||
777 | |||
778 | |||
779 | if (t == current) | ||
780 | send_sig(31, t, 0); | ||
781 | /* up to the task to: | ||
782 | * -detect if CS completed | ||
783 | * -perform fix_state if needed | ||
784 | * -unlock | ||
785 | * -set job to preemptive again | ||
786 | * -decide what to do next | ||
787 | */ | ||
788 | TS_TIMER_LATENCY(sem->enforce_time); | ||
789 | |||
790 | return HRTIMER_NORESTART; | ||
791 | } | ||
792 | |||
767 | /* caller is responsible for locking */ | 793 | /* caller is responsible for locking */ |
768 | struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem, | 794 | struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem, |
769 | struct task_struct* skip) | 795 | struct task_struct* skip) |
@@ -903,6 +929,25 @@ out: | |||
903 | return err; | 929 | return err; |
904 | } | 930 | } |
905 | 931 | ||
932 | int gsnedf_fmlp_tstart(struct litmus_lock* l, lt_t budget) | ||
933 | { | ||
934 | struct fmlp_semaphore* sem = fmlp_from_lock(l); | ||
935 | |||
936 | sem->enforce_time = litmus_clock() + budget; | ||
937 | hrtimer_start(&sem->budgeting_timer, | ||
938 | ns_to_ktime(sem->enforce_time), | ||
939 | HRTIMER_MODE_ABS_PINNED_HARD); | ||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | int gsnedf_fmlp_tstop(struct litmus_lock* l) | ||
944 | { | ||
945 | struct fmlp_semaphore* sem = fmlp_from_lock(l); | ||
946 | |||
947 | hrtimer_cancel(&sem->budgeting_timer); | ||
948 | return 0; | ||
949 | } | ||
950 | |||
906 | int gsnedf_fmlp_close(struct litmus_lock* l) | 951 | int gsnedf_fmlp_close(struct litmus_lock* l) |
907 | { | 952 | { |
908 | struct task_struct *t = current; | 953 | struct task_struct *t = current; |
@@ -929,10 +974,12 @@ void gsnedf_fmlp_free(struct litmus_lock* lock) | |||
929 | } | 974 | } |
930 | 975 | ||
931 | static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { | 976 | static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { |
932 | .close = gsnedf_fmlp_close, | 977 | .close = gsnedf_fmlp_close, |
933 | .lock = gsnedf_fmlp_lock, | 978 | .lock = gsnedf_fmlp_lock, |
934 | .unlock = gsnedf_fmlp_unlock, | 979 | .unlock = gsnedf_fmlp_unlock, |
935 | .deallocate = gsnedf_fmlp_free, | 980 | .timer_start = gsnedf_fmlp_tstart, |
981 | .timer_stop = gsnedf_fmlp_tstop, | ||
982 | .deallocate = gsnedf_fmlp_free, | ||
936 | }; | 983 | }; |
937 | 984 | ||
938 | static struct litmus_lock* gsnedf_new_fmlp(void) | 985 | static struct litmus_lock* gsnedf_new_fmlp(void) |
@@ -947,6 +994,331 @@ static struct litmus_lock* gsnedf_new_fmlp(void) | |||
947 | sem->hp_waiter = NULL; | 994 | sem->hp_waiter = NULL; |
948 | init_waitqueue_head(&sem->wait); | 995 | init_waitqueue_head(&sem->wait); |
949 | sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops; | 996 | sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops; |
997 | hrtimer_init(&sem->budgeting_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); | ||
998 | sem->budgeting_timer.function = bfmlp_budget_enforce; | ||
999 | |||
1000 | return &sem->litmus_lock; | ||
1001 | } | ||
1002 | |||
1003 | /* ****************************** B-OMLP support ************************** */ | ||
1004 | |||
1005 | struct omlp_semaphore { | ||
1006 | struct litmus_lock litmus_lock; | ||
1007 | |||
1008 | /* current resource holder */ | ||
1009 | struct task_struct *owner; | ||
1010 | |||
1011 | /* highest-priority waiter */ | ||
1012 | struct task_struct *hp_waiter; | ||
1013 | |||
1014 | /* FIFO queue of waiting tasks */ | ||
1015 | wait_queue_head_t fifo_wait; | ||
1016 | /* Prio queue of waiting tasks */ | ||
1017 | wait_queue_head_t prio_wait; | ||
1018 | |||
1019 | /* How many slots remaining in FIFO queue? */ | ||
1020 | unsigned int num_free; | ||
1021 | |||
1022 | /* budgeting support */ | ||
1023 | struct hrtimer budgeting_timer; | ||
1024 | lt_t enforce_time; | ||
1025 | }; | ||
1026 | |||
1027 | static inline struct omlp_semaphore* omlp_from_lock(struct litmus_lock* lock) | ||
1028 | { | ||
1029 | return container_of(lock, struct omlp_semaphore, litmus_lock); | ||
1030 | } | ||
1031 | |||
1032 | static enum hrtimer_restart bomlp_budget_enforce(struct hrtimer *timer) | ||
1033 | { | ||
1034 | struct omlp_semaphore* sem = | ||
1035 | container_of(timer, struct omlp_semaphore, budgeting_timer); | ||
1036 | struct task_struct* t = sem->owner; | ||
1037 | |||
1038 | if (t == current) | ||
1039 | send_sig(31, t, 0); | ||
1040 | /* up to the task to: | ||
1041 | * -detect if CS completed | ||
1042 | * -perform fix_state if needed | ||
1043 | * -unlock | ||
1044 | * -set job to preemptive again | ||
1045 | * -decide what to do next | ||
1046 | */ | ||
1047 | TS_TIMER_LATENCY(sem->enforce_time); | ||
1048 | |||
1049 | return HRTIMER_NORESTART; | ||
1050 | } | ||
1051 | |||
1052 | /* caller is responsible for locking */ | ||
1053 | static struct task_struct* omlp_find_hp_waiter(struct omlp_semaphore* sem, | ||
1054 | struct task_struct* skip) | ||
1055 | { | ||
1056 | struct list_head* pos; | ||
1057 | struct task_struct *queued, *found = NULL; | ||
1058 | |||
1059 | /* check FIFO queue first */ | ||
1060 | list_for_each(pos, &sem->fifo_wait.head) { | ||
1061 | queued = (struct task_struct*) list_entry(pos, wait_queue_entry_t, | ||
1062 | entry)->private; | ||
1063 | |||
1064 | /* Compare task prios, find high prio task. */ | ||
1065 | if (queued != skip && edf_higher_prio(queued, found)) | ||
1066 | found = queued; | ||
1067 | } | ||
1068 | |||
1069 | /* check priority queue next */ | ||
1070 | if (waitqueue_active(&sem->prio_wait)) { | ||
1071 | /* first has highest priority */ | ||
1072 | pos = sem->prio_wait.head.next; | ||
1073 | queued = (struct task_struct*) list_entry(pos, wait_queue_entry_t, | ||
1074 | entry)->private; | ||
1075 | if (edf_higher_prio(queued, found)) | ||
1076 | found = queued; | ||
1077 | } | ||
1078 | |||
1079 | return found; | ||
1080 | } | ||
1081 | |||
1082 | /* already locked */ | ||
1083 | static void omlp_enqueue(struct omlp_semaphore* sem, prio_wait_queue_t* wait) | ||
1084 | { | ||
1085 | if (sem->num_free > 0) { | ||
1086 | /* there is space in the FIFO queue */ | ||
1087 | sem->num_free--; | ||
1088 | __add_wait_queue_entry_tail_exclusive(&sem->fifo_wait, &wait->wq); | ||
1089 | } else { | ||
1090 | /* nope, gotta go to the priority queue */ | ||
1091 | __add_wait_queue_prio_exclusive(&sem->prio_wait, wait); | ||
1092 | } | ||
1093 | } | ||
1094 | |||
1095 | /* already locked */ | ||
1096 | static int omlp_move(struct omlp_semaphore* sem) | ||
1097 | { | ||
1098 | struct list_head* first; | ||
1099 | |||
1100 | if (waitqueue_active(&sem->prio_wait)) { | ||
1101 | first = sem->prio_wait.head.next; | ||
1102 | list_move_tail(first, &sem->fifo_wait.head); | ||
1103 | return 1; | ||
1104 | } | ||
1105 | return 0; | ||
1106 | } | ||
1107 | |||
1108 | /* already locked */ | ||
1109 | static struct task_struct* omlp_dequeue(struct omlp_semaphore* sem) | ||
1110 | { | ||
1111 | struct task_struct* first = __waitqueue_remove_first(&sem->fifo_wait); | ||
1112 | |||
1113 | /* don't replace tmp with omlp_move! shortcircuiting will break omlp for m=1 */ | ||
1114 | int tmp = omlp_move(sem); | ||
1115 | if (first && !tmp) | ||
1116 | sem->num_free++; | ||
1117 | if (!first && tmp) | ||
1118 | first = __waitqueue_remove_first(&sem->fifo_wait); | ||
1119 | return first; | ||
1120 | } | ||
1121 | |||
1122 | int gsnedf_omlp_budgeted_lock(struct litmus_lock* l, lt_t fz_len) | ||
1123 | { | ||
1124 | struct task_struct* t = current; | ||
1125 | struct omlp_semaphore* sem = omlp_from_lock(l); | ||
1126 | unsigned long flags; | ||
1127 | lt_t budget_used; | ||
1128 | lt_t budget_remaining; | ||
1129 | prio_wait_queue_t pwq; | ||
1130 | |||
1131 | if (!is_realtime(t)) | ||
1132 | return -EPERM; | ||
1133 | |||
1134 | /* prevent nested lock acquisition -- not supported by global OMLP by default */ | ||
1135 | if (tsk_rt(t)->num_locks_held) | ||
1136 | return -EBUSY; | ||
1137 | |||
1138 | spin_lock_irqsave(&sem->fifo_wait.lock, flags); | ||
1139 | |||
1140 | if (sem->owner) { | ||
1141 | /* resource is not free => must suspend and wait */ | ||
1142 | litmus_current_budget(&budget_used, &budget_remaining); | ||
1143 | if (fz_len < budget_remaining) { | ||
1144 | /* Not enough budget for the CS => deny forbidden zone access */ | ||
1145 | TRACE_CUR("failed forbidden zone check, %llu / %llu budget needed\n", | ||
1146 | budget_remaining, fz_len); | ||
1147 | spin_unlock_irqrestore(&sem->fifo_wait.lock, flags); | ||
1148 | return -EACCES; | ||
1149 | } | ||
1150 | |||
1151 | init_prio_waitqueue_entry(&pwq, t, get_deadline(t)); | ||
1152 | |||
1153 | omlp_enqueue(sem, &pwq); | ||
1154 | |||
1155 | /* check if we need to activate priority inheritance */ | ||
1156 | if (edf_higher_prio(t, sem->hp_waiter)) { | ||
1157 | sem->hp_waiter = t; | ||
1158 | if (edf_higher_prio(sem->hp_waiter, sem->owner)) | ||
1159 | set_priority_inheritance(sem->owner, sem->hp_waiter); | ||
1160 | } | ||
1161 | |||
1162 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1163 | |||
1164 | TS_LOCK_SUSPEND; | ||
1165 | |||
1166 | /* release lock before sleeping */ | ||
1167 | spin_unlock_irqrestore(&sem->fifo_wait.lock, flags); | ||
1168 | |||
1169 | schedule(); | ||
1170 | |||
1171 | TS_LOCK_RESUME; | ||
1172 | } else { | ||
1173 | /* we now have lock */ | ||
1174 | sem->owner = t; | ||
1175 | spin_unlock_irqrestore(&sem->fifo_wait.lock, flags); | ||
1176 | } | ||
1177 | |||
1178 | make_np(t); | ||
1179 | tsk_rt(t)->num_locks_held++; | ||
1180 | |||
1181 | return 0; | ||
1182 | } | ||
1183 | |||
1184 | int gsnedf_omlp_lock(struct litmus_lock* l) | ||
1185 | { | ||
1186 | return gsnedf_omlp_budgeted_lock(l, ULLONG_MAX); | ||
1187 | } | ||
1188 | |||
1189 | int gsnedf_omlp_unlock(struct litmus_lock* l) | ||
1190 | { | ||
1191 | struct task_struct* t = current; | ||
1192 | struct omlp_semaphore* sem = omlp_from_lock(l); | ||
1193 | struct task_struct* next; | ||
1194 | unsigned long flags; | ||
1195 | |||
1196 | if (sem->owner != t) | ||
1197 | return -EINVAL; | ||
1198 | |||
1199 | hrtimer_cancel(&sem->budgeting_timer); | ||
1200 | |||
1201 | spin_lock_irqsave(&sem->fifo_wait.lock, flags); | ||
1202 | |||
1203 | tsk_rt(t)->num_locks_held--; | ||
1204 | |||
1205 | next = omlp_dequeue(sem); | ||
1206 | /* check if there are jobs waiting for this resource */ | ||
1207 | if (next) { | ||
1208 | /* next becomes the resource holder */ | ||
1209 | sem->owner = next; | ||
1210 | TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); | ||
1211 | |||
1212 | if (next == sem->hp_waiter) { | ||
1213 | TRACE_TASK(next, "was highest-prio waiter\n"); | ||
1214 | /* next has the highest priority --- it doesn't need to | ||
1215 | * inherit. However, we need to make sure that the | ||
1216 | * next-highest priority in the queue is reflected in | ||
1217 | * hp_waiter. */ | ||
1218 | sem->hp_waiter = omlp_find_hp_waiter(sem, next); | ||
1219 | if (sem->hp_waiter) | ||
1220 | TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n"); | ||
1221 | else | ||
1222 | TRACE("no further waiters\n"); | ||
1223 | } else { | ||
1224 | /* Well, if next is not the highest-priority waiter, | ||
1225 | * then it ought to inherit the highest-priority | ||
1226 | * waiter's priority. */ | ||
1227 | set_priority_inheritance(next, sem->hp_waiter); | ||
1228 | } | ||
1229 | |||
1230 | /* wake up next */ | ||
1231 | wake_up_process(next); | ||
1232 | } else { | ||
1233 | /* become available */ | ||
1234 | sem->owner = NULL; | ||
1235 | } | ||
1236 | |||
1237 | /* we make ourself preemptive again and lose priority inheritance (if any) */ | ||
1238 | take_np(t); | ||
1239 | clear_priority_inheritance(t); | ||
1240 | |||
1241 | spin_unlock_irqrestore(&sem->fifo_wait.lock, flags); | ||
1242 | |||
1243 | return 0; | ||
1244 | } | ||
1245 | |||
1246 | int gsnedf_omlp_tstart(struct litmus_lock* l, lt_t budget) | ||
1247 | { | ||
1248 | struct omlp_semaphore* sem = omlp_from_lock(l); | ||
1249 | |||
1250 | sem->enforce_time = litmus_clock() + budget; | ||
1251 | hrtimer_start(&sem->budgeting_timer, | ||
1252 | ns_to_ktime(sem->enforce_time), | ||
1253 | HRTIMER_MODE_ABS_PINNED_HARD); | ||
1254 | return 0; | ||
1255 | } | ||
1256 | |||
1257 | int gsnedf_omlp_tstop(struct litmus_lock* l) | ||
1258 | { | ||
1259 | struct omlp_semaphore* sem = omlp_from_lock(l); | ||
1260 | |||
1261 | hrtimer_cancel(&sem->budgeting_timer); | ||
1262 | return 0; | ||
1263 | } | ||
1264 | |||
1265 | int gsnedf_omlp_close(struct litmus_lock* l) | ||
1266 | { | ||
1267 | struct task_struct* t = current; | ||
1268 | struct omlp_semaphore* sem = omlp_from_lock(l); | ||
1269 | unsigned long flags; | ||
1270 | int owner; | ||
1271 | |||
1272 | spin_lock_irqsave(&sem->fifo_wait.lock, flags); | ||
1273 | |||
1274 | owner = sem->owner == t; | ||
1275 | |||
1276 | spin_unlock_irqrestore(&sem->fifo_wait.lock, flags); | ||
1277 | |||
1278 | if (owner) | ||
1279 | gsnedf_omlp_unlock(l); | ||
1280 | |||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | void gsnedf_omlp_free(struct litmus_lock* l) | ||
1285 | { | ||
1286 | kfree(omlp_from_lock(l)); | ||
1287 | } | ||
1288 | |||
1289 | static struct litmus_lock_ops gsnedf_omlp_lock_ops = { | ||
1290 | .close = gsnedf_omlp_close, | ||
1291 | .lock = gsnedf_omlp_lock, | ||
1292 | .budgeted_lock = gsnedf_omlp_budgeted_lock, | ||
1293 | .unlock = gsnedf_omlp_unlock, | ||
1294 | .timer_start = gsnedf_omlp_tstart, | ||
1295 | .timer_stop = gsnedf_omlp_tstop, | ||
1296 | .deallocate = gsnedf_omlp_free, | ||
1297 | }; | ||
1298 | |||
1299 | static struct litmus_lock* gsnedf_new_omlp(void) | ||
1300 | { | ||
1301 | struct omlp_semaphore* sem; | ||
1302 | int release_master = | ||
1303 | #ifdef CONFIG_RELEASE_MASTER | ||
1304 | atomic_read(&release_master_cpu); | ||
1305 | #else | ||
1306 | NO_CPU; | ||
1307 | #endif | ||
1308 | int num_rt_cpus = num_online_cpus() - (release_master != NO_CPU); | ||
1309 | |||
1310 | sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
1311 | if (!sem) | ||
1312 | return NULL; | ||
1313 | |||
1314 | sem->owner = NULL; | ||
1315 | sem->hp_waiter = NULL; | ||
1316 | init_waitqueue_head(&sem->fifo_wait); | ||
1317 | init_waitqueue_head(&sem->prio_wait); | ||
1318 | sem->litmus_lock.ops = &gsnedf_omlp_lock_ops; | ||
1319 | sem->num_free = num_rt_cpus; | ||
1320 | hrtimer_init(&sem->budgeting_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); | ||
1321 | sem->budgeting_timer.function = bomlp_budget_enforce; | ||
950 | 1322 | ||
951 | return &sem->litmus_lock; | 1323 | return &sem->litmus_lock; |
952 | } | 1324 | } |
@@ -970,7 +1342,13 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, | |||
970 | else | 1342 | else |
971 | err = -ENOMEM; | 1343 | err = -ENOMEM; |
972 | break; | 1344 | break; |
973 | 1345 | case OMLP_SEM: | |
1346 | /* Optimal Multiprocessor Locking Protocol */ | ||
1347 | *lock = gsnedf_new_omlp(); | ||
1348 | if (*lock) | ||
1349 | err = 0; | ||
1350 | else | ||
1351 | err = -ENOMEM; | ||
974 | }; | 1352 | }; |
975 | 1353 | ||
976 | return err; | 1354 | return err; |
@@ -1057,7 +1435,7 @@ static long gsnedf_deactivate_plugin(void) | |||
1057 | 1435 | ||
1058 | /* Plugin object */ | 1436 | /* Plugin object */ |
1059 | static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | 1437 | static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { |
1060 | .plugin_name = "GSN-EDF", | 1438 | .plugin_name = "GSN-EDF-BUDGETED", |
1061 | .finish_switch = gsnedf_finish_switch, | 1439 | .finish_switch = gsnedf_finish_switch, |
1062 | .task_new = gsnedf_task_new, | 1440 | .task_new = gsnedf_task_new, |
1063 | .complete_job = complete_job, | 1441 | .complete_job = complete_job, |