 -rw-r--r--  Makefile                  |  4
 -rw-r--r--  bin/rwrnlp.c              | 17
 -rw-r--r--  include/litmus/rt_param.h | 52
 -rw-r--r--  include/spinlocks.h       | 11
 -rw-r--r--  multi.csv                 |  8
 -rw-r--r--  src/spinlocks.c           | 41
 6 files changed, 95 insertions(+), 38 deletions(-)
diff --git a/Makefile b/Makefile
index 6bf9ed9..84f83e2 100644
--- a/Makefile
+++ b/Makefile
@@ -207,7 +207,7 @@ obj-base_mt_task = base_mt_task.o
 ldf-base_mt_task = -pthread
 
 obj-rwrnlp = rwrnlp.o
-ldf-rwrnlp = -pthread -lrt
+ldf-rwrnlp = -lrt -pthread
 
 obj-rt_launch = rt_launch.o common.o
 
@@ -224,7 +224,7 @@ lib-measure_syscall = -lm
 
 .SECONDEXPANSION:
 ${rt-apps}: $${obj-$$@} liblitmus.a
-	$(CC) -o $@ $(LDFLAGS) ${ldf-$@} $(filter-out liblitmus.a,$+) $(LOADLIBS) $(LDLIBS) ${liblitmus-flags} ${lib-$@}
+	$(CC) -o $@ $(LDFLAGS) ${ldf-$@} $(filter-out liblitmus.a,$+) $(LOADLIBS) $(LDLIBS) ${liblitmus-flags} ${lib-$@} -lrt
 
 # ##############################################################################
 # Dependency resolution.
diff --git a/bin/rwrnlp.c b/bin/rwrnlp.c
index 07e3899..a73e2ad 100644
--- a/bin/rwrnlp.c
+++ b/bin/rwrnlp.c
@@ -146,7 +146,7 @@ struct thread_context* parse_csv(const char *file, int *num_tasks)
 	} else {
 		ctx[cur_task].type = write_req;
 	}
-	if (1 != fscanf(fstream, "%ld",&ctx[cur_task].resources)){
+	if (1 != fscanf(fstream, "%lu",&ctx[cur_task].resources)){
 		fprintf(stderr, "invalid resource mask near line %d\n", cur_task);
 		exit(EXIT_FAILURE);
 	}
@@ -286,32 +286,35 @@ static int loop_for(double exec_time, double emergency_exit)
 static int job(struct thread_context *ctx, double program_end)
 {
 	double ncs_length;
-	double cs_length;
+	long lock_overhead, unlock_overhead;
 	if (wctime() > program_end){
 		printf("Terminating...\n");
 		return 0;
 	}
 	else {
 		ncs_length = (ctx->cost-ctx->cs_length)/2*S_PER_MS;
-		cs_length = ctx->cs_length * S_PER_MS;
 		loop_for(ncs_length, program_end + 1);
 
 		if(ctx->type == read_req){
 			//printf("%d:%d read locking...\n", __sync_fetch_and_add(&events,1), gettid());
-			rwrnlp_read_lock(&rw_lock, ctx->resources, ctx->processor);
+			lock_overhead = rwrnlp_read_lock(&rw_lock, ctx->resources, ctx->processor);
 			//printf("%d:%d read CS...\n", __sync_fetch_and_add(&events, 1), gettid());
 			loop_for(ctx->cs_length*S_PER_MS, program_end + 1);
 			//printf("%d:%d read unlocking...\n", __sync_fetch_and_add(&events,1), gettid());
-			rwrnlp_read_unlock(&rw_lock, ctx->processor);
+			unlock_overhead = rwrnlp_read_unlock(&rw_lock, ctx->processor);
 			//printf("%d:%d ncs...\n", __sync_fetch_and_add(&events,1), gettid());
+			printf("read lock overhead: %ld\n", lock_overhead);
+			printf("read unlock overhead: %ld\n", unlock_overhead);
 		}else{
 			//printf("%d:%d write locking %lu\n", __sync_fetch_and_add(&events,1), gettid(), ctx->resources);
-			rwrnlp_write_lock(&rw_lock, ctx->resources, ctx->processor);
+			lock_overhead = rwrnlp_write_lock(&rw_lock, ctx->resources, ctx->processor);
 			//printf("%d:%d write CS...\n", __sync_fetch_and_add(&events,1), gettid());
 			loop_for(ctx->cs_length*S_PER_MS, program_end + 1);
 			//printf("%d:%d write unlocking...\n", __sync_fetch_and_add(&events,1), gettid());
-			rwrnlp_write_unlock(&rw_lock, ctx->processor);
+			unlock_overhead = rwrnlp_write_unlock(&rw_lock, ctx->processor);
 			//printf("%d:%d ncs...\n", __sync_fetch_and_add(&events,1), gettid());
+			printf("write lock overhead: %ld\n", lock_overhead);
+			printf("write unlock overhead: %ld\n", unlock_overhead);
 		}
 
 
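The job body now simply prints each returned overhead sample. If the per-job printf traffic is unwanted, the same return values could instead be accumulated per thread and summarized once at the end; a hypothetical sketch (thread_context has no such fields in this patch):

    /* Hypothetical aggregation of the overheads returned by the rwrnlp_* calls;
     * none of these names exist in the patch itself. */
    struct overhead_stats {
        long total_ns;   /* sum of all samples           */
        long max_ns;     /* worst-case observed overhead */
        long samples;    /* number of lock/unlock pairs  */
    };

    static void record_overhead(struct overhead_stats *st, long lock_ns, long unlock_ns)
    {
        st->total_ns += lock_ns + unlock_ns;
        if (lock_ns > st->max_ns)
            st->max_ns = lock_ns;
        if (unlock_ns > st->max_ns)
            st->max_ns = unlock_ns;
        st->samples++;
    }
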
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 2026819..de29bac 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -33,6 +33,24 @@ typedef enum {
 	PRECISE_ENFORCEMENT /* budgets are enforced with hrtimers */
 } budget_policy_t;
 
+/* Release behaviors for jobs. PERIODIC and EARLY jobs
+   must end by calling sys_complete_job() (or equivalent)
+   to set up their next release and deadline. */
+typedef enum {
+	/* Jobs are released sporadically (provided job precedence
+	   constraints are met). */
+	SPORADIC,
+
+	/* Jobs are released periodically (provided job precedence
+	   constraints are met). */
+	PERIODIC,
+
+	/* Jobs are released immediately after meeting precedence
+	   constraints. Beware this can peg your CPUs if used in
+	   the wrong applications. Only supported by EDF schedulers. */
+	EARLY
+} release_policy_t;
+
 /* We use the common priority interpretation "lower index == higher priority",
  * which is commonly used in fixed-priority schedulability analysis papers.
  * So, a numerically lower priority value implies higher scheduling priority,
@@ -61,7 +79,8 @@ struct rt_task {
 	unsigned int cpu;
 	unsigned int priority;
 	task_class_t cls;
 	budget_policy_t budget_policy; /* ignored by pfair */
+	release_policy_t release_policy;
 };
 
 union np_flag {
@@ -89,11 +108,29 @@ union np_flag {
  * determining preemption/migration overheads).
  */
 struct control_page {
+	/* This flag is used by userspace to communicate non-preempive
+	 * sections. */
 	volatile union np_flag sched;
 
+	volatile uint64_t irq_count;  /* Incremented by the kernel each time an IRQ is
+	                               * handled. */
+
+	/* Locking overhead tracing: userspace records here the time stamp
+	 * and IRQ counter prior to starting the system call. */
+	uint64_t ts_syscall_start;    /* Feather-Trace cycles */
+	uint64_t irq_syscall_start;   /* Snapshot of irq_count when the syscall
+	                               * started. */
+
 	/* to be extended */
 };
 
+/* Expected offsets within the control page. */
+
+#define LITMUS_CP_OFFSET_SCHED		0
+#define LITMUS_CP_OFFSET_IRQ_COUNT	8
+#define LITMUS_CP_OFFSET_TS_SC_START	16
+#define LITMUS_CP_OFFSET_IRQ_SC_START	24
+
 /* don't export internal data structures to user space (liblitmus) */
 #ifdef __KERNEL__
 
@@ -142,11 +179,19 @@ struct rt_param {
 	/* is the task present? (true if it can be scheduled) */
 	unsigned int present:1;
 
+	/* has the task completed? */
+	unsigned int completed:1;
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
 	unsigned int priority_boosted:1;
 	/* If so, when did this start? */
 	lt_t boost_start_time;
+
+	/* How many LITMUS^RT locks does the task currently hold/wait for? */
+	unsigned int num_locks_held;
+	/* How many PCP/SRP locks does the task currently hold/wait for? */
+	unsigned int num_local_locks_held;
 #endif
 
 	/* user controlled parameters */
@@ -233,11 +278,6 @@ struct rt_param {
 	lt_t tot_exec_time;
 };
 
-/* Possible RT flags */
-#define RT_F_RUNNING	0x00000000
-#define RT_F_SLEEP	0x00000001
-#define RT_F_EXIT_SEM	0x00000008
-
 #endif
 
 #endif
diff --git a/include/spinlocks.h b/include/spinlocks.h
index d83a0d8..2ae3d27 100644
--- a/include/spinlocks.h
+++ b/include/spinlocks.h
@@ -28,7 +28,8 @@ typedef struct rwrnlp_struct {
 
 	int enter[NR_CPUS];
 	int leave[NR_CPUS];
-	request requests[NR_CPUS];
+	request requests[NR_CPUS][2];
+	int curr[NR_CPUS];
 
 	request* wqueue[NR_RESOURCES][NR_CPUS];
 	unsigned int whead[NR_RESOURCES];
@@ -47,12 +48,12 @@ void spin_unlock(spinlock_t *lock);
 
 void rwrnlp_init(rwrnlp *lock);
 
-void rwrnlp_read_lock(rwrnlp *lock, resource_mask_t resources, int processor);
+long rwrnlp_read_lock(rwrnlp *lock, resource_mask_t resources, int processor);
 
-void rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor);
+long rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor);
 
-void rwrnlp_read_unlock(rwrnlp *lock, int processor);
+long rwrnlp_read_unlock(rwrnlp *lock, int processor);
 
-void rwrnlp_write_unlock(rwrnlp *lock, int processor);
+long rwrnlp_write_unlock(rwrnlp *lock, int processor);
 
 #endif //SPINLOCKS_H
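With these declarations, each CPU owns two request slots selected by curr, and every lock/unlock entry point reports its own overhead: under MEASURE==TRUE the return value is the measured time in nanoseconds (see src/spinlocks.c below), otherwise 0. A caller sketch against the new signatures; rw_lock, my_resources, and my_cpu are placeholder names, not part of the patch:

    /* Sketch of a caller of the new long-returning API. */
    #include <stdio.h>
    #include "spinlocks.h"

    static rwrnlp rw_lock;   /* rwrnlp_init(&rw_lock) must run once before use */

    void example_writer(resource_mask_t my_resources, int my_cpu)
    {
        long lock_ns, unlock_ns;

        lock_ns = rwrnlp_write_lock(&rw_lock, my_resources, my_cpu);
        /* ... critical section on the resources in my_resources ... */
        unlock_ns = rwrnlp_write_unlock(&rw_lock, my_cpu);

        /* Both values are 0 when the library is built without MEASURE==TRUE. */
        printf("write lock/unlock overhead: %ld/%ld ns\n", lock_ns, unlock_ns);
    }
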
diff --git a/multi.csv b/multi.csv
index acf5a3c..937b869 100644
--- a/multi.csv
+++ b/multi.csv
@@ -1,6 +1,8 @@
 0 10.0 17.0 3.0 0 1
+0 05.0 17.0 3.0 0 5
 1 12.0 19.0 8.0 0 3
-3 17.0 23.0 8.0 1 4
-2 10.0 17.0 3.0 0 5
 1 12.0 29.0 8.0 1 1
-3 17.0 21.0 8.0 1 4
+2 10.0 17.0 3.0 0 5
+2 05.0 19.0 3.0 0 3
+3 17.0 23.0 8.0 1 4
+3 17.0 21.0 8.0 1 5
diff --git a/src/spinlocks.c b/src/spinlocks.c
index 9b04702..520e12a 100644
--- a/src/spinlocks.c
+++ b/src/spinlocks.c
@@ -60,12 +60,13 @@ void rwrnlp_init(rwrnlp *lock)
 	for(i = 0; i < NR_CPUS; i++){
 		lock->enter[i] = 0;
 		lock->leave[i] = 0;
+		lock->curr[i] = 0;
 	}
 	spin_init(lock->enqueue);
 	spin_init(lock->state);
 }
 
-void rwrnlp_read_lock(rwrnlp *lock, resource_mask_t resources, int processor)
+long rwrnlp_read_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 {
 	request *req;
 #if MEASURE==TRUE
@@ -76,7 +77,7 @@ void rwrnlp_read_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 
 	enter_np();
 
-	req = &lock->requests[processor];
+	req = &lock->requests[processor][lock->curr[processor]];
 	req->resources = resources;
 	req->type = read_req;
 	req->status = waiting;
@@ -116,13 +117,15 @@ void rwrnlp_read_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 		clock_gettime(CLOCK_MONOTONIC, &now);
 		overhead += diff_ns(&last, &now);
 	}
-	printf("read lock overhead: %ld\n", overhead);
+	return overhead;
+#else
+	return 0;
 #endif
 
 // printf("%d:%d reader satisfied\n", __sync_fetch_and_add(&events,1), gettid());
 }
 
-void rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor)
+long rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 {
 	int r,i,start,end;
 	request *req, *contender;
@@ -137,7 +140,7 @@ void rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 
 // printf("%d:%d rwrnlp_write_lock\n", __sync_fetch_and_add(&events,1), gettid());
 
-	req = &lock->requests[processor];
+	req = &lock->requests[processor][lock->curr[processor]];
 
 	req->resources = resources;
 	req->type = write_req;
@@ -179,9 +182,9 @@ void rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 
 	for(i = 0; i < NR_CPUS; i++){
 		if(i != processor){
-			start = lock->enter[i];
-			contender = &lock->requests[i];
 			end = lock->leave[i];
+			contender = &lock->requests[i][lock->curr[processor]];
+			start = lock->enter[i];
 			if(start <= end ||
 			   contender->type == write_req ||
 			   contender->status == waiting ||
@@ -210,27 +213,32 @@ void rwrnlp_write_lock(rwrnlp *lock, resource_mask_t resources, int processor)
 #if MEASURE==TRUE
 	clock_gettime(CLOCK_MONOTONIC, &now);
 	overhead += diff_ns(&last, &now);
-	printf("write lock overhead: %ld\n", overhead);
+	return overhead;
+#else
+	return 0;
 #endif
 }
 
-void rwrnlp_read_unlock(rwrnlp *lock, int processor)
+long rwrnlp_read_unlock(rwrnlp *lock, int processor)
 {
 #if MEASURE==TRUE
 	struct timespec now, last;
 	clock_gettime(CLOCK_MONOTONIC, &last);
 #endif
 	lock->leave[processor] += 1;
+	lock->curr[processor] = (lock->curr[processor] + 1) % 2;
 // printf("%d:%d rwrnlp_read_unlock\n", __sync_fetch_and_add(&events,1), gettid());
 	exit_np();
 
 #if MEASURE==TRUE
 	clock_gettime(CLOCK_MONOTONIC, &now);
-	printf("read unlock overhead: %ld\n", diff_ns(&last, &now));
+	return diff_ns(&last, &now);
+#else
+	return 0;
 #endif
 }
 
-void rwrnlp_write_unlock(rwrnlp *lock, int processor)
+long rwrnlp_write_unlock(rwrnlp *lock, int processor)
 {
 	int r;
 	request *req;
@@ -239,13 +247,11 @@ void rwrnlp_write_unlock(rwrnlp *lock, int processor)
 	struct timespec now, last;
 	clock_gettime(CLOCK_MONOTONIC, &last);
 #endif
-	req= &lock->requests[processor];
+	req= &lock->requests[processor][lock->curr[processor]];
 	tmp = req->resources;
 
 // printf("%d:%d rwrnlp_write_unlock\n", __sync_fetch_and_add(&events,1), gettid());
 
-	lock->leave[processor] += 1;
-
 	spin_lock(lock->state);
 	r = ffsl(tmp);
 	while(r != 0){
@@ -257,12 +263,17 @@ void rwrnlp_write_unlock(rwrnlp *lock, int processor)
 	lock->wlocked &= ~(req->resources);
 	lock->unavailable &= ~(req->resources);
 	spin_unlock(lock->state);
+
+	lock->leave[processor] += 1;
+	lock->curr[processor] = (lock->curr[processor] + 1) % 2;
 
 // printf("%d:%d write unlocked %lu\n", __sync_fetch_and_add(&events,1), gettid(), req->resources);
 // printf("unavailable %lu\nwentitled %lu\nwlocked %lu\n", lock->unavailable, lock->wentitled, lock->wlocked);
 	exit_np();
 #if MEASURE==TRUE
 	clock_gettime(CLOCK_MONOTONIC, &now);
-	printf("write unlock overhead: %ld\n", diff_ns(&last, &now));
+	return diff_ns(&last, &now);
+#else
+	return 0;
 #endif
 }
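
Taken together, the spinlocks.c changes double-buffer each CPU's request and, in the write-unlock path, bump leave[] and flip curr only after the resources have been released, which appears intended to keep the previous request slot intact and readable by contending writers while a new request is being prepared. A stripped-down illustration of just the two-slot pattern; the real protocol state, the enqueue/state spinlocks, and memory-ordering concerns are deliberately omitted:

    /* Simplified stand-ins: NR_CPUS and simple_request are local to this sketch
     * and do not replace the definitions in spinlocks.h. */
    #define NR_CPUS 4

    struct simple_request {
        unsigned long resources;
        int status;
    };

    static struct simple_request requests[NR_CPUS][2];
    static int curr[NR_CPUS];

    /* Publish a fresh request in the slot selected by curr[cpu]. */
    static struct simple_request *issue_request(int cpu, unsigned long resources)
    {
        struct simple_request *req = &requests[cpu][curr[cpu]];
        req->resources = resources;
        req->status = 0; /* waiting */
        return req;
    }

    /* On release, flip to the other slot so the just-retired request stays
     * readable until the next acquisition overwrites it. */
    static void retire_request(int cpu)
    {
        curr[cpu] = (curr[cpu] + 1) % 2;
    }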