 litmus/bank_proc.c   |   2 +-
 litmus/reservation.c |   2 ++
 litmus/sched_mc2.c   | 200 ++++++++++++++++++++++++++++++++++++-------------
 3 files changed, 163 insertions(+), 41 deletions(-)
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 67a5e71c0da7..ed8b6f1df01f 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -329,7 +329,7 @@ out_unlock:
 out:
 	if( smallest_nr_pages() == 0) {
 		//do_add_pages();
-		printk(KERN_ALERT "ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");
+		//printk(KERN_ALERT "ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");
 	}

 	return rPage;
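
The hunk above drops the out-of-pages warning entirely. A gentler option, sketched below under the assumption that smallest_nr_pages() behaves as in the surrounding function, is to keep the diagnostic but rate-limit it with the kernel's printk_ratelimited():

	#include <linux/printk.h>

	/* Sketch only: same check as above, but the warning survives in
	 * rate-limited form instead of being commented out, so the log
	 * still records that the page pools ran dry without flooding. */
	static void warn_if_out_of_pages(void)
	{
		if (smallest_nr_pages() == 0)
			printk_ratelimited(KERN_ALERT
				"bank_proc: no free pages left in any bank\n");
	}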
diff --git a/litmus/reservation.c b/litmus/reservation.c
index 0d0c4930b872..e3381d631daf 100644
--- a/litmus/reservation.c
+++ b/litmus/reservation.c
@@ -480,6 +480,8 @@ static void gmp_queue_active(
 	struct reservation *queued;
 	int check_preempt = 1, found = 0;

+	TRACE("R%d has been queued on active\n", res->id);
+
 	list_for_each(pos, &gmp_env->active_reservations) {
 		queued = list_entry(pos, struct reservation, list);
 		if (queued->priority > res->priority) {
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index dabccbc7a0db..3b407bd780d0 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -90,7 +90,7 @@ struct mc2_cpu_state {
 	int cpu;
 	struct task_struct* scheduled;
 	//struct crit_entry crit_entries[NUM_CRIT_LEVELS];
-	bool spin_flag; //not used on cpu 0
+	//bool spin_flag; //not used on cpu 0
 };

 static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state);
@@ -116,12 +116,13 @@ bool cpu_0_spin_flag;
 bool seen_once;
 bool cpu_0_task_exist;
 bool mode_changed;
+bool mode_poll_exited;
 static DEFINE_PER_CPU(unsigned long, mode_counter);
 #define local_mode_counter() (this_cpu_ptr(&mode_counter))
 #define cpu_0_mode_counter() (&per_cpu(mode_counter, 0))
 #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum))
-#define pending mode != requested_mode
-#define ready !res_reported
+#define pending (mode != requested_mode)
+#define ready (!res_reported)

 /*
  * To be called from level A tasks with period equal to
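
The parentheses added around pending and ready are not cosmetic: an object-like macro that expands to a bare expression changes meaning under `!` or `&&`. A standalone illustration (hypothetical variables, not scheduler code):

	#include <stdio.h>

	int mode = 1, requested_mode = 2;

	#define pending_bad  mode != requested_mode   /* unparenthesized */
	#define pending_good (mode != requested_mode) /* as in the patch */

	int main(void)
	{
		/* !pending_bad expands to (!mode) != requested_mode:
		 * !1 != 2  ->  0 != 2  ->  1, so "not pending" reads as
		 * true even though a mode change request is outstanding. */
		printf("bad:  %d\n", !pending_bad);
		/* !pending_good expands to !(mode != requested_mode) -> 0. */
		printf("good: %d\n", !pending_good);
		return 0;
	}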
@@ -135,11 +136,21 @@ asmlinkage long sys_enact_mode(void)
 	struct list_head *pos;
 	unsigned long flags;
 	TRACE_TASK(current, "ENACTING SYSCALL\n");
-	if (state->cpu == 0){
+	if (state->cpu == 0 && !mode_poll_exited){
+		unsigned long *other_cpu_counter;
+		unsigned long cpu0_val = this_cpu_read(mode_counter);
+		int i;
+		for(i = 1; i < NR_CPUS; i++){
+			other_cpu_counter = &per_cpu(mode_counter, i);
+			while(cpu0_val == *other_cpu_counter && !mode_poll_exited){
+				udelay(1);
+			}
+		}
 		mode_changed = false;
 		local_irq_save(flags);
 		if (pending){ //MCR has entered
 			raw_spin_lock(&state->lock);
+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
 			raw_spin_lock(&mode_lock);

@@ -152,7 +163,7 @@ asmlinkage long sys_enact_mode(void)
 				res = list_entry(pos, struct reservation, list);
 				if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
 					res->reported = 1;
-					TRACE_CUR("R%d RES_REPORTED_ACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
+					TRACE_TASK(res->tsk,"R%d RES_REPORTED_ACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
 					res_reported--;
 				}
 			}
@@ -160,7 +171,7 @@ asmlinkage long sys_enact_mode(void)
 				res = list_entry(pos, struct reservation, list);
 				if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
 					res->reported = 1;
-					TRACE_CUR("R%d RES_REPORTED_DEPLETED = %d mode %d\n",res->id, res_reported, res->mode);
+					TRACE_TASK(res->tsk,"R%d RES_REPORTED_DEPLETED = %d mode %d\n",res->id, res_reported, res->mode);
 					res_reported--;
 				}

@@ -169,12 +180,33 @@ asmlinkage long sys_enact_mode(void)
 				res = list_entry(pos, struct reservation, list);
 				if (tsk_rt(res->tsk)->completed && res->mode == mode && !res->reported){
 					res->reported = 1;
-					TRACE_CUR("R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
+					TRACE_TASK(res->tsk,"R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
 					res_reported--;
 				}
 			}
 			seen_once = true;
 		}
+		if (seen_once && !(ready)){
+			TRACE("Mode change waiting on tasks\n");
+			list_for_each(pos, &_global_env->active_reservations){
+				res = list_entry(pos, struct reservation, list);
+				if (!res->reported){
+					TRACE_TASK(res->tsk, "R%d is on the ACTIVE list with state %d, scheduled on %d, and hasn't reported\n", res->id, res->state, res->scheduled_on);
+				}
+			}
+			list_for_each(pos, &_global_env->depleted_reservations){
+				res = list_entry(pos, struct reservation, list);
+				if (!res->reported){
+					TRACE_TASK(res->tsk, "R%d is on the DEPLETED list and hasn't reported\n", res->id);
+				}
+			}
+			list_for_each(pos, &_global_env->inactive_reservations){
+				res = list_entry(pos, struct reservation, list);
+				if (!res->reported){
+					TRACE_TASK(res->tsk, "R%d is on the INACTIVE list and hasn't reported\n", res->id);
+				}
+			}
+		}
 		if( ready ){ //C is throttled
 			lt_t new_mode_basetime = get_release(current);
 			//TRACE("Timer canceled\n");
@@ -221,6 +253,7 @@ asmlinkage long sys_enact_mode(void)
 			TRACE("ENACT\n");
 		}
 		raw_spin_unlock(&mode_lock);
+		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);
 		//raw_spin_unlock(&state->lock);
 		mc2_update_timer_and_unlock(state);
@@ -229,11 +262,15 @@ asmlinkage long sys_enact_mode(void)
 		local_irq_restore(flags);
 		//cpu_0_spin_flag = !cpu_0_spin_flag;
 	}
-	else if (cpu_0_task_exist) {
+	else if (!mode_poll_exited) {
+		unsigned long *cpu0_counter = cpu_0_mode_counter();
+		unsigned long my_val;
+		//int timeout = 0;
+		this_cpu_inc(mode_counter);
+		my_val = this_cpu_read(mode_counter);
 		//spin, wait for CPU 0 to stabilize mode decision
 		//before scheduling next hyperperiod
 		//TRACE("CPU%d start spinning. %d\n",state->cpu, mode_changed);
-		unsigned long *cpu0_counter = cpu_0_mode_counter();
 		/*
 		if (state->spin_flag) {
 			while(cpu_0_spin_flag)
@@ -245,8 +282,15 @@ asmlinkage long sys_enact_mode(void)
 		}
 		*/
 		//TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
-		while (*cpu0_counter == this_cpu_read(mode_counter))
+		while (*cpu0_counter < my_val && !mode_poll_exited){
 			udelay(1);
+			//if (timeout++ > 1000){
+			//	if (!cpu_0_task_exist){
+			//		break;
+			//	}
+			//	timeout = 0;
+			//}
+		}
 		TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter));
 		local_irq_save(flags);
 		if (mode_changed) {
@@ -275,7 +319,7 @@ asmlinkage long sys_enact_mode(void)
 			//local_irq_restore(flags);

 		}
-		this_cpu_write(mode_counter, *cpu0_counter);
+		//this_cpu_write(mode_counter, *cpu0_counter);
 		local_irq_restore(flags);
 		//state->spin_flag = !state->spin_flag;
 	}
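
Taken together, these hunks replace the old boolean spin flags with a per-CPU counter rendezvous: each secondary CPU increments its own counter and spins until CPU 0's counter catches up, while CPU 0 first waits for every other counter to move past its own snapshot; mode_poll_exited lets either side bail out once the mode-poll task is gone. A simplified userspace sketch of the same handshake, assuming NCPUS and C11 atomics in place of the kernel's per-CPU API (the point where CPU 0 advances its own counter lies outside the hunks shown, so the sketch places it after the decision):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define NCPUS 4
	static _Atomic unsigned long counter[NCPUS];
	static atomic_bool poll_exited;

	/* CPUs 1..N-1: announce arrival, then wait for CPU 0 to publish. */
	void follower_barrier(int cpu)
	{
		unsigned long my_val = atomic_fetch_add(&counter[cpu], 1) + 1;
		/* Wait until CPU 0's counter reaches our round, unless the
		 * mode-poll task has exited and no decision will come. */
		while (atomic_load(&counter[0]) < my_val &&
		       !atomic_load(&poll_exited))
			; /* cpu_relax()/udelay(1) in the kernel version */
	}

	/* CPU 0: wait for every follower to arrive, decide, then publish. */
	void leader_barrier(void)
	{
		unsigned long snap = atomic_load(&counter[0]);
		for (int i = 1; i < NCPUS; i++)
			while (atomic_load(&counter[i]) == snap &&
			       !atomic_load(&poll_exited))
				;
		/* ... enact or reject the mode change here ... */
		atomic_fetch_add(&counter[0], 1); /* release the followers */
	}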
@@ -301,21 +345,47 @@ asmlinkage long sys_enact_mode(void)
  * Called from a non-real-time program.
  * Protected by an exclusive lock so it cannot occur while a mode change is enacted.
  */
+
+#define GET_MODE -1
+#define GET_REP -2
+#define GET_SIZE -3
+
 asmlinkage long sys_request_mode(int new_mode){
+	TRACE("Requesting mode %d\n", new_mode);
 	preempt_disable();
 	raw_spin_lock(&mode_lock);
+	if (new_mode == GET_MODE){
+		int tmp_mode = mode;
+		raw_spin_unlock(&mode_lock);
+		preempt_enable();
+		return tmp_mode;
+	}
+	if (new_mode == GET_REP){
+		int tmp_rep = res_reported;
+		raw_spin_unlock(&mode_lock);
+		preempt_enable();
+		return tmp_rep;
+	}
+	if (new_mode == GET_SIZE){
+		int tmp_size = mode_sizes[mode];
+		raw_spin_unlock(&mode_lock);
+		preempt_enable();
+		return tmp_size;
+	}
 	if (pending){
+		TRACE("Request to %d denied due to pending change to %d\n", new_mode, requested_mode);
 		raw_spin_unlock(&mode_lock);
 		preempt_enable();
 		return -EAGAIN;
 	}
 	if (mode == new_mode){
+		TRACE("Request to %d denied because I am in that mode already\n", new_mode);
 		raw_spin_unlock(&mode_lock);
 		preempt_enable();
-		return 0;
+		return -EINVAL;
 	}
 	requested_mode = new_mode;
-	TRACE("MCR received\n");
+	TRACE("MCR received: %d, old:%d\n",requested_mode,mode);
 	res_reported = mode_sizes[mode];
 	TRACE_CUR("RES_REPORTED = %d\n",res_reported);
 	seen_once = false;
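
With the sentinels above, the syscall doubles as a query interface: a negative argument reads scheduler state instead of requesting a change. A hedged userspace sketch; __NR_request_mode is a placeholder here, since the real number comes from the LITMUS^RT syscall table:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Placeholder: the actual number is assigned by LITMUS^RT. */
	#define __NR_request_mode 404

	#define GET_MODE -1  /* current mode                   */
	#define GET_REP  -2  /* reservations not yet reported  */
	#define GET_SIZE -3  /* task count in the current mode */

	int main(void)
	{
		printf("mode: %ld, unreported: %ld, size: %ld\n",
		       syscall(__NR_request_mode, GET_MODE),
		       syscall(__NR_request_mode, GET_REP),
		       syscall(__NR_request_mode, GET_SIZE));
		/* A real request: returns -EAGAIN while another change is
		 * pending, and -EINVAL if already in the target mode. */
		if (syscall(__NR_request_mode, 1) < 0)
			perror("mode change request");
		return 0;
	}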
@@ -539,6 +609,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	 */
 	local = local_cpu_state() == state;

+	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 	raw_spin_lock(&global_lock);

 	list_for_each_entry_safe(event, next, &_global_env->next_events, list) {
@@ -574,6 +645,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)

 	/* Must drop state lock before calling into hrtimer_start(), which
 	 * may raise a softirq, which in turn may wake ksoftirqd. */
+	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);
 	raw_spin_unlock(&state->lock);

@@ -722,8 +794,9 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 		restart = HRTIMER_RESTART;
 	}

+	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 	raw_spin_lock(&global_lock);
-	global_schedule_now = gmp_update_time(_global_env, now);
+	global_schedule_now = gmp_update_time(_global_env, litmus_clock());
 	BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);

 	/* Find the lowest cpu, and call reschedule */
@@ -740,6 +813,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 			reschedule[cpu] = 1;
 		}
 	}
+	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);
 	raw_spin_unlock_irqrestore(&state->lock, flags);
 	//raw_spin_unlock(&state->lock);
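
Replacing the earlier `now` snapshot with a fresh litmus_clock() read matters because the handler may spin on global_lock before entering the critical section, leaving the snapshot stale by the time budgets are charged. A minimal userspace sketch of the pattern, with a hypothetical read_clock() standing in for litmus_clock():

	#include <pthread.h>
	#include <time.h>

	static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for litmus_clock(): monotonic nanoseconds. */
	static long long read_clock(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	void timer_handler(void)
	{
		long long now = read_clock();   /* snapshot before the lock */

		pthread_mutex_lock(&global_lock);
		/* 'now' may already be stale: we may have spun on the lock.
		 * Charging budgets up to the present requires a fresh sample
		 * inside the critical section -- the patch's change from
		 * gmp_update_time(_global_env, now) to litmus_clock(). */
		long long fresh = read_clock();
		(void)now; (void)fresh;         /* accounting would use 'fresh' */
		pthread_mutex_unlock(&global_lock);
	}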
@@ -808,6 +882,7 @@ static long mc2_complete_job(void)
 		int i;
 		state = local_cpu_state();
 		raw_spin_lock(&state->lock);
+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);
 		for (i = 0; i < NR_MODES; i++) {
 			if (in_mode(current,i) || i == 0) {
@@ -835,9 +910,10 @@ static long mc2_complete_job(void)
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);

 		// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
-		if (lv == CRIT_LEVEL_C)
+		if (lv == CRIT_LEVEL_C){
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
-
+		}
 		raw_spin_unlock(&state->lock);
 		local_irq_restore(flags);
 	}
@@ -923,6 +999,8 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 			} else {
 				//if (!is_init_finished(tsk)) {
 				TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode);
+				if (mode != res->mode)
+					TRACE_CUR("Mode does not match res mode %d\n", res->mode);
 				//	if (num_sync_released != 0 && mode == 0) {
 				//ce = &state->crit_entries[lv];
 				sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -959,6 +1037,7 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 		if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
 			tsk = res->ops->dispatch_client(res, &time_slice);
 			if (pending && res->reported && !in_mode(tsk, requested_mode)){
+				TRACE_TASK(tsk, "Rejected because task not in requested mode %d\n", requested_mode);
 				continue;
 			}
 			if (likely(tsk)) {
@@ -970,7 +1049,8 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 #endif
 				res->event_added = 1;
 				res->blocked_by_ghost = 0;
 				res->is_ghost = NO_CPU;
+				TRACE_TASK(res->tsk, "R%d global dispatched on %d\n", res->id, state->cpu);
 				res->scheduled_on = state->cpu;
 				raw_spin_unlock(&mode_lock);
 				return tsk;
@@ -1031,9 +1111,11 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)

 	raw_spin_lock(&state->lock);

+	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 	raw_spin_lock(&global_lock);
 	preempt = resched_cpu[state->cpu];
 	resched_cpu[state->cpu] = 0;
+	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);

 	pre_schedule(prev, state->cpu);
@@ -1073,11 +1155,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		raw_spin_unlock(&_global_env.lock);
 	}*/
 	if (is_realtime(current) && blocks) {
-		if (get_task_crit_level(current) == CRIT_LEVEL_C)
+		if (get_task_crit_level(current) == CRIT_LEVEL_C){
+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
+		}
 		task_departs(current, is_completed(current));
-		if (get_task_crit_level(current) == CRIT_LEVEL_C)
+		if (get_task_crit_level(current) == CRIT_LEVEL_C){
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
+		}
 	}

 	/* figure out what to schedule next */
@@ -1085,26 +1171,28 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		state->scheduled = mc2_dispatch(state->sup_env, state);

 	if (!state->scheduled) {
+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);
 		if (is_realtime(prev))
 			gmp_update_time(_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
+		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);
 	}

 	/*
 	if (!state->scheduled) {
-		raw_spin_lock(&global_lock);
+		TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock);
 		//to_schedule = gmp_update_time(_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
-		raw_spin_unlock(&global_lock);
+		TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock);
 	} else {
-		raw_spin_lock(&global_lock);
+		TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
-		raw_spin_unlock(&global_lock);
+		TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock);
 	}
 	*/

@@ -1127,13 +1215,14 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
 		struct reservation* res = tinfo->res_info[mode].client.reservation;
 		if (res) {
-			TRACE_TASK(prev, "PREV JOB was scheduled_on = P%d\n", res->scheduled_on);
+			TRACE_TASK(prev, "PREV JOB of mode %d was scheduled_on = P%d\n", mode, res->scheduled_on);
 			res->scheduled_on = NO_CPU;
 		}
 		TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock());
 		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
 		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
 			int cpu;
+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
 			cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
@@ -1143,13 +1232,14 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 				//raw_spin_unlock(&_lowest_prio_cpu.lock);
 				TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
 			}
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
 		}
 	}

 	/*
 	if (to_schedule != 0) {
-		raw_spin_lock(&global_lock);
+		TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock);
 		while (to_schedule--) {
 			int cpu = get_lowest_prio_cpu(0);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
@@ -1157,14 +1247,16 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 				resched_cpu[cpu] = 1;
 			}
 		}
-		raw_spin_unlock(&global_lock);
+		TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock);
 	}
 	*/
 	post_schedule(state->scheduled, state->cpu);

+	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 	raw_spin_lock(&global_lock);
 	_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 	update_cpu_prio(state);
+	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);

 	raw_spin_unlock(&state->lock);
@@ -1239,8 +1331,10 @@ static void mc2_task_resume(struct task_struct *tsk)
 			 */
 			TRACE_TASK(tsk, "INIT_FINISHED is SET\n");
 			tsk_mc2_data(tsk)->init_finished = 1;
+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
 			num_sync_released--;
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
 			TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", num_sync_released);
 		}
@@ -1254,9 +1348,11 @@ static void mc2_task_resume(struct task_struct *tsk)
 			sup_update_time(state->sup_env, litmus_clock());
 			task_arrives(state, tsk);
 		} else {
+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
 			gmp_update_time(_global_env, litmus_clock());
 			task_arrives(state, tsk);
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
 		}

@@ -1363,8 +1459,10 @@ static long mc2_admit_task(struct task_struct *tsk)
 		if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
 			cpu_0_task_exist = true;
 		}
+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);
 		num_sync_released++;
+		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);
 		local_irq_restore(flags);
 		raw_spin_unlock(&state->lock);
@@ -1390,6 +1488,7 @@ static long mc2_admit_task(struct task_struct *tsk)

 //		TRACE_TASK(tsk, "Mode 0\n");

+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);
 		res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id);

@@ -1425,15 +1524,17 @@ static long mc2_admit_task(struct task_struct *tsk)
 			tsk_rt(tsk)->plugin_state = tinfo;
 			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 			raw_spin_lock(&mode_lock);
-			for(i = 0; i < NR_MODES; i++){
+			for(i = 1; i < NR_MODES; i++){
 				if (in_mode(tsk, i)){
 					mode_sizes[i]++;
 				}
 			}
+			mode_sizes[0]++;
 			raw_spin_unlock(&mode_lock);

 		}
 		num_sync_released++;
+		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);
 		//raw_spin_unlock_irqrestore(&state->lock, flags);
 		local_irq_restore(flags);
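
The admission loop now starts at mode 1 and counts every task into mode_sizes[0] unconditionally, matching the `in_mode(current,i) || i == 0` test in mc2_complete_job: mode 0 is the default mode that every task belongs to regardless of its mode_mask. A small self-contained sketch of that accounting rule (the harness is hypothetical):

	#include <stdio.h>

	#define NR_MODES 4
	static int mode_sizes[NR_MODES];

	/* in_mode(mask, m): does the task opt into mode m? (bit test) */
	static int in_mode(unsigned mask, int m) { return mask & (1u << m); }

	static void admit(unsigned mode_mask)
	{
		/* Modes 1..NR_MODES-1 are opt-in via the task's mode_mask... */
		for (int i = 1; i < NR_MODES; i++)
			if (in_mode(mode_mask, i))
				mode_sizes[i]++;
		/* ...but every task is a member of mode 0, whether or not
		 * bit 0 is set -- hence the unconditional count. */
		mode_sizes[0]++;
	}

	int main(void)
	{
		admit(0x6); /* opts into modes 1 and 2; still counted in mode 0 */
		printf("mode 0: %d, mode 1: %d\n", mode_sizes[0], mode_sizes[1]);
		return 0;
	}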
@@ -1485,6 +1586,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	raw_spin_lock(&state->lock);

 	if (lv == CRIT_LEVEL_C) {
+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);
 		res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id);
 	}
@@ -1513,15 +1615,19 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 		mc2_update_ghost_state(state);
 		*/
 		task_arrives(state, tsk);
-		if (lv == CRIT_LEVEL_C)
+		if (lv == CRIT_LEVEL_C){
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
+		}
 		/* NOTE: drops state->lock */
 		TRACE("mc2_new()\n");

 		mc2_update_timer_and_unlock(state);
 	} else {
-		if (lv == CRIT_LEVEL_C)
+		if (lv == CRIT_LEVEL_C){
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
+		}
 		raw_spin_unlock(&state->lock);
 		//raw_spin_unlock(&_global_env.lock);
 	}
@@ -1556,6 +1662,7 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 	if (cpu == -1) {
 		struct next_timer_event *event, *e_next;
 		local_irq_save(flags);
+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);

 		/* if the reservation is global reservation */
@@ -1603,6 +1710,7 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 			}
 		}

+		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);
 		local_irq_restore(flags);
 	} else {
@@ -1709,11 +1817,12 @@ static void mc2_task_exit(struct task_struct *tsk)
 		/* update both global and partitioned */
 		if (lv < CRIT_LEVEL_C) {
 			sup_update_time(state->sup_env, litmus_clock());
-/*			raw_spin_lock(&global_lock);
+/*			TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock);
 			gmp_update_time(_global_env, litmus_clock());
-			raw_spin_unlock(&global_lock);
+			TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock);
 */		}
 		else if (lv == CRIT_LEVEL_C) {
+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
 			gmp_update_time(_global_env, litmus_clock());
 			//raw_spin_unlock(&_global_env.lock);
@@ -1722,14 +1831,17 @@ static void mc2_task_exit(struct task_struct *tsk)
 			mc2_update_ghost_state(state);
 			*/
 			task_departs(tsk, 0);
-			if (lv == CRIT_LEVEL_C)
+			if (lv == CRIT_LEVEL_C){
+				//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 				raw_spin_unlock(&global_lock);
-
+			}
 			/* NOTE: drops state->lock */
 			TRACE("mc2_exit()\n");

+			//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 			raw_spin_lock(&global_lock);
 			num_sync_released--;
+			//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 			raw_spin_unlock(&global_lock);
 			mc2_update_timer_and_unlock(state);
 		} else {
@@ -1738,6 +1850,7 @@ static void mc2_task_exit(struct task_struct *tsk)
 	}

 	if (lv == CRIT_LEVEL_C) {
+		//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 		raw_spin_lock(&global_lock);
 		raw_spin_lock(&mode_lock);
 		for(i = 0; i < NR_MODES; i++){
@@ -1746,6 +1859,7 @@ static void mc2_task_exit(struct task_struct *tsk)
 			mode_sizes[i]--;
 		}
 		raw_spin_unlock(&mode_lock);
+		//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 		raw_spin_unlock(&global_lock);

 		for_each_online_cpu(cpu) {
@@ -1767,9 +1881,10 @@ static void mc2_task_exit(struct task_struct *tsk)

 	local_irq_restore(flags);

-	if (is_mode_poll_task(tsk) && (tinfo->cpu == 0)) {
-		cpu_0_spin_flag = !cpu_0_spin_flag; // release other cpu before exit.
-		cpu_0_task_exist = false;
+	if (is_mode_poll_task(tsk)){// && (tinfo->cpu == 0)) {
+		//cpu_0_spin_flag = !cpu_0_spin_flag; // release other cpu before exit.
+		//cpu_0_task_exist = false;
+		mode_poll_exited = true;
 	}

 	kfree(tsk_rt(tsk)->plugin_state);
@@ -2171,6 +2286,7 @@ static long mc2_activate_plugin(void)
 	struct mc2_cpu_state *state;
 	struct cpu_entry *ce;
 	int i;
+	//unsigned long *cpu_counter;

 	for(i = 0; i < NR_MODES; i++){
 		gmp_init(&(_global_env_modes[i]));
@@ -2214,11 +2330,14 @@ static long mc2_activate_plugin(void)

 		hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 		state->timer.function = on_scheduling_timer;
-		state->spin_flag = false;
+		//state->spin_flag = false;
+		this_cpu_write(mode_counter, 0);
 	}

 	mc2_setup_domain_proc();

+	mode_poll_exited = false;
+
 	mode = 0;
 	requested_mode = 0;

@@ -2226,8 +2345,8 @@ static long mc2_activate_plugin(void)
 		mode_sizes[i] = 0;
 	}
 	res_reported = 0;
-	cpu_0_spin_flag = false;
-	cpu_0_task_exist = false;
+	//cpu_0_spin_flag = false;
+	//cpu_0_task_exist = false;

 	return 0;
 }
@@ -2304,6 +2423,7 @@ static long mc2_deactivate_plugin(void)
 		raw_spin_unlock(&state->lock);
 	}

+	//TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");
 	raw_spin_lock(&global_lock);
 	for(i = 0; i < NR_MODES; i++){
 		_global_env = &_global_env_modes[i];
@@ -2341,6 +2461,7 @@ static long mc2_deactivate_plugin(void)

 	}
 	num_sync_released = 0;
+	//TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");
 	raw_spin_unlock(&global_lock);
 	destroy_domain_proc_info(&mc2_domain_proc_info);
 	return 0;
@@ -2368,4 +2489,3 @@ static int __init init_mc2(void)
 }

 module_init(init_mc2);
->>>>>>> c0d86034b5c4983e2a142e5f2b1cbde69661db3f