From d97c84a58867f481fede002fbd1510fc51b34e9e Mon Sep 17 00:00:00 2001 From: Namhoon Kim Date: Mon, 1 May 2017 14:13:05 -0400 Subject: RTSS 2017 submission --- include/litmus/trace.h | 1 - litmus/bank_proc.c | 80 ++------- litmus/jobs.c | 3 + litmus/litmus.c | 111 +++--------- litmus/sched_mc2.c | 458 ++++++------------------------------------------- 5 files changed, 96 insertions(+), 557 deletions(-) diff --git a/include/litmus/trace.h b/include/litmus/trace.h index eb0a07f4ba04..4dbb39ea8a14 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h @@ -3,7 +3,6 @@ #ifdef CONFIG_SCHED_OVERHEAD_TRACE - #include #include diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c index 08b58f94c391..097cff177a2d 100644 --- a/litmus/bank_proc.c +++ b/litmus/bank_proc.c @@ -1,7 +1,7 @@ /* * bank_proc.c -- Implementation of the page coloring for cache and bank partition. * The file will keep a pool of colored pages. Users can require pages with - * specific color or bank number. + * specific color or bank number. * Part of the code is modified from Jonathan Herman's code */ #include @@ -19,9 +19,6 @@ #include #include -//#define TRACE(fmt, args...) do {} while (false) -//#define TRACE_TASK(fmt, args...) do {} while (false) - #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 // This Address Decoding is used in imx6-sabredsd platform @@ -86,7 +83,6 @@ int node_index[9] = { struct mutex void_lockdown_proc; - /* * Every page list should contain a lock, a list, and a number recording how many pages it store */ @@ -105,7 +101,6 @@ static struct color_group *color_groups; */ unsigned int counting_one_set(unsigned int v) { -// unsigned int v; // count the number of bits set in v unsigned int c; // c accumulates the total bits set in v for (c = 0; v; v >>= 1) @@ -257,8 +252,6 @@ static inline unsigned int page_list_index(struct page *page) { unsigned int idx; idx = (page_color(page) + page_bank(page)*(number_cachecolors)); -// printk("address = %lx, ", page_to_phys(page)); -// printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx); return idx; } @@ -289,10 +282,10 @@ static void show_nr_pages(void) printk("show nr pages***************************************\n"); for (i = 0; i < NUM_PAGE_LIST; ++i) { cgroup = &color_groups[i]; - printk("(%03d) = %03d, ", i, atomic_read(&cgroup->nr_pages)); - if((i % 8) ==7){ + printk("(%03ld) = %03d, ", i, atomic_read(&cgroup->nr_pages)); + if((i % 8) ==7) { printk("\n"); - } + } } } @@ -316,6 +309,7 @@ void add_page_to_color_list(struct page *page) * Replenish the page pool. * If the newly allocate page is what we want, it will be pushed to the correct page list * otherwise, it will be freed. + * A user needs to invoke this function until the page pool has enough pages. 
*/ static int do_add_pages(void) { @@ -329,8 +323,6 @@ static int do_add_pages(void) // until all the page lists contain enough pages for (i=0; i< 1024*20;i++) { - //while (smallest_nr_pages() < PAGES_PER_COLOR) { - // printk("smallest = %d\n", smallest_nr_pages()); page = alloc_page(GFP_HIGHUSER_MOVABLE); if (unlikely(!page)) { @@ -340,55 +332,20 @@ static int do_add_pages(void) } color = page_list_index(page); counter[color]++; - // printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages)); - //show_nr_pages(); - //if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=16) { - if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { - //if ( PAGES_PER_COLOR && color>=16*2) { + if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=0) { add_page_to_color_list(page); - // printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page)); - } else{ + } else { // Pages here will be freed later list_add_tail(&page->lru, &free_later); free_counter++; - //list_del(&page->lru); - // __free_page(page); - // printk("useless page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page)); } - //show_nr_pages(); - /* - if(free_counter >= PAGES_PER_COLOR) - { - printk("free unwanted page list eariler"); - free_counter = 0; - list_for_each_entry_safe(page, page_tmp, &free_later, lru) { - list_del(&page->lru); - __free_page(page); - } - - show_nr_pages(); - } - */ - } -/* printk("page counter = \n"); - for (i=0; i<128; i++) - { - printk("(%03d) = %4d, ", i , counter[i]); - if(i%8 == 7){ - printk("\n"); - } + } - } -*/ - //printk("After refill : \n"); - //show_nr_pages(); -#if 1 // Free the unwanted pages list_for_each_entry_safe(page, page_tmp, &free_later, lru) { list_del(&page->lru); __free_page(page); } -#endif out: return ret; } @@ -407,7 +364,6 @@ static struct page *new_alloc_page_color( unsigned long color) if( (color <0) || (color)>(number_cachecolors*number_banks -1)) { TRACE_CUR("Wrong color %lu\n", color); -// printk(KERN_WARNING "Wrong color %lu\n", color); goto out; } @@ -416,7 +372,6 @@ static struct page *new_alloc_page_color( unsigned long color) spin_lock(&cgroup->lock); if (unlikely(!atomic_read(&cgroup->nr_pages))) { TRACE_CUR("No free %lu colored pages.\n", color); -// printk(KERN_WARNING "no free %lu colored pages.\n", color); goto out_unlock; } rPage = list_first_entry(&cgroup->list, struct page, lru); @@ -428,12 +383,6 @@ static struct page *new_alloc_page_color( unsigned long color) out_unlock: spin_unlock(&cgroup->lock); out: -/* - if( smallest_nr_pages() == 0) { - //do_add_pages(); - //printk(KERN_ALERT "ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n"); - } -*/ return rPage; } @@ -456,9 +405,7 @@ struct page* get_colored_page(unsigned long color) */ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) { - struct color_group *cgroup; struct page *rPage = NULL; - unsigned int color; int try = 0; unsigned int idx; @@ -479,7 +426,7 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) if (try>=256) break; idx = get_next_index(node, idx); - printk(KERN_ALERT "try = %d out of page! requesting node = %d, idx = %d\n", try, node, idx); + printk(KERN_ALERT "try = %d out of page! 
requesting node = %ld, idx = %d\n", try, node, idx); BUG_ON(idx<0 || idx>127); rPage = new_alloc_page_color(idx); } @@ -494,20 +441,19 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) void reclaim_page(struct page *page) { const unsigned long color = page_list_index(page); - unsigned long nr_reclaimed = 0; spin_lock(&reclaim_lock); put_page(page); add_page_to_color_list(page); spin_unlock(&reclaim_lock); - printk("Reclaimed page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages)); + printk("Reclaimed page(%ld) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages)); } /* * Initialize the numbers of banks and cache colors */ -static int __init init_variables(void) +static void __init init_variables(void) { number_banks = counting_one_set(BANK_MASK); number_banks = two_exp(number_banks); @@ -592,7 +538,7 @@ out: int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret = 0, i = 0; + int ret = 0; mutex_lock(&void_lockdown_proc); ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret) @@ -608,7 +554,7 @@ out: int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int ret = 0, i = 0; + int ret = 0; mutex_lock(&void_lockdown_proc); ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret) diff --git a/litmus/jobs.c b/litmus/jobs.c index 59c29d517074..368e0b308f3f 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c @@ -21,6 +21,7 @@ static inline void setup_release(struct task_struct *t, lt_t release) /* update job sequence number */ t->rt_param.job_params.job_no++; } + #define INIT_PHASE_LENGTH_NS (1000000000) void prepare_for_next_period(struct task_struct *t) @@ -34,6 +35,8 @@ void prepare_for_next_period(struct task_struct *t) (long long)litmus_clock() - (long long)t->rt_param.job_params.deadline; + /* Mode 0 is used for initializations * + * Use sporadic releases for all tasks not to overutilize cpus in mode 0 */ if (tsk_rt(t)->sporadic_release) { TRACE_TASK(t, "sporadic release at %llu\n", tsk_rt(t)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(t)->job_params.job_no)); diff --git a/litmus/litmus.c b/litmus/litmus.c index 90e21eeadabb..400fd1472705 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -37,9 +37,6 @@ #include #endif -//#define TRACE(fmt, args...) do {} while (false) -//#define TRACE_TASK(fmt, args...) do {} while (false) - extern void l2c310_flush_all(void); /* Number of RT tasks that exist in the system */ @@ -354,12 +351,24 @@ extern int isolate_lru_page(struct page *page); extern void putback_movable_page(struct page *page); extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x); +/* + * sys_set_page_color + * @cpu: CPU number to assign page colors. + * Syscall for recoloring pages + * Returns -1 on error. + * N on success. N is the number of pages that could not + * be moved. A return of zero means that all pages + * were successfully moved. Currently, two pages + * cannot be moved, signal handler and litmus ctrl + * pages. + * Only mc2 tasks may be configured with this system call. + * Use static linking to isolate all pages. 
+ */ asmlinkage long sys_set_page_color(int cpu) { long ret = 0; - //struct page *page_itr = NULL; struct vm_area_struct *vma_itr = NULL; - int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0, nr_not_migrated = 0; + int nr_pages = 0, nr_failed = 0, nr_not_migrated = 0; unsigned long node; enum crit_level lv; struct mm_struct *mm; @@ -375,19 +384,13 @@ asmlinkage long sys_set_page_color(int cpu) mm = get_task_mm(current); put_task_struct(current); - //down_read(¤t->mm->mmap_sem); down_read(&mm->mmap_sem); - //TRACE_TASK(current, "SYSCALL set_page_color\n"); vma_itr = mm->mmap; while (vma_itr != NULL) { unsigned int num_pages = 0, i; struct page *old_page = NULL; num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; - // print vma flags - //printk(KERN_INFO "flags: 0x%lx\n", vma_itr->vm_flags); - //printk(KERN_INFO "start - end: 0x%lx - 0x%lx (%lu)\n", vma_itr->vm_start, vma_itr->vm_end, (vma_itr->vm_end - vma_itr->vm_start)/PAGE_SIZE); - //printk(KERN_INFO "vm_page_prot: 0x%lx\n", vma_itr->vm_page_prot); for (i = 0; i < num_pages; i++) { old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT); @@ -401,30 +404,15 @@ asmlinkage long sys_set_page_color(int cpu) put_page(old_page); continue; } - - //TRACE_TASK(current, "addr: %08x, pfn: %x, _mapcount: %d, _count: %d\n", vma_itr->vm_start + PAGE_SIZE*i, __page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page)); - - //if (page_mapcount(old_page) == 1) { - ret = isolate_lru_page(old_page); - if (!ret) { - list_add_tail(&old_page->lru, &pagelist); - inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); - nr_pages++; - } else { - //TRACE_TASK(current, "isolate_lru_page failed\n"); - //TRACE_TASK(current, "page_lru = %d PageLRU = %d\n", page_lru(old_page), PageLRU(old_page)); - nr_failed++; - } - //printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page)); - put_page(old_page); - //} - /* - else { - nr_shared_pages++; - //printk(KERN_INFO "SHARED _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page)); - put_page(old_page); + ret = isolate_lru_page(old_page); + if (!ret) { + list_add_tail(&old_page->lru, &pagelist); + inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); + nr_pages++; + } else { + nr_failed++; } - */ + put_page(old_page); } vma_itr = vma_itr->vm_next; @@ -434,7 +422,7 @@ asmlinkage long sys_set_page_color(int cpu) if (tsk_rt(current)->mc2_data) lv = tsk_rt(current)->mc2_data->crit; else - BUG();//lv = 0; + BUG(); //lv = 0; if (cpu == -1) node = 8; @@ -444,34 +432,16 @@ asmlinkage long sys_set_page_color(int cpu) if (!list_empty(&pagelist)) { ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL); TRACE_TASK(current, "%ld pages not migrated.\n", ret); - printk(KERN_INFO "%ld pages not migrated.\n", ret); nr_not_migrated = ret; if (ret) { putback_movable_pages(&pagelist); } } - - /* handle sigpage and litmus ctrl_page */ -/* vma_itr = current->mm->mmap; - while (vma_itr != NULL) { - if (vma_itr->vm_start == tsk_rt(current)->addr_ctrl_page) { - TRACE("litmus ctrl_page = %08x\n", vma_itr->vm_start); - vma_itr->vm_page_prot = PAGE_SHARED; - break; - } - vma_itr = vma_itr->vm_next; - } -*/ + up_read(&mm->mmap_sem); -/* - list_for_each_entry(page_itr, &shared_pagelist, lru) { - TRACE("S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr)); - } -*/ - 
TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed); - printk(KERN_INFO "node = %ld, nr_migrated_pages = %d, nr_shared_pages = %d, nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_failed-2, nr_failed); - //printk(KERN_INFO "node = %d\n", cpu_to_node(smp_processor_id())); + TRACE_TASK(current, "node = %ld, nr_migrated_pages = %d, nr_pages = %d nr_failed = %d\n", node, nr_pages-nr_not_migrated, nr_pages, nr_failed); + return ret; } @@ -479,12 +449,12 @@ asmlinkage long sys_set_page_color(int cpu) asmlinkage long sys_test_call(unsigned int param) { long ret = 0; - unsigned long flags; struct vm_area_struct *vma_itr = NULL; TRACE_CUR("test_call param = %d\n", param); if (param == 0) { + /* Print page information */ down_read(¤t->mm->mmap_sem); vma_itr = current->mm->mmap; while (vma_itr != NULL) { @@ -494,37 +464,12 @@ asmlinkage long sys_test_call(unsigned int param) printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags); printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot)); printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED); - /* if (vma_itr->vm_file) { - struct file *fp = vma_itr->vm_file; - unsigned long fcount = atomic_long_read(&(fp->f_count)); - printk(KERN_INFO "f_count : %ld\n", fcount); - if (fcount > 1) { - vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot); - } - } - printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot)); - */ vma_itr = vma_itr->vm_next; } printk(KERN_INFO "--------------------------------------------\n"); up_read(¤t->mm->mmap_sem); - - local_irq_save(flags); - l2c310_flush_all(); - local_irq_restore(flags); - } - else if (param == 1) { - int i; - for (i = 0; i < 4; i++) { - lock_cache(i, 0x00003fff); - } - } - else if (param == 2) { - int i; - for (i = 0; i < 4; i++) { - lock_cache(i, 0xffffffff); - } } + return ret; } diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c index 7f6fefff0a3b..d7cf3fb83296 100644 --- a/litmus/sched_mc2.c +++ b/litmus/sched_mc2.c @@ -3,7 +3,7 @@ * * Implementation of the Mixed-Criticality on MultiCore scheduler * - * Thus plugin implements a scheduling algorithm proposed in + * This plugin implements a scheduling algorithm proposed in * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper. */ @@ -29,19 +29,12 @@ #include #include -#ifdef CONFIG_PGMRT_SUPPORT -#include -#endif - -//#define TRACE(fmt, args...) do {} while (false) -//#define TRACE_TASK(fmt, args...) do {} while (false) - #define BUDGET_ENFORCEMENT_AT_C 0 extern atomic_t num_sync_released; extern void do_partition(enum crit_level lv, int cpu); -/* _global_env - reservation container for level-C tasks*/ +/* _global_env - reservation container for level-C tasks */ struct gmp_reservation_environment _global_env_modes[NR_MODES]; struct gmp_reservation_environment *_global_env; raw_spinlock_t global_lock; @@ -55,7 +48,7 @@ struct cpu_entry { int cpu; enum crit_level lv; /* if will_schedule is true, this cpu is already selected and - call mc2_schedule() soon. */ + mc2_schedule() will be executed soon. 
*/ bool will_schedule; }; @@ -69,6 +62,7 @@ struct cpu_priority _lowest_prio_cpu; /* mc2_task_state - a task state structure */ struct mc2_task_state { + /* A task can be shared by multiple modes */ struct task_client res_info[NR_MODES]; /* if cpu == -1, this task is a global task (level C) */ int cpu; @@ -78,7 +72,6 @@ struct mc2_task_state { /* mc2_cpu_state - maintain the scheduled state and ghost jobs * timer : timer for partitioned tasks (level A and B) - * g_timer : timer for global tasks (level C) */ struct mc2_cpu_state { raw_spinlock_t lock; @@ -89,21 +82,16 @@ struct mc2_cpu_state { int cpu; struct task_struct* scheduled; - //struct crit_entry crit_entries[NUM_CRIT_LEVELS]; - //bool spin_flag; //not used on cpu 0 }; static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state); static int resched_cpu[NR_CPUS]; static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state); -//level_a_priorities unused -//static int level_a_priorities[NR_CPUS]; #define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id)) #define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state)) - unsigned int mode; //currently executing mode, from 0 to NR_MODES-1 unsigned int requested_mode; //The pending mode /* Prevent multiple requests from entering and prevent request from entering while old @@ -118,6 +106,8 @@ bool cpu_0_task_exist; bool mode_changed; bool mode_poll_exited; static DEFINE_PER_CPU(unsigned long, mode_counter); + +/* Mode change macros */ #define local_mode_counter() (this_cpu_ptr(&mode_counter)) #define cpu_0_mode_counter() (&per_cpu(mode_counter, 0)) #define in_mode(t, modenum) (tsk_mc2_data(t)->mode_mask & (1 << modenum)) @@ -135,7 +125,6 @@ asmlinkage long sys_enact_mode(void) struct reservation *res; struct list_head *pos; unsigned long flags; - TRACE_TASK(current, "ENACTING SYSCALL\n"); if (state->cpu == 0 && !mode_poll_exited){ unsigned long *other_cpu_counter; unsigned long cpu0_val = this_cpu_read(mode_counter); @@ -149,7 +138,6 @@ asmlinkage long sys_enact_mode(void) mode_changed = false; if (pending){ //MCR has entered raw_spin_lock_irqsave(&state->lock, flags); - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); raw_spin_lock(&mode_lock); @@ -208,13 +196,11 @@ asmlinkage long sys_enact_mode(void) } if( ready ){ //C is throttled lt_t new_mode_basetime = get_release(current); - //TRACE("Timer canceled\n"); - //hrtimer_cancel(&state->timer);//stop listening to old mode timers mode = requested_mode; TRACE("Mode has been changed.\n"); mode_changed = true; _global_env = &_global_env_modes[mode]; - //set res->reported for new global tasks + /* set res->reported for new global tasks */ list_for_each(pos, &_global_env->active_reservations){ res = list_entry(pos, struct reservation, list); release_at(res->tsk, new_mode_basetime); @@ -231,7 +217,6 @@ asmlinkage long sys_enact_mode(void) res->reported = 0; } gmp_update_time(_global_env, litmus_clock()); - //raw_spin_lock(&state->lock); state->sup_env = &state->sup_env_modes[mode]; list_for_each(pos, &state->sup_env->active_reservations){ @@ -247,57 +232,33 @@ asmlinkage long sys_enact_mode(void) release_at(res->tsk, new_mode_basetime); } sup_update_time(state->sup_env, litmus_clock()); - //raw_spin_unlock(&state->lock); sched_trace_enact_mode(current); TRACE("ENACT\n"); } raw_spin_unlock(&mode_lock); - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); - //raw_spin_unlock(&state->lock); raw_spin_unlock_irqrestore(&state->lock, flags); raw_spin_lock(&state->lock); mc2_update_timer_and_unlock(state); } 
this_cpu_inc(mode_counter); - //local_irq_restore(flags); - //cpu_0_spin_flag = !cpu_0_spin_flag; } else if (!mode_poll_exited) { unsigned long *cpu0_counter = cpu_0_mode_counter(); unsigned long my_val; - //int timeout = 0; this_cpu_inc(mode_counter); my_val = this_cpu_read(mode_counter); //spin, wait for CPU 0 to stabilize mode decision //before scheduling next hyperperiod - //TRACE("CPU%d start spinning. %d\n",state->cpu, mode_changed); -/* - if (state->spin_flag) { - while(cpu_0_spin_flag) - udelay(1); - } - else { - while(!cpu_0_spin_flag) - udelay(1); - } - */ - //TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed); + while (*cpu0_counter < my_val && !mode_poll_exited){ udelay(1); - //if (timeout++ > 1000){ - // if (!cpu_0_task_exist){ - // break; - // } - // timeout = 0; - //} } TRACE("CPU%d counter check. %d\n",state->cpu, this_cpu_read(mode_counter)); - //local_irq_save(flags); if (mode_changed) { lt_t new_mode_basetime = get_release(current); - //TRACE("CPU%d mode changed\n",state->cpu); + TRACE("CPU%d mode changed\n",state->cpu); hrtimer_cancel(&state->timer); //stop listening to old mode timers TRACE("Timer is cancelled at %llu. mode-change\n", litmus_clock()); raw_spin_lock_irqsave(&state->lock, flags); @@ -319,24 +280,13 @@ asmlinkage long sys_enact_mode(void) raw_spin_lock(&state->lock); mc2_update_timer_and_unlock(state); - //local_irq_restore(flags); } - //state->spin_flag = !state->spin_flag; } else { - //TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed); + TRACE("CPU%d exits sys_enact_mode(). No cpu_0_task_exist.%d\n",state->cpu, mode_changed); return 0; } - TRACE("CPU%d enact syscall ends m_c? %d new_mode %d\n",state->cpu, mode_changed, mode); - //if mode didn't change this has no effect on what's being scheduled - //raw_spin_lock(&state->lock); - //state->sup_env = &state->sup_env_modes[mode]; - //raw_spin_unlock(&state->lock); - //sup_update_time(state->sup_env, litmus_clock()); - //raw_spin_lock(&state->lock); - //mc2_update_timer_and_unlock(state); - TRACE("is timer active? 
%d remaining %llu\n",hrtimer_active(&state->timer), hrtimer_get_remaining(&state->timer)); return 0; } @@ -426,21 +376,6 @@ static enum crit_level get_task_crit_level(struct task_struct *tsk) return mp->crit; } -static int is_init_finished(struct task_struct *tsk) -{ - struct mc2_task *mp; - - if (!tsk || !is_realtime(tsk)) - return 0; - - mp = tsk_rt(tsk)->mc2_data; - - if (!mp) - return 0; - else - return mp->init_finished; -} - /* task_depart - remove a task from its reservation * If the job has remaining budget, convert it to a ghost job * and update crit_entries[] @@ -450,7 +385,6 @@ static int is_init_finished(struct task_struct *tsk) static void task_departs(struct task_struct *tsk, int job_complete) { struct mc2_task_state* tinfo = get_mc2_state(tsk); - //struct mc2_cpu_state* state = local_cpu_state(); struct reservation* res = NULL; struct reservation_client *client = NULL; int i; @@ -470,14 +404,6 @@ static void task_departs(struct task_struct *tsk, int job_complete) res->ops->client_departs(res, client, job_complete); } -/* 9/18/2015 fix start - no ghost job handling, empty remaining budget */ -/* - if (job_complete) { - //res->cur_budget = 0; - } -*/ -/* fix end */ - tinfo->has_departed = true; TRACE_TASK(tsk, "CLIENT DEPART with budget %llu at %llu\n", res->cur_budget, litmus_clock()); } @@ -557,12 +483,7 @@ static int get_lowest_prio_cpu(lt_t priority) ce = &_lowest_prio_cpu.cpu_entries[cpu]; /* If a CPU will call schedule() in the near future, we don't return that CPU. */ -/* - TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule, - ce->scheduled ? (ce->scheduled)->comm : "null", - ce->scheduled ? (ce->scheduled)->pid : 0, - ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0); -*/ + if (!ce->will_schedule) { if (!ce->scheduled) { /* Idle cpu, return this. */ @@ -599,7 +520,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) { int local, cpus; lt_t update, now; - //enum crit_level lv = get_task_crit_level(state->scheduled); struct next_timer_event *event, *next; int reschedule[NR_CPUS]; unsigned long flags; @@ -618,7 +538,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) */ local = local_cpu_state() == state; - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); list_for_each_entry_safe(event, next, &_global_env->next_events, list) { @@ -632,18 +551,11 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) if (event->timer_armed_on == NO_CPU) { struct reservation *res = gmp_find_by_id(_global_env, event->id); int cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); - //TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); list_del(&event->list); kfree(event); if (cpu != NO_CPU) { - //raw_spin_lock(&_lowest_prio_cpu.lock); _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; - //raw_spin_unlock(&_lowest_prio_cpu.lock); - - //if (cpu == local_cpu_state()->cpu) - // litmus_reschedule_local(); - //else - reschedule[cpu] = 1; + reschedule[cpu] = 1; } } } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) { @@ -655,7 +567,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) /* Must drop state lock before calling into hrtimer_start(), which * may raise a softirq, which in turn may wake ksoftirqd. 
*/ - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); + raw_spin_unlock(&global_lock); local_irq_restore(flags); raw_spin_unlock(&state->lock); @@ -663,11 +575,6 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) if ((update <= now) || reschedule[state->cpu]) { reschedule[state->cpu] = 0; litmus_reschedule(state->cpu); - /* - raw_spin_lock(&state->lock); - preempt_if_preemptable(state->scheduled, state->cpu); - raw_spin_unlock(&state->lock); - */ } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) { /* Reprogram only if not already set correctly. */ if (!hrtimer_active(&state->timer) || @@ -708,22 +615,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) state->cpu, hrtimer_active(&state->timer), ktime_to_ns(hrtimer_get_expires(&state->timer))); - //litmus_reschedule(state->cpu); -/* - raw_spin_lock(&state->lock); - preempt_if_preemptable(state->scheduled, state->cpu); - raw_spin_unlock(&state->lock); - reschedule[state->cpu] = 0; -*/ - } - } -/* - for (cpus = 0; cpuslock, flags); - //raw_spin_lock(&state->lock); - //local_irq_save(flags); now = litmus_clock(); sup_update_time(state->sup_env, now); -/* 9/20/2015 fix - no ghost job - remain_budget = mc2_update_ghost_state(state); -*/ update = state->sup_env->next_scheduler_update; now = state->sup_env->env.current_time; - if (update <= now) { litmus_reschedule_local(); } else if (update != SUP_NO_SCHEDULER_UPDATE) { @@ -805,7 +691,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) restart = HRTIMER_RESTART; } - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); global_schedule_now = gmp_update_time(_global_env, litmus_clock()); BUG_ON(global_schedule_now < 0 || global_schedule_now > 4); @@ -814,39 +699,24 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) while (global_schedule_now--) { int cpu = get_lowest_prio_cpu(0); if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { - //raw_spin_lock(&_lowest_prio_cpu.lock); _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; - //raw_spin_unlock(&_lowest_prio_cpu.lock); - TRACE("LOWEST CPU = P%d\n", cpu); if (cpu == state->cpu && update > now) - ;//litmus_reschedule_local(); + ; else reschedule[cpu] = 1; } } - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); raw_spin_unlock_irqrestore(&state->lock, flags); - //raw_spin_unlock(&state->lock); - //local_irq_restore(flags); TS_ISR_END; for (cpus = 0; cpuslock); - preempt_if_preemptable(remote_state->scheduled, remote_state->cpu); - raw_spin_unlock(&remote_state->lock); - */ } } - - + return restart; } @@ -898,7 +768,6 @@ static long mc2_complete_job(void) int i; state = local_cpu_state(); raw_spin_lock(&state->lock); - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); for (i = 0; i < NR_MODES; i++) { if (in_mode(current,i) || i == 0) { @@ -956,10 +825,6 @@ static long mc2_complete_job(void) res_reported--; TRACE_CUR("RES_REPORTED = %d\n", res_reported); res->reported = 1; - //Current task doesn't exist in new mode - //if ( !in_mode(current, requested_mode) ){ - // litmus_reschedule_local(); - //} } raw_spin_unlock(&mode_lock); } @@ -993,17 +858,8 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st { struct reservation *res, *next; struct task_struct *tsk = NULL; - //struct crit_entry *ce; enum crit_level lv; lt_t time_slice; - - - list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { - if (res->state == 
RESERVATION_ACTIVE) { - struct task_struct *t = res->ops->dispatch_client(res, &time_slice); - TRACE_TASK(tsk, "CPU%d ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", state->cpu, res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t)); - } - } list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { if (res->state == RESERVATION_ACTIVE) { @@ -1014,22 +870,10 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st sup_scheduler_update_after(sup_env, res->cur_budget); return tsk; } else { - //if (!is_init_finished(tsk)) { -// TRACE_TASK(tsk, "num_sync_released = %d, mode = %d\n", num_sync_released, mode); -// if (num_sync_released != 0 && mode == 0) { - //ce = &state->crit_entries[lv]; sup_scheduler_update_after(sup_env, res->cur_budget); res->blocked_by_ghost = 0; res->is_ghost = NO_CPU; return tsk; -/* - } else if (res->mode == mode) { - sup_scheduler_update_after(sup_env, res->cur_budget); - res->blocked_by_ghost = 0; - res->is_ghost = NO_CPU; - return tsk; - } -*/ } } } @@ -1042,18 +886,9 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) { struct reservation *res, *next; struct task_struct *tsk = NULL; - //struct crit_entry *ce; enum crit_level lv; lt_t time_slice; - list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) { - if (res->state == RESERVATION_ACTIVE) { - struct task_struct *t = res->ops->dispatch_client(res, &time_slice); - TRACE_TASK(tsk, "GLOBAL ACT_LIST R%d cur.mode = %d, res->mode = %d budget = %llu next_repl. = %llu, deadline = %llu\n", res->id, mode, res->mode, res->cur_budget, res->next_replenishment, get_deadline(t)); - } - } - - //raw_spin_lock(&mode_lock); list_for_each_entry_safe(res, next, &_global_env->active_reservations, list) { BUG_ON(!res); if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { @@ -1074,12 +909,10 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state) res->is_ghost = NO_CPU; TRACE_TASK(res->tsk, "R%d global dispatched on %d\n", res->id, state->cpu); res->scheduled_on = state->cpu; - //raw_spin_unlock(&mode_lock); return tsk; } } } - //raw_spin_unlock(&mode_lock); return NULL; } @@ -1126,7 +959,7 @@ static inline void post_schedule(struct task_struct *next, int cpu) */ static struct task_struct* mc2_schedule(struct task_struct * prev) { - int np, blocks, exists, to_schedule; + int np, blocks, exists; /* next == NULL means "schedule background work". */ lt_t now = litmus_clock(); struct mc2_cpu_state *state = local_cpu_state(); @@ -1138,11 +971,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) BUG_ON(state->scheduled && state->scheduled != prev); BUG_ON(state->scheduled && !is_realtime(prev)); - //if (state->scheduled && state->scheduled != prev) - // printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null"); - //if (state->scheduled && !is_realtime(prev)) - // printk(KERN_ALERT "BUG2!!!!!!!! 
\n"); - /* (0) Determine state */ exists = state->scheduled != NULL; blocks = exists && !is_current_running(); @@ -1151,32 +979,13 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) /* update time */ state->sup_env->will_schedule = true; sup_update_time(state->sup_env, now); - /* 9/20/2015 fix */ - //raw_spin_lock(&_global_env.lock); - //to_schedule = gmp_update_time(&_global_env, now); - //raw_spin_unlock(&_global_env.lock); - - /* 9/20/2015 fix - mc2_update_ghost_state(state); - */ - - /* remove task from reservation if it blocks */ - /* - if (is_realtime(prev) && !is_running(prev)) { - if (get_task_crit_level(prev) == CRIT_LEVEL_C) - raw_spin_lock(&_global_env.lock); - task_departs(prev, is_completed(prev)); - if (get_task_crit_level(prev) == CRIT_LEVEL_C) - raw_spin_unlock(&_global_env.lock); - }*/ + if (is_realtime(current) && blocks) { if (get_task_crit_level(current) == CRIT_LEVEL_C){ - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); } task_departs(current, is_completed(current)); if (get_task_crit_level(current) == CRIT_LEVEL_C){ - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); } } @@ -1186,7 +995,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) state->scheduled = mc2_dispatch(state->sup_env, state); if (!state->scheduled) { - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); if (is_realtime(prev)) gmp_update_time(_global_env, now); @@ -1200,26 +1008,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) update_cpu_prio(state); raw_spin_unlock(&global_lock); } - /* - if (!state->scheduled) { - TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); - //to_schedule = gmp_update_time(_global_env, now); - state->scheduled = mc2_global_dispatch(state); - _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; - update_cpu_prio(state); - TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); - } else { - TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); - _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; - update_cpu_prio(state); - TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); - } - */ - - //raw_spin_lock(&_lowest_prio_cpu.lock); - //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; - //update_cpu_prio(state); - //raw_spin_unlock(&_lowest_prio_cpu.lock); /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ sched_state_task_picked(); @@ -1235,41 +1023,22 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) struct mc2_task_state* tinfo = get_mc2_state(prev); struct reservation* res = tinfo->res_info[mode].client.reservation; if (res) { - TRACE_TASK(prev, "PREV JOB of mode %d was scheduled_on = P%d\n", mode, res->scheduled_on); res->scheduled_on = NO_CPU; } TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock()); /* if prev is preempted and a global task, find the lowest cpu and reschedule */ if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) { int cpu; - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY); if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { - //raw_spin_lock(&_lowest_prio_cpu.lock); _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; resched_cpu[cpu] = 1; - //raw_spin_unlock(&_lowest_prio_cpu.lock); - TRACE("LEVEL-C TASK PREEMPTED!! 
poking CPU %d to reschedule\n", cpu); } - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); } } -/* - if (to_schedule != 0) { - TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); - while (to_schedule--) { - int cpu = get_lowest_prio_cpu(0); - if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) { - _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; - resched_cpu[cpu] = 1; - } - } - TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); - } -*/ post_schedule(state->scheduled, state->cpu); raw_spin_unlock(&state->lock); @@ -1277,7 +1046,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) TRACE_TASK(state->scheduled, "scheduled.\n"); } - return state->scheduled; } @@ -1308,7 +1076,6 @@ static void mc2_task_resume(struct task_struct *tsk) TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); - //local_irq_save(flags); preempt_disable(); tinfo = get_mc2_state(tsk); if (tinfo->cpu != -1) @@ -1317,41 +1084,36 @@ static void mc2_task_resume(struct task_struct *tsk) state = local_cpu_state(); preempt_enable(); - /* 9/20/2015 fix - raw_spin_lock(&_global_env.lock); - */ /* Requeue only if self-suspension was already processed. */ if (tinfo->has_departed) { - /* We don't want to consider jobs before synchronous releases */ +#ifdef CONFIG_SCHED_OVERHEAD_TRACE + switch(get_task_crit_level(tsk)) { + case CRIT_LEVEL_A: + TS_RELEASE_LATENCY_A(get_release(tsk)); + break; + case CRIT_LEVEL_B: + TS_RELEASE_LATENCY_B(get_release(tsk)); + break; + case CRIT_LEVEL_C: + TS_RELEASE_LATENCY_C(get_release(tsk)); + break; + default: + break; + } +#endif + /* We don't want to consider jobs in the initialization mode */ if (tsk_rt(tsk)->job_params.job_no == 2) { -/* - switch(get_task_crit_level(tsk)) { - case CRIT_LEVEL_A: - TS_RELEASE_LATENCY_A(get_release(tsk)); - break; - case CRIT_LEVEL_B: - TS_RELEASE_LATENCY_B(get_release(tsk)); - break; - case CRIT_LEVEL_C: - TS_RELEASE_LATENCY_C(get_release(tsk)); - break; - default: - break; - } -*/ - TRACE_TASK(tsk, "INIT_FINISHED is SET\n"); tsk_mc2_data(tsk)->init_finished = 1; atomic_dec(&num_sync_released); if (atomic_read(&num_sync_released) == 0) { lt_t start = tsk_rt(tsk)->sporadic_release_time + INIT_PHASE_LENGTH_NS*(tsk_rt(tsk)->job_params.job_no); - TRACE("INIT_PHASE FINISHED. 
CHANGE TO MODE 1\n"); sys_request_mode(1); sched_trace_sys_start(&start); } - TRACE_TASK(tsk, "INIT_FINISHED is SET, num_sync_released decreased to %d\n", atomic_read(&num_sync_released)); } + raw_spin_lock_irqsave(&state->lock, flags); /* Assumption: litmus_clock() is synchronized across cores, @@ -1361,18 +1123,12 @@ static void mc2_task_resume(struct task_struct *tsk) sup_update_time(state->sup_env, litmus_clock()); task_arrives(state, tsk); } else { - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); gmp_update_time(_global_env, litmus_clock()); task_arrives(state, tsk); - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); } - /* 9/20/2015 fix - mc2_update_ghost_state(state); - */ - //task_arrives(state, tsk); /* NOTE: drops state->lock */ TRACE_TASK(tsk, "mc2_resume()\n"); raw_spin_unlock_irqrestore(&state->lock, flags); @@ -1381,12 +1137,8 @@ static void mc2_task_resume(struct task_struct *tsk) mc2_update_timer_and_unlock(state); } else { TRACE_TASK(tsk, "resume event ignored, still scheduled\n"); - //raw_spin_unlock(&_global_env.lock); } - //local_irq_restore(flags); - - //gmp_free_passed_event(); resume_legacy_task_model_updates(tsk); } @@ -1417,8 +1169,6 @@ static long mc2_admit_task(struct task_struct *tsk) if (lv < CRIT_LEVEL_C) { state = cpu_state_for(task_cpu(tsk)); - //local_irq_save(flags); - //raw_spin_lock(&state->lock); raw_spin_lock_irqsave(&state->lock, flags); tinfo->mc2_param.crit = mp->crit; @@ -1427,15 +1177,11 @@ static long mc2_admit_task(struct task_struct *tsk) tinfo->mc2_param.res_id = mp->res_id; tinfo->mc2_param.mode_mask = mp->mode_mask; tinfo->mc2_param.init_finished = 0; -// TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask); - -// TRACE_TASK(tsk, "Mode 0\n"); + res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id); /* found the appropriate reservation */ if (res) { -// TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n"); - /* initial values */ err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); } @@ -1446,16 +1192,13 @@ static long mc2_admit_task(struct task_struct *tsk) for(i = 1; i < NR_MODES; i++){ if (!in_mode(tsk, i)){ - //task not present in mode + // task not present in mode continue; } -// TRACE_TASK(tsk, "Mode %d\n",i); res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id); /* found the appropriate reservation */ if (res) { - // TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i); - /* initial values */ err = err? 
err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); } @@ -1475,18 +1218,10 @@ static long mc2_admit_task(struct task_struct *tsk) cpu_0_task_exist = true; } atomic_inc(&num_sync_released); - //raw_spin_unlock(&state->lock); - //local_irq_restore(flags); raw_spin_unlock_irqrestore(&state->lock, flags); } else if (lv == CRIT_LEVEL_C) { -// TRACE_TASK(tsk, "Task being admitted is Level C\n"); state = local_cpu_state(); raw_spin_lock_irqsave(&state->lock, flags); - //local_irq_save(flags); - //raw_spin_lock(&state->lock); - //state = local_cpu_state(); - - //raw_spin_lock(&state->lock); tinfo->mc2_param.crit = mp->crit; tinfo->cpu = -1; @@ -1495,18 +1230,11 @@ static long mc2_admit_task(struct task_struct *tsk) tinfo->mc2_param.mode_mask = mp->mode_mask; tinfo->mc2_param.init_finished = 0; - // TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask); - -// TRACE_TASK(tsk, "Mode 0\n"); - - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id); /* found the appropriate reservation */ if (res) { - // TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n"); - /* initial values */ err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res); } @@ -1522,8 +1250,6 @@ static long mc2_admit_task(struct task_struct *tsk) /* found the appropriate reservation (or vCPU) */ if (res) { - TRACE_TASK(tsk, "GMP FOUND RES ID in mode %d\n", i); - /* initial values */ err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res); @@ -1548,10 +1274,7 @@ static long mc2_admit_task(struct task_struct *tsk) atomic_inc(&num_sync_released); raw_spin_unlock_irqrestore(&state->lock, flags); - //raw_spin_unlock(&state->lock); - //local_irq_restore(flags); } - if (err) kfree(tinfo); @@ -1568,7 +1291,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, { unsigned long flags; struct mc2_task_state* tinfo = get_mc2_state(tsk); - struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu); + struct mc2_cpu_state *state; struct reservation *res; enum crit_level lv = get_task_crit_level(tsk); lt_t release = 0; @@ -1584,54 +1307,42 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, state = cpu_state_for(tinfo->cpu); - /* acquire the lock protecting the state and disable interrupts */ - //raw_spin_lock(&_global_env.lock); - //raw_spin_lock(&state->lock); if (is_running) { state->scheduled = tsk; /* make sure this task should actually be running */ litmus_reschedule_local(); } - + + /* acquire the lock protecting the state and disable interrupts */ local_irq_save(flags); raw_spin_lock(&state->lock); if (lv == CRIT_LEVEL_C) { - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); res = gmp_find_by_id(_global_env, tinfo->mc2_param.res_id); } else { res = sup_find_by_id(state->sup_env, tinfo->mc2_param.res_id); } - - //BUG_ON(!res); + BUG_ON(!res); + // the current mode doesn't have this task. // do not update timer and set the next release time. 
- //res = res_find_by_id(state, tinfo->mc2_param.res_id); - BUG_ON(!res); - if (on_runqueue || is_running) { /* Assumption: litmus_clock() is synchronized across cores * [see comment in pres_task_resume()] */ if (lv == CRIT_LEVEL_C) { gmp_update_time(_global_env, litmus_clock()); - //raw_spin_unlock(&_global_env.lock); } else sup_update_time(state->sup_env, litmus_clock()); - //mc2_update_time(lv, state, litmus_clock()); - /* 9/20/2015 fix - mc2_update_ghost_state(state); - */ + task_arrives(state, tsk); if (lv == CRIT_LEVEL_C){ - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); } /* NOTE: drops state->lock */ - TRACE("mc2_new()\n"); raw_spin_unlock(&state->lock); local_irq_restore(flags); @@ -1639,7 +1350,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, mc2_update_timer_and_unlock(state); } else { if (lv == CRIT_LEVEL_C){ - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); } raw_spin_unlock(&state->lock); @@ -1647,14 +1357,7 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, } release = res->next_replenishment; - //local_irq_restore(flags); - if (!release) { - /*TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n"); - release = res->next_replenishment; - TRACE_TASK(tsk, "mc2_task_new() next_release SET! = %llu\n", release); - release_at(tsk, release); - */ BUG(); } else @@ -1670,22 +1373,17 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) struct reservation *res = NULL, *next; struct sup_reservation_environment *sup_env; int found = 0; - //enum crit_level lv = get_task_crit_level(current); unsigned long flags; int i; if (cpu == -1) { struct next_timer_event *event, *e_next; local_irq_save(flags); - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); - /* if the reservation is global reservation */ - //state = local_cpu_state(); - //delete reservation id in all modes + /* if the reservation is global reservation, + * delete reservation id in all modes */ for(i = 0; i < NR_MODES; i++) { - //raw_spin_lock(&state->lock); - list_for_each_entry_safe(res, next, &_global_env_modes[i].depleted_reservations, list) { if (res->id == reservation_id) { list_del(&res->list); @@ -1715,17 +1413,14 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) } } - //raw_spin_unlock(&state->lock); list_for_each_entry_safe(event, e_next, &_global_env_modes[i].next_events, list) { if (event->id == reservation_id) { list_del(&event->list); - TRACE("EVENT id %d deleted\n", event->id); kfree(event); } } } - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); local_irq_restore(flags); } else { @@ -1734,8 +1429,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) for (i = 0; i < NR_MODES; i++){ local_irq_save(flags); raw_spin_lock(&state->lock); - - // res = sup_find_by_id(state->sup_env, reservation_id); sup_env = &(state->sup_env_modes[i]); list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { if (res->id == reservation_id) { @@ -1750,7 +1443,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) kfree(res); found = 1; ret = 0; - TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); } } if (!found) { @@ -1766,7 +1458,6 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) kfree(res); found = 1; ret = 0; - TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); } } } @@ -1783,7 +1474,6 @@ static long mc2_reservation_destroy(unsigned int 
reservation_id, int cpu) kfree(res); found = 1; ret = 0; - TRACE_CUR("FOUND id %d mode %d\n",res->id, res->mode); } } } @@ -1805,9 +1495,7 @@ static void mc2_task_exit(struct task_struct *tsk) struct mc2_task_state* tinfo = get_mc2_state(tsk); struct mc2_cpu_state *state; enum crit_level lv = tinfo->mc2_param.crit; - //struct crit_entry* ce; - int cpu; - int i; + int cpu, i; local_irq_save(flags); if (tinfo->cpu != -1) @@ -1819,10 +1507,6 @@ static void mc2_task_exit(struct task_struct *tsk) if (state->scheduled == tsk) state->scheduled = NULL; - - //ce = &state->crit_entries[lv]; - //if (ce->running == tsk) - // ce->running = NULL; /* remove from queues */ if (is_running(tsk)) { @@ -1832,37 +1516,24 @@ static void mc2_task_exit(struct task_struct *tsk) /* update both global and partitioned */ if (lv < CRIT_LEVEL_C) { sup_update_time(state->sup_env, litmus_clock()); -/* TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n");raw_spin_lock(&global_lock); - gmp_update_time(_global_env, litmus_clock()); - TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n");raw_spin_unlock(&global_lock); -*/ } + } else if (lv == CRIT_LEVEL_C) { - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); gmp_update_time(_global_env, litmus_clock()); - //raw_spin_unlock(&_global_env.lock); } - /* 9/20/2015 fix - mc2_update_ghost_state(state); - */ task_departs(tsk, 0); if (lv == CRIT_LEVEL_C){ - //TRACE_CUR("LOCK_MSG: GLOBAL UNLOCK\n"); raw_spin_unlock(&global_lock); } - /* NOTE: drops state->lock */ - TRACE("mc2_exit()\n"); - atomic_dec(&num_sync_released); + /* NOTE: drops state->lock */ mc2_update_timer_and_unlock(state); } else { raw_spin_unlock(&state->lock); - } if (lv == CRIT_LEVEL_C) { - //raw_spin_lock(&global_lock); raw_spin_lock(&mode_lock); for(i = 1; i < NR_MODES; i++){ if ( !(tsk_mc2_data(tsk)->mode_mask & (1<scheduled == tsk) state->scheduled = NULL; - - //ce = &state->crit_entries[lv]; - //if (ce->running == tsk) - // ce->running = NULL; - + raw_spin_unlock(&state->lock); } } local_irq_restore(flags); - if (is_mode_poll_task(tsk)){// && (tinfo->cpu == 0)) { - //cpu_0_spin_flag = !cpu_0_spin_flag; // release other cpu before exit. 
- //cpu_0_task_exist = false; + if (is_mode_poll_task(tsk)) { mode_poll_exited = true; } @@ -1911,7 +1575,6 @@ static long create_polling_reservation( struct reservation_config *config) { struct mc2_cpu_state *state = NULL; - //struct reservation* res = NULL; struct polling_reservation *pres; unsigned long flags; int use_edf = config->priority == LITMUS_NO_PRIORITY; @@ -1955,7 +1618,7 @@ static long create_polling_reservation( TRACE("CREATE_POLLING_RESERVATION id %d mode %d\n", config->id, config->mode); if (config->cpu != -1) { int i, is_exist = 0; - //raw_spin_lock_irqsave(&_global_env.lock, flags); + state = cpu_state_for(config->cpu); raw_spin_lock_irqsave(&state->lock, flags); @@ -2007,10 +1670,6 @@ static long create_polling_reservation( pres->res.blocked_by_ghost = 0; pres->res.is_ghost = NO_CPU; pres->res.mode = config->mode; - /*if (config->priority == LITMUS_MAX_PRIORITY) { - level_a_priorities[config->cpu]++; - pres->res.priority = level_a_priorities[config->cpu]; - }*/ if (!use_edf) pres->res.priority = config->priority; sup_add_new_reservation(&(state->sup_env_modes[config->mode]), &pres->res); @@ -2021,7 +1680,6 @@ static long create_polling_reservation( } raw_spin_unlock_irqrestore(&state->lock, flags); - //raw_spin_unlock_irqrestore(&_global_env.lock, flags); } else { int i, is_exist = 0; @@ -2062,7 +1720,6 @@ static long create_polling_reservation( pres_0->res.tsk = current; } - //force id's unique within desired mode if (gmp_find_by_id(&(_global_env_modes[config->mode]), config->id)){ resExist = true; @@ -2293,18 +1950,16 @@ static void mc2_setup_domain_proc(void) static long mc2_activate_plugin(void) { - int cpu;//, lv; + int cpu; struct mc2_cpu_state *state; struct cpu_entry *ce; int i; - //unsigned long *cpu_counter; for(i = 0; i < NR_MODES; i++){ gmp_init(&(_global_env_modes[i])); } _global_env = &_global_env_modes[0]; - //raw_spin_lock_init(&_lowest_prio_cpu.lock); raw_spin_lock_init(&mode_lock); raw_spin_lock_init(&global_lock); @@ -2314,7 +1969,6 @@ static long mc2_activate_plugin(void) TRACE("Initializing CPU%d...\n", cpu); resched_cpu[cpu] = 0; - //level_a_priorities[cpu] = 0; this_cpu_write(mode_counter, 0); state = cpu_state_for(cpu); @@ -2330,11 +1984,7 @@ static long mc2_activate_plugin(void) printk(KERN_ALERT "CPU%d state->lock %p\n", cpu, &state->lock); state->cpu = cpu; state->scheduled = NULL; - //for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { - // struct crit_entry *cr_entry = &state->crit_entries[lv]; - // cr_entry->level = lv; - // cr_entry->running = NULL; - //} + for(i = 0; i < NR_MODES; i++){ sup_init(&(state->sup_env_modes[i])); } @@ -2342,7 +1992,6 @@ static long mc2_activate_plugin(void) hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); state->timer.function = on_scheduling_timer; - //state->spin_flag = false; this_cpu_write(mode_counter, 0); } @@ -2357,8 +2006,6 @@ static long mc2_activate_plugin(void) mode_sizes[i] = 0; } res_reported = 0; - //cpu_0_spin_flag = false; - //cpu_0_task_exist = false; return 0; } @@ -2436,7 +2083,6 @@ static long mc2_deactivate_plugin(void) raw_spin_unlock(&state->lock); } - //TRACE_CUR("LOCK_MSG: GLOBAL LOCK\n"); raw_spin_lock(&global_lock); for(i = 0; i < NR_MODES; i++){ _global_env = &_global_env_modes[i]; -- cgit v1.2.2