author    Namhoon Kim <namhoonk@cs.unc.edu>    2017-10-09 11:51:16 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>    2017-10-09 11:51:16 -0400
commit    bda9461f8f90ed9ae69a57a797daed44340c8484 (patch)
tree      2ac71d76e33dafba85c39d503ee6d0ee5b89188b /litmus
parent    b0905375748cbc0fde1dfd7578bd4ff7ac47913b (diff)

rtas18 done
Diffstat (limited to 'litmus')
-rw-r--r--   litmus/Makefile        3
-rw-r--r--   litmus/cache_proc.c    4
-rw-r--r--   litmus/fakedev0.c    123
-rw-r--r--   litmus/litmus.c       44
-rw-r--r--   litmus/page_dev.c     49
-rw-r--r--   litmus/sched_mc2.c   198
-rw-r--r--   litmus/uncachedev.c    4
7 files changed, 312 insertions, 113 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 29ae4b04f046..ccd532d81b78 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -30,7 +30,8 @@ obj-y = sched_plugin.o litmus.o \
 	  color_shm.o \
 	  replicate_lib.o \
 	  cache_proc.o \
-	  page_dev.o
+	  page_dev.o \
+	  fakedev0.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 2d90454ad5cc..102feaf5c9e6 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -10,7 +10,10 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 
+#include <litmus/rt_param.h>
+#include <litmus/litmus.h>
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 #include <litmus/cache_proc.h>
@@ -19,7 +22,6 @@
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/cacheflush.h>
 
-
 #define UNLOCK_ALL	0x00000000 /* allocation in any way */
 #define LOCK_ALL	(~UNLOCK_ALL)
 #define MAX_NR_WAYS	16
diff --git a/litmus/fakedev0.c b/litmus/fakedev0.c
new file mode 100644
index 000000000000..0b8909e77777
--- /dev/null
+++ b/litmus/fakedev0.c
@@ -0,0 +1,123 @@
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <asm/page.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+
+#include <litmus/litmus.h>
+
+/* device for allocating pages not cached by the CPU */
+
+#define FAKEDEV0_NAME	"litmus/fakedev0"
+
+#define NUM_BANKS	8
+#define BANK_MASK	0x38000000
+#define BANK_SHIFT	27
+
+#define NUM_COLORS	16
+#define CACHE_MASK	0x0000f000
+#define CACHE_SHIFT	12
+
+/* Decoding page color, 0~15 */
+static inline unsigned int page_color(struct page *page)
+{
+	return ((page_to_phys(page) & CACHE_MASK) >> CACHE_SHIFT);
+}
+
+/* Decoding page bank number, 0~7 */
+static inline unsigned int page_bank(struct page *page)
+{
+	return ((page_to_phys(page) & BANK_MASK) >> BANK_SHIFT);
+}
+
+void litmus_fakedev0_vm_open(struct vm_area_struct *vma)
+{
+}
+
+void litmus_fakedev0_vm_close(struct vm_area_struct *vma)
+{
+}
+
+int litmus_fakedev0_vm_fault(struct vm_area_struct* vma,
+			     struct vm_fault* vmf)
+{
+	/* modeled after SG DMA video4linux, but without DMA. */
+	/* (see drivers/media/video/videobuf-dma-sg.c) */
+	struct page *page;
+
+	page = alloc_page(GFP_USER|GFP_COLOR|GFP_CPU1);
+	if (!page)
+		return VM_FAULT_OOM;
+
+	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+	vmf->page = page;
+
+	return 0;
+}
+
+static struct vm_operations_struct litmus_fakedev0_vm_ops = {
+	.open = litmus_fakedev0_vm_open,
+	.close = litmus_fakedev0_vm_close,
+	.fault = litmus_fakedev0_vm_fault,
+};
+
+static int litmus_fakedev0_mmap(struct file* filp, struct vm_area_struct* vma)
+{
+	/* first make sure mapper knows what he's doing */
+
+	/* you can only map the "first" page */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	/* you can't share it with anyone */
+	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+		return -EINVAL;
+
+	/* cannot be expanded, and is not a "normal" page. */
+	vma->vm_flags |= (VM_DONTEXPAND|VM_DONOTMOVE);
+
+	/* noncached pages are not explicitly locked in memory (for now). */
+	//vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	vma->vm_ops = &litmus_fakedev0_vm_ops;
+
+	return 0;
+}
+
+static struct file_operations litmus_fakedev0_fops = {
+	.owner = THIS_MODULE,
+	.mmap  = litmus_fakedev0_mmap,
+};
+
+static struct miscdevice litmus_fakedev0_dev = {
+	.name  = FAKEDEV0_NAME,
+	.minor = MISC_DYNAMIC_MINOR,
+	.fops  = &litmus_fakedev0_fops,
+	/* pages are not locked, so there is no reason why
+	   anyone cannot allocate an fakedev0 pages */
+	.mode  = (S_IRUGO | S_IWUGO),
+};
+
+static int __init init_litmus_fakedev0_dev(void)
+{
+	int err;
+
+	printk("Initializing LITMUS^RT fakedev0 device.\n");
+	err = misc_register(&litmus_fakedev0_dev);
+	if (err)
+		printk("Could not allocate %s device (%d).\n", FAKEDEV0_NAME, err);
+	return err;
+}
+
+static void __exit exit_litmus_fakedev0_dev(void)
+{
+	misc_deregister(&litmus_fakedev0_dev);
+}
+
+module_init(init_litmus_fakedev0_dev);
+module_exit(exit_litmus_fakedev0_dev);
+
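A note on exercising the new device (an illustrative aside, not part of the commit): a miscdevice registered with the name "litmus/fakedev0" normally shows up as /dev/litmus/fakedev0. The following minimal, hypothetical user-space sketch assumes that path; the single-page MAP_PRIVATE mapping at offset 0 follows from the restrictions litmus_fakedev0_mmap() enforces above.

/* Hypothetical user-space test for the fakedev0 device.
 * Assumes the device node appears as /dev/litmus/fakedev0. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/litmus/fakedev0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The driver rejects shared mappings and non-zero offsets,
	 * so map one private page at offset 0. */
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Touching the page triggers litmus_fakedev0_vm_fault(), which
	 * allocates the backing page with GFP_COLOR|GFP_CPU1. */
	memset(buf, 0, 4096);

	munmap(buf, 4096);
	close(fd);
	return 0;
}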
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 1105408e405a..ec9379979e1a 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -28,6 +28,7 @@
 #include <litmus/cache_proc.h>
 #include <litmus/mc2_common.h>
 #include <litmus/replicate_lib.h>
+#include <litmus/page_dev.h>
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 #include <litmus/affinity.h>
@@ -350,8 +351,15 @@ extern struct page *new_alloc_page(struct page *page, unsigned long node, int **
 static struct page *alloc_colored_page(struct page *page, unsigned long node, int **result)
 {
 	struct page *newpage;
+	gfp_t gfp_mask;
 
-	newpage = alloc_pages(GFP_HIGHUSER_MOVABLE|GFP_COLOR, 0);
+	gfp_mask = GFP_HIGHUSER_MOVABLE;
+	if (node != 8)
+		gfp_mask |= GFP_COLOR;
+	if (node == 9)
+		gfp_mask |= GFP_CPU1;
+
+	newpage = alloc_pages(gfp_mask, 0);
 
 	return newpage;
 }
@@ -378,6 +386,7 @@ asmlinkage long sys_set_page_color(int cpu)
 
 	LIST_HEAD(pagelist);
 	LIST_HEAD(task_shared_pagelist);
+	LIST_HEAD(fakedev_pagelist);
 
 	migrate_prep();
 
@@ -396,7 +405,11 @@ asmlinkage long sys_set_page_color(int cpu)
 		unsigned int num_pages = 0, i;
 		struct page *old_page = NULL;
 		int pages_in_vma = 0;
+		int fakedev_pages = 0;
 
+		if (vma_itr->vm_flags & VM_DONOTMOVE) {
+			fakedev_pages = 1;
+		}
 		num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
 		/* Traverse all pages in vm_area_struct */
 		for (i = 0; i < num_pages; i++) {
@@ -412,7 +425,13 @@ asmlinkage long sys_set_page_color(int cpu)
 				put_page(old_page);
 				continue;
 			}
-
+			/*
+			if (PageDirty(old_page)) {
+				TRACE("Dirty Page!\n");
+				put_page(old_page);
+				continue;
+			}
+			*/
 			TRACE_TASK(current, "addr: %08x, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
 			pages_in_vma++;
 
@@ -460,12 +479,18 @@ asmlinkage long sys_set_page_color(int cpu)
 			else {
 				ret = isolate_lru_page(old_page);
 				if (!ret) {
-					list_add_tail(&old_page->lru, &pagelist);
+					if (fakedev_pages == 0)
+						list_add_tail(&old_page->lru, &pagelist);
+					else
+						list_add_tail(&old_page->lru, &fakedev_pagelist);
+
 					inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
 					nr_pages++;
-				} else {
+				} else if (!is_in_correct_bank(old_page, cpu)) {
 					TRACE_TASK(current, "isolate_lru_page for a private page failed\n");
 					nr_failed++;
+				} else {
+					TRACE_TASK(current, "page is already in the correct bank\n");
 				}
 				put_page(old_page);
 			}
@@ -491,6 +516,16 @@ asmlinkage long sys_set_page_color(int cpu)
 		}
 	}
 
+	/* Migrate fakedev pages */
+	if (!list_empty(&fakedev_pagelist)) {
+		ret = migrate_pages(&fakedev_pagelist, alloc_colored_page, NULL, 9, MIGRATE_SYNC, MR_SYSCALL);
+		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
+		nr_not_migrated = ret;
+		if (ret) {
+			putback_movable_pages(&fakedev_pagelist);
+		}
+	}
+
 	/* Replicate shared pages */
 	if (!list_empty(&task_shared_pagelist)) {
 		ret = replicate_pages(&task_shared_pagelist, alloc_colored_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
@@ -569,6 +604,7 @@ asmlinkage long sys_test_call(unsigned int param)
 		}
 
 		TRACE_TASK(current, "addr: %08x, phy: %08x, color: %d, bank: %d, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_phys(old_page), page_color(old_page), page_bank(old_page), page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping));
+		printk(KERN_INFO "addr: %08x, phy: %08x, color: %d, bank: %d, pfn: %05lx, _mapcount: %d, _count: %d flags: %s%s%s mapping: %p\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_phys(old_page), page_color(old_page), page_bank(old_page), page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-", &(old_page->mapping));
 		put_page(old_page);
 	}
 	vma_itr = vma_itr->vm_next;
diff --git a/litmus/page_dev.c b/litmus/page_dev.c
index ea5d5f5cb36d..cde8e52d9111 100644
--- a/litmus/page_dev.c
+++ b/litmus/page_dev.c
@@ -44,15 +44,6 @@ unsigned int dram_partition[NR_DRAM_PARTITIONS] = {
 	0x00000080,
 	0x0000000f,
 };
-/*
-unsigned int dram_partition[NR_DRAM_PARTITIONS] = {
-	0x00000001,
-	0x00000002,
-	0x00000004,
-	0x00000008,
-	0x000000f0,
-};
-*/
 
 /* Decoding page color, 0~15 */
 static inline unsigned int page_color(struct page *page)
@@ -79,6 +70,35 @@ int bank_to_partition(unsigned int bank)
 	return -EINVAL;
 }
 
+int get_area_index(int cpu)
+{
+	int index = 0x10, area_index = 0;
+
+	while (index < 0x100) {
+		if (dram_partition[cpu]&index)
+			break;
+		index = index << 1;
+		area_index++;
+	}
+
+	return area_index;
+}
+
+/* use this function ONLY for Lv.A/B pages */
+int is_in_correct_bank(struct page* page, int cpu)
+{
+	int bank;
+	unsigned int page_bank_bit;
+
+	bank = page_bank(page);
+	page_bank_bit = 1 << bank;
+
+	if (cpu == -1 || cpu == NR_CPUS)
+		return (page_bank_bit & dram_partition[NR_CPUS]);
+	else
+		return (page_bank_bit & dram_partition[cpu]);
+}
+
 int is_in_llc_partition(struct page* page, int cpu)
 {
 	int color;
@@ -87,8 +107,8 @@ int is_in_llc_partition(struct page* page, int cpu)
 	color = page_color(page);
 	page_color_bit = 1 << color;
 
-	if (cpu == NR_CPUS)
-		return (page_color_bit&llc_partition[cpu*2]);
+	if (cpu == -1 || cpu == NR_CPUS)
+		return (page_color_bit & llc_partition[8]);
 	else
 		return (page_color_bit & (llc_partition[cpu*2] | llc_partition[cpu*2+1]));
 }
@@ -117,12 +137,14 @@ int slabtest_handler(struct ctl_table *table, int write, void __user *buffer, si
 	int idx;
 	int n_data = buf_size/sizeof(int);
 
-	testbuffer = kmalloc(sizeof(int*)*buf_num, GFP_KERNEL|GFP_COLOR);
+	printk(KERN_INFO "-------SLABTEST on CPU%d with %d buffer size\n", raw_smp_processor_id(), buf_size);
+
+	testbuffer = kmalloc(sizeof(int*)*buf_num, GFP_KERNEL|GFP_COLOR|GFP_CPU1);
 
 	for (idx=0; idx<buf_num; idx++)
 	{
 		printk(KERN_INFO "kmalloc size %d, n_data %d\n", buf_size, n_data);
-		testbuffer[idx] = kmalloc(buf_size, GFP_KERNEL|GFP_COLOR);
+		testbuffer[idx] = kmalloc(buf_size, GFP_KERNEL|GFP_COLOR|GFP_CPU1);
 
 		if (!testbuffer[idx]) {
 			printk(KERN_ERR "kmalloc failed size = %d\n", buf_size);
@@ -151,6 +173,7 @@ int slabtest_handler(struct ctl_table *table, int write, void __user *buffer, si
 			kfree(testbuffer[idx]);
 
 		kfree(testbuffer);
+		printk(KERN_INFO "-------SLABTEST FINISHED on CPU%d\n", raw_smp_processor_id());
 	}
 out:
 	mutex_unlock(&dev_mutex);
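For readers following the bank-partition logic (an illustrative aside, not part of the commit): is_in_correct_bank() reduces to a bitmask test, where the page's DRAM bank index (physical-address bits 27..29 under BANK_MASK/BANK_SHIFT in this tree) selects a bit that is checked against the owning CPU's dram_partition[] entry. A stand-alone sketch of that check, using a made-up physical address and partition mask:

/* Illustrative only: mirrors the bank test added in page_dev.c.
 * The physical address and partition mask below are made up. */
#include <stdio.h>

#define BANK_MASK  0x38000000UL
#define BANK_SHIFT 27

static unsigned long page_bank_of(unsigned long phys)
{
	return (phys & BANK_MASK) >> BANK_SHIFT;
}

static int bank_in_partition(unsigned long phys, unsigned int partition_mask)
{
	return ((1U << page_bank_of(phys)) & partition_mask) != 0;
}

int main(void)
{
	unsigned long phys = 0x18003000UL;   /* bank 3 under this layout */
	unsigned int mask  = 0x0000000f;     /* a partition owning banks 0-3 */

	printf("bank=%lu allowed=%d\n", page_bank_of(phys),
	       bank_in_partition(phys, mask));
	return 0;
}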
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b0300abf18e4..b4b159be77d2 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -160,7 +160,7 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 	struct mc2_task_state* tinfo = get_mc2_state(tsk);
 	struct reservation* res;
 	struct reservation_client *client;
-	enum crit_level lv = get_task_crit_level(tsk);
+	//enum crit_level lv = get_task_crit_level(tsk);
 
 	res = tinfo->res_info.client.reservation;
 	client = &tinfo->res_info.client;
@@ -169,14 +169,15 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
 
 	res->ops->client_arrives(res, client);
 	TRACE_TASK(tsk, "Client arrives at %llu\n", litmus_clock());
-
+/*
 	if (lv != NUM_CRIT_LEVELS) {
 		struct crit_entry *ce;
 		ce = &state->crit_entries[lv];
-		/* if the currrent task is a ghost job, remove it */
+		// if the currrent task is a ghost job, remove it
 		if (ce->running == tsk)
 			ce->running = NULL;
 	}
+*/
 }
 
 /* get_lowest_prio_cpu - return the lowest priority cpu
@@ -190,6 +191,9 @@ static int get_lowest_prio_cpu(lt_t priority)
 	int cpu, ret = NO_CPU;
 	lt_t latest_deadline = 0;
 
+	if (priority == LITMUS_NO_PRIORITY)
+		return ret;
+
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
 		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
@@ -202,10 +206,12 @@ static int get_lowest_prio_cpu(lt_t priority)
 		ce = &_lowest_prio_cpu.cpu_entries[cpu];
 		/* If a CPU will call schedule() in the near future, we don't
 		   return that CPU. */
+		/*
 		TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
 		      ce->scheduled ? (ce->scheduled)->comm : "null",
 		      ce->scheduled ? (ce->scheduled)->pid : 0,
 		      ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
+		*/
 		if (!ce->will_schedule) {
 			if (!ce->scheduled) {
 				/* Idle cpu, return this. */
@@ -242,6 +248,9 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	lt_t update, now;
 	struct next_timer_event *event, *next;
 	int reschedule[NR_CPUS];
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	for (cpus = 0; cpus<NR_CPUS; cpus++)
 		reschedule[cpus] = 0;
@@ -268,15 +277,12 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			if (event->timer_armed_on == NO_CPU) {
 				struct reservation *res = gmp_find_by_id(&_global_env, event->id);
 				int cpu = get_lowest_prio_cpu(res?res->priority:0);
-				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
+				//TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
 				list_del(&event->list);
 				kfree(event);
 				if (cpu != NO_CPU) {
 					_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-					if (cpu == local_cpu_state()->cpu)
-						litmus_reschedule_local();
-					else
-						reschedule[cpu] = 1;
+					reschedule[cpu] = 1;
 				}
 			}
 		} else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) {
@@ -289,6 +295,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 	/* Must drop state lock before calling into hrtimer_start(), which
 	 * may raise a softirq, which in turn may wake ksoftirqd. */
 	raw_spin_unlock(&_global_env.lock);
+	local_irq_restore(flags);
 	raw_spin_unlock(&state->lock);
 
 	if (update <= now || reschedule[state->cpu]) {
@@ -325,9 +332,7 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 		 */
 		TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
 		      "active:%d, set:%llu)\n",
-		      state->cpu,
-		      update,
-		      hrtimer_active(&state->timer),
+		      state->cpu, update, hrtimer_active(&state->timer),
 		      ktime_to_ns(hrtimer_get_expires(&state->timer)));
 		if (!hrtimer_active(&state->timer) ||
 		    ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
@@ -336,17 +341,19 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 			      state->cpu,
 			      hrtimer_active(&state->timer),
 			      ktime_to_ns(hrtimer_get_expires(&state->timer)));
-			raw_spin_lock(&state->lock);
-			preempt_if_preemptable(state->scheduled, state->cpu);
-			raw_spin_unlock(&state->lock);
-			reschedule[state->cpu] = 0;
+			//raw_spin_lock(&state->lock);
+			//preempt_if_preemptable(state->scheduled, state->cpu);
+			//raw_spin_unlock(&state->lock);
+			//reschedule[state->cpu] = 0;
 		}
 	}
+
 	for (cpus = 0; cpus<NR_CPUS; cpus++) {
 		if (reschedule[cpus]) {
 			litmus_reschedule(cpus);
 		}
 	}
+
 }
 
 /* update_cpu_prio - Update cpu's priority
@@ -428,15 +435,13 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 			int cpu = get_lowest_prio_cpu(0);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
 				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
-				TRACE("LOWEST CPU = P%d\n", cpu);
 				if (cpu == state->cpu && update > now)
-					litmus_reschedule_local();
+					; //litmus_reschedule_local();
 				else
 					reschedule[cpu] = 1;
 			}
 		}
 		raw_spin_unlock(&_global_env.lock);
-
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 
 		TS_ISR_END;
@@ -447,7 +452,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 		}
 	}
 
-
 	return restart;
 }
 
@@ -470,7 +474,7 @@ static long mc2_complete_job(void)
 	unsigned long flags;
 	enum crit_level lv;
 
-	preempt_disable();
+	//preempt_disable();
 	local_irq_save(flags);
 
 	tinfo = get_mc2_state(current);
@@ -504,7 +508,7 @@ static long mc2_complete_job(void)
 
 		raw_spin_unlock(&state->lock);
 		local_irq_restore(flags);
-		preempt_enable();
+		//preempt_enable();
 	}
 
 	sched_trace_task_completion(current, 0);
@@ -568,7 +572,6 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 {
 	struct reservation *res, *next;
 	struct task_struct *tsk = NULL;
-
 	enum crit_level lv;
 	lt_t time_slice;
 
@@ -578,31 +581,20 @@ struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
 		tsk = res->ops->dispatch_client(res, &time_slice);
 		if (likely(tsk)) {
 			lv = get_task_crit_level(tsk);
-			if (lv == NUM_CRIT_LEVELS) {
+			if (lv != CRIT_LEVEL_C)
+				BUG();
 #if BUDGET_ENFORCEMENT_AT_C
 			gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
-#endif
-				res->event_added = 1;
-				res->blocked_by_ghost = 0;
-				res->is_ghost = NO_CPU;
-				res->scheduled_on = state->cpu;
-				return tsk;
-			} else if (lv == CRIT_LEVEL_C) {
-#if BUDGET_ENFORCEMENT_AT_C
-				gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
 #endif
 				res->event_added = 1;
 				res->blocked_by_ghost = 0;
 				res->is_ghost = NO_CPU;
 				res->scheduled_on = state->cpu;
 				return tsk;
-			} else {
-				BUG();
-			}
 		}
 	}
 }
 
 	return NULL;
 }
 
@@ -621,7 +613,7 @@ static inline void post_schedule(struct task_struct *next, int cpu)
 {
 	enum crit_level lev;
 	if ((!next) || !is_realtime(next)) {
-		do_partition(NUM_CRIT_LEVELS, -1);
+		//do_partition(NUM_CRIT_LEVELS, -1);
 		return;
 	}
 
@@ -646,15 +638,15 @@
  */
 static struct task_struct* mc2_schedule(struct task_struct * prev)
 {
-	int np, blocks, exists, preempt, to_schedule;
+	int np, blocks, exists, cpu; //preempt, to_schedule;
 	/* next == NULL means "schedule background work". */
-	lt_t now;
+	lt_t now = litmus_clock();
 	struct mc2_cpu_state *state = local_cpu_state();
 
-	pre_schedule(prev, state->cpu);
-
 	raw_spin_lock(&state->lock);
 
+	pre_schedule(prev, state->cpu);
+
 	if (state->scheduled && state->scheduled != prev)
 		printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
 	if (state->scheduled && !is_realtime(prev))
@@ -664,16 +656,9 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	exists = state->scheduled != NULL;
 	blocks = exists && !is_current_running();
 	np = exists && is_np(state->scheduled);
-
-	raw_spin_lock(&_global_env.lock);
-	preempt = resched_cpu[state->cpu];
-	resched_cpu[state->cpu] = 0;
-	raw_spin_unlock(&_global_env.lock);
 
 	/* update time */
 	state->sup_env.will_schedule = true;
-
-	now = litmus_clock();
 	sup_update_time(&state->sup_env, now);
 
 	if (is_realtime(current) && blocks) {
@@ -690,7 +675,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 
 	if (!state->scheduled) {
 		raw_spin_lock(&_global_env.lock);
-		to_schedule = gmp_update_time(&_global_env, now);
+		if (is_realtime(prev))
+			gmp_update_time(&_global_env, now);
 		state->scheduled = mc2_global_dispatch(state);
 		_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
 		update_cpu_prio(state);
@@ -711,18 +697,18 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
+	raw_spin_lock(&state->lock);
 	if (prev != state->scheduled && is_realtime(prev)) {
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
 		struct reservation* res = tinfo->res_info.client.reservation;
-		TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
 		res->scheduled_on = NO_CPU;
-		TRACE_TASK(prev, "descheduled.\n");
+		TRACE_TASK(prev, "descheduled at %llu.\n", litmus_clock());
 		/* if prev is preempted and a global task, find the lowest cpu and reschedule */
 		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
 			int cpu;
 			raw_spin_lock(&_global_env.lock);
-			cpu = get_lowest_prio_cpu(res?res->priority:0);
-			TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
+			cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
+			//TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
 				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
 				resched_cpu[cpu] = 1;
@@ -730,7 +716,8 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 			raw_spin_unlock(&_global_env.lock);
 		}
 	}
 
+/*
 	if (to_schedule != 0) {
 		raw_spin_lock(&_global_env.lock);
 		while (to_schedule--) {
@@ -742,13 +729,15 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 		}
 		raw_spin_unlock(&_global_env.lock);
 	}
+*/
 
+	post_schedule(state->scheduled, state->cpu);
+
+	raw_spin_unlock(&state->lock);
 	if (state->scheduled) {
 		TRACE_TASK(state->scheduled, "scheduled.\n");
 	}
 
-	post_schedule(state->scheduled, state->cpu);
-
 	return state->scheduled;
 }
 
@@ -758,38 +747,40 @@
 static void mc2_task_resume(struct task_struct *tsk)
 {
 	unsigned long flags;
-	struct mc2_task_state* tinfo = get_mc2_state(tsk);
+	struct mc2_task_state* tinfo;
 	struct mc2_cpu_state *state;
 
 	TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
 
-	local_irq_save(flags);
+	preempt_disable();
+	tinfo = get_mc2_state(tsk);
 	if (tinfo->cpu != -1)
 		state = cpu_state_for(tinfo->cpu);
 	else
 		state = local_cpu_state();
+	preempt_enable();
 
 	/* Requeue only if self-suspension was already processed. */
 	if (tinfo->has_departed)
 	{
-		/* We don't want to consider jobs before synchronous releases */
-		if (tsk_rt(tsk)->job_params.job_no > 5) {
-			switch(get_task_crit_level(tsk)) {
-			case CRIT_LEVEL_A:
-				TS_RELEASE_LATENCY_A(get_release(tsk));
-				break;
-			case CRIT_LEVEL_B:
-				TS_RELEASE_LATENCY_B(get_release(tsk));
-				break;
-			case CRIT_LEVEL_C:
-				TS_RELEASE_LATENCY_C(get_release(tsk));
-				break;
-			default:
-				break;
-			}
-		}
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+		switch(get_task_crit_level(tsk)) {
+		case CRIT_LEVEL_A:
+			TS_RELEASE_LATENCY_A(get_release(tsk));
+			break;
+		case CRIT_LEVEL_B:
+			TS_RELEASE_LATENCY_B(get_release(tsk));
+			break;
+		case CRIT_LEVEL_C:
+			TS_RELEASE_LATENCY_C(get_release(tsk));
+			break;
+		default:
+			break;
+		}
+#endif
+
+		raw_spin_lock_irqsave(&state->lock, flags);
 
-		raw_spin_lock(&state->lock);
 		/* Assumption: litmus_clock() is synchronized across cores,
 		 * since we might not actually be executing on tinfo->cpu
 		 * at the moment. */
@@ -805,12 +796,14 @@ static void mc2_task_resume(struct task_struct *tsk)
 
 		/* NOTE: drops state->lock */
 		TRACE_TASK(tsk, "mc2_resume()\n");
+		raw_spin_unlock_irqrestore(&state->lock, flags);
+
+		raw_spin_lock(&state->lock);
 		mc2_update_timer_and_unlock(state);
 	} else {
 		TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
 	}
 
-	local_irq_restore(flags);
 }
 
 
@@ -818,7 +811,7 @@ static void mc2_task_resume(struct task_struct *tsk)
  */
 static long mc2_admit_task(struct task_struct *tsk)
 {
-	long err = -ESRCH;
+	long err = 0;
 	unsigned long flags;
 	struct reservation *res;
 	struct mc2_cpu_state *state;
@@ -831,11 +824,10 @@ static long mc2_admit_task(struct task_struct *tsk)
 
 	if (!mp) {
 		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
-		return err;
+		return -ESRCH;
 	}
 
 	lv = mp->crit;
-	preempt_disable();
 
 	if (lv < CRIT_LEVEL_C) {
 		state = cpu_state_for(task_cpu(tsk));
@@ -858,6 +850,9 @@
 			/* disable LITMUS^RT's per-thread budget enforcement */
 			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 		}
+		else {
+			err = -ESRCH;
+		}
 
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else if (lv == CRIT_LEVEL_C) {
@@ -882,12 +877,13 @@
 			/* disable LITMUS^RT's per-thread budget enforcement */
 			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 		}
+		else {
+			err = -ESRCH;
+		}
 
 		raw_spin_unlock(&_global_env.lock);
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	}
-
-	preempt_enable();
 
 	if (err)
 		kfree(tinfo);
@@ -908,6 +904,8 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	enum crit_level lv = get_task_crit_level(tsk);
 	lt_t release = 0;
 
+	BUG_ON(lv < CRIT_LEVEL_A || lv > CRIT_LEVEL_C);
+
 	TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
 		   litmus_clock(), on_runqueue, is_running);
 
@@ -934,8 +932,7 @@
 	else {
 		res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
 	}
-	release = res->next_replenishment;
-
+
 	if (on_runqueue || is_running) {
 		/* Assumption: litmus_clock() is synchronized across cores
 		 * [see comment in pres_task_resume()] */
@@ -944,22 +941,29 @@
 		}
 		else
 			sup_update_time(&state->sup_env, litmus_clock());
+
 		task_arrives(state, tsk);
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&_global_env.lock);
 		/* NOTE: drops state->lock */
+		raw_spin_unlock(&state->lock);
+		local_irq_restore(flags);
+
 		TRACE("mc2_new()\n");
 
+		raw_spin_lock(&state->lock);
 		mc2_update_timer_and_unlock(state);
 	} else {
 		if (lv == CRIT_LEVEL_C)
 			raw_spin_unlock(&_global_env.lock);
 		raw_spin_unlock(&state->lock);
+		local_irq_restore(flags);
 	}
-	local_irq_restore(flags);
+	release = res->next_replenishment;
 
 	if (!release) {
 		TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
+		BUG();
 	}
 	else
 		TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
@@ -977,7 +981,10 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
 	unsigned long flags;
 
 	if (cpu == -1) {
+		struct next_timer_event *event, *e_next;
+
 		/* if the reservation is global reservation */
+
 		local_irq_save(flags);
 		raw_spin_lock(&_global_env.lock);
 
@@ -1009,6 +1016,13 @@
 				}
 			}
 		}
+		/* delete corresponding events */
+		list_for_each_entry_safe(event, e_next, &_global_env.next_events, list) {
+			if (event->id == reservation_id) {
+				list_del(&event->list);
+				kfree(event);
+			}
+		}
 
 		raw_spin_unlock(&_global_env.lock);
 		local_irq_restore(flags);
@@ -1105,7 +1119,6 @@ static void mc2_task_exit(struct task_struct *tsk)
 		mc2_update_timer_and_unlock(state);
 	} else {
 		raw_spin_unlock(&state->lock);
-
 	}
 
 	if (lv == CRIT_LEVEL_C) {
@@ -1412,7 +1425,7 @@ static long mc2_activate_plugin(void)
 	struct cpu_entry *ce;
 
 	gmp_init(&_global_env);
-	raw_spin_lock_init(&_lowest_prio_cpu.lock);
+	//raw_spin_lock_init(&_lowest_prio_cpu.lock);
 
 	for_each_online_cpu(cpu) {
 		TRACE("Initializing CPU%d...\n", cpu);
@@ -1456,7 +1469,8 @@ static void mc2_finish_switch(struct task_struct *prev)
 	state->scheduled = is_realtime(current) ? current : NULL;
 	if (lv == CRIT_LEVEL_C) {
 		for (cpus = 0; cpus<NR_CPUS; cpus++) {
-			if (resched_cpu[cpus]) {
+			if (resched_cpu[cpus] && state->cpu != cpus) {
+				resched_cpu[cpus] = 0;
 				litmus_reschedule(cpus);
 			}
 		}
diff --git a/litmus/uncachedev.c b/litmus/uncachedev.c
index 06a6a7c17983..86875816c6ef 100644
--- a/litmus/uncachedev.c
+++ b/litmus/uncachedev.c
@@ -28,8 +28,8 @@ int litmus_uncache_vm_fault(struct vm_area_struct* vma,
 	/* modeled after SG DMA video4linux, but without DMA. */
 	/* (see drivers/media/video/videobuf-dma-sg.c) */
 	struct page *page;
 
-	page = alloc_page(GFP_USER);
+	page = alloc_page(GFP_USER|GFP_COLOR);
 	if (!page)
 		return VM_FAULT_OOM;
 