author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-05-05 22:32:30 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-05-05 22:32:30 -0400
commit    10bc618f3663fd34d6ffaf8adcaa369db8668eda (patch)
tree      11cdf9d728568584a23d02f069da0d701d55cd33 /litmus
parent    2f421a06c3663fff3e3f0d0238f6d4651a8cb50d (diff)
Tasks specify colors and pages when allocating pages
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile       1
-rw-r--r--  litmus/color_dev.c   86
-rw-r--r--  litmus/dgl.c          2
-rw-r--r--  litmus/sched_color.c 59
4 files changed, 85 insertions, 63 deletions
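Note: the hunks below read two parallel arrays out of the task's color control page. A minimal sketch of the layout they imply, inferred only from the field accesses in this patch (the capacity constant and the color_t typedef are assumptions, not taken from the LITMUS^RT headers):

/* Sketch of the per-task color control page implied by this patch.
 * colors[i] names a page color and pages[i] the number of pages of
 * that color to allocate; a zero in pages[] terminates the list
 * (color_task_new() below iterates with `for (i = 0; pages[i]; i++)`).
 * COLOR_CTRL_ENTRIES and the color_t typedef are assumptions.
 */
typedef unsigned long color_t;

#define COLOR_CTRL_ENTRIES 128	/* assumed capacity */

struct color_ctrl_page {
	color_t colors[COLOR_CTRL_ENTRIES];	/* which colors to use  */
	color_t pages[COLOR_CTRL_ENTRIES];	/* how many of each one */
};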
diff --git a/litmus/Makefile b/litmus/Makefile
index 1b9b75aa38c9..d490cedbd7bb 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -22,6 +22,7 @@ obj-y = sched_plugin.o litmus.o \
 	rt_server.o \
 	dgl.o \
 	fifo_common.o \
+	rm_common.o \
 	sched_psn_edf.o \
 	sched_gsn_edf.o
 
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index 7ccdaf03740b..d8480d7fd543 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -152,53 +152,65 @@ out:
 static int do_map_colored_pages(struct vm_area_struct *vma)
 {
 	const unsigned long nr_pages = vma_nr_pages(vma);
+	struct color_ctrl_page *color_ctrl = tsk_rt(current)->color_ctrl_page;
 	unsigned long nr_mapped;
-	color_t *cur_color;
-	int err;
+	int i, err = 0;
 
 	TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n",
 		nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-	for (nr_mapped = 0,
-	     cur_color = tsk_rt(current)->color_ctrl_page->colors;
-	     nr_mapped < nr_pages;
-	     nr_mapped++, cur_color++)
-	{
-		const unsigned long this_color = *cur_color;
-		const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
-		struct page *page = get_colored_page(this_color);
-
-		if (!page) {
-			TRACE_CUR(ALLOC_NAME ": Could not get page with "
-				" color %lu.\n", this_color);
-			/* TODO unmap mapped pages */
-			err = -ENOMEM;
-			break;
-		}
+	for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) {
+		const unsigned long color_no = color_ctrl->colors[i];
+		unsigned int page_no = 0;
+
+		for (; page_no < color_ctrl->pages[i]; ++page_no, ++nr_mapped) {
+			const unsigned long addr = vma->vm_start +
+					(nr_mapped << PAGE_SHIFT);
+			struct page *page = get_colored_page(color_no);
+
+			if (!page) {
+				TRACE_CUR(ALLOC_NAME ": Could not get page with"
+					" color %lu.\n", color_no);
+				/* TODO unmap mapped pages */
+				err = -ENOMEM;
+				goto out;
+			}
+
 #ifdef CONFIG_SPARC
-		clear_user_highpage(page, addr);
+			clear_user_highpage(page, addr);
 #endif
-		TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, "
-			"color:%3lu) at 0x%lx (flags:%16lx prot:%16lx) "
-			"PAGE_SHARED:0x%16lx\n",
-			page_to_phys(page),
-			page_to_pfn(page), this_color, addr,
-			vma->vm_flags, pgprot_val(vma->vm_page_prot),
-			PAGE_SHARED);
-		//err = vm_insert_page(vma, addr, page);
-		err = remap_pfn_range(vma, addr, page_to_pfn(page),
-				PAGE_SIZE, PAGE_SHARED);
-		if (err) {
-			TRACE_CUR(ALLOC_NAME ": remap_pfn_range() failed "
-				"(%d) (flags:%lx prot:%lx)\n", err,
-				vma->vm_flags,
-				pgprot_val(vma->vm_page_prot));
-			/* TODO unmap mapped pages */
-			err = -EINVAL;
-			break;
-		}
-		add_page_to_alloced_list(page, vma);
+
+			TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, "
+				"pfn:%8lu, color:%3lu) at 0x%lx (flags:%16lx "
+				"prot:%16lx) PAGE_SHARED:0x%16lx\n",
+				page_to_phys(page),
+				page_to_pfn(page), color_no, addr,
+				vma->vm_flags, pgprot_val(vma->vm_page_prot),
+				PAGE_SHARED);
+
+			err = remap_pfn_range(vma, addr, page_to_pfn(page),
+					PAGE_SIZE, PAGE_SHARED);
+
+			if (err) {
+				TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail "
+					"(%d) (flags:%lx prot:%lx)\n", err,
+					vma->vm_flags,
+					pgprot_val(vma->vm_page_prot));
+				/* TODO unmap mapped pages */
+				err = -EINVAL;
+				goto out;
+			}
+			add_page_to_alloced_list(page, vma);
+		}
+
+		if (!page_no) {
+			TRACE_CUR(ALLOC_NAME ": 0 pages given for color %lu\n",
+				color_no);
+			err = -EINVAL;
+			goto out;
+		}
 	}
+out:
 	return err;
 }
 
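The nested loop above is easier to follow without the mapping machinery. Below is a standalone userspace model of just its accounting, using example control-page contents (2 pages of color 3, then 4 pages of color 7): the outer loop walks colors, the inner loop advances nr_mapped across the vma, and the !page_no guard rejects a zero page count that would otherwise make the outer loop spin forever.

/* Standalone model of the control flow in do_map_colored_pages();
 * the printf stands in for get_colored_page() + remap_pfn_range().
 */
#include <stdio.h>

int main(void)
{
	unsigned long colors[] = { 3, 7 };	/* example values */
	unsigned long pages[]  = { 2, 4 };
	const unsigned long nr_pages = 6;	/* vma length in pages */
	unsigned long nr_mapped = 0;

	for (int i = 0; nr_mapped < nr_pages; i++) {
		unsigned int page_no = 0;

		/* map pages[i] consecutive vma pages with color colors[i] */
		for (; page_no < pages[i]; ++page_no, ++nr_mapped)
			printf("vma page %lu <- one page of color %lu\n",
			       nr_mapped, colors[i]);

		if (!page_no) {	/* zero count: fail like the kernel code */
			fprintf(stderr, "0 pages given for color %lu\n",
				colors[i]);
			return 1;
		}
	}
	return 0;
}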
diff --git a/litmus/dgl.c b/litmus/dgl.c
index af710e72b5ef..6c1267839123 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -129,6 +129,8 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq,
 	mask_idx(resource, &word, &bit);
 	__set_bit(bit, &greq->requested[word]);
 
+	TRACE("0x%p requesting %d of %d\n", greq, replicas, resource);
+
 	req = &greq->requests[resource];
 	req->greq = greq;
 	INIT_LIST_HEAD(&req->list);
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index b0a92caeae91..f6115e552cf8 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -9,7 +9,7 @@
 #include <litmus/jobs.h>
 #include <litmus/preempt.h>
 #include <litmus/sched_plugin.h>
-#include <litmus/edf_common.h>
+#include <litmus/rm_common.h>
 #include <litmus/sched_trace.h>
 #include <litmus/color.h>
 #include <litmus/fifo_common.h>
@@ -32,13 +32,13 @@ struct fifo_server {
 
 /**
  * @server      Common server functionality.
- * @edf_domain  PEDF domain.
+ * @rm_domain   PRM domain.
  * @scheduled   Task physically running on CPU.
  * @fifo_server Server partitioned to this CPU.
  */
 struct cpu_entry {
 	struct rt_server	server;
-	rt_domain_t		edf_domain;
+	rt_domain_t		rm_domain;
 	struct task_struct*	scheduled;
 	struct fifo_server	fifo_server;
 };
@@ -55,10 +55,10 @@ static raw_spinlock_t dgl_lock;
 #define remote_entry(cpu)	(&per_cpu(color_cpus, cpu))
 #define task_entry(task)	remote_entry(get_partition(task))
 #define task_fserver(task)	(&task_entry(task)->fifo_server.server)
-#define entry_lock(entry)	(&entry->edf_domain.ready_lock)
+#define entry_lock(entry)	(&entry->rm_domain.ready_lock)
 
 #define has_resources(t, c)	(tsk_rt(t)->req == group_lock.acquired[c])
-#define task_dom(entry, task)	(is_be(task) ? &fifo_domain : &entry->edf_domain)
+#define task_dom(entry, task)	(is_be(task) ? &fifo_domain : &entry->rm_domain)
 #define task_lock(entry, task)	(is_be(task) ? &fifo_lock : entry_lock(entry))
 
 /*
64/* 64/*
@@ -283,9 +283,9 @@ static struct task_struct* schedule_server(struct rt_server *server)
283} 283}
284 284
285/* 285/*
286 * Dumb requeue for PEDF (CPU) servers. 286 * Dumb requeue for PRM (CPU) servers.
287 */ 287 */
288static void edf_requeue(struct rt_server *server, struct task_struct *t) 288static void rm_requeue(struct rt_server *server, struct task_struct *t)
289{ 289{
290 BUG_ON(is_be(t)); 290 BUG_ON(is_be(t));
291 requeue(server->domain, t); 291 requeue(server->domain, t);
@@ -346,7 +346,9 @@ static void fifo_update(struct rt_server *server)
 	}
 
 	/* Calculate next task */
+	raw_spin_lock(&fifo_lock);
 	schedule_server(&fserver->server);
+	raw_spin_unlock(&fifo_lock);
 
 	/* Reserve needed resources */
 	raw_spin_lock(&dgl_lock);
@@ -356,22 +358,22 @@ static void fifo_update(struct rt_server *server)
356} 358}
357 359
358/* 360/*
359 * Triggers preemption on edf-scheduled "linked" field only. 361 * Triggers preemption on rm-scheduled "linked" field only.
360 */ 362 */
361static void color_edf_release(rt_domain_t *edf, struct bheap *tasks) 363static void color_rm_release(rt_domain_t *rm, struct bheap *tasks)
362{ 364{
363 unsigned long flags; 365 unsigned long flags;
364 struct cpu_entry *entry; 366 struct cpu_entry *entry;
365 367
366 TRACE_TASK(bheap2task(bheap_peek(edf->order, tasks)), 368 TRACE_TASK(bheap2task(bheap_peek(rm->order, tasks)),
367 "Released set of EDF tasks\n"); 369 "Released set of RM tasks\n");
368 370
369 entry = container_of(edf, struct cpu_entry, edf_domain); 371 entry = container_of(rm, struct cpu_entry, rm_domain);
370 raw_spin_lock_irqsave(entry_lock(entry), flags); 372 raw_spin_lock_irqsave(entry_lock(entry), flags);
371 373
372 __merge_ready(edf, tasks); 374 __merge_ready(rm, tasks);
373 375
374 if (edf_preemption_needed(edf, entry->server.linked) && 376 if (rm_preemption_needed(rm, entry->server.linked) &&
375 (!entry->server.linked || !is_kernel_np(entry->server.linked))) { 377 (!entry->server.linked || !is_kernel_np(entry->server.linked))) {
376 litmus_reschedule(entry->server.cpu); 378 litmus_reschedule(entry->server.cpu);
377 } 379 }
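rm_common.c itself is not part of this diff, so the predicate wired in above can only be shown as a hedged sketch: under rate-monotonic scheduling, priorities are fixed and a shorter period beats a longer one, so rm_preemption_needed() presumably compares the head of the ready queue against the currently linked task roughly as follows (names and signatures here are illustrative, not the LITMUS^RT API):

/* Illustrative sketch only -- NOT the rm_common.c implementation.
 * Rate-monotonic priority is static: the shorter the period, the
 * higher the priority. Preemption is needed when the ready queue's
 * head out-prioritizes whatever is currently linked to the CPU.
 */
typedef unsigned long long lt_t;	/* time type, as in LITMUS^RT */

static int rm_higher_prio_sketch(lt_t period_a, lt_t period_b)
{
	return period_a < period_b;	/* shorter period wins */
}

static int rm_preemption_needed_sketch(lt_t head_period, int has_linked,
				       lt_t linked_period)
{
	if (!has_linked)
		return 1;	/* nothing linked: any ready task preempts */
	return rm_higher_prio_sketch(head_period, linked_period);
}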
@@ -501,10 +503,11 @@ static struct task_struct* color_schedule(struct task_struct *prev)
 static void color_task_new(struct task_struct *t, int on_rq, int running)
 {
 	unsigned long flags;
-	int i, replicas;
+	int i;
 	raw_spinlock_t *lock;
 	struct cpu_entry *entry;
 	struct dgl_group_req *req;
+	color_t *colors, *pages;
 
 	TRACE_TASK(t, "New colored task\n");
 	local_irq_save(flags);
@@ -514,16 +517,18 @@ static void color_task_new(struct task_struct *t, int on_rq, int running)
 
 	release_at(t, litmus_clock());
 
-	/* Create request for dynamic group locks */
 	req = kmalloc(sizeof(*req), GFP_ATOMIC);
 	dgl_group_req_init(&group_lock, req);
-	for (i = 0; i < group_lock.num_resources; i++) {
-		replicas = get_control_page(t)->requests[i];
-		if (replicas)
-			set_req(&group_lock, req, i, replicas);
-	}
 	tsk_rt(t)->req = req;
 
+	/* Fill request */
+	if (tsk_rt(t)->color_ctrl_page) {
+		colors = tsk_rt(t)->color_ctrl_page->colors;
+		pages = tsk_rt(t)->color_ctrl_page->pages;
+		for (i = 0; pages[i]; i++)
+			set_req(&group_lock, req, colors[i], pages[i]);
+	}
+
 	/* Join system */
 	raw_spin_lock(lock);
 	if (running) {
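Worth noting in the hunk above: the resource index passed to set_req() is now the color itself and the replica count is the page count, so the task's coloring request doubles as its dynamic-group-lock request, and a zero entry in pages[] terminates the list. A standalone model of the fill loop with set_req() stubbed out (array contents are example values):

/* Standalone model of the "fill request" loop in color_task_new(). */
#include <stdio.h>

/* stand-in for set_req(&group_lock, req, resource, replicas) */
static void set_req_stub(unsigned long resource, unsigned long replicas)
{
	printf("request %lu replicas of resource (color) %lu\n",
	       replicas, resource);
}

int main(void)
{
	/* example control-page contents; pages[] is zero-terminated */
	unsigned long colors[] = { 3, 7, 0 };
	unsigned long pages[]  = { 2, 4, 0 };

	for (int i = 0; pages[i]; i++)
		set_req_stub(colors[i], pages[i]);
	return 0;
}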
@@ -535,7 +540,7 @@ static void color_task_new(struct task_struct *t, int on_rq, int running)
 	requeue(task_dom(entry, t), t);
 	raw_spin_unlock(lock);
 
-	/* Trigger preemptions */
+	/* Trigger necessary preemptions */
 	if (is_be(t))
 		check_for_fifo_preempt();
 	else
@@ -683,8 +688,10 @@ static long color_activate_plugin(void)
 		entry->fifo_server.start_time = 0;
 		entry->scheduled = NULL;
 
+		cancel_enforcement_timer(&entry->fifo_server.timer);
+
 		if (!is_queued(server_task))
-			requeue(&entry->edf_domain, server_task);
+			requeue(&entry->rm_domain, server_task);
 
 		TRACE_TASK(server_task, "Created server with wcet: %llu, "
 			   "period: %llu\n", tp.exec_cost, tp.period);
@@ -766,7 +773,7 @@ static int __init init_color(void)
 
 	for_each_online_cpu(cpu) {
 		entry = remote_entry(cpu);
-		edf_domain_init(&entry->edf_domain, NULL, color_edf_release);
+		rm_domain_init(&entry->rm_domain, NULL, color_rm_release);
 
 		entry->scheduled = NULL;
 
@@ -797,8 +804,8 @@ static int __init init_color(void)
 	/* Create CPU server */
 	cpu_server = &entry->server;
 	init_rt_server(cpu_server, cpu + 1, cpu,
-		       &entry->edf_domain, edf_preemption_needed,
-		       edf_requeue, NULL, NULL);
+		       &entry->rm_domain, rm_preemption_needed,
+		       rm_requeue, NULL, NULL);
 	cpu_server->running = 1;
 
 	init_enforcement_timer(&fifo_server->timer);