path: root/litmus/sched_pdumb.c
author    Jonathan Herman <hermanjl@cs.unc.edu>  2012-02-14 16:34:43 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2012-02-14 16:34:43 -0500
commit    949ce5e04b64f4ddc1a0e1a101f0f838c16f5492 (patch)
tree      2f0bd4617912e834be8823d6894964b946d12fc8 /litmus/sched_pdumb.c
parent    32e1c16c01276dceddf445a696b7e2c6ac4fb912 (diff)

Further refinements (demo)
Diffstat (limited to 'litmus/sched_pdumb.c')
-rw-r--r--  litmus/sched_pdumb.c  48
1 file changed, 32 insertions(+), 16 deletions(-)
diff --git a/litmus/sched_pdumb.c b/litmus/sched_pdumb.c
index c4a123475160..af1076dc9be0 100644
--- a/litmus/sched_pdumb.c
+++ b/litmus/sched_pdumb.c
@@ -19,8 +19,11 @@ struct dumb_cpu {
 	int blocked;
 	raw_spinlock_t lock;
 };
+
 /* Define AND initialize one dumb_cpu per CPU.
  * Use of this macro is good for safety and performance reasons.
+ * Beware: this macro creates global variables. If two plugins pick
+ * the same name here, this will not compile.
  */
 DEFINE_PER_CPU(struct dumb_cpu, pdumb_cpus);
 
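For readers new to per-CPU data, here is a minimal sketch of the access patterns this file relies on (illustrative only; example_cpus is a hypothetical name, not part of the patch):

/* DEFINE_PER_CPU creates one instance of the variable per processor;
 * example_cpus is a made-up name for illustration.
 */
DEFINE_PER_CPU(struct dumb_cpu, example_cpus);

/* Access a specific CPU's instance by index... */
struct dumb_cpu *remote = &per_cpu(example_cpus, cpu_num);

/* ...or the instance belonging to the CPU we are running on.
 * __get_cpu_var() was the idiom in kernels of this era.
 */
struct dumb_cpu *local = &__get_cpu_var(example_cpus);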
@@ -40,7 +43,7 @@ static long pdumb_activate_plugin(void)
 		cpu = &per_cpu(pdumb_cpus, i);
 		cpu->dumb_task = NULL;
 		cpu->scheduled = 0;
 		cpu->blocked = 0;
 	}
 
 	printk(KERN_ERR "Activated a dumb plugin!\n");
@@ -55,6 +58,7 @@ static long pdumb_activate_plugin(void)
 static long pdumb_admit_task(struct task_struct *t)
 {
 	long ret;
+	unsigned long flags;
 	int cpu_num = get_partition(t);
 
 	/* Per CPU variables have to be accessed weird.
@@ -62,22 +66,23 @@ static long pdumb_admit_task(struct task_struct *t)
  */
 	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, cpu_num);
 
-	raw_spin_lock(&cpu->lock);
+	/* We are accessing state atomically, so we need a lock and to
+	 * disable irqs.
+	 */
+	raw_spin_lock_irqsave(&cpu->lock, flags);
 
 	if (cpu->dumb_task) {
-		printk(KERN_ERR "Already have a dumb task on %d!\n",
-		       cpu_num);
+		/* Reject the task, causing a failure in userspace */
+		printk(KERN_ERR "Already have a dumb task on %d!\n", cpu_num);
 		ret = -EINVAL;
 	} else {
-		printk(KERN_ERR "Taking your dumb task on %d!\n",
-		       cpu_num);
-
-		/* Assign our dumb task! */
+		/* Assign our dumb task */
+		printk(KERN_ERR "Taking your dumb task on %d!\n", cpu_num);
 		cpu->dumb_task = t;
 		ret = 0;
 	}
 
-	raw_spin_unlock(&cpu->lock);
+	raw_spin_unlock_irqrestore(&cpu->lock, flags);
 	return ret;
 }
 
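The change from raw_spin_lock() to raw_spin_lock_irqsave() above follows the standard kernel pattern for locks that can also be taken with interrupts disabled; a sketch under that assumption:

/* Sketch of the irqsave pattern (not part of the patch). Saving and
 * restoring the interrupt state avoids deadlock when the same lock
 * is also taken from a context that runs with irqs off, such as the
 * scheduler path below.
 */
unsigned long flags;

raw_spin_lock_irqsave(&cpu->lock, flags);	/* lock and disable irqs */
/* ... update cpu->dumb_task and friends atomically ... */
raw_spin_unlock_irqrestore(&cpu->lock, flags);	/* unlock, restore irqs */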
@@ -89,15 +94,16 @@ static void pdumb_task_new(struct task_struct *t, int on_rq, int running)
 {
 	/* Needed to disable interrupts */
 	unsigned long flags;
-	/* The macro __get_cpu_var() returns the local cpu variable */
 	struct dumb_cpu *cpu = &per_cpu(pdumb_cpus, get_partition(t));
 
-	/* We are accessing state atomically, we need a lock and to
-	 * disable irqs.
-	 */
 	raw_spin_lock_irqsave(&cpu->lock, flags);
 
-	/* The task could already be running in Linux.
+	/* We only admit one task per cpu, this better be it */
+	BUG_ON(cpu->dumb_task != t);
+
+	/* The task could already be running in Linux. For example, it
+	 * could have been running, then switched into real-time
+	 * mode. This is how all real-time tasks begin execution.
 	 * Let's just say it's running here too.
 	 */
 	if (running) {
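The hunk cuts off at the if (running) branch. Its body is outside the diff, but given the plugin's state variables it presumably records that an already-running task occupies its partition, along these lines (an assumption, not shown by the patch):

/* Hypothetical continuation of task_new(); the real body is not
 * part of this hunk.
 */
if (running) {
	cpu->scheduled = 1;	/* already executing on this CPU */
}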
@@ -120,6 +126,7 @@ static void pdumb_task_exit(struct task_struct *t)
 	raw_spin_lock_irqsave(&cpu->lock, flags);
 	cpu->dumb_task = NULL;
 	cpu->scheduled = 0;
+	cpu->blocked = 0;
 	raw_spin_unlock_irqrestore(&cpu->lock, flags);
 }
 
@@ -131,6 +138,7 @@ static void pdumb_task_exit(struct task_struct *t)
  */
 static struct task_struct* pdumb_schedule(struct task_struct *prev)
 {
+	/* The macro __get_cpu_var() returns the local cpu variable */
 	struct dumb_cpu *cpu = &__get_cpu_var(pdumb_cpus);
 	struct task_struct *sched;
 
@@ -140,6 +148,7 @@ static struct task_struct* pdumb_schedule(struct task_struct *prev)
 	 */
 	raw_spin_lock(&cpu->lock);
 
+	/* Switch between dumb_task and nothing arbitrarily */
 	if (cpu->scheduled || cpu->blocked || !cpu->dumb_task) {
 		cpu->scheduled = 0;
 		sched = NULL;
@@ -150,9 +159,15 @@ static struct task_struct* pdumb_schedule(struct task_struct *prev)
 		TRACE_TASK(cpu->dumb_task, "Scheduled!\n");
 	}
 
+	/* This must be done when the cpu state has been atomically
+	 * chosen. This allows litmus to manage some race conditions
+	 * with tasks blocking / preempting / releasing without any
+	 * work on the plugin's part.
+	 */
 	sched_state_task_picked();
 
 	raw_spin_unlock(&cpu->lock);
+
 	return sched;
 }
 
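Taken together, the schedule() hunks implement a decide-then-announce protocol; condensed here as a sketch (state toggling omitted, nothing assumed beyond what the hunks show):

/* Decide under the lock, announce via sched_state_task_picked()
 * before unlocking, so a concurrent wake-up or preemption request
 * cannot slip in between the decision and its announcement.
 */
raw_spin_lock(&cpu->lock);
sched = (cpu->scheduled || cpu->blocked || !cpu->dumb_task)
	? NULL : cpu->dumb_task;
sched_state_task_picked();	/* commit the decision */
raw_spin_unlock(&cpu->lock);
return sched;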
@@ -171,7 +186,7 @@ static void pdumb_task_block(struct task_struct *t)
 	TRACE_TASK(t, "Blocked!\n");
 
 	raw_spin_lock_irqsave(&cpu->lock, flags);
 	cpu->blocked = 1;
 	cpu->scheduled = 0;
 	raw_spin_unlock_irqrestore(&cpu->lock, flags);
 }
@@ -188,7 +203,7 @@ static void pdumb_task_wake_up(struct task_struct *t)
 	TRACE_TASK(t, "Awoken!\n");
 
 	raw_spin_lock_irqsave(&cpu->lock, flags);
 	cpu->blocked = 0;
 	cpu->scheduled = 0;
 	raw_spin_unlock_irqrestore(&cpu->lock, flags);
 }
@@ -212,6 +227,7 @@ static struct sched_plugin pdumb_plugin __cacheline_aligned_in_smp = {
 	.task_exit = pdumb_task_exit,
 };
 
+
 /**
  * Called when the system boots up.
  */
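The sched_plugin struct above is the interface litmus schedules through. For context, a sketch of how such a plugin is typically registered at boot, assuming the stock LITMUS^RT register_sched_plugin() call (the real init function lies outside this diff):

/* Hypothetical module init; the actual one is not shown in this
 * patch. Registration makes the plugin selectable at runtime.
 */
static int __init init_pdumb(void)
{
	return register_sched_plugin(&pdumb_plugin);
}
module_init(init_pdumb);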