author     Namhoon Kim <namhoonk@cs.unc.edu>    2016-03-23 08:20:51 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>    2016-03-23 08:20:51 -0400
commit     ff210e0441b743890ad85c7335e41894b34a1431 (patch)
tree       21027c2433f5ca9a26731b3af72fa6eb620df369
parent     2e23e3f0cc7c3249b510e94b5b3ec92577b67e81 (diff)
MC2 scheduler and partition modules
-rw-r--r--  include/litmus/cache_proc.h              5
-rw-r--r--  include/litmus/litmus.h                  7
-rw-r--r--  include/litmus/mc2_common.h             31
-rw-r--r--  include/litmus/polling_reservations.h   36
-rw-r--r--  include/litmus/reservation.h           256
-rw-r--r--  include/litmus/rt_param.h               51
-rw-r--r--  include/litmus/sched_plugin.h            8
-rw-r--r--  include/litmus/trace.h                  16
-rw-r--r--  litmus/Makefile                          5
-rw-r--r--  litmus/bank_proc.c                     737
-rw-r--r--  litmus/cache_proc.c                    257
-rw-r--r--  litmus/mc2_common.c                     78
-rw-r--r--  litmus/polling_reservations.c          564
-rw-r--r--  litmus/reservation.c                   709
-rw-r--r--  litmus/sched_mc2.c                    1849
15 files changed, 4605 insertions, 4 deletions
diff --git a/include/litmus/cache_proc.h b/include/litmus/cache_proc.h
index 586224118435..962851da34cc 100644
--- a/include/litmus/cache_proc.h
+++ b/include/litmus/cache_proc.h
@@ -4,6 +4,11 @@
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6void litmus_setup_lockdown(void __iomem*, u32); 6void litmus_setup_lockdown(void __iomem*, u32);
7void enter_irq_mode(void);
8void exit_irq_mode(void);
9void flush_cache(int all);
10
11extern struct page *new_alloc_page_color(unsigned long color);
7 12
8#endif 13#endif
9 14
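The three entry points added above are meant to be called from elsewhere in the kernel (the MC2 plugin and the interrupt path). As a minimal sketch, assuming an interrupt path that wants to confine handler cache traffic to the OS partition, the hooks would bracket the handler roughly as follows; the wrapper and its name are hypothetical, and the real call sites live in architecture interrupt code that is not part of this hunk.

#include <litmus/cache_proc.h>

/* Hypothetical wrapper, for illustration only: run an interrupt handler
 * with the L2 lockdown switched to the OS/interrupt partition. */
static void run_handler_isolated(void (*handler)(void))
{
	enter_irq_mode();   /* save per-CPU lockdown regs, lock to the OS ways */
	handler();          /* the actual interrupt work */
	exit_irq_mode();    /* restore the preempted task's lockdown regs */
}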
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index a6eb534ee0fa..441210c84ed8 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -113,6 +113,13 @@ static inline lt_t litmus_clock(void)
113 ((current)->state == TASK_RUNNING || \ 113 ((current)->state == TASK_RUNNING || \
114 preempt_count() & PREEMPT_ACTIVE) 114 preempt_count() & PREEMPT_ACTIVE)
115 115
116#define is_running(t) \
117 ((t)->state == TASK_RUNNING || \
118 task_thread_info(t)->preempt_count & PREEMPT_ACTIVE)
119
120#define is_blocked(t) \
121 (!is_running(t))
122
116#define is_released(t, now) \ 123#define is_released(t, now) \
117 (lt_before_eq(get_release(t), now)) 124 (lt_before_eq(get_release(t), now))
118#define is_tardy(t, now) \ 125#define is_tardy(t, now) \
diff --git a/include/litmus/mc2_common.h b/include/litmus/mc2_common.h
new file mode 100644
index 000000000000..e3c0af28f1b9
--- /dev/null
+++ b/include/litmus/mc2_common.h
@@ -0,0 +1,31 @@
1/*
2 * MC^2 common data structures
3 */
4
5#ifndef __UNC_MC2_COMMON_H__
6#define __UNC_MC2_COMMON_H__
7
8enum crit_level {
9 CRIT_LEVEL_A = 0,
10 CRIT_LEVEL_B = 1,
11 CRIT_LEVEL_C = 2,
12 NUM_CRIT_LEVELS = 3,
13};
14
15struct mc2_task {
16 enum crit_level crit;
17 unsigned int res_id;
18};
19
20#ifdef __KERNEL__
21
22#include <litmus/reservation.h>
23
24#define tsk_mc2_data(t) (tsk_rt(t)->mc2_data)
25
26long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk,
27 struct reservation *res);
28
29#endif /* __KERNEL__ */
30
31#endif \ No newline at end of file
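For orientation, an mc2_task simply names a criticality level and the reservation a task should be attached to. A minimal sketch, assuming a task that should run as a Level-B task inside reservation 3; how the struct reaches the kernel (for example via the set_mc2_task_param system call added later in this patch) is not shown here.

#include <litmus/mc2_common.h>

static struct mc2_task example_mc2_param = {
	.crit   = CRIT_LEVEL_B,	/* per-CPU Level-B criticality */
	.res_id = 3,		/* ID of the reservation to attach to */
};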
diff --git a/include/litmus/polling_reservations.h b/include/litmus/polling_reservations.h
new file mode 100644
index 000000000000..66c9b1e31f20
--- /dev/null
+++ b/include/litmus/polling_reservations.h
@@ -0,0 +1,36 @@
1#ifndef LITMUS_POLLING_RESERVATIONS_H
2#define LITMUS_POLLING_RESERVATIONS_H
3
4#include <litmus/reservation.h>
5
6struct polling_reservation {
7 /* extend basic reservation */
8 struct reservation res;
9
10 lt_t max_budget;
11 lt_t period;
12 lt_t deadline;
13 lt_t offset;
14};
15
16void polling_reservation_init(struct polling_reservation *pres, int use_edf_prio,
17 int use_periodic_polling, lt_t budget, lt_t period, lt_t deadline, lt_t offset);
18
19struct table_driven_reservation {
20 /* extend basic reservation */
21 struct reservation res;
22
23 lt_t major_cycle;
24 unsigned int next_interval;
25 unsigned int num_intervals;
26 struct lt_interval *intervals;
27
28 /* info about current scheduling slot */
29 struct lt_interval cur_interval;
30 lt_t major_cycle_start;
31};
32
33void table_driven_reservation_init(struct table_driven_reservation *tdres,
34 lt_t major_cycle, struct lt_interval *intervals, unsigned int num_intervals);
35
36#endif
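A minimal sketch of how a plugin might set up a periodic polling reservation with a 2 ms budget every 10 ms under EDF priority. The kzalloc/GFP_ATOMIC allocation is an assumption mirroring typical plugin code; passing 0 as the deadline yields an implicit deadline equal to the period (see polling_reservation_init() in litmus/polling_reservations.c below).

#include <linux/slab.h>
#include <litmus/polling_reservations.h>

static struct polling_reservation *example_make_polling_res(void)
{
	struct polling_reservation *pres = kzalloc(sizeof(*pres), GFP_ATOMIC);

	if (!pres)
		return NULL;
	polling_reservation_init(pres,
		1,		/* use_edf_prio: EDF rather than fixed priority */
		1,		/* use_periodic_polling: periodic, not sporadic */
		2000000ULL,	/* budget:   2 ms (lt_t is in nanoseconds) */
		10000000ULL,	/* period:  10 ms */
		0,		/* deadline: 0 => implicit (deadline = period) */
		0);		/* offset */
	return pres;
}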
diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h
new file mode 100644
index 000000000000..7e022b34470f
--- /dev/null
+++ b/include/litmus/reservation.h
@@ -0,0 +1,256 @@
1#ifndef LITMUS_RESERVATION_H
2#define LITMUS_RESERVATION_H
3
4#include <linux/list.h>
5#include <linux/hrtimer.h>
6
7struct reservation_client;
8struct reservation_environment;
9struct reservation;
10
11typedef enum {
12 /* reservation has no clients, is not consuming budget */
13 RESERVATION_INACTIVE = 0,
14
15 /* reservation has clients, consumes budget when scheduled */
16 RESERVATION_ACTIVE,
17
18 /* reservation has no clients, but may be consuming budget */
19 RESERVATION_ACTIVE_IDLE,
20
21 /* Reservation has no budget and waits for
22 * replenishment. May or may not have clients. */
23 RESERVATION_DEPLETED,
24} reservation_state_t;
25
26
27/* ************************************************************************** */
28
29/* Select which task to dispatch. If NULL is returned, it means there is nothing
30 * to schedule right now and background work can be scheduled. */
31typedef struct task_struct * (*dispatch_t) (
32 struct reservation_client *client
33);
34
35/* Something that can be managed in a reservation and that can yield
36 * a process for dispatching. Contains a pointer to the reservation
37 * to which it "belongs". */
38struct reservation_client {
39 struct list_head list;
40 struct reservation* reservation;
41 dispatch_t dispatch;
42};
43
44
45/* ************************************************************************** */
46
47/* Called by reservations to request state change. */
48typedef void (*reservation_change_state_t) (
49 struct reservation_environment* env,
50 struct reservation *res,
51 reservation_state_t new_state
52);
53
54/* The framework within which reservations operate. */
55struct reservation_environment {
56 lt_t time_zero;
57 lt_t current_time;
58
59 /* services invoked by reservations */
60 reservation_change_state_t change_state;
61};
62
63
64/* ************************************************************************** */
65
66/* A new client is added or an existing client resumes. */
67typedef void (*client_arrives_t) (
68 struct reservation *reservation,
69 struct reservation_client *client
70);
71
72/* A client suspends or terminates. */
73typedef void (*client_departs_t) (
74 struct reservation *reservation,
75 struct reservation_client *client,
76 int did_signal_job_completion
77);
78
79/* A previously requested replenishment has occurred. */
80typedef void (*on_replenishment_timer_t) (
81 struct reservation *reservation
82);
83
84/* Update the reservation's budget to reflect execution or idling. */
85typedef void (*drain_budget_t) (
86 struct reservation *reservation,
87 lt_t how_much
88);
89
90/* Select a ready task from one of the clients for scheduling. */
91typedef struct task_struct* (*dispatch_client_t) (
92 struct reservation *reservation,
93 lt_t *time_slice /* May be used to force rescheduling after
94 some amount of time. 0 => no limit */
95);
96
97
98struct reservation_ops {
99 dispatch_client_t dispatch_client;
100
101 client_arrives_t client_arrives;
102 client_departs_t client_departs;
103
104 on_replenishment_timer_t replenish;
105 drain_budget_t drain_budget;
106};
107
108struct reservation {
109 /* used to queue in environment */
110 struct list_head list;
111
112 reservation_state_t state;
113 unsigned int id;
114
115 /* exact meaning defined by impl. */
116 lt_t priority;
117 lt_t cur_budget;
118 lt_t next_replenishment;
119
120 /* budget stats */
121 lt_t budget_consumed; /* how much budget consumed in this allocation cycle? */
122 lt_t budget_consumed_total;
123
124 /* interaction with framework */
125 struct reservation_environment *env;
126 struct reservation_ops *ops;
127
128 struct list_head clients;
129
130 /* for global env. */
131 int scheduled_on;
132 int event_added;
133	/* blocked by a ghost job: do not charge budget while ACTIVE */
134	int blocked_by_ghost;
135	/* ghost job flag: if clear, do not charge budget while ACTIVE_IDLE */
136 int is_ghost;
137};
138
139void reservation_init(struct reservation *res);
140
141/* Default implementations */
142
143/* simply select the first client in the list, set *for_at_most to zero */
144struct task_struct* default_dispatch_client(
145 struct reservation *res,
146 lt_t *for_at_most
147);
148
149/* "connector" reservation client to hook up tasks with reservations */
150struct task_client {
151 struct reservation_client client;
152 struct task_struct *task;
153};
154
155void task_client_init(struct task_client *tc, struct task_struct *task,
156 struct reservation *reservation);
157
158#define SUP_RESCHEDULE_NOW (0)
159#define SUP_NO_SCHEDULER_UPDATE (ULLONG_MAX)
160
161/* A simple uniprocessor (SUP) flat (i.e., non-hierarchical) reservation
162 * environment.
163 */
164struct sup_reservation_environment {
165 struct reservation_environment env;
166
167 /* ordered by priority */
168 struct list_head active_reservations;
169
170 /* ordered by next_replenishment */
171 struct list_head depleted_reservations;
172
173 /* unordered */
174 struct list_head inactive_reservations;
175
176 /* - SUP_RESCHEDULE_NOW means call sup_dispatch() now
177 * - SUP_NO_SCHEDULER_UPDATE means nothing to do
178 * any other value means program a timer for the given time
179 */
180 lt_t next_scheduler_update;
181 /* set to true if a call to sup_dispatch() is imminent */
182 bool will_schedule;
183};
184
185/* Contract:
186 * - before calling into sup_ code, or any reservation methods,
187 * update the time with sup_update_time(); and
188 * - after calling into sup_ code, or any reservation methods,
189 * check next_scheduler_update and program timer or trigger
190 * scheduler invocation accordingly.
191 */
192
193void sup_init(struct sup_reservation_environment* sup_env);
194void sup_add_new_reservation(struct sup_reservation_environment* sup_env,
195 struct reservation* new_res);
196void sup_scheduler_update_after(struct sup_reservation_environment* sup_env,
197 lt_t timeout);
198void sup_update_time(struct sup_reservation_environment* sup_env, lt_t now);
199struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env);
200
201struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
202 unsigned int id);
203
204/* A global multiprocessor reservation environment. */
205
206typedef enum {
207 EVENT_REPLENISH = 0,
208 EVENT_DRAIN,
209 EVENT_OTHERS,
210} event_type_t;
211
212
213struct next_timer_event {
214 lt_t next_update;
215 int timer_armed_on;
216 unsigned int id;
217 event_type_t type;
218 struct list_head list;
219};
220
221struct gmp_reservation_environment {
222 raw_spinlock_t lock;
223 struct reservation_environment env;
224
225 /* ordered by priority */
226 struct list_head active_reservations;
227
228 /* ordered by next_replenishment */
229 struct list_head depleted_reservations;
230
231 /* unordered */
232 struct list_head inactive_reservations;
233
234 /* timer event ordered by next_update */
235 struct list_head next_events;
236
237 /* (schedule_now == true) means call gmp_dispatch() now */
238 int schedule_now;
239 /* set to true if a call to gmp_dispatch() is imminent */
240 bool will_schedule;
241};
242
243void gmp_init(struct gmp_reservation_environment* gmp_env);
244void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env,
245 struct reservation* new_res);
246void gmp_add_event_after(struct gmp_reservation_environment* gmp_env,
247 lt_t timeout, unsigned int id, event_type_t type);
248void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now);
249int gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now);
250struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env);
251struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id);
252struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when);
253struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
254 unsigned int id);
255
256#endif
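The contract spelled out above boils down to a fixed call pattern in a plugin's scheduling path. A condensed sketch under that contract; locking and the actual timer arming are plugin-specific (handled in sched_mc2.c) and only hinted at in comments, and the function name is illustrative.

#include <litmus/litmus.h>
#include <litmus/reservation.h>

static struct task_struct *example_sup_schedule(
	struct sup_reservation_environment *sup_env, lt_t now)
{
	struct task_struct *next;

	sup_update_time(sup_env, now);	/* 1. advance reservation state to 'now' */
	next = sup_dispatch(sup_env);	/* 2. pick a client task; NULL => background */

	/* 3. honor next_scheduler_update */
	if (sup_env->next_scheduler_update == SUP_RESCHEDULE_NOW) {
		/* caller should invoke the scheduler again immediately */
	} else if (sup_env->next_scheduler_update != SUP_NO_SCHEDULER_UPDATE) {
		/* caller should arm a timer for sup_env->next_scheduler_update */
	}

	return next;
}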
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 7b9a90965c25..998762051a6b 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -62,6 +62,7 @@ typedef enum {
62#define LITMUS_MAX_PRIORITY 512 62#define LITMUS_MAX_PRIORITY 512
63#define LITMUS_HIGHEST_PRIORITY 1 63#define LITMUS_HIGHEST_PRIORITY 1
64#define LITMUS_LOWEST_PRIORITY (LITMUS_MAX_PRIORITY - 1) 64#define LITMUS_LOWEST_PRIORITY (LITMUS_MAX_PRIORITY - 1)
65#define LITMUS_NO_PRIORITY UINT_MAX
65 66
66/* Provide generic comparison macros for userspace, 67/* Provide generic comparison macros for userspace,
67 * in case that we change this later. */ 68 * in case that we change this later. */
@@ -71,6 +72,46 @@ typedef enum {
71 ((p) >= LITMUS_HIGHEST_PRIORITY && \ 72 ((p) >= LITMUS_HIGHEST_PRIORITY && \
72 (p) <= LITMUS_LOWEST_PRIORITY) 73 (p) <= LITMUS_LOWEST_PRIORITY)
73 74
75/* reservation support */
76
77typedef enum {
78 PERIODIC_POLLING,
79 SPORADIC_POLLING,
80 TABLE_DRIVEN,
81} reservation_type_t;
82
83struct lt_interval {
84 lt_t start;
85 lt_t end;
86};
87
88#ifndef __KERNEL__
89#define __user
90#endif
91
92struct reservation_config {
93 unsigned int id;
94 lt_t priority;
95 int cpu;
96
97 union {
98 struct {
99 lt_t period;
100 lt_t budget;
101 lt_t relative_deadline;
102 lt_t offset;
103 } polling_params;
104
105 struct {
106 lt_t major_cycle_length;
107 unsigned int num_intervals;
108 struct lt_interval __user *intervals;
109 } table_driven_params;
110 };
111};
112
113/* regular sporadic task support */
114
74struct rt_task { 115struct rt_task {
75 lt_t exec_cost; 116 lt_t exec_cost;
76 lt_t period; 117 lt_t period;
@@ -165,6 +206,7 @@ struct rt_job {
165}; 206};
166 207
167struct pfair_param; 208struct pfair_param;
209struct mc2_task;
168 210
169/* RT task parameters for scheduling extensions 211/* RT task parameters for scheduling extensions
170 * These parameters are inherited during clone and therefore must 212 * These parameters are inherited during clone and therefore must
@@ -246,7 +288,10 @@ struct rt_param {
246 volatile int linked_on; 288 volatile int linked_on;
247 289
248 /* PFAIR/PD^2 state. Allocated on demand. */ 290 /* PFAIR/PD^2 state. Allocated on demand. */
249 struct pfair_param* pfair; 291 union {
292 void *plugin_state;
293 struct pfair_param *pfair;
294 };
250 295
251 /* Fields saved before BE->RT transition. 296 /* Fields saved before BE->RT transition.
252 */ 297 */
@@ -275,6 +320,10 @@ struct rt_param {
275 320
276 /* Pointer to the page shared between userspace and kernel. */ 321 /* Pointer to the page shared between userspace and kernel. */
277 struct control_page * ctrl_page; 322 struct control_page * ctrl_page;
323
324 /* Mixed-criticality specific data */
325 struct mc2_task* mc2_data;
326 unsigned long addr_ctrl_page;
278}; 327};
279 328
280#endif 329#endif
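A sketch of a reservation_config describing the same 2 ms / 10 ms polling reservation, pinned to CPU 1; the struct would be handed to the plugin's reservation_create backend added to sched_plugin below. Using LITMUS_NO_PRIORITY to let the plugin assign the priority (e.g., EDF) is an assumption about the plugin's convention.

#include <litmus/rt_param.h>

static struct reservation_config example_res_config = {
	.id       = 3,			/* matches mc2_task.res_id */
	.priority = LITMUS_NO_PRIORITY,	/* let the plugin decide (assumption) */
	.cpu      = 1,
	.polling_params = {
		.period            = 10000000ULL,	/* 10 ms */
		.budget            =  2000000ULL,	/*  2 ms */
		.relative_deadline = 0,			/* implicit deadline */
		.offset            = 0,
	},
};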
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index f36bb3875f58..4c8aaa6b6674 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -83,6 +83,10 @@ typedef void (*synchronous_release_at_t)(lt_t time_zero);
83 * reservation-specific values. */ 83 * reservation-specific values. */
84typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining); 84typedef void (*current_budget_t)(lt_t *used_so_far, lt_t *remaining);
85 85
86/* Reservation creation/removal backends. The meanings of reservation_type and
87 * reservation_id are entirely plugin-specific. */
88typedef long (*reservation_create_t)(int reservation_type, void* __user config);
89typedef long (*reservation_destroy_t)(unsigned int reservation_id, int cpu);
86 90
87/************************ misc routines ***********************/ 91/************************ misc routines ***********************/
88 92
@@ -118,6 +122,10 @@ struct sched_plugin {
118 122
119 current_budget_t current_budget; 123 current_budget_t current_budget;
120 124
125 /* Reservation support */
126 reservation_create_t reservation_create;
127 reservation_destroy_t reservation_destroy;
128
121#ifdef CONFIG_LITMUS_LOCKING 129#ifdef CONFIG_LITMUS_LOCKING
122 /* locking protocols */ 130 /* locking protocols */
123 allocate_lock_t allocate_lock; 131 allocate_lock_t allocate_lock;
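The two new hooks slot into struct sched_plugin like any other callback. A skeleton sketch follows; the function bodies are placeholders, and the real implementations live in sched_mc2.c later in this patch.

#include <linux/errno.h>
#include <litmus/sched_plugin.h>

static long example_reservation_create(int reservation_type, void* __user config)
{
	/* plugin-defined: copy a struct reservation_config (or table-driven
	 * variant) from 'config' and instantiate the reservation */
	return -ENOSYS;
}

static long example_reservation_destroy(unsigned int reservation_id, int cpu)
{
	/* plugin-defined: tear down the reservation with this id on 'cpu' */
	return -ENOSYS;
}

static struct sched_plugin example_plugin = {
	.plugin_name         = "EXAMPLE",
	.reservation_create  = example_reservation_create,
	.reservation_destroy = example_reservation_destroy,
};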
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 601787214037..24ca412e1184 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -118,6 +118,9 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
118#define TS_TICK_START(t) CPU_TTIMESTAMP(110, t) 118#define TS_TICK_START(t) CPU_TTIMESTAMP(110, t)
119#define TS_TICK_END(t) CPU_TTIMESTAMP(111, t) 119#define TS_TICK_END(t) CPU_TTIMESTAMP(111, t)
120 120
121#define TS_RELEASE_C_START CPU_DTIMESTAMP(108, TSK_RT)
122#define TS_RELEASE_C_END CPU_DTIMESTAMP(109, TSK_RT)
123
121#define TS_QUANTUM_BOUNDARY_START CPU_TIMESTAMP_CUR(112) 124#define TS_QUANTUM_BOUNDARY_START CPU_TIMESTAMP_CUR(112)
122#define TS_QUANTUM_BOUNDARY_END CPU_TIMESTAMP_CUR(113) 125#define TS_QUANTUM_BOUNDARY_END CPU_TIMESTAMP_CUR(113)
123 126
@@ -137,6 +140,17 @@ feather_callback void save_cpu_task_latency(unsigned long event, unsigned long w
137#define TS_SEND_RESCHED_START(c) MSG_TIMESTAMP_SENT(190, c) 140#define TS_SEND_RESCHED_START(c) MSG_TIMESTAMP_SENT(190, c)
138#define TS_SEND_RESCHED_END MSG_TIMESTAMP_RECEIVED(191) 141#define TS_SEND_RESCHED_END MSG_TIMESTAMP_RECEIVED(191)
139 142
140#define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when)) 143#define TS_ISR_START CPU_TIMESTAMP_CUR(192)
144#define TS_ISR_END CPU_TIMESTAMP_CUR(193)
145
146#define TS_RELEASE_LATENCY(when) CPU_LTIMESTAMP(208, &(when))
147#define TS_RELEASE_LATENCY_A(when) CPU_LTIMESTAMP(209, &(when))
148#define TS_RELEASE_LATENCY_B(when) CPU_LTIMESTAMP(210, &(when))
149#define TS_RELEASE_LATENCY_C(when) CPU_LTIMESTAMP(211, &(when))
150
151#define TS_SCHED_A_START CPU_DTIMESTAMP(212, TSK_UNKNOWN)
152#define TS_SCHED_A_END(t) CPU_TTIMESTAMP(213, t)
153#define TS_SCHED_C_START CPU_DTIMESTAMP(214, TSK_UNKNOWN)
154#define TS_SCHED_C_END(t) CPU_TTIMESTAMP(215, t)
141 155
142#endif /* !_SYS_TRACE_H_ */ 156#endif /* !_SYS_TRACE_H_ */
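The new timestamp pairs follow the usual Feather-Trace start/end convention. The placement below is illustrative only; the actual instrumentation points are in the MC2 plugin and the interrupt path, which are not shown in this hunk, and both example functions are hypothetical.

#include <litmus/trace.h>

static void example_isr_body(void)
{
	TS_ISR_START;
	/* ... interrupt work ... */
	TS_ISR_END;
}

static void example_level_c_release(void)
{
	TS_RELEASE_C_START;
	/* ... move released Level-C jobs to the ready queue ... */
	TS_RELEASE_C_END;
}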
diff --git a/litmus/Makefile b/litmus/Makefile
index f80a3c0d05aa..4a34b4d338a1 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -11,6 +11,7 @@ obj-y = sched_plugin.o litmus.o \
11 sync.o \ 11 sync.o \
12 rt_domain.o \ 12 rt_domain.o \
13 edf_common.o \ 13 edf_common.o \
14 mc2_common.o \
14 fp_common.o \ 15 fp_common.o \
15 fdso.o \ 16 fdso.o \
16 locking.o \ 17 locking.o \
@@ -19,9 +20,13 @@ obj-y = sched_plugin.o litmus.o \
19 binheap.o \ 20 binheap.o \
20 ctrldev.o \ 21 ctrldev.o \
21 uncachedev.o \ 22 uncachedev.o \
23 reservation.o \
24 polling_reservations.o \
22 sched_gsn_edf.o \ 25 sched_gsn_edf.o \
23 sched_psn_edf.o \ 26 sched_psn_edf.o \
24 sched_pfp.o \ 27 sched_pfp.o \
28 sched_mc2.o \
29 bank_proc.o \
25 cache_proc.o 30 cache_proc.o
26 31
27obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o 32obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
new file mode 100644
index 000000000000..6103611211ce
--- /dev/null
+++ b/litmus/bank_proc.c
@@ -0,0 +1,737 @@
1/*
2 * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
3 * This file maintains a pool of colored pages. Users can request pages with a
4 * specific color or bank number.
5 * Part of the code is adapted from Jonathan Herman's code.
6 */
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/sysctl.h>
12#include <linux/slab.h>
13#include <linux/io.h>
14#include <linux/mutex.h>
15#include <linux/mm.h>
16#include <linux/random.h>
17
18#include <litmus/litmus_proc.h>
19#include <litmus/sched_trace.h>
20#include <litmus/litmus.h>
21
22#define LITMUS_LOCKDEP_NAME_MAX_LEN 50
23
24// This address decoding is used on the imx6-sabredsd platform
25#define BANK_MASK 0x38000000
26#define BANK_SHIFT 27
27#define CACHE_MASK 0x0000f000
28#define CACHE_SHIFT 12
29
30#define PAGES_PER_COLOR 1024
31unsigned int NUM_PAGE_LIST; /* = number_banks * number_cachecolors (8*16) */
32
33unsigned int number_banks;
34unsigned int number_cachecolors;
35
36unsigned int set_partition_max = 0x0000ffff;
37unsigned int set_partition_min = 0;
38unsigned int bank_partition_max = 0x000000ff;
39unsigned int bank_partition_min = 0;
40
41int show_page_pool = 0;
42int refill_page_pool = 0;
43spinlock_t reclaim_lock;
44
45unsigned int set_partition[9] = {
46 0x00000003, /* Core 0, and Level A*/
47 0x00000003, /* Core 0, and Level B*/
48 0x0000000C, /* Core 1, and Level A*/
49 0x0000000C, /* Core 1, and Level B*/
50 0x00000030, /* Core 2, and Level A*/
51 0x00000030, /* Core 2, and Level B*/
52 0x000000C0, /* Core 3, and Level A*/
53 0x000000C0, /* Core 3, and Level B*/
54 0x0000ff00, /* Level C */
55};
56
57unsigned int bank_partition[9] = {
58 0x00000010, /* Core 0, and Level A*/
59 0x00000010, /* Core 0, and Level B*/
60 0x00000020, /* Core 1, and Level A*/
61 0x00000020, /* Core 1, and Level B*/
62 0x00000040, /* Core 2, and Level A*/
63 0x00000040, /* Core 2, and Level B*/
64 0x00000080, /* Core 3, and Level A*/
65 0x00000080, /* Core 3, and Level B*/
66 0x0000000c, /* Level C */
67};
68
69unsigned int set_index[9] = {
70 0, 0, 0, 0, 0, 0, 0, 0, 0
71};
72
73unsigned int bank_index[9] = {
74 0, 0, 0, 0, 0, 0, 0, 0, 0
75};
76
77struct mutex void_lockdown_proc;
78
79
80/*
81 * Every page list contains a lock, a list head, and a counter recording how many pages it stores
82 */
83struct color_group {
84 spinlock_t lock;
85 char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
86 struct list_head list;
87 atomic_t nr_pages;
88};
89
90
91static struct color_group *color_groups;
92
93/*
94 * Naive function to count the number of 1's
95 */
96unsigned int counting_one_set(unsigned int v)
97{
98// unsigned int v; // count the number of bits set in v
99 unsigned int c; // c accumulates the total bits set in v
100
101 for (c = 0; v; v >>= 1)
102 {
103 c += v & 1;
104 }
105 return c;
106}
107
108unsigned int two_exp(unsigned int e)
109{
110 unsigned int v = 1;
111 for (; e>0; e-- )
112 {
113 v=v*2;
114 }
115 return v;
116}
117
118unsigned int num_by_bitmask_index(unsigned int bitmask, unsigned int index)
119{
120 unsigned int pos = 0;
121
122 while(true)
123 {
124 if(index ==0 && (bitmask & 1)==1)
125 {
126 break;
127 }
128 if(index !=0 && (bitmask & 1)==1){
129 index--;
130 }
131 pos++;
132 bitmask = bitmask >>1;
133
134 }
135 return pos;
136}
137
138
139
140/* Decoding page color, 0~15 */
141static inline unsigned int page_color(struct page *page)
142{
143 return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
144}
145
146/* Decoding page bank number, 0~7 */
147static inline unsigned int page_bank(struct page *page)
148{
149 return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
150}
151
152static inline unsigned int page_list_index(struct page *page)
153{
154 unsigned int idx;
155 idx = (page_color(page) + page_bank(page)*(number_cachecolors));
156// printk("address = %lx, ", page_to_phys(page));
157// printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx);
158
159 return idx;
160}
161
162
163
164/*
165 * Return the smallest page count held by any page list (starting at index 16*2).
166 */
167static unsigned long smallest_nr_pages(void)
168{
169 unsigned long i, min_pages;
170 struct color_group *cgroup;
171 cgroup = &color_groups[16*2];
172 min_pages =atomic_read(&cgroup->nr_pages);
173 for (i = 16*2; i < NUM_PAGE_LIST; ++i) {
174 cgroup = &color_groups[i];
175 if (atomic_read(&cgroup->nr_pages) < min_pages)
176 min_pages = atomic_read(&cgroup->nr_pages);
177 }
178 return min_pages;
179}
180
181static void show_nr_pages(void)
182{
183 unsigned long i;
184 struct color_group *cgroup;
185 printk("show nr pages***************************************\n");
186 for (i = 0; i < NUM_PAGE_LIST; ++i) {
187 cgroup = &color_groups[i];
188 printk("(%03d) = %03d, ", i, atomic_read(&cgroup->nr_pages));
189 if((i % 8) ==7){
190 printk("\n");
191 }
192 }
193}
194
195/*
196 * Add a page to the page list matching its color and bank.
197 */
198void add_page_to_color_list(struct page *page)
199{
200 const unsigned long color = page_list_index(page);
201 struct color_group *cgroup = &color_groups[color];
202 BUG_ON(in_list(&page->lru) || PageLRU(page));
203 BUG_ON(page_count(page) > 1);
204 spin_lock(&cgroup->lock);
205 list_add_tail(&page->lru, &cgroup->list);
206 atomic_inc(&cgroup->nr_pages);
207 SetPageLRU(page);
208 spin_unlock(&cgroup->lock);
209}
210
211/*
212 * Replenish the page pool.
213 * If a newly allocated page is one we want, it is pushed onto the correct page list;
214 * otherwise, it is freed.
215 */
216static int do_add_pages(void)
217{
218 //printk("LITMUS do add pages\n");
219
220 struct page *page, *page_tmp;
221 LIST_HEAD(free_later);
222 unsigned long color;
223 int ret = 0;
224 int i = 0;
225 int free_counter = 0;
226 unsigned long counter[128]= {0};
227
228 //printk("Before refill : \n");
229 //show_nr_pages();
230
231 // until all the page lists contain enough pages
232 //for (i =0; i<5; i++) {
233 for (i=0; i< 1024*100;i++) {
234 //while (smallest_nr_pages() < PAGES_PER_COLOR) {
235 // printk("smallest = %d\n", smallest_nr_pages());
236 page = alloc_page(GFP_HIGHUSER_MOVABLE);
237 // page = alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
238
239 if (unlikely(!page)) {
240 printk(KERN_WARNING "Could not allocate pages.\n");
241 ret = -ENOMEM;
242 goto out;
243 }
244 color = page_list_index(page);
245 counter[color]++;
246 // printk("page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
247 //show_nr_pages();
248 if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR && color>=32) {
249 //if ( PAGES_PER_COLOR && color>=16*2) {
250 add_page_to_color_list(page);
251 // printk("add page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
252 } else{
253 // Pages here will be freed later
254 list_add_tail(&page->lru, &free_later);
255 free_counter++;
256 //list_del(&page->lru);
257 // __free_page(page);
258 // printk("useless page(%d) = color %x, bank %x\n", color, page_color(page), page_bank(page));
259 }
260 //show_nr_pages();
261 /*
262 if(free_counter >= PAGES_PER_COLOR)
263 {
264 printk("free unwanted page list eariler");
265 free_counter = 0;
266 list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
267 list_del(&page->lru);
268 __free_page(page);
269 }
270
271 show_nr_pages();
272 }
273 */
274 }
275/* printk("page counter = \n");
276 for (i=0; i<128; i++)
277 {
278 printk("(%03d) = %4d, ", i , counter[i]);
279 if(i%8 == 7){
280 printk("\n");
281 }
282
283 }
284*/
285 //printk("After refill : \n");
286 //show_nr_pages();
287#if 1
288 // Free the unwanted pages
289 list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
290 list_del(&page->lru);
291 __free_page(page);
292 }
293#endif
294out:
295 return ret;
296}
297
298/*
299 * Provide pages for replacement according to cache color.
300 * This should be the only implementation here;
301 * other code should not call this function directly.
302 *
303 */
304static struct page *new_alloc_page_color( unsigned long color)
305{
306// printk("allocate new page color = %d\n", color);
307 struct color_group *cgroup;
308 struct page *rPage = NULL;
309
310 if( (color <0) || (color)>(number_cachecolors*number_banks -1)) {
311 TRACE_CUR("Wrong color %lu\n", color);
312// printk(KERN_WARNING "Wrong color %lu\n", color);
313 goto out;
314 }
315
316
317 cgroup = &color_groups[color];
318 spin_lock(&cgroup->lock);
319 if (unlikely(!atomic_read(&cgroup->nr_pages))) {
320 TRACE_CUR("No free %lu colored pages.\n", color);
321// printk(KERN_WARNING "no free %lu colored pages.\n", color);
322 goto out_unlock;
323 }
324 rPage = list_first_entry(&cgroup->list, struct page, lru);
325 BUG_ON(page_count(rPage) > 1);
326 //get_page(rPage);
327 list_del(&rPage->lru);
328 atomic_dec(&cgroup->nr_pages);
329 ClearPageLRU(rPage);
330out_unlock:
331 spin_unlock(&cgroup->lock);
332out:
333 if( smallest_nr_pages() == 0)
334 {
335 do_add_pages();
336 // printk("ERROR(bank_proc.c) = We don't have enough pages in bank_proc.c\n");
337
338 }
339 return rPage;
340}
341
342struct page* get_colored_page(unsigned long color)
343{
344 return new_alloc_page_color(color);
345}
346
347/*
348 * provide pages for replacement according to
349 * node = 0 for Level A tasks in Cpu 0
350 * node = 1 for Level B tasks in Cpu 0
351 * node = 2 for Level A tasks in Cpu 1
352 * node = 3 for Level B tasks in Cpu 1
353 * node = 4 for Level A tasks in Cpu 2
354 * node = 5 for Level B tasks in Cpu 2
355 * node = 6 for Level A tasks in Cpu 3
356 * node = 7 for Level B tasks in Cpu 3
357 * node = 8 for Level C tasks
358 */
359struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
360{
361// printk("allocate new page node = %d\n", node);
362// return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
363 struct color_group *cgroup;
364 struct page *rPage = NULL;
365 unsigned int color;
366
367
368 unsigned int idx = 0;
369 idx += num_by_bitmask_index(set_partition[node], set_index[node]);
370 idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
371 //printk("node = %d, idx = %d\n", node, idx);
372
373 rPage = new_alloc_page_color(idx);
374
375
376 set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
377 bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
378 return rPage;
379}
380
381
382/*
383 * Reclaim pages.
384 */
385void reclaim_page(struct page *page)
386{
387 const unsigned long color = page_list_index(page);
388 unsigned long nr_reclaimed = 0;
389 spin_lock(&reclaim_lock);
390 put_page(page);
391 add_page_to_color_list(page);
392
393 spin_unlock(&reclaim_lock);
394 printk("Reclaimed page(%d) = color %x, bank %x, [color] =%d \n", color, page_color(page), page_bank(page), atomic_read(&color_groups[color].nr_pages));
395}
396
397
398/*
399 * Initialize the numbers of banks and cache colors
400 */
401static void __init init_variables(void)
402{
403 number_banks = counting_one_set(BANK_MASK);
404 number_banks = two_exp(number_banks);
405
406 number_cachecolors = counting_one_set(CACHE_MASK);
407 number_cachecolors = two_exp(number_cachecolors);
408 NUM_PAGE_LIST = number_banks * number_cachecolors;
409 printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
410 mutex_init(&void_lockdown_proc);
411 spin_lock_init(&reclaim_lock);
412
413}
414
415
416/*
417 * Initialize the page pool
418 */
419static int __init init_color_groups(void)
420{
421 struct color_group *cgroup;
422 unsigned long i;
423 int err = 0;
424
425 printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
426 color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
427
428 if (!color_groups) {
429 printk(KERN_WARNING "Could not allocate color groups.\n");
430 err = -ENOMEM;
431 }else{
432
433 for (i = 0; i < NUM_PAGE_LIST; ++i) {
434 cgroup = &color_groups[i];
435 atomic_set(&cgroup->nr_pages, 0);
436 INIT_LIST_HEAD(&cgroup->list);
437 spin_lock_init(&cgroup->lock);
438 }
439 }
440 return err;
441}
442
443int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
444 size_t *lenp, loff_t *ppos)
445{
446 int ret = 0, i = 0;
447 mutex_lock(&void_lockdown_proc);
448 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
449 if (ret)
450 goto out;
451 if (write) {
452 printk("New set Partition : \n");
453 for(i =0;i <9;i++)
454 {
455 set_index[i] = 0;
456 printk("set[%d] = %x \n", i, set_partition[i]);
457 }
458 }
459out:
460 mutex_unlock(&void_lockdown_proc);
461 return ret;
462}
463
464int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
465 size_t *lenp, loff_t *ppos)
466{
467 int ret = 0, i = 0;
468 mutex_lock(&void_lockdown_proc);
469 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
470 if (ret)
471 goto out;
472 if (write) {
473 for(i =0;i <9;i++)
474 {
475 bank_index[i] = 0;
476 }
477 }
478out:
479 mutex_unlock(&void_lockdown_proc);
480 return ret;
481}
482
483int show_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
484 size_t *lenp, loff_t *ppos)
485{
486 int ret = 0, i = 0;
487 mutex_lock(&void_lockdown_proc);
488 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
489 if (ret)
490 goto out;
491 if (write) {
492 show_nr_pages();
493 }
494out:
495 mutex_unlock(&void_lockdown_proc);
496 return ret;
497}
498
499int refill_page_pool_handler(struct ctl_table *table, int write, void __user *buffer,
500 size_t *lenp, loff_t *ppos)
501{
502 int ret = 0, i = 0;
503 mutex_lock(&void_lockdown_proc);
504 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
505 if (ret)
506 goto out;
507 if (write) {
508 do_add_pages();
509 }
510out:
511 mutex_unlock(&void_lockdown_proc);
512 return ret;
513}
514
515static struct ctl_table cache_table[] =
516{
517
518 {
519 .procname = "C0_LA_set",
520 .mode = 0666,
521 .proc_handler = set_partition_handler,
522 .data = &set_partition[0],
523 .maxlen = sizeof(set_partition[0]),
524 .extra1 = &set_partition_min,
525 .extra2 = &set_partition_max,
526 },
527 {
528 .procname = "C0_LB_set",
529 .mode = 0666,
530 .proc_handler = set_partition_handler,
531 .data = &set_partition[1],
532 .maxlen = sizeof(set_partition[1]),
533 .extra1 = &set_partition_min,
534 .extra2 = &set_partition_max,
535 },
536 {
537 .procname = "C1_LA_set",
538 .mode = 0666,
539 .proc_handler = set_partition_handler,
540 .data = &set_partition[2],
541 .maxlen = sizeof(set_partition[2]),
542 .extra1 = &set_partition_min,
543 .extra2 = &set_partition_max,
544 },
545 {
546 .procname = "C1_LB_set",
547 .mode = 0666,
548 .proc_handler = set_partition_handler,
549 .data = &set_partition[3],
550 .maxlen = sizeof(set_partition[3]),
551 .extra1 = &set_partition_min,
552 .extra2 = &set_partition_max,
553 },
554 {
555 .procname = "C2_LA_set",
556 .mode = 0666,
557 .proc_handler = set_partition_handler,
558 .data = &set_partition[4],
559 .maxlen = sizeof(set_partition[4]),
560 .extra1 = &set_partition_min,
561 .extra2 = &set_partition_max,
562 },
563 {
564 .procname = "C2_LB_set",
565 .mode = 0666,
566 .proc_handler = set_partition_handler,
567 .data = &set_partition[5],
568 .maxlen = sizeof(set_partition[5]),
569 .extra1 = &set_partition_min,
570 .extra2 = &set_partition_max,
571 },
572 {
573 .procname = "C3_LA_set",
574 .mode = 0666,
575 .proc_handler = set_partition_handler,
576 .data = &set_partition[6],
577 .maxlen = sizeof(set_partition[6]),
578 .extra1 = &set_partition_min,
579 .extra2 = &set_partition_max,
580 },
581 {
582 .procname = "C3_LB_set",
583 .mode = 0666,
584 .proc_handler = set_partition_handler,
585 .data = &set_partition[7],
586 .maxlen = sizeof(set_partition[7]),
587 .extra1 = &set_partition_min,
588 .extra2 = &set_partition_max,
589 },
590 {
591 .procname = "Call_LC_set",
592 .mode = 0666,
593 .proc_handler = set_partition_handler,
594 .data = &set_partition[8],
595 .maxlen = sizeof(set_partition[8]),
596 .extra1 = &set_partition_min,
597 .extra2 = &set_partition_max,
598 },
599 {
600 .procname = "C0_LA_bank",
601 .mode = 0666,
602 .proc_handler = bank_partition_handler,
603 .data = &bank_partition[0],
604 .maxlen = sizeof(set_partition[0]),
605 .extra1 = &bank_partition_min,
606 .extra2 = &bank_partition_max,
607 },
608 {
609 .procname = "C0_LB_bank",
610 .mode = 0666,
611 .proc_handler = bank_partition_handler,
612 .data = &bank_partition[1],
613 .maxlen = sizeof(set_partition[1]),
614 .extra1 = &bank_partition_min,
615 .extra2 = &bank_partition_max,
616 },
617 {
618 .procname = "C1_LA_bank",
619 .mode = 0666,
620 .proc_handler = bank_partition_handler,
621 .data = &bank_partition[2],
622 .maxlen = sizeof(set_partition[2]),
623 .extra1 = &bank_partition_min,
624 .extra2 = &bank_partition_max,
625 },
626 {
627 .procname = "C1_LB_bank",
628 .mode = 0666,
629 .proc_handler = bank_partition_handler,
630 .data = &bank_partition[3],
631 .maxlen = sizeof(set_partition[3]),
632 .extra1 = &bank_partition_min,
633 .extra2 = &bank_partition_max,
634 },
635 {
636 .procname = "C2_LA_bank",
637 .mode = 0666,
638 .proc_handler = bank_partition_handler,
639 .data = &bank_partition[4],
640 .maxlen = sizeof(set_partition[4]),
641 .extra1 = &bank_partition_min,
642 .extra2 = &bank_partition_max,
643 },
644 {
645 .procname = "C2_LB_bank",
646 .mode = 0666,
647 .proc_handler = bank_partition_handler,
648 .data = &bank_partition[5],
649 .maxlen = sizeof(set_partition[5]),
650 .extra1 = &bank_partition_min,
651 .extra2 = &bank_partition_max,
652 },
653 {
654 .procname = "C3_LA_bank",
655 .mode = 0666,
656 .proc_handler = bank_partition_handler,
657 .data = &bank_partition[6],
658 .maxlen = sizeof(set_partition[6]),
659 .extra1 = &bank_partition_min,
660 .extra2 = &bank_partition_max,
661 },
662 {
663 .procname = "C3_LB_bank",
664 .mode = 0666,
665 .proc_handler = bank_partition_handler,
666 .data = &bank_partition[7],
667 .maxlen = sizeof(set_partition[7]),
668 .extra1 = &bank_partition_min,
669 .extra2 = &bank_partition_max,
670 },
671 {
672 .procname = "Call_LC_bank",
673 .mode = 0666,
674 .proc_handler = bank_partition_handler,
675 .data = &bank_partition[8],
676 .maxlen = sizeof(set_partition[8]),
677 .extra1 = &bank_partition_min,
678 .extra2 = &bank_partition_max,
679 },
680 {
681 .procname = "show_page_pool",
682 .mode = 0666,
683 .proc_handler = show_page_pool_handler,
684 .data = &show_page_pool,
685 .maxlen = sizeof(show_page_pool),
686 }, {
687 .procname = "refill_page_pool",
688 .mode = 0666,
689 .proc_handler = refill_page_pool_handler,
690 .data = &refill_page_pool,
691 .maxlen = sizeof(refill_page_pool),
692 },
693 { }
694};
695
696static struct ctl_table litmus_dir_table[] = {
697 {
698 .procname = "litmus",
699 .mode = 0555,
700 .child = cache_table,
701 },
702 { }
703};
704
705
706static struct ctl_table_header *litmus_sysctls;
707
708
709/*
710 * Initialize this proc interface
711 */
712static int __init litmus_color_init(void)
713{
714 int err=0;
715 printk("Init bankproc.c\n");
716
717 init_variables();
718
719 printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
720
721 litmus_sysctls = register_sysctl_table(litmus_dir_table);
722 if (!litmus_sysctls) {
723 printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
724 err = -EFAULT;
725 goto out;
726 }
727
728 init_color_groups();
729 do_add_pages();
730
731 printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
732out:
733 return err;
734}
735
736module_init(litmus_color_init);
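To make the node-to-page-list mapping above concrete, here is a standalone userspace sketch (not kernel code) that reproduces the index arithmetic from new_alloc_page() with the default Level-C masks; running it shows that the first Level-C allocation (node 8) is served from page list 40, i.e., color 8 in bank 2.

#include <stdio.h>

/* Same logic as num_by_bitmask_index(): bit position of the index-th set bit. */
static unsigned int nth_set_bit(unsigned int bitmask, unsigned int index)
{
	unsigned int pos = 0;

	while (!((bitmask & 1) && index == 0)) {
		if (bitmask & 1)
			index--;
		pos++;
		bitmask >>= 1;
	}
	return pos;
}

int main(void)
{
	const unsigned int number_cachecolors = 16;
	const unsigned int set_mask  = 0x0000ff00; /* Level C: colors 8-15 */
	const unsigned int bank_mask = 0x0000000c; /* Level C: banks 2-3   */
	unsigned int set_index = 0, bank_index = 0; /* round-robin cursors  */
	unsigned int idx;

	idx = nth_set_bit(set_mask, set_index)
	    + number_cachecolors * nth_set_bit(bank_mask, bank_index);
	printf("first Level-C page comes from list %u (color %u, bank %u)\n",
	       idx, idx % number_cachecolors, idx / number_cachecolors);
	return 0;
}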
737
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index f5879f32232a..85d86c02d6e9 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -11,6 +11,7 @@
11#include <litmus/litmus_proc.h> 11#include <litmus/litmus_proc.h>
12#include <litmus/sched_trace.h> 12#include <litmus/sched_trace.h>
13#include <litmus/cache_proc.h> 13#include <litmus/cache_proc.h>
14#include <litmus/mc2_common.h>
14 15
15#include <asm/hardware/cache-l2x0.h> 16#include <asm/hardware/cache-l2x0.h>
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
@@ -144,6 +145,8 @@ static int l1_prefetch_proc;
144static int l2_prefetch_hint_proc; 145static int l2_prefetch_hint_proc;
145static int l2_double_linefill_proc; 146static int l2_double_linefill_proc;
146static int l2_data_prefetch_proc; 147static int l2_data_prefetch_proc;
148static int os_isolation;
149static int use_part;
147 150
148u32 lockdown_reg[9] = { 151u32 lockdown_reg[9] = {
149 0x00000000, 152 0x00000000,
@@ -168,6 +171,7 @@ int lock_all;
168int nr_lockregs; 171int nr_lockregs;
169static raw_spinlock_t cache_lock; 172static raw_spinlock_t cache_lock;
170static raw_spinlock_t prefetch_lock; 173static raw_spinlock_t prefetch_lock;
174static void ***flusher_pages = NULL;
171 175
172extern void l2c310_flush_all(void); 176extern void l2c310_flush_all(void);
173 177
@@ -379,6 +383,79 @@ void cache_lockdown(u32 lock_val, int cpu)
379 //raw_spin_unlock_irqrestore(&cache_lock, flags); 383 //raw_spin_unlock_irqrestore(&cache_lock, flags);
380} 384}
381 385
386void do_partition(enum crit_level lv, int cpu)
387{
388 u32 regs;
389 unsigned long flags;
390
391 if (lock_all || !use_part)
392 return;
393 raw_spin_lock_irqsave(&cache_lock, flags);
394 switch(lv) {
395 case CRIT_LEVEL_A:
396 regs = ~way_partitions[cpu*2];
397 regs &= 0x0000ffff;
398 break;
399 case CRIT_LEVEL_B:
400 regs = ~way_partitions[cpu*2+1];
401 regs &= 0x0000ffff;
402 break;
403 case CRIT_LEVEL_C:
404 case NUM_CRIT_LEVELS:
405 regs = ~way_partitions[8];
406 regs &= 0x0000ffff;
407 break;
408 default:
409 BUG();
410
411 }
412 barrier();
413 cache_lockdown(regs, cpu);
414 barrier();
415
416 raw_spin_unlock_irqrestore(&cache_lock, flags);
417
418 flush_cache(0);
419}
420
421int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
422 size_t *lenp, loff_t *ppos)
423{
424 int ret = 0;
425
426 mutex_lock(&lockdown_proc);
427
428 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
429 if (ret)
430 goto out;
431
432
433 printk("USE_PART HANDLER = %d\n", use_part);
434
435out:
436 mutex_unlock(&lockdown_proc);
437 return ret;
438}
439
440int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
441 size_t *lenp, loff_t *ppos)
442{
443 int ret = 0;
444
445 mutex_lock(&lockdown_proc);
446
447 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
448 if (ret)
449 goto out;
450
451
452 printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
453
454out:
455 mutex_unlock(&lockdown_proc);
456 return ret;
457}
458
382int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer, 459int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
383 size_t *lenp, loff_t *ppos) 460 size_t *lenp, loff_t *ppos)
384{ 461{
@@ -429,6 +506,30 @@ out:
429 return ret; 506 return ret;
430} 507}
431 508
509void inline enter_irq_mode(void)
510{
511 int cpu = smp_processor_id();
512
513 if (os_isolation == 0)
514 return;
515
516 prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
517 prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
518
519 writel_relaxed(way_partitions[8], ld_i_reg(cpu));
520 writel_relaxed(way_partitions[8], ld_d_reg(cpu));
521}
522
523void inline exit_irq_mode(void)
524{
525 int cpu = smp_processor_id();
526
527 if (os_isolation == 0)
528 return;
529 writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
530 writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));
531}
532
432/* Operate on the Cortex-A9's ACTLR register */ 533/* Operate on the Cortex-A9's ACTLR register */
433#define ACTLR_L2_PREFETCH_HINT (1 << 1) 534#define ACTLR_L2_PREFETCH_HINT (1 << 1)
434#define ACTLR_L1_PREFETCH (1 << 2) 535#define ACTLR_L1_PREFETCH (1 << 2)
@@ -684,6 +785,20 @@ static struct ctl_table cache_table[] =
684 .maxlen = sizeof(l2_data_prefetch_proc), 785 .maxlen = sizeof(l2_data_prefetch_proc),
685 }, 786 },
686 { 787 {
788 .procname = "os_isolation",
789 .mode = 0644,
790 .proc_handler = os_isolation_proc_handler,
791 .data = &os_isolation,
792 .maxlen = sizeof(os_isolation),
793 },
794 {
795 .procname = "use_part",
796 .mode = 0644,
797 .proc_handler = use_part_proc_handler,
798 .data = &use_part,
799 .maxlen = sizeof(use_part),
800 },
801 {
687 .procname = "do_perf_test", 802 .procname = "do_perf_test",
688 .mode = 0644, 803 .mode = 0644,
689 .proc_handler = do_perf_test_proc_handler, 804 .proc_handler = do_perf_test_proc_handler,
@@ -838,8 +953,146 @@ extern void v7_flush_kern_cache_all(void);
838 */ 953 */
839void color_flush_page(void *vaddr, size_t size) 954void color_flush_page(void *vaddr, size_t size)
840{ 955{
841 //v7_flush_kern_dcache_area(vaddr, size); 956 v7_flush_kern_dcache_area(vaddr, size);
842 v7_flush_kern_cache_all(); 957 //v7_flush_kern_cache_all();
958}
959
960extern struct page* get_colored_page(unsigned long color);
961
962int setup_flusher_array(void)
963{
964 int color, way, ret = 0;
965 struct page *page;
966
967 if (flusher_pages != NULL)
968 goto out;
969
970 flusher_pages = (void***) kmalloc(MAX_NR_WAYS
971 * sizeof(*flusher_pages), GFP_KERNEL);
972 if (!flusher_pages) {
973 printk(KERN_WARNING "No memory for flusher array!\n");
974 ret = -EINVAL;
975 goto out;
976 }
977
978 for (way = 0; way < MAX_NR_WAYS; way++) {
979 void **flusher_color_arr;
980 flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
981 * MAX_NR_COLORS, GFP_KERNEL);
982 if (!flusher_color_arr) {
983 printk(KERN_WARNING "No memory for flusher array!\n");
984 ret = -ENOMEM;
985 goto out_free;
986 }
987
988 flusher_pages[way] = flusher_color_arr;
989
990 for (color = 0; color < MAX_NR_COLORS; color++) {
991 int node;
992 switch (color) {
993 case 0:
994 node = 32;
995 break;
996 case 1:
997 node = 33;
998 break;
999 case 2:
1000 node = 50;
1001 break;
1002 case 3:
1003 node = 51;
1004 break;
1005 case 4:
1006 node = 68;
1007 break;
1008 case 5:
1009 node = 69;
1010 break;
1011 case 6:
1012 node = 86;
1013 break;
1014 case 7:
1015 node = 87;
1016 break;
1017 case 8:
1018 node = 88;
1019 break;
1020 case 9:
1021 node = 105;
1022 break;
1023 case 10:
1024 node = 106;
1025 break;
1026 case 11:
1027 node = 107;
1028 break;
1029 case 12:
1030 node = 108;
1031 break;
1032 case 13:
1033 node = 125;
1034 break;
1035 case 14:
1036 node = 126;
1037 break;
1038 case 15:
1039 node = 127;
1040 break;
1041 }
1042 page = get_colored_page(node);
1043 if (!page) {
1044 printk(KERN_WARNING "no more colored pages\n");
1045 ret = -EINVAL;
1046 goto out_free;
1047 }
1048 flusher_pages[way][color] = page_address(page);
1049 if (!flusher_pages[way][color]) {
1050 printk(KERN_WARNING "bad page address\n");
1051 ret = -EINVAL;
1052 goto out_free;
1053 }
1054 }
1055 }
1056out:
1057 return ret;
1058out_free:
1059 for (way = 0; way < MAX_NR_WAYS; way++) {
1060 for (color = 0; color < MAX_NR_COLORS; color++) {
1061 /* not bothering to try and give back colored pages */
1062 }
1063 kfree(flusher_pages[way]);
1064 }
1065 kfree(flusher_pages);
1066 flusher_pages = NULL;
1067 return ret;
1068}
1069
1070void flush_cache(int all)
1071{
1072 int way, color, cpu;
1073 unsigned long flags;
1074
1075 raw_spin_lock_irqsave(&cache_lock, flags);
1076 cpu = raw_smp_processor_id();
1077
1078 prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
1079 prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
1080 for (way=0;way<MAX_NR_WAYS;way++) {
1081 if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
1082 !all)
1083 continue;
1084 for (color=0;color<MAX_NR_COLORS;color++) {
1085 void *vaddr = flusher_pages[way][color];
1086 u32 lvalue = unlocked_way[way];
1087 color_read_in_mem_lock(lvalue, LOCK_ALL,
1088 vaddr, vaddr + PAGE_SIZE);
1089 }
1090
1091 }
1092
1093 writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
1094 writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
1095 raw_spin_unlock_irqrestore(&cache_lock, flags);
843} 1096}
844 1097
845#define TRIALS 1000 1098#define TRIALS 1000
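For reference, the way-mask arithmetic in do_partition() can be checked in isolation: a set bit in the PL310 lockdown registers prevents allocation into that way, so the value written is the complement of the ways granted to the given CPU and criticality level, masked to the 16 possible ways. A standalone userspace sketch follows; the way_partitions_example[] values are placeholders mirroring the set_partition[] defaults in bank_proc.c, since the real way_partitions[] table is defined elsewhere in cache_proc.c.

#include <stdio.h>

static unsigned int way_partitions_example[9] = {
	0x00000003, 0x00000003,	/* CPU 0: Level A, Level B */
	0x0000000c, 0x0000000c,	/* CPU 1 */
	0x00000030, 0x00000030,	/* CPU 2 */
	0x000000c0, 0x000000c0,	/* CPU 3 */
	0x0000ff00,		/* Level C / shared */
};

int main(void)
{
	int cpu = 1;
	unsigned int lock_a = ~way_partitions_example[cpu * 2]     & 0x0000ffff;
	unsigned int lock_b = ~way_partitions_example[cpu * 2 + 1] & 0x0000ffff;
	unsigned int lock_c = ~way_partitions_example[8]           & 0x0000ffff;

	printf("CPU %d Level-A lockdown: 0x%04x\n", cpu, lock_a);
	printf("CPU %d Level-B lockdown: 0x%04x\n", cpu, lock_b);
	printf("Level-C lockdown:        0x%04x\n", lock_c);
	return 0;
}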
diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c
new file mode 100644
index 000000000000..a8ea5d9889f3
--- /dev/null
+++ b/litmus/mc2_common.c
@@ -0,0 +1,78 @@
1/*
2 * litmus/mc2_common.c
3 *
4 * Common functions for MC2 plugin.
5 */
6
7#include <linux/percpu.h>
8#include <linux/sched.h>
9#include <linux/list.h>
10#include <linux/slab.h>
11#include <asm/uaccess.h>
12
13#include <litmus/litmus.h>
14#include <litmus/sched_plugin.h>
15#include <litmus/sched_trace.h>
16
17#include <litmus/mc2_common.h>
18
19long mc2_task_client_init(struct task_client *tc, struct mc2_task *mc2_param, struct task_struct *tsk, struct reservation *res)
20{
21 task_client_init(tc, tsk, res);
22 if ((mc2_param->crit < CRIT_LEVEL_A) ||
23 (mc2_param->crit > CRIT_LEVEL_C))
24 return -EINVAL;
25
26 TRACE_TASK(tsk, "mc2_task_client_init: crit_level = %d\n", mc2_param->crit);
27
28 return 0;
29}
30
31asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param)
32{
33 struct task_struct *target;
34 int retval = -EINVAL;
35 struct mc2_task *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
36
37 if (!mp)
38 return -ENOMEM;
39
40 printk("Setting up mc^2 task parameters for process %d.\n", pid);
41
42 if (pid < 0 || param == 0) {
43 goto out;
44 }
45 if (copy_from_user(mp, param, sizeof(*mp))) {
46 retval = -EFAULT;
47 goto out;
48 }
49
50 /* Task search and manipulation must be protected */
51 read_lock_irq(&tasklist_lock);
52 if (!(target = find_task_by_vpid(pid))) {
53 retval = -ESRCH;
54 goto out_unlock;
55 }
56
57 if (is_realtime(target)) {
58 /* The task is already a real-time task.
59 * We do not allow parameter changes at this point.
60 */
61 retval = -EBUSY;
62 goto out_unlock;
63 }
64 if (mp->crit < CRIT_LEVEL_A || mp->crit >= NUM_CRIT_LEVELS) {
65 printk(KERN_INFO "litmus: real-time task %d rejected "
66 "because of invalid criticality level\n", pid);
67 goto out_unlock;
68 }
69
70 //target->rt_param.plugin_state = mp;
71 target->rt_param.mc2_data = mp;
72
73 retval = 0;
74out_unlock:
75 read_unlock_irq(&tasklist_lock);
76out:
77 return retval;
78} \ No newline at end of file
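From userspace, the new system call would be invoked roughly as below. This is an illustrative sketch only: the struct is mirrored by hand, and __NR_set_mc2_task_param is assumed to be provided by the LITMUS^RT syscall headers; the syscall number itself is not part of this patch, and liblitmus would normally wrap the call.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hand-mirrored from include/litmus/mc2_common.h, for illustration. */
struct mc2_task {
	int crit;		/* CRIT_LEVEL_A = 0, _B = 1, _C = 2 */
	unsigned int res_id;	/* reservation to attach this task to */
};

int main(void)
{
	struct mc2_task mp = { .crit = 2 /* CRIT_LEVEL_C */, .res_id = 7 };

	/* __NR_set_mc2_task_param must come from the LITMUS^RT kernel headers. */
	if (syscall(__NR_set_mc2_task_param, getpid(), &mp) != 0) {
		perror("set_mc2_task_param");
		return 1;
	}
	return 0;
}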
diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c
new file mode 100644
index 000000000000..4a2fee575127
--- /dev/null
+++ b/litmus/polling_reservations.c
@@ -0,0 +1,564 @@
1#include <linux/sched.h>
2
3#include <litmus/litmus.h>
4#include <litmus/reservation.h>
5#include <litmus/polling_reservations.h>
6
7
8static void periodic_polling_client_arrives(
9 struct reservation* res,
10 struct reservation_client *client
11)
12{
13 struct polling_reservation *pres =
14 container_of(res, struct polling_reservation, res);
15 lt_t instances, tmp;
16
17 list_add_tail(&client->list, &res->clients);
18
19 switch (res->state) {
20 case RESERVATION_INACTIVE:
21 /* Figure out next replenishment time. */
22 if (res->env->time_zero == 0) {
23 tmp = res->env->current_time - res->env->time_zero;
24 instances = div64_u64(tmp, pres->period);
25 res->next_replenishment =
26 (instances + 1) * pres->period + pres->offset;
27 }
28 else {
29 tmp = res->env->current_time - res->env->time_zero;
30 instances = div64_u64(tmp, pres->period);
31 res->next_replenishment = res->env->time_zero + instances * pres->period;
32 }
33
34 TRACE("ENV_TIME_ZERO %llu\n", res->env->time_zero);
35 TRACE("pol-res: R%d activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n",
36 res->id, tmp, instances, pres->period, res->next_replenishment,
37 res->env->current_time);
38
39 res->env->change_state(res->env, res,
40 RESERVATION_DEPLETED);
41 break;
42
43 case RESERVATION_ACTIVE:
44 case RESERVATION_DEPLETED:
45 /* do nothing */
46 break;
47
48 case RESERVATION_ACTIVE_IDLE:
49 res->blocked_by_ghost = 0;
50 res->env->change_state(res->env, res,
51 RESERVATION_ACTIVE);
52 break;
53 }
54}
55
56
57static void periodic_polling_client_departs(
58 struct reservation *res,
59 struct reservation_client *client,
60 int did_signal_job_completion
61)
62{
63 list_del(&client->list);
64
65 switch (res->state) {
66 case RESERVATION_INACTIVE:
67 case RESERVATION_ACTIVE_IDLE:
68 BUG(); /* INACTIVE or IDLE <=> no client */
69 break;
70
71 case RESERVATION_ACTIVE:
72 if (list_empty(&res->clients)) {
73 res->env->change_state(res->env, res,
74// RESERVATION_ACTIVE_IDLE);
75 res->cur_budget ?
76 RESERVATION_ACTIVE_IDLE :
77 RESERVATION_DEPLETED);
78// did_signal_job_completion ?
79// RESERVATION_DEPLETED :
80// RESERVATION_ACTIVE_IDLE);
81 } /* else: nothing to do, more clients ready */
82 break;
83
84 case RESERVATION_DEPLETED:
85 /* do nothing */
86 break;
87 }
88}
89
90static void periodic_polling_on_replenishment(
91 struct reservation *res
92)
93{
94 struct polling_reservation *pres =
95 container_of(res, struct polling_reservation, res);
96
97 /* replenish budget */
98 res->cur_budget = pres->max_budget;
99 res->next_replenishment += pres->period;
100 res->budget_consumed = 0;
101
102 TRACE("polling_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
103 switch (res->state) {
104 case RESERVATION_DEPLETED:
105 case RESERVATION_INACTIVE:
106 case RESERVATION_ACTIVE_IDLE:
107 if (list_empty(&res->clients))
108 /* no clients => poll again later */
109 res->env->change_state(res->env, res,
110 RESERVATION_INACTIVE);
111 else
112 /* we have clients & budget => ACTIVE */
113 res->env->change_state(res->env, res,
114 RESERVATION_ACTIVE);
115 break;
116
117 case RESERVATION_ACTIVE:
118 /* Replenished while active => tardy? In any case,
119 * go ahead and stay active. */
120 break;
121 }
122}
123
124static void periodic_polling_on_replenishment_edf(
125 struct reservation *res
126)
127{
128 struct polling_reservation *pres =
129 container_of(res, struct polling_reservation, res);
130
131 /* update current priority */
132 res->priority = res->next_replenishment + pres->deadline;
133
134 /* do common updates */
135 periodic_polling_on_replenishment(res);
136}
137
138static void common_drain_budget(
139 struct reservation *res,
140 lt_t how_much)
141{
142 if (how_much >= res->cur_budget)
143 res->cur_budget = 0;
144 else
145 res->cur_budget -= how_much;
146
147 res->budget_consumed += how_much;
148 res->budget_consumed_total += how_much;
149
150 switch (res->state) {
151 case RESERVATION_DEPLETED:
152 case RESERVATION_INACTIVE:
153 //BUG();
154 TRACE("!!!!!!!!!!!!!!!STATE ERROR R%d STATE(%d)\n", res->id, res->state);
155 break;
156
157 case RESERVATION_ACTIVE_IDLE:
158 case RESERVATION_ACTIVE:
159 if (!res->cur_budget) {
160 res->env->change_state(res->env, res,
161 RESERVATION_DEPLETED);
162 } /* else: stay in current state */
163 break;
164 }
165}
166
167static struct reservation_ops periodic_polling_ops_fp = {
168 .dispatch_client = default_dispatch_client,
169 .client_arrives = periodic_polling_client_arrives,
170 .client_departs = periodic_polling_client_departs,
171 .replenish = periodic_polling_on_replenishment,
172 .drain_budget = common_drain_budget,
173};
174
175static struct reservation_ops periodic_polling_ops_edf = {
176 .dispatch_client = default_dispatch_client,
177 .client_arrives = periodic_polling_client_arrives,
178 .client_departs = periodic_polling_client_departs,
179 .replenish = periodic_polling_on_replenishment_edf,
180 .drain_budget = common_drain_budget,
181};
182
183
184
185
186static void sporadic_polling_client_arrives_fp(
187 struct reservation* res,
188 struct reservation_client *client
189)
190{
191 struct polling_reservation *pres =
192 container_of(res, struct polling_reservation, res);
193
194 list_add_tail(&client->list, &res->clients);
195
196 switch (res->state) {
197 case RESERVATION_INACTIVE:
198 /* Replenish now. */
199 res->cur_budget = pres->max_budget;
200 res->next_replenishment =
201 res->env->current_time + pres->period;
202
203 res->env->change_state(res->env, res,
204 RESERVATION_ACTIVE);
205 break;
206
207 case RESERVATION_ACTIVE:
208 case RESERVATION_DEPLETED:
209 /* do nothing */
210 break;
211
212 case RESERVATION_ACTIVE_IDLE:
213 res->env->change_state(res->env, res,
214 RESERVATION_ACTIVE);
215 break;
216 }
217}
218
219static void sporadic_polling_client_arrives_edf(
220 struct reservation* res,
221 struct reservation_client *client
222)
223{
224 struct polling_reservation *pres =
225 container_of(res, struct polling_reservation, res);
226
227 list_add_tail(&client->list, &res->clients);
228
229 switch (res->state) {
230 case RESERVATION_INACTIVE:
231 /* Replenish now. */
232 res->cur_budget = pres->max_budget;
233 res->next_replenishment =
234 res->env->current_time + pres->period;
235 res->priority =
236 res->env->current_time + pres->deadline;
237
238 res->env->change_state(res->env, res,
239 RESERVATION_ACTIVE);
240 break;
241
242 case RESERVATION_ACTIVE:
243 case RESERVATION_DEPLETED:
244 /* do nothing */
245 break;
246
247 case RESERVATION_ACTIVE_IDLE:
248 res->env->change_state(res->env, res,
249 RESERVATION_ACTIVE);
250 break;
251 }
252}
253
254static struct reservation_ops sporadic_polling_ops_fp = {
255 .dispatch_client = default_dispatch_client,
256 .client_arrives = sporadic_polling_client_arrives_fp,
257 .client_departs = periodic_polling_client_departs,
258 .replenish = periodic_polling_on_replenishment,
259 .drain_budget = common_drain_budget,
260};
261
262static struct reservation_ops sporadic_polling_ops_edf = {
263 .dispatch_client = default_dispatch_client,
264 .client_arrives = sporadic_polling_client_arrives_edf,
265 .client_departs = periodic_polling_client_departs,
266 .replenish = periodic_polling_on_replenishment_edf,
267 .drain_budget = common_drain_budget,
268};
269
270void polling_reservation_init(
271 struct polling_reservation *pres,
272 int use_edf_prio,
273 int use_periodic_polling,
274 lt_t budget, lt_t period, lt_t deadline, lt_t offset
275)
276{
277 if (!deadline)
278 deadline = period;
279 BUG_ON(budget > period);
280 BUG_ON(budget > deadline);
281 BUG_ON(offset >= period);
282
283 reservation_init(&pres->res);
284 pres->max_budget = budget;
285 pres->period = period;
286 pres->deadline = deadline;
287 pres->offset = offset;
288 TRACE_TASK(current, "polling_reservation_init: periodic %d, use_edf %d\n", use_periodic_polling, use_edf_prio);
289 if (use_periodic_polling) {
290 if (use_edf_prio)
291 pres->res.ops = &periodic_polling_ops_edf;
292 else
293 pres->res.ops = &periodic_polling_ops_fp;
294 } else {
295 if (use_edf_prio)
296 pres->res.ops = &sporadic_polling_ops_edf;
297 else
298 pres->res.ops = &sporadic_polling_ops_fp;
299 }
300}
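A minimal usage sketch of the initializer above (illustrative only; the kzalloc() allocation, the id value, and the caller-owned 'sup_env' passed to sup_add_new_reservation() are assumptions, and all times are in nanoseconds):

	/* periodic polling reservation, EDF priority, 10 ms budget per 100 ms period */
	struct polling_reservation *pres = kzalloc(sizeof(*pres), GFP_KERNEL);
	if (pres) {
		polling_reservation_init(pres, 1 /* use_edf_prio */,
			1 /* use_periodic_polling */,
			10000000ULL /* budget */, 100000000ULL /* period */,
			0 /* deadline: 0 => implicit (= period) */, 0 /* offset */);
		pres->res.id = 1; /* hypothetical id chosen by the caller */
		sup_add_new_reservation(&sup_env, &pres->res);
	}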
301
302
303static lt_t td_cur_major_cycle_start(struct table_driven_reservation *tdres)
304{
305 lt_t x, tmp;
306
307 tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
308 x = div64_u64(tmp, tdres->major_cycle);
309 x *= tdres->major_cycle;
310 return x;
311}
312
313
314static lt_t td_next_major_cycle_start(struct table_driven_reservation *tdres)
315{
316 lt_t x, tmp;
317
318 tmp = tdres->res.env->current_time - tdres->res.env->time_zero;
319 x = div64_u64(tmp, tdres->major_cycle) + 1;
320 x *= tdres->major_cycle;
321 return x;
322}
323
324static void td_client_arrives(
325 struct reservation* res,
326 struct reservation_client *client
327)
328{
329 struct table_driven_reservation *tdres =
330 container_of(res, struct table_driven_reservation, res);
331
332 list_add_tail(&client->list, &res->clients);
333
334 switch (res->state) {
335 case RESERVATION_INACTIVE:
336 /* Figure out first replenishment time. */
337 tdres->major_cycle_start = td_next_major_cycle_start(tdres);
338 res->next_replenishment = tdres->major_cycle_start;
339 res->next_replenishment += tdres->intervals[0].start;
340 tdres->next_interval = 0;
341
342 res->env->change_state(res->env, res,
343 RESERVATION_DEPLETED);
344 break;
345
346 case RESERVATION_ACTIVE:
347 case RESERVATION_DEPLETED:
348 /* do nothing */
349 break;
350
351 case RESERVATION_ACTIVE_IDLE:
352 res->env->change_state(res->env, res,
353 RESERVATION_ACTIVE);
354 break;
355 }
356}
357
358static void td_client_departs(
359 struct reservation *res,
360 struct reservation_client *client,
361 int did_signal_job_completion
362)
363{
364 list_del(&client->list);
365
366 switch (res->state) {
367 case RESERVATION_INACTIVE:
368 case RESERVATION_ACTIVE_IDLE:
369 //BUG(); /* INACTIVE or IDLE <=> no client */
370 break;
371
372 case RESERVATION_ACTIVE:
373 if (list_empty(&res->clients)) {
374 res->env->change_state(res->env, res,
375 RESERVATION_ACTIVE_IDLE);
376 } /* else: nothing to do, more clients ready */
377 break;
378
379 case RESERVATION_DEPLETED:
380 /* do nothing */
381 break;
382 }
383}
384
385static lt_t td_time_remaining_until_end(struct table_driven_reservation *tdres)
386{
387 lt_t now = tdres->res.env->current_time;
388 lt_t end = tdres->cur_interval.end;
389 //TRACE("td_remaining(%u): start=%llu now=%llu end=%llu state=%d\n", tdres->res.id, tdres->cur_interval.start, now, end, tdres->res.state);
390 if (now >= end)
391 return 0;
392 else
393 return end - now;
394}
395
396static void td_replenish(
397 struct reservation *res)
398{
399 struct table_driven_reservation *tdres =
400 container_of(res, struct table_driven_reservation, res);
401
402 //TRACE("td_replenish(%u): expected_replenishment=%llu\n", res->id, res->next_replenishment);
403
404 /* figure out current interval */
405 tdres->cur_interval.start = tdres->major_cycle_start +
406 tdres->intervals[tdres->next_interval].start;
407 tdres->cur_interval.end = tdres->major_cycle_start +
408 tdres->intervals[tdres->next_interval].end;
409/* TRACE("major_cycle_start=%llu => [%llu, %llu]\n",
410 tdres->major_cycle_start,
411 tdres->cur_interval.start,
412 tdres->cur_interval.end);
413*/
414 /* reset budget */
415 res->cur_budget = td_time_remaining_until_end(tdres);
416 res->budget_consumed = 0;
417 //TRACE("td_replenish(%u): %s budget=%llu\n", res->id, res->cur_budget ? "" : "WARNING", res->cur_budget);
418
419 /* prepare next slot */
420 tdres->next_interval = (tdres->next_interval + 1) % tdres->num_intervals;
421 if (!tdres->next_interval)
422 /* wrap to next major cycle */
423 tdres->major_cycle_start += tdres->major_cycle;
424
425 /* determine next time this reservation becomes eligible to execute */
426 res->next_replenishment = tdres->major_cycle_start;
427 res->next_replenishment += tdres->intervals[tdres->next_interval].start;
428 //TRACE("td_replenish(%u): next_replenishment=%llu\n", res->id, res->next_replenishment);
429
430
431 switch (res->state) {
432 case RESERVATION_DEPLETED:
433 case RESERVATION_ACTIVE:
434 case RESERVATION_ACTIVE_IDLE:
435 if (list_empty(&res->clients))
436 res->env->change_state(res->env, res,
437 RESERVATION_ACTIVE_IDLE);
438 else
439 /* we have clients & budget => ACTIVE */
440 res->env->change_state(res->env, res,
441 RESERVATION_ACTIVE);
442 break;
443
444 case RESERVATION_INACTIVE:
445 BUG();
446 break;
447 }
448}
449
450static void td_drain_budget(
451 struct reservation *res,
452 lt_t how_much)
453{
454 struct table_driven_reservation *tdres =
455 container_of(res, struct table_driven_reservation, res);
456
457 res->budget_consumed += how_much;
458 res->budget_consumed_total += how_much;
459
460 /* Table-driven scheduling: instead of tracking the budget, we compute
461 * how much time is left in this allocation interval. */
462
463 /* sanity check: we should never try to drain from future slots */
464 //TRACE("TD_DRAIN STATE(%d) [%llu,%llu] %llu ?\n", res->state, tdres->cur_interval.start, tdres->cur_interval.end, res->env->current_time);
465 //BUG_ON(tdres->cur_interval.start > res->env->current_time);
466 if (tdres->cur_interval.start > res->env->current_time)
467 TRACE("TD_DRAIN BUG!!!!!!!!!!\n");
468
469 switch (res->state) {
470 case RESERVATION_DEPLETED:
471 case RESERVATION_INACTIVE:
472 //BUG();
473 TRACE("TD_DRAIN!!!!!!!!! RES_STATE = %d\n", res->state);
474 break;
475
476 case RESERVATION_ACTIVE_IDLE:
477 case RESERVATION_ACTIVE:
478 res->cur_budget = td_time_remaining_until_end(tdres);
479 //TRACE("td_drain_budget(%u): drained to budget=%llu\n", res->id, res->cur_budget);
480 if (!res->cur_budget) {
481 res->env->change_state(res->env, res,
482 RESERVATION_DEPLETED);
483 } else {
484 /* sanity check budget calculation */
485 //BUG_ON(res->env->current_time >= tdres->cur_interval.end);
486 //BUG_ON(res->env->current_time < tdres->cur_interval.start);
487 if (res->env->current_time >= tdres->cur_interval.end)
488 printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING1\n");
489 if (res->env->current_time < tdres->cur_interval.start)
490 printk(KERN_ALERT "TD_DRAIN_BUDGET WARNING2\n");
491 }
492
493 break;
494 }
495}
496
497static struct task_struct* td_dispatch_client(
498 struct reservation *res,
499 lt_t *for_at_most)
500{
501 struct task_struct *t;
502 struct table_driven_reservation *tdres =
503 container_of(res, struct table_driven_reservation, res);
504
505 /* usual logic for selecting a client */
506 t = default_dispatch_client(res, for_at_most);
507
508 TRACE_TASK(t, "td_dispatch_client(%u): selected, budget=%llu\n",
509 res->id, res->cur_budget);
510
511 /* check how much budget we have left in this time slot */
512 res->cur_budget = td_time_remaining_until_end(tdres);
513
514 TRACE_TASK(t, "td_dispatch_client(%u): updated to budget=%llu next=%d\n",
515 res->id, res->cur_budget, tdres->next_interval);
516
517 if (unlikely(!res->cur_budget)) {
518 /* Unlikely case: if we ran out of budget, the user configured
519 * a broken scheduling table (overlapping table slots).
520 * Not much we can do about this, but we can't dispatch a job
521 * now without causing overload. So let's register this reservation
522 * as depleted and wait for the next allocation. */
523 TRACE("td_dispatch_client(%u): budget unexpectedly depleted "
524 "(check scheduling table for unintended overlap)\n",
525 res->id);
526 res->env->change_state(res->env, res,
527 RESERVATION_DEPLETED);
528 return NULL;
529 } else
530 return t;
531}
532
533static struct reservation_ops td_ops = {
534 .dispatch_client = td_dispatch_client,
535 .client_arrives = td_client_arrives,
536 .client_departs = td_client_departs,
537 .replenish = td_replenish,
538 .drain_budget = td_drain_budget,
539};
540
541void table_driven_reservation_init(
542 struct table_driven_reservation *tdres,
543 lt_t major_cycle,
544 struct lt_interval *intervals,
545 unsigned int num_intervals)
546{
547 unsigned int i;
548
549 /* sanity checking */
550 BUG_ON(!num_intervals);
551 for (i = 0; i < num_intervals; i++)
552 BUG_ON(intervals[i].end <= intervals[i].start);
553 for (i = 0; i + 1 < num_intervals; i++)
554 BUG_ON(intervals[i + 1].start <= intervals[i].end);
555 BUG_ON(intervals[num_intervals - 1].end > major_cycle);
556
557 reservation_init(&tdres->res);
558 tdres->major_cycle = major_cycle;
559 tdres->intervals = intervals;
560 tdres->cur_interval.start = 0;
561 tdres->cur_interval.end = 0;
562 tdres->num_intervals = num_intervals;
563 tdres->res.ops = &td_ops;
564}
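For reference, a table that passes the sanity checks above (illustrative values; the allocation and the slot times are assumptions): a 100 ms major cycle with two disjoint slots, [0 ms, 10 ms) and [50 ms, 60 ms).

	static struct lt_interval example_slots[] = {
		{ .start = 0,           .end = 10000000ULL },  /* [0 ms, 10 ms)  */
		{ .start = 50000000ULL, .end = 60000000ULL },  /* [50 ms, 60 ms) */
	};
	struct table_driven_reservation *tdres = kzalloc(sizeof(*tdres), GFP_KERNEL);
	if (tdres)
		table_driven_reservation_init(tdres, 100000000ULL /* major cycle */,
			example_slots, ARRAY_SIZE(example_slots));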
diff --git a/litmus/reservation.c b/litmus/reservation.c
new file mode 100644
index 000000000000..07e38cb7d138
--- /dev/null
+++ b/litmus/reservation.c
@@ -0,0 +1,709 @@
1#include <linux/sched.h>
2#include <linux/slab.h>
3
4#include <litmus/litmus.h>
5#include <litmus/reservation.h>
6
7//#define TRACE(fmt, args...) do {} while (false)
8//#define TRACE_TASK(fmt, args...) do {} while (false)
9
10#define BUDGET_ENFORCEMENT_AT_C 0
11
12void reservation_init(struct reservation *res)
13{
14 memset(res, 0, sizeof(*res));
15 res->state = RESERVATION_INACTIVE;
16 INIT_LIST_HEAD(&res->clients);
17}
18
19struct task_struct* default_dispatch_client(
20 struct reservation *res,
21 lt_t *for_at_most)
22{
23 struct reservation_client *client, *next;
24 struct task_struct* tsk;
25
26 BUG_ON(res->state != RESERVATION_ACTIVE);
27 *for_at_most = 0;
28
29 list_for_each_entry_safe(client, next, &res->clients, list) {
30 tsk = client->dispatch(client);
31 if (likely(tsk)) {
32 return tsk;
33 }
34 }
35 return NULL;
36}
37
38static struct task_struct * task_client_dispatch(struct reservation_client *client)
39{
40 struct task_client *tc = container_of(client, struct task_client, client);
41 return tc->task;
42}
43
44void task_client_init(struct task_client *tc, struct task_struct *tsk,
45 struct reservation *res)
46{
47 memset(&tc->client, 0, sizeof(tc->client));
48 tc->client.dispatch = task_client_dispatch;
49 tc->client.reservation = res;
50 tc->task = tsk;
51}
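A sketch of how a task is wired to a reservation through this client abstraction (illustrative; 'res' is assumed to be an initialized reservation that was already added to an environment):

	struct task_client tc;

	task_client_init(&tc, current, res);
	/* job release: the environment may now hand out the task via dispatch_client() */
	res->ops->client_arrives(res, &tc.client);
	/* job completion (pass 0 instead for a self-suspension) */
	res->ops->client_departs(res, &tc.client, 1 /* job completed */);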
52
53static void sup_scheduler_update_at(
54 struct sup_reservation_environment* sup_env,
55 lt_t when)
56{
57 //TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when);
58 if (sup_env->next_scheduler_update > when)
59 sup_env->next_scheduler_update = when;
60}
61
62void sup_scheduler_update_after(
63 struct sup_reservation_environment* sup_env,
64 lt_t timeout)
65{
66 sup_scheduler_update_at(sup_env, sup_env->env.current_time + timeout);
67}
68
69static int _sup_queue_depleted(
70 struct sup_reservation_environment* sup_env,
71 struct reservation *res)
72{
73 struct list_head *pos;
74 struct reservation *queued;
75 int passed_earlier = 0;
76
77 list_for_each(pos, &sup_env->depleted_reservations) {
78 queued = list_entry(pos, struct reservation, list);
79 if (queued->next_replenishment > res->next_replenishment) {
80 list_add(&res->list, pos->prev);
81 return passed_earlier;
82 } else
83 passed_earlier = 1;
84 }
85
86 list_add_tail(&res->list, &sup_env->depleted_reservations);
87
88 return passed_earlier;
89}
90
91static void sup_queue_depleted(
92 struct sup_reservation_environment* sup_env,
93 struct reservation *res)
94{
95 int passed_earlier = _sup_queue_depleted(sup_env, res);
96
97 /* check for updated replenishment time */
98 if (!passed_earlier)
99 sup_scheduler_update_at(sup_env, res->next_replenishment);
100}
101
102static int _sup_queue_active(
103 struct sup_reservation_environment* sup_env,
104 struct reservation *res)
105{
106 struct list_head *pos;
107 struct reservation *queued;
108 int passed_active = 0;
109
110 list_for_each(pos, &sup_env->active_reservations) {
111 queued = list_entry(pos, struct reservation, list);
112 if (queued->priority > res->priority) {
113 list_add(&res->list, pos->prev);
114 return passed_active;
115 } else if (queued->state == RESERVATION_ACTIVE)
116 passed_active = 1;
117 }
118
119 list_add_tail(&res->list, &sup_env->active_reservations);
120 return passed_active;
121}
122
123static void sup_queue_active(
124 struct sup_reservation_environment* sup_env,
125 struct reservation *res)
126{
127 int passed_active = _sup_queue_active(sup_env, res);
128
129 /* check for possible preemption */
130 if (res->state == RESERVATION_ACTIVE && !passed_active)
131 sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
132 else {
133 /* Active means this reservation is draining budget => make sure
134 * the scheduler is called to notice when the reservation budget has been
135 * drained completely. */
136 sup_scheduler_update_after(sup_env, res->cur_budget);
137 }
138}
139
140static void sup_queue_reservation(
141 struct sup_reservation_environment* sup_env,
142 struct reservation *res)
143{
144 switch (res->state) {
145 case RESERVATION_INACTIVE:
146 list_add(&res->list, &sup_env->inactive_reservations);
147 break;
148
149 case RESERVATION_DEPLETED:
150 sup_queue_depleted(sup_env, res);
151 break;
152
153 case RESERVATION_ACTIVE_IDLE:
154 case RESERVATION_ACTIVE:
155 sup_queue_active(sup_env, res);
156 break;
157 }
158}
159
160void sup_add_new_reservation(
161 struct sup_reservation_environment* sup_env,
162 struct reservation* new_res)
163{
164 new_res->env = &sup_env->env;
165 sup_queue_reservation(sup_env, new_res);
166}
167
168struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env,
169 unsigned int id)
170{
171 struct reservation *res;
172
173 list_for_each_entry(res, &sup_env->active_reservations, list) {
174 if (res->id == id)
175 return res;
176 }
177 list_for_each_entry(res, &sup_env->inactive_reservations, list) {
178 if (res->id == id)
179 return res;
180 }
181 list_for_each_entry(res, &sup_env->depleted_reservations, list) {
182 if (res->id == id)
183 return res;
184 }
185
186 return NULL;
187}
188
189static void sup_charge_budget(
190 struct sup_reservation_environment* sup_env,
191 lt_t delta)
192{
193 struct list_head *pos, *next;
194 struct reservation *res;
195
196 int encountered_active = 0;
197
198 list_for_each_safe(pos, next, &sup_env->active_reservations) {
199 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
200 res = list_entry(pos, struct reservation, list);
201 if (res->state == RESERVATION_ACTIVE) {
202 TRACE("sup_charge_budget ACTIVE R%u drain %llu\n", res->id, delta);
203 if (encountered_active == 0 && res->blocked_by_ghost == 0) {
204 TRACE("DRAIN !!\n");
205 res->ops->drain_budget(res, delta);
206 encountered_active = 1;
207 }
208 } else {
209 //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
210 TRACE("sup_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
211 res->ops->drain_budget(res, delta);
212 }
213 if (res->state == RESERVATION_ACTIVE ||
214 res->state == RESERVATION_ACTIVE_IDLE)
215 {
216 /* make sure the scheduler is invoked when this reservation exhausts
217 * its remaining budget */
218 TRACE("requesting scheduler update for reservation %u in %llu nanoseconds\n",
219 res->id, res->cur_budget);
220 sup_scheduler_update_after(sup_env, res->cur_budget);
221 }
222 //if (encountered_active == 2)
223 /* stop at the first ACTIVE reservation */
224 // break;
225 }
226 //TRACE("finished charging budgets\n");
227}
228
229static void sup_replenish_budgets(struct sup_reservation_environment* sup_env)
230{
231 struct list_head *pos, *next;
232 struct reservation *res;
233
234 list_for_each_safe(pos, next, &sup_env->depleted_reservations) {
235 res = list_entry(pos, struct reservation, list);
236 if (res->next_replenishment <= sup_env->env.current_time) {
237 res->ops->replenish(res);
238 } else {
239 /* list is ordered by increasing depletion times */
240 break;
241 }
242 }
243 //TRACE("finished replenishing budgets\n");
244
245 /* request a scheduler update at the next replenishment instant */
246 res = list_first_entry_or_null(&sup_env->depleted_reservations,
247 struct reservation, list);
248 if (res)
249 sup_scheduler_update_at(sup_env, res->next_replenishment);
250}
251
252void sup_update_time(
253 struct sup_reservation_environment* sup_env,
254 lt_t now)
255{
256 lt_t delta;
257
258 /* If the time didn't advance, there is nothing to do.
259 * This check makes it safe to call sup_update_time() potentially
260 * multiple times (e.g., via different code paths). */
261 //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
262 if (unlikely(now <= sup_env->env.current_time))
263 return;
264
265 delta = now - sup_env->env.current_time;
266 sup_env->env.current_time = now;
267
268 /* check if future updates are required */
269 if (sup_env->next_scheduler_update <= sup_env->env.current_time)
270 sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
271
272 /* deplete budgets by passage of time */
273 //TRACE("CHARGE###\n");
274 sup_charge_budget(sup_env, delta);
275
276 /* check if any budgets were replenished */
277 //TRACE("REPLENISH###\n");
278 sup_replenish_budgets(sup_env);
279}
280
281struct task_struct* sup_dispatch(struct sup_reservation_environment* sup_env)
282{
283 struct reservation *res, *next;
284 struct task_struct *tsk = NULL;
285 lt_t time_slice;
286
287 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
288 if (res->state == RESERVATION_ACTIVE) {
289 tsk = res->ops->dispatch_client(res, &time_slice);
290 if (likely(tsk)) {
291 if (time_slice)
292 sup_scheduler_update_after(sup_env, time_slice);
293 sup_scheduler_update_after(sup_env, res->cur_budget);
294 return tsk;
295 }
296 }
297 }
298
299 return NULL;
300}
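Taken together, a per-CPU scheduler is expected to drive this environment roughly as sketched below (simplified; 'state' stands for the caller's per-CPU plugin state, and the MC2 plugin in sched_mc2.c additionally holds its state lock and consults the global environment):

	lt_t now = litmus_clock();

	sup_update_time(&state->sup_env, now);  /* charge and replenish budgets */
	tsk = sup_dispatch(&state->sup_env);    /* highest-priority ACTIVE client, or NULL */
	/* ...then arm a timer for state->sup_env.next_scheduler_update... */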
301
302static void sup_res_change_state(
303 struct reservation_environment* env,
304 struct reservation *res,
305 reservation_state_t new_state)
306{
307 struct sup_reservation_environment* sup_env;
308
309 sup_env = container_of(env, struct sup_reservation_environment, env);
310
311 TRACE("reservation R%d state %d->%d at %llu\n",
312 res->id, res->state, new_state, env->current_time);
313
314 list_del(&res->list);
315 /* check if we need to reschedule because we lost an active reservation */
316 if (res->state == RESERVATION_ACTIVE && !sup_env->will_schedule)
317 sup_env->next_scheduler_update = SUP_RESCHEDULE_NOW;
318 res->state = new_state;
319 sup_queue_reservation(sup_env, res);
320}
321
322void sup_init(struct sup_reservation_environment* sup_env)
323{
324 memset(sup_env, 0, sizeof(*sup_env));
325
326 INIT_LIST_HEAD(&sup_env->active_reservations);
327 INIT_LIST_HEAD(&sup_env->depleted_reservations);
328 INIT_LIST_HEAD(&sup_env->inactive_reservations);
329
330 sup_env->env.change_state = sup_res_change_state;
331
332 sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE;
333}
334
335struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env,
336 unsigned int id)
337{
338 struct reservation *res;
339
340 list_for_each_entry(res, &gmp_env->active_reservations, list) {
341 if (res->id == id)
342 return res;
343 }
344 list_for_each_entry(res, &gmp_env->inactive_reservations, list) {
345 if (res->id == id)
346 return res;
347 }
348 list_for_each_entry(res, &gmp_env->depleted_reservations, list) {
349 if (res->id == id)
350 return res;
351 }
352
353 return NULL;
354}
355
356
357struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env,
358 unsigned int id)
359{
360 struct next_timer_event *event;
361
362 list_for_each_entry(event, &gmp_env->next_events, list) {
363 if (event->id == id)
364 return event;
365 }
366
367 return NULL;
368}
369
370
371struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env,
372 lt_t when)
373{
374 struct next_timer_event *event;
375
376 list_for_each_entry(event, &gmp_env->next_events, list) {
377 if (event->next_update == when)
378 return event;
379 }
380
381 return NULL;
382}
383
384#define TIMER_RESOLUTION 100000L
385
386static void gmp_add_event(
387 struct gmp_reservation_environment* gmp_env,
388 lt_t when, unsigned int id, event_type_t type)
389{
390 struct next_timer_event *nevent, *queued;
391 struct list_head *pos;
392 int found = 0, update = 0;
393
394 //when = div64_u64(when, TIMER_RESOLUTION);
395 //when *= TIMER_RESOLUTION;
396//printk(KERN_ALERT "GMP_ADD id=%d type=%d when=%llu\n", id, type, when);
397 nevent = gmp_find_event_by_id(gmp_env, id);
398
399 if (nevent)
400 TRACE("EVENT R%d update prev = %llu, new = %llu\n", nevent->id, nevent->next_update, when);
401
402 if (nevent && nevent->next_update > when) {
403 list_del(&nevent->list);
404 update = 1;
405
406 }
407
408 if (!nevent || nevent->type != type || update == 1) {
409 if (update == 0)
410 nevent = kzalloc(sizeof(*nevent), GFP_ATOMIC);
411 BUG_ON(!nevent);
412 nevent->next_update = when;
413 nevent->id = id;
414 nevent->type = type;
415 nevent->timer_armed_on = NO_CPU;
416
417 list_for_each(pos, &gmp_env->next_events) {
418 queued = list_entry(pos, struct next_timer_event, list);
419 if (queued->next_update > nevent->next_update) {
420 list_add(&nevent->list, pos->prev);
421 found = 1;
422 TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at before %llu\n", nevent->id, nevent->type, nevent->next_update, queued->next_update);
423 break;
424 }
425 }
426
427 if (!found) {
428 list_add_tail(&nevent->list, &gmp_env->next_events);
429 TRACE("NEXT_EVENT id=%d type=%d update=%llu ADDED at TAIL\n", nevent->id, nevent->type, nevent->next_update);
430 }
431 } else {
432 //TRACE("EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
433; //printk(KERN_ALERT "EVENT FOUND id = %d type=%d when=%llu, NEW EVENT type=%d when=%llu\n", nevent->id, nevent->type, nevent->next_update, type, when);
434 }
435
436 TRACE("======START PRINTING EVENT LIST======\n");
437 gmp_print_events(gmp_env, litmus_clock());
438 TRACE("======FINISH PRINTING EVENT LIST======\n");
439}
440
441void gmp_add_event_after(
442 struct gmp_reservation_environment* gmp_env, lt_t timeout, unsigned int id, event_type_t type)
443{
444 //printk(KERN_ALERT "ADD_EVENT_AFTER id = %d\n", id);
445 gmp_add_event(gmp_env, gmp_env->env.current_time + timeout, id, type);
446}
447
448static void gmp_queue_depleted(
449 struct gmp_reservation_environment* gmp_env,
450 struct reservation *res)
451{
452 struct list_head *pos;
453 struct reservation *queued;
454 int found = 0;
455
456//printk(KERN_ALERT "R%d request to enqueue depleted_list\n", res->id);
457
458 list_for_each(pos, &gmp_env->depleted_reservations) {
459 queued = list_entry(pos, struct reservation, list);
460 if (queued && (queued->next_replenishment > res->next_replenishment)) {
461//printk(KERN_ALERT "QUEUED R%d %llu\n", queued->id, queued->next_replenishment);
462 list_add(&res->list, pos->prev);
463 found = 1;
464 break;
465 }
466 }
467
468 if (!found)
469 list_add_tail(&res->list, &gmp_env->depleted_reservations);
470
471 TRACE("R%d queued to depleted_list\n", res->id);
472//printk(KERN_ALERT "R%d queued to depleted_list\n", res->id);
473 gmp_add_event(gmp_env, res->next_replenishment, res->id, EVENT_REPLENISH);
474}
475
476static void gmp_queue_active(
477 struct gmp_reservation_environment* gmp_env,
478 struct reservation *res)
479{
480 struct list_head *pos;
481 struct reservation *queued;
482 int check_preempt = 1, found = 0;
483
484 list_for_each(pos, &gmp_env->active_reservations) {
485 queued = list_entry(pos, struct reservation, list);
486 if (queued->priority > res->priority) {
487 list_add(&res->list, pos->prev);
488 found = 1;
489 break;
490 } else if (queued->scheduled_on == NO_CPU)
491 check_preempt = 0;
492 }
493
494 if (!found)
495 list_add_tail(&res->list, &gmp_env->active_reservations);
496
497 /* check for possible preemption */
498 if (res->state == RESERVATION_ACTIVE && check_preempt)
499 gmp_env->schedule_now++;
500
501#if BUDGET_ENFORCEMENT_AT_C
502 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
503#endif
504 res->event_added = 1;
505}
506
507static void gmp_queue_reservation(
508 struct gmp_reservation_environment* gmp_env,
509 struct reservation *res)
510{
511
512//printk(KERN_ALERT "DEBUG: Passed %s %d %p R%d STATE %d\n",__FUNCTION__,__LINE__, gmp_env, res->id, res->state);
513 switch (res->state) {
514 case RESERVATION_INACTIVE:
515 list_add(&res->list, &gmp_env->inactive_reservations);
516 break;
517
518 case RESERVATION_DEPLETED:
519 gmp_queue_depleted(gmp_env, res);
520 break;
521
522 case RESERVATION_ACTIVE_IDLE:
523 case RESERVATION_ACTIVE:
524 gmp_queue_active(gmp_env, res);
525 break;
526 }
527}
528
529void gmp_add_new_reservation(
530 struct gmp_reservation_environment* gmp_env,
531 struct reservation* new_res)
532{
533 new_res->env = &gmp_env->env;
534 gmp_queue_reservation(gmp_env, new_res);
535}
536
537#if BUDGET_ENFORCEMENT_AT_C
538static void gmp_charge_budget(
539 struct gmp_reservation_environment* gmp_env,
540 lt_t delta)
541{
542 struct list_head *pos, *next;
543 struct reservation *res;
544
545 list_for_each_safe(pos, next, &gmp_env->active_reservations) {
546 int drained = 0;
547 /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */
548 res = list_entry(pos, struct reservation, list);
549 if (res->state == RESERVATION_ACTIVE) {
550 TRACE("gmp_charge_budget ACTIVE R%u scheduled_on=%d drain %llu\n", res->id, res->scheduled_on, delta);
551 if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) {
552 TRACE("DRAIN !!\n");
553 drained = 1;
554 res->ops->drain_budget(res, delta);
555 } else {
556 TRACE("NO DRAIN (not scheduled)!!\n");
557 }
558 } else {
559 //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE);
560 if (res->state != RESERVATION_ACTIVE_IDLE)
561 TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n");
562 TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n", res->id, delta);
563 //if (res->is_ghost != NO_CPU) {
564 TRACE("DRAIN !!\n");
565 drained = 1;
566 res->ops->drain_budget(res, delta);
567 //}
568 }
569 if ((res->state == RESERVATION_ACTIVE ||
570 res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1))
571 {
572 /* make sure the scheduler is invoked when this reservation exhausts
573 * its remaining budget */
574 TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", res->id, res->cur_budget);
575 gmp_add_event_after(gmp_env, res->cur_budget, res->id, EVENT_DRAIN);
576 res->event_added = 1;
577 }
578 //if (encountered_active == 2)
579 /* stop at the first ACTIVE reservation */
580 // break;
581 }
582 //TRACE("finished charging budgets\n");
583}
584#else
585
586static void gmp_charge_budget(
587 struct gmp_reservation_environment* gmp_env,
588 lt_t delta)
589{
590 return;
591}
592
593#endif
594
595static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env)
596{
597 struct list_head *pos, *next;
598 struct reservation *res;
599
600 list_for_each_safe(pos, next, &gmp_env->depleted_reservations) {
601 res = list_entry(pos, struct reservation, list);
602 if (res->next_replenishment <= gmp_env->env.current_time) {
603 res->ops->replenish(res);
604 if (res->is_ghost != NO_CPU) {
605 TRACE("R%d replenished! scheduled_on=%d\n", res->id, res->scheduled_on);
606 }
607 } else {
608 /* list is ordered by increasing depletion times */
609 break;
610 }
611 }
612 //TRACE("finished replenishing budgets\n");
613}
614
615#define EPSILON 50
616
617/* return schedule_now */
618int gmp_update_time(
619 struct gmp_reservation_environment* gmp_env,
620 lt_t now)
621{
622 struct next_timer_event *event, *next;
623 lt_t delta, ret;
624
625 /* If the time didn't advance, there is nothing to do.
626 * This check makes it safe to call gmp_update_time() potentially
627 * multiple times (e.g., via different code paths). */
628 //TRACE("(gmp_update_time) now: %llu, current_time: %llu\n", now, gmp_env->env.current_time);
629 if (unlikely(now <= gmp_env->env.current_time + EPSILON))
630 return 0;
631
632 delta = now - gmp_env->env.current_time;
633 gmp_env->env.current_time = now;
634
635
636 //gmp_print_events(gmp_env, now);
637 /* deplete budgets by passage of time */
638 //TRACE("CHARGE###\n");
639 gmp_charge_budget(gmp_env, delta);
640
641 /* check if any budgets were replenished */
642 //TRACE("REPLENISH###\n");
643 gmp_replenish_budgets(gmp_env);
644
645
646 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
647 if (event->next_update < now) {
648 list_del(&event->list);
649 //TRACE("EVENT at %llu IS DELETED\n", event->next_update);
650 kfree(event);
651 } else {
652 break;
653 }
654 }
655
656 //gmp_print_events(gmp_env, litmus_clock());
657
658 ret = min(gmp_env->schedule_now, NR_CPUS);
659 gmp_env->schedule_now = 0;
660
661 return ret;
662}
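The returned count is meant to be consumed by poking that many CPUs, roughly as in the sketch below (a simplified version of what on_scheduling_timer() in sched_mc2.c does; get_lowest_prio_cpu() and litmus_reschedule() are provided elsewhere):

	raw_spin_lock(&gmp_env->lock);
	schedule_now = gmp_update_time(gmp_env, litmus_clock());
	while (schedule_now--) {
		int cpu = get_lowest_prio_cpu(0);
		if (cpu != NO_CPU)
			litmus_reschedule(cpu);
	}
	raw_spin_unlock(&gmp_env->lock);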
663
664void gmp_print_events(struct gmp_reservation_environment* gmp_env, lt_t now)
665{
666 struct next_timer_event *event, *next;
667
668 TRACE("GLOBAL EVENTS now=%llu\n", now);
669 list_for_each_entry_safe(event, next, &gmp_env->next_events, list) {
670 TRACE("at %llu type=%d id=%d armed_on=%d\n", event->next_update, event->type, event->id, event->timer_armed_on);
671 }
672}
673
674static void gmp_res_change_state(
675 struct reservation_environment* env,
676 struct reservation *res,
677 reservation_state_t new_state)
678{
679 struct gmp_reservation_environment* gmp_env;
680
681 gmp_env = container_of(env, struct gmp_reservation_environment, env);
682
683 TRACE("GMP reservation R%d state %d->%d at %llu\n",
684 res->id, res->state, new_state, env->current_time);
685
686 list_del(&res->list);
687 /* check if we need to reschedule because we lost an active reservation */
688 if (res->state == RESERVATION_ACTIVE)
689 gmp_env->schedule_now++;
690 res->state = new_state;
691 gmp_queue_reservation(gmp_env, res);
692}
693
694void gmp_init(struct gmp_reservation_environment* gmp_env)
695{
696 memset(gmp_env, 0, sizeof(*gmp_env));
697
698 INIT_LIST_HEAD(&gmp_env->active_reservations);
699 INIT_LIST_HEAD(&gmp_env->depleted_reservations);
700 INIT_LIST_HEAD(&gmp_env->inactive_reservations);
701 INIT_LIST_HEAD(&gmp_env->next_events);
702
703 gmp_env->env.change_state = gmp_res_change_state;
704
705 gmp_env->schedule_now = 0;
706 gmp_env->will_schedule = false;
707
708 raw_spin_lock_init(&gmp_env->lock);
709}
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
new file mode 100644
index 000000000000..0ff27135c825
--- /dev/null
+++ b/litmus/sched_mc2.c
@@ -0,0 +1,1849 @@
1/*
2 * litmus/sched_mc2.c
3 *
4 * Implementation of the Mixed-Criticality on MultiCore scheduler
5 *
6 * This plugin implements the scheduling algorithm proposed in the
7 * "Mixed-Criticality Real-Time Scheduling for Multicore System" paper.
8 */
9
10#include <linux/percpu.h>
11#include <linux/slab.h>
12#include <asm/uaccess.h>
13
14#include <litmus/sched_plugin.h>
15#include <litmus/preempt.h>
16#include <litmus/debug_trace.h>
17
18#include <litmus/litmus.h>
19#include <litmus/jobs.h>
20#include <litmus/budget.h>
21#include <litmus/litmus_proc.h>
22#include <litmus/sched_trace.h>
23#include <litmus/cache_proc.h>
24#include <litmus/trace.h>
25
26#include <litmus/mc2_common.h>
27#include <litmus/reservation.h>
28#include <litmus/polling_reservations.h>
29
30//#define TRACE(fmt, args...) do {} while (false)
31//#define TRACE_TASK(fmt, args...) do {} while (false)
32
33#define BUDGET_ENFORCEMENT_AT_C 0
34
35extern void do_partition(enum crit_level lv, int cpu);
36
37/* _global_env - reservation container for level-C tasks */
38struct gmp_reservation_environment _global_env;
39
40/* cpu_entry - keep track of a running task on a cpu
41 * This state is used to decide the lowest priority cpu
42 */
43struct cpu_entry {
44 struct task_struct *scheduled;
45 lt_t deadline;
46 int cpu;
47 enum crit_level lv;
48 /* if will_schedule is true, this cpu is already selected and
49 will call mc2_schedule() soon. */
50 bool will_schedule;
51};
52
53/* cpu_priority - a global state for choosing the lowest priority CPU */
54struct cpu_priority {
55 raw_spinlock_t lock;
56 struct cpu_entry cpu_entries[NR_CPUS];
57};
58
59struct cpu_priority _lowest_prio_cpu;
60
61/* mc2_task_state - a task state structure */
62struct mc2_task_state {
63 struct task_client res_info;
64 /* if cpu == -1, this task is a global task (level C) */
65 int cpu;
66 bool has_departed;
67 struct mc2_task mc2_param;
68};
69
70/* crit_entry - maintain the logically running job (ghost job) */
71struct crit_entry {
72 enum crit_level level;
73 struct task_struct *running;
74};
75
76/* mc2_cpu_state - maintain the scheduled state and ghost jobs
77 * timer : timer for partitioned tasks (level A and B)
78 * g_timer : timer for global tasks (level C)
79 */
80struct mc2_cpu_state {
81 raw_spinlock_t lock;
82
83 struct sup_reservation_environment sup_env;
84 struct hrtimer timer;
85
86 int cpu;
87 struct task_struct* scheduled;
88 struct crit_entry crit_entries[NUM_CRIT_LEVELS];
89};
90
91static int resched_cpu[NR_CPUS];
92static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
93
94#define cpu_state_for(cpu_id) (&per_cpu(mc2_cpu_state, cpu_id))
95#define local_cpu_state() (this_cpu_ptr(&mc2_cpu_state))
96
97/* get_mc2_state - get the task's state */
98static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
99{
100 struct mc2_task_state* tinfo;
101
102 tinfo = (struct mc2_task_state*)tsk_rt(tsk)->plugin_state;
103
104 if (tinfo)
105 return tinfo;
106 else
107 return NULL;
108}
109
110/* get_task_crit_level - return the criticality level of a task */
111static enum crit_level get_task_crit_level(struct task_struct *tsk)
112{
113 struct mc2_task *mp;
114
115 if (!tsk || !is_realtime(tsk))
116 return NUM_CRIT_LEVELS;
117
118 mp = tsk_rt(tsk)->mc2_data;
119
120 if (!mp)
121 return NUM_CRIT_LEVELS;
122 else
123 return mp->crit;
124}
125
126/* task_departs - remove a task from its reservation
127 * If the job has remaining budget, convert it to a ghost job
128 * and update crit_entries[]
129 *
130 * @job_complete indicates whether the job completed or not
131 */
132static void task_departs(struct task_struct *tsk, int job_complete)
133{
134 struct mc2_task_state* tinfo = get_mc2_state(tsk);
135 //struct mc2_cpu_state* state = local_cpu_state();
136 struct reservation* res = NULL;
137 struct reservation_client *client = NULL;
138
139 BUG_ON(!is_realtime(tsk));
140
141 res = tinfo->res_info.client.reservation;
142 client = &tinfo->res_info.client;
143 BUG_ON(!res);
144 BUG_ON(!client);
145
146/* 9/18/2015 fix start - no ghost job handling, empty remaining budget */
147 if (job_complete) {
148 res->cur_budget = 0;
149 sched_trace_task_completion(tsk, 0);
150 }
151/* fix end */
152
153 res->ops->client_departs(res, client, job_complete);
154 tinfo->has_departed = true;
155 TRACE_TASK(tsk, "CLIENT DEPART with budget %llu\n", res->cur_budget);
156/* 9/18/2015 fix start - no remaining budget
157 *
158 if (job_complete && res->cur_budget) {
159 struct crit_entry* ce;
160 enum crit_level lv = tinfo->mc2_param.crit;
161
162 ce = &state->crit_entries[lv];
163 ce->running = tsk;
164 res->is_ghost = state->cpu;
165#if BUDGET_ENFORCEMENT_AT_C
166 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
167#endif
168 TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
169 }
170 * fix -end
171 */
172
173}
174
175/* task_arrives - put a task into its reservation
176 * If the job was a ghost job, remove it from crit_entries[]
177 */
178static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk)
179{
180 struct mc2_task_state* tinfo = get_mc2_state(tsk);
181 struct reservation* res;
182 struct reservation_client *client;
183 enum crit_level lv = get_task_crit_level(tsk);
184
185 res = tinfo->res_info.client.reservation;
186 client = &tinfo->res_info.client;
187
188 tinfo->has_departed = false;
189
190 switch(lv) {
191 case CRIT_LEVEL_A:
192 case CRIT_LEVEL_B:
193 TS_RELEASE_START;
194 break;
195 case CRIT_LEVEL_C:
196 TS_RELEASE_C_START;
197 break;
198 default:
199 break;
200 }
201
202 res->ops->client_arrives(res, client);
203
204 if (lv != NUM_CRIT_LEVELS) {
205 struct crit_entry *ce;
206 ce = &state->crit_entries[lv];
207 /* if the current task is a ghost job, remove it */
208 if (ce->running == tsk)
209 ce->running = NULL;
210 }
211 /* do we need this??
212 if (resched_cpu[state->cpu])
213 litmus_reschedule(state->cpu);
214 */
215
216 switch(lv) {
217 case CRIT_LEVEL_A:
218 case CRIT_LEVEL_B:
219 TS_RELEASE_END;
220 break;
221 case CRIT_LEVEL_C:
222 TS_RELEASE_C_END;
223 break;
224 default:
225 break;
226 }
227}
228
229/* get_lowest_prio_cpu - return the lowest priority cpu
230 * This will be used for scheduling level-C tasks.
231 * If all CPUs are running tasks which have
232 * higher priority than level C, return NO_CPU.
233 */
234static int get_lowest_prio_cpu(lt_t priority)
235{
236 struct cpu_entry *ce;
237 int cpu, ret = NO_CPU;
238 lt_t latest_deadline = 0;
239
240 //raw_spin_lock(&_lowest_prio_cpu.lock);
241 ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
242 if (!ce->will_schedule && !ce->scheduled) {
243 //raw_spin_unlock(&_lowest_prio_cpu.lock);
244 TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
245 return ce->cpu;
246 } else {
247 TRACE("Local CPU will_schedule=%d, scheduled=(%s/%d)\n", ce->will_schedule, ce->scheduled ? (ce->scheduled)->comm : "null", ce->scheduled ? (ce->scheduled)->pid : 0);
248 }
249
250 for_each_online_cpu(cpu) {
251 ce = &_lowest_prio_cpu.cpu_entries[cpu];
252 /* If a CPU will call schedule() in the near future, we don't
253 return that CPU. */
254 TRACE("CPU %d will_schedule=%d, scheduled=(%s/%d:%d)\n", cpu, ce->will_schedule,
255 ce->scheduled ? (ce->scheduled)->comm : "null",
256 ce->scheduled ? (ce->scheduled)->pid : 0,
257 ce->scheduled ? (ce->scheduled)->rt_param.job_params.job_no : 0);
258 if (!ce->will_schedule) {
259 if (!ce->scheduled) {
260 /* Idle cpu, return this. */
261 //raw_spin_unlock(&_lowest_prio_cpu.lock);
262 TRACE("CPU %d is the lowest!\n", ce->cpu);
263 return ce->cpu;
264 } else if (ce->lv == CRIT_LEVEL_C &&
265 ce->deadline > latest_deadline) {
266 latest_deadline = ce->deadline;
267 ret = ce->cpu;
268 }
269 }
270 }
271
272 //raw_spin_unlock(&_lowest_prio_cpu.lock);
273
274 if (priority >= latest_deadline)
275 ret = NO_CPU;
276
277 TRACE("CPU %d is the lowest!\n", ret);
278
279 return ret;
280}
281
282/* mc2_update_time - update time for a given criticality level.
283 * caller must hold a proper lock
284 * (cpu_state lock or global lock)
285 */
286/* 9/24/2015 temporarily not using
287static void mc2_update_time(enum crit_level lv,
288 struct mc2_cpu_state *state, lt_t time)
289{
290 int global_schedule_now;
291
292 if (lv < CRIT_LEVEL_C)
293 sup_update_time(&state->sup_env, time);
294 else if (lv == CRIT_LEVEL_C) {
295 global_schedule_now = gmp_update_time(&_global_env, time);
296 while (global_schedule_now--) {
297 int cpu = get_lowest_prio_cpu(0);
298 if (cpu != NO_CPU) {
299 raw_spin_lock(&_lowest_prio_cpu.lock);
300 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
301 raw_spin_unlock(&_lowest_prio_cpu.lock);
302 TRACE("LOWEST CPU = P%d\n", cpu);
303 litmus_reschedule(cpu);
304 }
305 }
306 }
307 else
308 TRACE("update_time(): Criticality level error!!!!\n");
309}
310*/
311
312/* NOTE: drops state->lock */
313/* mc2_update_timer_and_unlock - set a timer and g_timer and unlock
314 * Whenever res_env.current_time is updated,
315 * we check next_scheduler_update and set
316 * a timer.
317 * If there exists a global event which is
318 * not armed on any CPU and g_timer is not
319 * active, set a g_timer for that event.
320 */
321static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
322{
323 int local, cpus;
324 lt_t update, now;
325 //enum crit_level lv = get_task_crit_level(state->scheduled);
326 struct next_timer_event *event, *next;
327 int reschedule[NR_CPUS];
328
329 for (cpus = 0; cpus<NR_CPUS; cpus++)
330 reschedule[cpus] = 0;
331
332 update = state->sup_env.next_scheduler_update;
333 now = state->sup_env.env.current_time;
334
335 /* Be sure we're actually running on the right core,
336 * as this function is also called from mc2_task_resume(),
337 * which might be called on any CPU when a thread resumes.
338 */
339 local = local_cpu_state() == state;
340
341 raw_spin_lock(&_global_env.lock);
342
343 list_for_each_entry_safe(event, next, &_global_env.next_events, list) {
344 /* If the event time is already passed, we call schedule() on
345 the lowest priority cpu */
346 if (event->next_update >= update) {
347 break;
348 }
349
350 if (event->next_update < litmus_clock()) {
351 if (event->timer_armed_on == NO_CPU) {
352 struct reservation *res = gmp_find_by_id(&_global_env, event->id);
353 int cpu = get_lowest_prio_cpu(res?res->priority:0);
354 TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
355 list_del(&event->list);
356 kfree(event);
357 if (cpu != NO_CPU) {
358 //raw_spin_lock(&_lowest_prio_cpu.lock);
359 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
360 //raw_spin_unlock(&_lowest_prio_cpu.lock);
361 if (cpu == local_cpu_state()->cpu)
362 litmus_reschedule_local();
363 else
364 reschedule[cpu] = 1;
365 }
366 }
367 } else if (event->next_update < update && (event->timer_armed_on == NO_CPU || event->timer_armed_on == state->cpu)) {
368 event->timer_armed_on = state->cpu;
369 update = event->next_update;
370 break;
371 }
372 }
373
374 /* Must drop state lock before calling into hrtimer_start(), which
375 * may raise a softirq, which in turn may wake ksoftirqd. */
376 raw_spin_unlock(&_global_env.lock);
377 raw_spin_unlock(&state->lock);
378
379 if (update <= now || reschedule[state->cpu]) {
380 //litmus_reschedule(state->cpu);
381 raw_spin_lock(&state->lock);
382 preempt_if_preemptable(state->scheduled, state->cpu);
383 raw_spin_unlock(&state->lock);
384 reschedule[state->cpu] = 0;
385 } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
386 /* Reprogram only if not already set correctly. */
387 if (!hrtimer_active(&state->timer) ||
388 ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
389 TRACE("canceling timer...at %llu\n",
390 ktime_to_ns(hrtimer_get_expires(&state->timer)));
391 hrtimer_cancel(&state->timer);
392 TRACE("setting scheduler timer for %llu\n", update);
393 /* We cannot use hrtimer_start() here because the
394 * wakeup flag must be set to zero. */
395 __hrtimer_start_range_ns(&state->timer,
396 ns_to_ktime(update),
397 0 /* timer coalescing slack */,
398 HRTIMER_MODE_ABS_PINNED,
399 0 /* wakeup */);
400 }
401 } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
402 /* Poke remote core only if timer needs to be set earlier than
403 * it is currently set.
404 */
405 TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
406 "active:%d, set:%llu)\n",
407 state->cpu,
408 update,
409 hrtimer_active(&state->timer),
410 ktime_to_ns(hrtimer_get_expires(&state->timer)));
411 if (!hrtimer_active(&state->timer) ||
412 ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
413 TRACE("poking CPU %d so that it can update its "
414 "scheduling timer (active:%d, set:%llu)\n",
415 state->cpu,
416 hrtimer_active(&state->timer),
417 ktime_to_ns(hrtimer_get_expires(&state->timer)));
418 //litmus_reschedule(state->cpu);
419 raw_spin_lock(&state->lock);
420 preempt_if_preemptable(state->scheduled, state->cpu);
421 raw_spin_unlock(&state->lock);
422 reschedule[state->cpu] = 0;
423 }
424 }
425 for (cpus = 0; cpus<NR_CPUS; cpus++) {
426 if (reschedule[cpus]) {
427 //litmus_reschedule(cpus);
428 struct mc2_cpu_state *remote_state;
429
430 remote_state = cpu_state_for(cpus);
431 raw_spin_lock(&remote_state->lock);
432 preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
433 raw_spin_unlock(&remote_state->lock);
434 }
435 }
436}
437
438/* mc2_update_ghost_state - Update crit_entries[] to track ghost jobs
439 * If the budget of a ghost is exhausted,
440 * clear is_ghost and reschedule
441 */
442/*
443static lt_t mc2_update_ghost_state(struct mc2_cpu_state *state)
444{
445 int lv = 0;
446 struct crit_entry* ce;
447 struct reservation *res;
448 struct mc2_task_state *tinfo;
449 lt_t ret = ULLONG_MAX;
450
451 BUG_ON(!state);
452
453 for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
454 ce = &state->crit_entries[lv];
455 if (ce->running != NULL) {
456//printk(KERN_ALERT "P%d ce->running : %s/%d\n", state->cpu, ce->running ? (ce->running)->comm : "null", ce->running ? (ce->running)->pid : 0);
457 tinfo = get_mc2_state(ce->running);
458 if (!tinfo)
459 continue;
460
461 res = res_find_by_id(state, tinfo->mc2_param.res_id);
462 //BUG_ON(!res);
463 if (!res) {
464 printk(KERN_ALERT "mc2_update_ghost_state(): R%d not found!\n", tinfo->mc2_param.res_id);
465 return 0;
466 }
467
468 TRACE("LV %d running id %d budget %llu\n",
469 lv, tinfo->mc2_param.res_id, res->cur_budget);
470 // If the budget is exhausted, clear is_ghost and reschedule
471 if (!res->cur_budget) {
472 struct sup_reservation_environment* sup_env = &state->sup_env;
473
474 TRACE("GHOST FINISH id %d at %llu\n",
475 tinfo->mc2_param.res_id, litmus_clock());
476 ce->running = NULL;
477 res->is_ghost = NO_CPU;
478
479 if (lv < CRIT_LEVEL_C) {
480 res = list_first_entry_or_null(
481 &sup_env->active_reservations,
482 struct reservation, list);
483 if (res)
484 litmus_reschedule_local();
485 } else if (lv == CRIT_LEVEL_C) {
486 res = list_first_entry_or_null(
487 &_global_env.active_reservations,
488 struct reservation, list);
489 if (res)
490 litmus_reschedule(state->cpu);
491 }
492 } else {
493 //TRACE("GHOST NOT FINISH id %d budget %llu\n", res->id, res->cur_budget);
494 //gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
495 if (ret > res->cur_budget) {
496 ret = res->cur_budget;
497 }
498 }
499 }
500 }
501
502 return ret;
503}
504*/
505
506/* update_cpu_prio - Update cpu's priority
507 * When a cpu picks a new task, call this function
508 * to update cpu priorities.
509 */
510static void update_cpu_prio(struct mc2_cpu_state *state)
511{
512 struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu];
513 enum crit_level lv = get_task_crit_level(state->scheduled);
514
515 if (!state->scheduled) {
516 /* cpu is idle. */
517 ce->scheduled = NULL;
518 ce->deadline = ULLONG_MAX;
519 ce->lv = NUM_CRIT_LEVELS;
520 } else if (lv == CRIT_LEVEL_C) {
521 ce->scheduled = state->scheduled;
522 ce->deadline = get_deadline(state->scheduled);
523 ce->lv = lv;
524 } else if (lv < CRIT_LEVEL_C) {
525 /* If cpu is running level A or B tasks, it is not eligible
526 to run level-C tasks */
527 ce->scheduled = state->scheduled;
528 ce->deadline = 0;
529 ce->lv = lv;
530 }
531};
532
533/* on_scheduling_timer - timer event for partitioned tasks
534 */
535static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
536{
537 unsigned long flags;
538 enum hrtimer_restart restart = HRTIMER_NORESTART;
539 struct mc2_cpu_state *state;
540 lt_t update, now;
541 int global_schedule_now;
542 //lt_t remain_budget; // no ghost jobs
543 int reschedule[NR_CPUS];
544 int cpus;
545
546 for (cpus = 0; cpus<NR_CPUS; cpus++)
547 reschedule[cpus] = 0;
548
549 state = container_of(timer, struct mc2_cpu_state, timer);
550
551 /* The scheduling timer should only fire on the local CPU, because
552 * otherwise deadlocks via timer_cancel() are possible.
553 * Note: this does not interfere with dedicated interrupt handling, as
554 * even under dedicated interrupt handling scheduling timers for
555 * budget enforcement must occur locally on each CPU.
556 */
557 BUG_ON(state->cpu != raw_smp_processor_id());
558
559 TS_ISR_START;
560
561 TRACE("Timer fired at %llu\n", litmus_clock());
562 //raw_spin_lock_irqsave(&_global_env.lock, flags);
563 raw_spin_lock_irqsave(&state->lock, flags);
564 now = litmus_clock();
565 sup_update_time(&state->sup_env, now);
566
567/* 9/20/2015 fix - no ghost job
568 remain_budget = mc2_update_ghost_state(state);
569*/
570 update = state->sup_env.next_scheduler_update;
571 now = state->sup_env.env.current_time;
572
573/* 9/20/2015 fix - no ghost job
574 if (remain_budget != ULLONG_MAX && update > now + remain_budget) {
575 update = now + remain_budget;
576 }
577
578 TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d) g_schedule_now:%d remain_budget:%llu\n", now, update, state->cpu, global_schedule_now, remain_budget);
579*/
580
581 if (update <= now) {
582 litmus_reschedule_local();
583 } else if (update != SUP_NO_SCHEDULER_UPDATE) {
584 hrtimer_set_expires(timer, ns_to_ktime(update));
585 restart = HRTIMER_RESTART;
586 }
587
588 raw_spin_lock(&_global_env.lock);
589 global_schedule_now = gmp_update_time(&_global_env, now);
590
591 BUG_ON(global_schedule_now < 0 || global_schedule_now > 4);
592
593 /* Find the lowest cpu, and call reschedule */
594 while (global_schedule_now--) {
595 int cpu = get_lowest_prio_cpu(0);
596 if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
597 //raw_spin_lock(&_lowest_prio_cpu.lock);
598 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
599 //raw_spin_unlock(&_lowest_prio_cpu.lock);
600 TRACE("LOWEST CPU = P%d\n", cpu);
601 if (cpu == state->cpu && update > now)
602 litmus_reschedule_local();
603 else
604 reschedule[cpu] = 1;
605 }
606 }
607 raw_spin_unlock(&_global_env.lock);
608
609 raw_spin_unlock_irqrestore(&state->lock, flags);
610 //raw_spin_unlock_irqrestore(&_global_env.lock, flags);
611
612 TS_ISR_END;
613
614 for (cpus = 0; cpus<NR_CPUS; cpus++) {
615 if (reschedule[cpus]) {
616 //litmus_reschedule(cpus);
617 struct mc2_cpu_state *remote_state;
618
619 remote_state = cpu_state_for(cpus);
620 raw_spin_lock(&remote_state->lock);
621 preempt_if_preemptable(remote_state->scheduled, remote_state->cpu);
622 raw_spin_unlock(&remote_state->lock);
623 }
624 }
625
626
627 return restart;
628}
629
630/* mc2_dispatch - Select the next task to schedule.
631 */
632struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, struct mc2_cpu_state* state)
633{
634 struct reservation *res, *next;
635 struct task_struct *tsk = NULL;
636 struct crit_entry *ce;
637 enum crit_level lv;
638 lt_t time_slice;
639
640 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
641 if (res->state == RESERVATION_ACTIVE) {
642 tsk = res->ops->dispatch_client(res, &time_slice);
643 if (likely(tsk)) {
644 lv = get_task_crit_level(tsk);
645 if (lv == NUM_CRIT_LEVELS) {
646 sup_scheduler_update_after(sup_env, res->cur_budget);
647 return tsk;
648 } else {
649 ce = &state->crit_entries[lv];
650 sup_scheduler_update_after(sup_env, res->cur_budget);
651 res->blocked_by_ghost = 0;
652 res->is_ghost = NO_CPU;
653 return tsk;
654/* no ghost jobs
655 if (likely(!ce->running)) {
656 sup_scheduler_update_after(sup_env, res->cur_budget);
657 res->blocked_by_ghost = 0;
658 res->is_ghost = NO_CPU;
659 return tsk;
660 } else {
661 res->blocked_by_ghost = 1;
662 TRACE_TASK(ce->running, " is GHOST\n");
663 }
664*/
665 }
666 }
667 }
668 }
669
670 return NULL;
671}
672
673struct task_struct* mc2_global_dispatch(struct mc2_cpu_state* state)
674{
675 struct reservation *res, *next;
676 struct task_struct *tsk = NULL;
677 //struct crit_entry *ce;
678 enum crit_level lv;
679 lt_t time_slice;
680
681 /* no eligible level A or B tasks exist */
682 /* check the ghost job */
683 /*
684 ce = &state->crit_entries[CRIT_LEVEL_C];
685 if (ce->running) {
686 TRACE_TASK(ce->running," is GHOST\n");
687 return NULL;
688 }
689 */
690 list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
691 BUG_ON(!res);
692 if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) {
693 tsk = res->ops->dispatch_client(res, &time_slice);
694 if (likely(tsk)) {
695 lv = get_task_crit_level(tsk);
696 if (lv == NUM_CRIT_LEVELS) {
697#if BUDGET_ENFORCEMENT_AT_C
698 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
699#endif
700 res->event_added = 1;
701 res->blocked_by_ghost = 0;
702 res->is_ghost = NO_CPU;
703 res->scheduled_on = state->cpu;
704 return tsk;
705 } else if (lv == CRIT_LEVEL_C) {
706 //ce = &state->crit_entries[lv];
707 //if (likely(!ce->running)) {
708#if BUDGET_ENFORCEMENT_AT_C
709 gmp_add_event_after(&_global_env, res->cur_budget, res->id, EVENT_DRAIN);
710#endif
711 res->event_added = 1;
712 res->blocked_by_ghost = 0;
713 res->is_ghost = NO_CPU;
714 res->scheduled_on = state->cpu;
715 return tsk;
716 //} else {
717 // res->blocked_by_ghost = 1;
718 // TRACE_TASK(ce->running, " is GHOST\n");
719 // return NULL;
720 //}
721 } else {
722 BUG();
723 }
724 }
725 }
726 }
727
728 return NULL;
729}
730
731static inline void pre_schedule(struct task_struct *prev, int cpu)
732{
733 TS_SCHED_A_START;
734 TS_SCHED_C_START;
735
736 if (!prev || !is_realtime(prev))
737 return;
738
739 do_partition(CRIT_LEVEL_C, cpu);
740}
741
742static inline void post_schedule(struct task_struct *next, int cpu)
743{
744 enum crit_level lev;
745 if ((!next) || !is_realtime(next))
746 return;
747
748 lev = get_task_crit_level(next);
749 do_partition(lev, cpu);
750
751 switch(lev) {
752 case CRIT_LEVEL_A:
753 case CRIT_LEVEL_B:
754 TS_SCHED_A_END(next);
755 break;
756 case CRIT_LEVEL_C:
757 TS_SCHED_C_END(next);
758 break;
759 default:
760 break;
761 }
762
763}
764
765/* mc2_schedule - main scheduler function. pick the next task to run
766 */
767static struct task_struct* mc2_schedule(struct task_struct * prev)
768{
769 /* next == NULL means "schedule background work". */
770 lt_t now;
771 struct mc2_cpu_state *state = local_cpu_state();
772
773 pre_schedule(prev, state->cpu);
774
775 /* 9/20/2015 fix
776 raw_spin_lock(&_global_env.lock);
777 */
778 raw_spin_lock(&state->lock);
779
780 //BUG_ON(state->scheduled && state->scheduled != prev);
781 //BUG_ON(state->scheduled && !is_realtime(prev));
782 if (state->scheduled && state->scheduled != prev)
783 ; //printk(KERN_ALERT "BUG1!!!!!!!! %s %s\n", state->scheduled ? (state->scheduled)->comm : "null", prev ? (prev)->comm : "null");
784 if (state->scheduled && !is_realtime(prev))
785 ; //printk(KERN_ALERT "BUG2!!!!!!!! \n");
786
787 /* update time */
788 state->sup_env.will_schedule = true;
789
790 now = litmus_clock();
791 sup_update_time(&state->sup_env, now);
792 /* 9/20/2015 fix
793 gmp_update_time(&_global_env, now);
794 */
795 /* 9/20/2015 fix
796 mc2_update_ghost_state(state);
797 */
798
799 /* remove task from reservation if it blocks */
800 if (is_realtime(prev) && !is_running(prev)) {
801 if (get_task_crit_level(prev) == CRIT_LEVEL_C)
802 raw_spin_lock(&_global_env.lock);
803 task_departs(prev, is_completed(prev));
804 if (get_task_crit_level(prev) == CRIT_LEVEL_C)
805 raw_spin_unlock(&_global_env.lock);
806 }
807
808 /* figure out what to schedule next */
809 state->scheduled = mc2_dispatch(&state->sup_env, state);
810/* if (state->scheduled && is_realtime(state->scheduled))
811 TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n");
812*/
813 if (!state->scheduled) {
814 raw_spin_lock(&_global_env.lock);
815 gmp_update_time(&_global_env, now);
816 state->scheduled = mc2_global_dispatch(state);
817 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
818 update_cpu_prio(state);
819 raw_spin_unlock(&_global_env.lock);
820 } else {
821 raw_spin_lock(&_global_env.lock);
822 _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
823 update_cpu_prio(state);
824 raw_spin_unlock(&_global_env.lock);
825 }
826
827 //raw_spin_lock(&_lowest_prio_cpu.lock);
828 //_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false;
829 //update_cpu_prio(state);
830 //raw_spin_unlock(&_lowest_prio_cpu.lock);
831
832 /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
833 sched_state_task_picked();
834
835 /* program scheduler timer */
836 state->sup_env.will_schedule = false;
837
838 /* NOTE: drops state->lock */
839 mc2_update_timer_and_unlock(state);
840
841 if (prev != state->scheduled && is_realtime(prev)) {
842 struct mc2_task_state* tinfo = get_mc2_state(prev);
843 struct reservation* res = tinfo->res_info.client.reservation;
844 TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on);
845 res->scheduled_on = NO_CPU;
846 TRACE_TASK(prev, "descheduled.\n");
847 /* if prev is preempted and a global task, find the lowest cpu and reschedule */
848 if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
849 int cpu;
850 raw_spin_lock(&_global_env.lock);
851 cpu = get_lowest_prio_cpu(res?res->priority:0);
852 //TRACE("LEVEL-C TASK PREEMPTED!! poking CPU %d to reschedule\n", cpu);
853 if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
854 //raw_spin_lock(&_lowest_prio_cpu.lock);
855 _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
856 resched_cpu[cpu] = 1;
857 //raw_spin_unlock(&_lowest_prio_cpu.lock);
858 }
859 raw_spin_unlock(&_global_env.lock);
860 }
861 }
862 if (state->scheduled) {
863 TRACE_TASK(state->scheduled, "scheduled.\n");
864 }
865
866 post_schedule(state->scheduled, state->cpu);
867
868 return state->scheduled;
869}
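/* A minimal sketch (not a definitive implementation): mc2_schedule() and the
 * other entry points in this file consistently acquire the per-CPU
 * state->lock before _global_env.lock and release them in the opposite
 * order. The helper names below (mc2_lock_state_and_global,
 * mc2_unlock_global_and_state) are hypothetical; they only illustrate the
 * lock-ordering convention, assuming the struct mc2_cpu_state and
 * _global_env used above. */
static inline void mc2_lock_state_and_global(struct mc2_cpu_state *state)
{
        raw_spin_lock(&state->lock);
        raw_spin_lock(&_global_env.lock);
}

static inline void mc2_unlock_global_and_state(struct mc2_cpu_state *state)
{
        raw_spin_unlock(&_global_env.lock);
        raw_spin_unlock(&state->lock);
}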
870
871static void resume_legacy_task_model_updates(struct task_struct *tsk)
872{
873 lt_t now;
874 if (is_sporadic(tsk)) {
875 /* If this sporadic task was gone for a "long" time and woke up past
876 * its deadline, then give it a new budget by triggering a job
877 * release. This is purely cosmetic and has no effect on the
878 * MC2 scheduler. */
879
880 now = litmus_clock();
881 if (is_tardy(tsk, now)) {
882 //release_at(tsk, now);
883 //sched_trace_task_release(tsk);
884 }
885 }
886}
887
888/* mc2_task_resume - Called when the state of tsk changes back to
889 * TASK_RUNNING. We need to requeue the task.
890 */
891static void mc2_task_resume(struct task_struct *tsk)
892{
893 unsigned long flags;
894 struct mc2_task_state* tinfo = get_mc2_state(tsk);
895 struct mc2_cpu_state *state;
896
897 TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());
898
899 local_irq_save(flags);
900 if (tinfo->cpu != -1)
901 state = cpu_state_for(tinfo->cpu);
902 else
903 state = local_cpu_state();
904
905 /* 9/20/2015 fix
906 raw_spin_lock(&_global_env.lock);
907 */
908 /* Requeue only if self-suspension was already processed. */
909 if (tinfo->has_departed)
910 {
911 /* We don't want to consider jobs before synchronous releases */
912 if (tsk_rt(tsk)->job_params.job_no > 5) {
913 switch(get_task_crit_level(tsk)) {
914 case CRIT_LEVEL_A:
915 TS_RELEASE_LATENCY_A(get_release(tsk));
916 break;
917 case CRIT_LEVEL_B:
918 TS_RELEASE_LATENCY_B(get_release(tsk));
919 break;
920 case CRIT_LEVEL_C:
921 TS_RELEASE_LATENCY_C(get_release(tsk));
922 break;
923 default:
924 break;
925 }
926 }
927
928 raw_spin_lock(&state->lock);
929 /* Assumption: litmus_clock() is synchronized across cores,
930 * since we might not actually be executing on tinfo->cpu
931 * at the moment. */
932 if (tinfo->cpu != -1) {
933 sup_update_time(&state->sup_env, litmus_clock());
934 task_arrives(state, tsk);
935 } else {
936 raw_spin_lock(&_global_env.lock);
937 gmp_update_time(&_global_env, litmus_clock());
938 task_arrives(state, tsk);
939 raw_spin_unlock(&_global_env.lock);
940 }
941
942 /* 9/20/2015 fix
943 mc2_update_ghost_state(state);
944 */
945 //task_arrives(state, tsk);
946 /* NOTE: drops state->lock */
947 TRACE_TASK(tsk, "mc2_resume()\n");
948 mc2_update_timer_and_unlock(state);
949 } else {
950 TRACE_TASK(tsk, "resume event ignored, still scheduled\n");
951 //raw_spin_unlock(&_global_env.lock);
952 }
953
954 local_irq_restore(flags);
955
956 //gmp_free_passed_event();
957 resume_legacy_task_model_updates(tsk);
958}
959
960/* mc2_complete_job - syscall backend for job completions
961 */
962static long mc2_complete_job(void)
963{
964 ktime_t next_release;
965 long err;
966
967 tsk_rt(current)->completed = 1;
968
969 /* If this is the first job instance, we need to reset the
970 * replenishment time to the next release time */
971 if (tsk_rt(current)->sporadic_release) {
972 struct mc2_cpu_state *state;
973 struct reservation_environment *env;
974 struct mc2_task_state *tinfo;
975 struct reservation *res = NULL;
976 unsigned long flags;
977 enum crit_level lv;
978
979 preempt_disable();
980 local_irq_save(flags);
981
982 tinfo = get_mc2_state(current);
983 lv = get_task_crit_level(current);
984
985 if (lv < CRIT_LEVEL_C) {
986 state = cpu_state_for(tinfo->cpu);
987 raw_spin_lock(&state->lock);
988 env = &(state->sup_env.env);
989 res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
990 env->time_zero = tsk_rt(current)->sporadic_release_time;
991 }
992 else if (lv == CRIT_LEVEL_C) {
993 state = local_cpu_state();
994 raw_spin_lock(&state->lock);
995 raw_spin_lock(&_global_env.lock);
996 res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
997 _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time;
998 }
999 else
1000 BUG();
1001
1002 /* set next_replenishment to the synchronous release time */
1003 BUG_ON(!res);
1004 res->next_replenishment = tsk_rt(current)->sporadic_release_time;
1005/*
1006 if (get_task_crit_level(current) == CRIT_LEVEL_A) {
1007 struct table_driven_reservation *tdres;
1008 tdres = container_of(res, struct table_driven_reservation, res);
1009 tdres->next_interval = 0;
1010 tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time;
1011 res->next_replenishment += tdres->intervals[0].start;
1012 }
1013*/
1014 res->cur_budget = 0;
1015 res->env->change_state(res->env, res, RESERVATION_DEPLETED);
1016
1017 //TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
1018
1019 //if (lv < CRIT_LEVEL_C)
1020// raw_spin_unlock(&state->lock);
1021 //else
1022 if (lv == CRIT_LEVEL_C)
1023 raw_spin_unlock(&_global_env.lock);
1024
1025 raw_spin_unlock(&state->lock);
1026 local_irq_restore(flags);
1027 preempt_enable();
1028 }
1029
1030 sched_trace_task_completion(current, 0);
1031 /* update the next release time and deadline */
1032 prepare_for_next_period(current);
1033 sched_trace_task_release(current);
1034 next_release = ns_to_ktime(get_release(current));
1035 preempt_disable();
1036 TRACE_CUR("next_release=%llu\n", get_release(current));
1037 if (get_release(current) > litmus_clock()) {
1038 /* sleep until next_release */
1039 set_current_state(TASK_INTERRUPTIBLE);
1040 preempt_enable_no_resched();
1041 err = schedule_hrtimeout(&next_release, HRTIMER_MODE_ABS);
1042 } else {
1043 /* release the next job immediately */
1044 err = 0;
1045 TRACE_CUR("TARDY: release=%llu now=%llu\n", get_release(current), litmus_clock());
1046 preempt_enable();
1047 }
1048
1049 TRACE_CUR("mc2_complete_job returns at %llu\n", litmus_clock());
1050
1051 return err;
1052}
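/* A minimal user-space sketch of the job loop that reaches
 * mc2_complete_job() through the complete_job system call. It assumes the
 * usual liblitmus helpers (task_mode(), sleep_next_period()) and that the
 * calling task has already had its real-time parameters set and has been
 * attached to an MC^2 reservation; run_jobs() itself is a hypothetical
 * example function, not part of this patch. */
#include <litmus.h>

int run_jobs(int njobs)
{
        int i, ret;

        ret = task_mode(LITMUS_RT_TASK);        /* become a real-time task */
        if (ret != 0)
                return ret;

        for (i = 0; i < njobs; i++) {
                /* per-job computation would go here */
                sleep_next_period();            /* backed by mc2_complete_job() */
        }

        return task_mode(BACKGROUND_TASK);      /* leave real-time mode */
}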
1053
1054/* mc2_admit_task - Set up MC^2 task parameters.
1055 */
1056static long mc2_admit_task(struct task_struct *tsk)
1057{
1058 long err = -ESRCH;
1059 unsigned long flags;
1060 struct reservation *res;
1061 struct mc2_cpu_state *state;
1062 struct mc2_task_state *tinfo;
1063 struct mc2_task *mp = tsk_rt(tsk)->mc2_data;
1064 enum crit_level lv;
1065
1066 if (!mp) {
1067 printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
1068 return err;
1069 }
1070 tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC);
1071 if (!tinfo)
1072 return -ENOMEM;
1073
1074 lv = mp->crit;
1075 preempt_disable();
1076
1077 if (lv < CRIT_LEVEL_C) {
1078 state = cpu_state_for(task_cpu(tsk));
1079 raw_spin_lock_irqsave(&state->lock, flags);
1080
1081 res = sup_find_by_id(&state->sup_env, mp->res_id);
1082
1083 /* found the appropriate reservation */
1084 if (res) {
1085 TRACE_TASK(tsk, "SUP FOUND RES ID\n");
1086 tinfo->mc2_param.crit = mp->crit;
1087 tinfo->mc2_param.res_id = mp->res_id;
1088
1089 /* initial values */
1090 err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
1091 tinfo->cpu = task_cpu(tsk);
1092 tinfo->has_departed = true;
1093 tsk_rt(tsk)->plugin_state = tinfo;
1094
1095 /* disable LITMUS^RT's per-thread budget enforcement */
1096 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
1097 }
1098
1099 raw_spin_unlock_irqrestore(&state->lock, flags);
1100 } else if (lv == CRIT_LEVEL_C) {
1101 state = local_cpu_state();
1102 raw_spin_lock_irqsave(&state->lock, flags);
1103 raw_spin_lock(&_global_env.lock);
1104 //state = local_cpu_state();
1105
1106 //raw_spin_lock(&state->lock);
1107
1108 res = gmp_find_by_id(&_global_env, mp->res_id);
1109
1110 /* found the appropriate reservation (or vCPU) */
1111 if (res) {
1112 TRACE_TASK(tsk, "GMP FOUND RES ID\n");
1113 tinfo->mc2_param.crit = mp->crit;
1114 tinfo->mc2_param.res_id = mp->res_id;
1115
1116 /* initial values */
1117 err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
1118 tinfo->cpu = -1;
1119 tinfo->has_departed = true;
1120 tsk_rt(tsk)->plugin_state = tinfo;
1121
1122 /* disable LITMUS^RT's per-thread budget enforcement */
1123 tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
1124 }
1125
1126 raw_spin_unlock(&_global_env.lock);
1127 raw_spin_unlock_irqrestore(&state->lock, flags);
1128 }
1129
1130 preempt_enable();
1131
1132 if (err)
1133 kfree(tinfo);
1134
1135 return err;
1136}
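/* The partitioned (lv < CRIT_LEVEL_C) and global (lv == CRIT_LEVEL_C)
 * branches of mc2_admit_task() bind the task to its reservation with nearly
 * identical code; only the value stored in tinfo->cpu differs. A sketch of a
 * hypothetical helper for that shared step (mc2_bind_task_to_res is not part
 * of this patch), assuming the mc2_task_state layout used above: */
static long mc2_bind_task_to_res(struct mc2_task_state *tinfo,
                                 struct mc2_task *mp,
                                 struct task_struct *tsk,
                                 struct reservation *res, int cpu)
{
        long err;

        tinfo->mc2_param.crit = mp->crit;
        tinfo->mc2_param.res_id = mp->res_id;

        err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
        tinfo->cpu = cpu;               /* -1 for global (level-C) tasks */
        tinfo->has_departed = true;
        tsk_rt(tsk)->plugin_state = tinfo;

        /* disable LITMUS^RT's per-thread budget enforcement */
        tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;

        return err;
}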
1137
1138/* mc2_task_new - A new real-time task has arrived. Release its first job
1139 * at the next reservation replenishment time.
1140 */
1141static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
1142 int is_running)
1143{
1144 unsigned long flags;
1145 struct mc2_task_state* tinfo = get_mc2_state(tsk);
1146 struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
1147 struct reservation *res;
1148 enum crit_level lv = get_task_crit_level(tsk);
1149 lt_t release = 0;
1150
1151 TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
1152 litmus_clock(), on_runqueue, is_running);
1153
1154 if (tinfo->cpu == -1)
1155 state = local_cpu_state();
1156 else
1157 state = cpu_state_for(tinfo->cpu);
1158
1159 local_irq_save(flags);
1160
1161 /* acquire the lock protecting the state and disable interrupts */
1162 //raw_spin_lock(&_global_env.lock);
1163 //raw_spin_lock(&state->lock);
1164 if (is_running) {
1165 state->scheduled = tsk;
1166 /* make sure this task should actually be running */
1167 litmus_reschedule_local();
1168 }
1169
1170 raw_spin_lock(&state->lock);
1171
1172 if (lv == CRIT_LEVEL_C) {
1173 raw_spin_lock(&_global_env.lock);
1174 res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
1175 }
1176 else {
1177 res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
1178 }
1179 //res = res_find_by_id(state, tinfo->mc2_param.res_id);
1180 release = res->next_replenishment;
1181
1182 if (on_runqueue || is_running) {
1183 /* Assumption: litmus_clock() is synchronized across cores
1184 * [see comment in pres_task_resume()] */
1185 if (lv == CRIT_LEVEL_C) {
1186 gmp_update_time(&_global_env, litmus_clock());
1187 //raw_spin_unlock(&_global_env.lock);
1188 }
1189 else
1190 sup_update_time(&state->sup_env, litmus_clock());
1191 //mc2_update_time(lv, state, litmus_clock());
1192 /* 9/20/2015 fix
1193 mc2_update_ghost_state(state);
1194 */
1195 task_arrives(state, tsk);
1196 if (lv == CRIT_LEVEL_C)
1197 raw_spin_unlock(&_global_env.lock);
1198 /* NOTE: drops state->lock */
1199 TRACE("mc2_new()\n");
1200
1201 mc2_update_timer_and_unlock(state);
1202 } else {
1203 if (lv == CRIT_LEVEL_C)
1204 raw_spin_unlock(&_global_env.lock);
1205 raw_spin_unlock(&state->lock);
1206 //raw_spin_unlock(&_global_env.lock);
1207 }
1208 local_irq_restore(flags);
1209
1210 if (release) {
1211 TRACE_TASK(tsk, "mc2_task_new() next_release = %llu\n", release);
1212 //release_at(tsk, release);
1213 }
1214 else
1215 TRACE_TASK(tsk, "mc2_task_new() next_release = NULL\n");
1216}
1217
1218/* mc2_reservation_destroy - reservation_destroy system call backend
1219 */
1220static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
1221{
1222 long ret = -EINVAL;
1223 struct mc2_cpu_state *state;
1224 struct reservation *res = NULL, *next;
1225 struct sup_reservation_environment *sup_env;
1226 int found = 0;
1227 //enum crit_level lv = get_task_crit_level(current);
1228 unsigned long flags;
1229
1230 if (cpu == -1) {
1231 /* the reservation is a global reservation */
1232 local_irq_save(flags);
1233 //state = local_cpu_state();
1234 raw_spin_lock(&_global_env.lock);
1235 //raw_spin_lock(&state->lock);
1236
1237 list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) {
1238 if (res->id == reservation_id) {
1239 list_del(&res->list);
1240 kfree(res);
1241 found = 1;
1242 ret = 0;
1243 }
1244 }
1245 if (!found) {
1246 list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) {
1247 if (res->id == reservation_id) {
1248 list_del(&res->list);
1249 kfree(res);
1250 found = 1;
1251 ret = 0;
1252 }
1253 }
1254 }
1255 if (!found) {
1256 list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) {
1257 if (res->id == reservation_id) {
1258 list_del(&res->list);
1259 kfree(res);
1260 found = 1;
1261 ret = 0;
1262 }
1263 }
1264 }
1265
1266 //raw_spin_unlock(&state->lock);
1267 raw_spin_unlock(&_global_env.lock);
1268 local_irq_restore(flags);
1269 } else {
1270 /* the reservation is a partitioned reservation */
1271 state = cpu_state_for(cpu);
1272 local_irq_save(flags);
1273 raw_spin_lock(&state->lock);
1274
1275 // res = sup_find_by_id(&state->sup_env, reservation_id);
1276 sup_env = &state->sup_env;
1277 list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) {
1278 if (res->id == reservation_id) {
1279/*
1280 if (lv == CRIT_LEVEL_A) {
1281 struct table_driven_reservation *tdres;
1282 tdres = container_of(res, struct table_driven_reservation, res);
1283 kfree(tdres->intervals);
1284 }
1285*/
1286 list_del(&res->list);
1287 kfree(res);
1288 found = 1;
1289 ret = 0;
1290 }
1291 }
1292 if (!found) {
1293 list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) {
1294 if (res->id == reservation_id) {
1295/* if (lv == CRIT_LEVEL_A) {
1296 struct table_driven_reservation *tdres;
1297 tdres = container_of(res, struct table_driven_reservation, res);
1298 kfree(tdres->intervals);
1299 }
1300*/
1301 list_del(&res->list);
1302 kfree(res);
1303 found = 1;
1304 ret = 0;
1305 }
1306 }
1307 }
1308 if (!found) {
1309 list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) {
1310 if (res->id == reservation_id) {
1311/* if (lv == CRIT_LEVEL_A) {
1312 struct table_driven_reservation *tdres;
1313 tdres = container_of(res, struct table_driven_reservation, res);
1314 kfree(tdres->intervals);
1315 }
1316*/
1317 list_del(&res->list);
1318 kfree(res);
1319 found = 1;
1320 ret = 0;
1321 }
1322 }
1323 }
1324
1325 raw_spin_unlock(&state->lock);
1326 local_irq_restore(flags);
1327 }
1328
1329 TRACE("Rerservation destroyed ret = %d\n", ret);
1330 return ret;
1331}
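/* mc2_reservation_destroy() repeats the same search-and-free walk over the
 * depleted, inactive, and active lists of both environments. A sketch of a
 * hypothetical helper for that step (destroy_reservation_in_list is not part
 * of this patch), relying on the same assumption noted in
 * mc2_deactivate_plugin() below that struct reservation is a prefix of the
 * containing reservation struct: */
static int destroy_reservation_in_list(struct list_head *head,
                                       unsigned int reservation_id)
{
        struct reservation *res, *next;

        list_for_each_entry_safe(res, next, head, list) {
                if (res->id == reservation_id) {
                        list_del(&res->list);
                        kfree(res);
                        return 1;       /* found and freed */
                }
        }
        return 0;
}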
1332
1333/* mc2_task_exit - Task becomes a normal (non-real-time) task.
1334 */
1335static void mc2_task_exit(struct task_struct *tsk)
1336{
1337 unsigned long flags;
1338 struct mc2_task_state* tinfo = get_mc2_state(tsk);
1339 struct mc2_cpu_state *state;
1340 enum crit_level lv = tinfo->mc2_param.crit;
1341 struct crit_entry* ce;
1342 int cpu;
1343
1344 local_irq_save(flags);
1345 if (tinfo->cpu != -1)
1346 state = cpu_state_for(tinfo->cpu);
1347 else
1348 state = local_cpu_state();
1349
1350 raw_spin_lock(&state->lock);
1351
1352 if (state->scheduled == tsk)
1353 state->scheduled = NULL;
1354
1355 ce = &state->crit_entries[lv];
1356 if (ce->running == tsk)
1357 ce->running = NULL;
1358
1359 /* remove from queues */
1360 if (is_running(tsk)) {
1361 /* Assumption: litmus_clock() is synchronized across cores
1362 * [see comment in pres_task_resume()] */
1363
1364 /* update both global and partitioned */
1365 if (lv < CRIT_LEVEL_C) {
1366 sup_update_time(&state->sup_env, litmus_clock());
1367 }
1368 else if (lv == CRIT_LEVEL_C) {
1369 raw_spin_lock(&_global_env.lock);
1370 gmp_update_time(&_global_env, litmus_clock());
1371 //raw_spin_unlock(&_global_env.lock);
1372 }
1373 /* 9/20/2015 fix
1374 mc2_update_ghost_state(state);
1375 */
1376 task_departs(tsk, 0);
1377 if (lv == CRIT_LEVEL_C)
1378 raw_spin_unlock(&_global_env.lock);
1379
1380 /* NOTE: drops state->lock */
1381 TRACE("mc2_exit()\n");
1382
1383 mc2_update_timer_and_unlock(state);
1384 } else {
1385 raw_spin_unlock(&state->lock);
1386
1387 }
1388
1389 if (lv == CRIT_LEVEL_C) {
1390 for_each_online_cpu(cpu) {
1391 state = cpu_state_for(cpu);
1392 if (state == local_cpu_state())
1393 continue;
1394 raw_spin_lock(&state->lock);
1395
1396 if (state->scheduled == tsk)
1397 state->scheduled = NULL;
1398
1399 ce = &state->crit_entries[lv];
1400 if (ce->running == tsk)
1401 ce->running = NULL;
1402
1403 raw_spin_unlock(&state->lock);
1404 }
1405 }
1406
1407 local_irq_restore(flags);
1408
1409 kfree(tsk_rt(tsk)->plugin_state);
1410 tsk_rt(tsk)->plugin_state = NULL;
1411 kfree(tsk_rt(tsk)->mc2_data);
1412 tsk_rt(tsk)->mc2_data = NULL;
1413}
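/* mc2_task_exit() clears the exiting task from the local CPU's state and,
 * for level-C tasks, from every other online CPU using the same two checks.
 * A sketch of a hypothetical helper for that shared step (mc2_clear_task_on
 * is not part of this patch), assuming the mc2_cpu_state/crit_entry layout
 * used above: */
static void mc2_clear_task_on(struct mc2_cpu_state *state,
                              struct task_struct *tsk, enum crit_level lv)
{
        struct crit_entry *ce = &state->crit_entries[lv];

        if (state->scheduled == tsk)
                state->scheduled = NULL;
        if (ce->running == tsk)
                ce->running = NULL;
}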
1414
1415/* create_polling_reservation - create a new polling reservation
1416 */
1417static long create_polling_reservation(
1418 int res_type,
1419 struct reservation_config *config)
1420{
1421 struct mc2_cpu_state *state;
1422 struct reservation* res;
1423 struct polling_reservation *pres;
1424 unsigned long flags;
1425 int use_edf = config->priority == LITMUS_NO_PRIORITY;
1426 int periodic = res_type == PERIODIC_POLLING;
1427 long err = -EINVAL;
1428
1429 /* sanity checks */
1430 if (config->polling_params.budget >
1431 config->polling_params.period) {
1432 printk(KERN_ERR "invalid polling reservation (%u): "
1433 "budget > period\n", config->id);
1434 return -EINVAL;
1435 }
1436 if (config->polling_params.budget >
1437 config->polling_params.relative_deadline
1438 && config->polling_params.relative_deadline) {
1439 printk(KERN_ERR "invalid polling reservation (%u): "
1440 "budget > deadline\n", config->id);
1441 return -EINVAL;
1442 }
1443 if (config->polling_params.offset >
1444 config->polling_params.period) {
1445 printk(KERN_ERR "invalid polling reservation (%u): "
1446 "offset > period\n", config->id);
1447 return -EINVAL;
1448 }
1449
1450 /* Allocate before we grab a spin lock.
1451 * Todo: would be nice to use a core-local allocation.
1452 */
1453 pres = kzalloc(sizeof(*pres), GFP_KERNEL);
1454 if (!pres)
1455 return -ENOMEM;
1456
1457 if (config->cpu != -1) {
1458
1459 //raw_spin_lock_irqsave(&_global_env.lock, flags);
1460 state = cpu_state_for(config->cpu);
1461 raw_spin_lock_irqsave(&state->lock, flags);
1462
1463 res = sup_find_by_id(&state->sup_env, config->id);
1464 if (!res) {
1465 polling_reservation_init(pres, use_edf, periodic,
1466 config->polling_params.budget,
1467 config->polling_params.period,
1468 config->polling_params.relative_deadline,
1469 config->polling_params.offset);
1470 pres->res.id = config->id;
1471 pres->res.blocked_by_ghost = 0;
1472 pres->res.is_ghost = NO_CPU;
1473 if (!use_edf)
1474 pres->res.priority = config->priority;
1475 sup_add_new_reservation(&state->sup_env, &pres->res);
1476 err = config->id;
1477 } else {
1478 err = -EEXIST;
1479 }
1480
1481 raw_spin_unlock_irqrestore(&state->lock, flags);
1482 //raw_spin_unlock_irqrestore(&_global_env.lock, flags);
1483
1484 } else {
1485 raw_spin_lock_irqsave(&_global_env.lock, flags);
1486
1487 res = gmp_find_by_id(&_global_env, config->id);
1488 if (!res) {
1489 polling_reservation_init(pres, use_edf, periodic,
1490 config->polling_params.budget,
1491 config->polling_params.period,
1492 config->polling_params.relative_deadline,
1493 config->polling_params.offset);
1494 pres->res.id = config->id;
1495 pres->res.blocked_by_ghost = 0;
1496 pres->res.scheduled_on = NO_CPU;
1497 pres->res.is_ghost = NO_CPU;
1498 if (!use_edf)
1499 pres->res.priority = config->priority;
1500 gmp_add_new_reservation(&_global_env, &pres->res);
1501 err = config->id;
1502 } else {
1503 err = -EEXIST;
1504 }
1505 raw_spin_unlock_irqrestore(&_global_env.lock, flags);
1506 }
1507
1508 if (err < 0)
1509 kfree(pres);
1510
1511 return err;
1512}
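/* A user-space sketch of requesting a periodic polling reservation handled
 * by create_polling_reservation() above. The reservation_config field names
 * follow the members consumed by the kernel code (id, cpu, priority,
 * polling_params.budget/period/relative_deadline/offset); a liblitmus-style
 * reservation_create() wrapper around the reservation_create system call is
 * assumed, and make_polling_reservation() itself is a hypothetical example. */
#include <litmus.h>

/* 10 ms budget every 50 ms, partitioned onto CPU 0, EDF-prioritized */
static int make_polling_reservation(void)
{
        struct reservation_config config = {0};

        config.id = 1;                                  /* arbitrary example id */
        config.cpu = 0;                                 /* partitioned reservation */
        config.priority = LITMUS_NO_PRIORITY;           /* use EDF priorities */
        config.polling_params.budget = 10 * 1000000LL;  /* 10 ms in ns */
        config.polling_params.period = 50 * 1000000LL;  /* 50 ms in ns */
        config.polling_params.relative_deadline = 0;    /* implicit deadline */
        config.polling_params.offset = 0;

        return reservation_create(PERIODIC_POLLING, &config);
}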
1513
1514#define MAX_INTERVALS 1024
1515
1516/* create_table_driven_reservation - create a table_driven reservation
1517 */
1518static long create_table_driven_reservation(
1519 struct reservation_config *config)
1520{
1521 struct mc2_cpu_state *state;
1522 struct reservation* res;
1523 struct table_driven_reservation *td_res = NULL;
1524 struct lt_interval *slots = NULL;
1525 size_t slots_size;
1526 unsigned int i, num_slots;
1527 unsigned long flags;
1528 long err = -EINVAL;
1529
1530
1531 if (!config->table_driven_params.num_intervals) {
1532 printk(KERN_ERR "invalid table-driven reservation (%u): "
1533 "no intervals\n", config->id);
1534 return -EINVAL;
1535 }
1536
1537 if (config->table_driven_params.num_intervals > MAX_INTERVALS) {
1538 printk(KERN_ERR "invalid table-driven reservation (%u): "
1539 "too many intervals (max: %d)\n", config->id, MAX_INTERVALS);
1540 return -EINVAL;
1541 }
1542
1543 num_slots = config->table_driven_params.num_intervals;
1544 slots_size = sizeof(slots[0]) * num_slots;
1545 slots = kzalloc(slots_size, GFP_KERNEL);
1546 if (!slots)
1547 return -ENOMEM;
1548
1549 td_res = kzalloc(sizeof(*td_res), GFP_KERNEL);
1550 if (!td_res)
1551 err = -ENOMEM;
1552 else
1553 err = copy_from_user(slots,
1554 config->table_driven_params.intervals, slots_size);
1555
1556 if (!err) {
1557 /* sanity checks */
1558 for (i = 0; !err && i < num_slots; i++)
1559 if (slots[i].end <= slots[i].start) {
1560 printk(KERN_ERR
1561 "invalid table-driven reservation (%u): "
1562 "invalid interval %u => [%llu, %llu]\n",
1563 config->id, i,
1564 slots[i].start, slots[i].end);
1565 err = -EINVAL;
1566 }
1567
1568 for (i = 0; !err && i + 1 < num_slots; i++)
1569 if (slots[i + 1].start <= slots[i].end) {
1570 printk(KERN_ERR
1571 "invalid table-driven reservation (%u): "
1572 "overlapping intervals %u, %u\n",
1573 config->id, i, i + 1);
1574 err = -EINVAL;
1575 }
1576
1577 if (slots[num_slots - 1].end >
1578 config->table_driven_params.major_cycle_length) {
1579 printk(KERN_ERR
1580 "invalid table-driven reservation (%u): last "
1581 "interval ends past major cycle %llu > %llu\n",
1582 config->id,
1583 slots[num_slots - 1].end,
1584 config->table_driven_params.major_cycle_length);
1585 err = -EINVAL;
1586 }
1587 }
1588
1589 if (!err) {
1590 state = cpu_state_for(config->cpu);
1591 raw_spin_lock_irqsave(&state->lock, flags);
1592
1593 res = sup_find_by_id(&state->sup_env, config->id);
1594 if (!res) {
1595 table_driven_reservation_init(td_res,
1596 config->table_driven_params.major_cycle_length,
1597 slots, num_slots);
1598 td_res->res.id = config->id;
1599 td_res->res.priority = config->priority;
1600 td_res->res.blocked_by_ghost = 0;
1601 sup_add_new_reservation(&state->sup_env, &td_res->res);
1602 err = config->id;
1603 } else {
1604 err = -EEXIST;
1605 }
1606
1607 raw_spin_unlock_irqrestore(&state->lock, flags);
1608 }
1609
1610 if (err < 0) {
1611 kfree(slots);
1612 kfree(td_res);
1613 }
1614
1615 return err;
1616}
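/* A companion sketch for create_table_driven_reservation() above, following
 * the table_driven_params fields it consumes (num_intervals, intervals,
 * major_cycle_length) and the lt_interval start/end members checked there.
 * Times are nanoseconds; the reservation_create() wrapper and
 * make_table_driven_reservation() are assumptions, as in the polling sketch. */
static int make_table_driven_reservation(void)
{
        /* two slots of a 100 ms major cycle: [0, 10 ms) and [50 ms, 60 ms) */
        static struct lt_interval slots[] = {
                { .start = 0,              .end = 10 * 1000000LL },
                { .start = 50 * 1000000LL, .end = 60 * 1000000LL },
        };
        struct reservation_config config = {0};

        config.id = 2;          /* arbitrary example id */
        config.cpu = 0;         /* table-driven reservations are per-CPU */
        config.priority = 1;    /* fixed priority for the level-A table */
        config.table_driven_params.major_cycle_length = 100 * 1000000LL;
        config.table_driven_params.num_intervals = 2;
        config.table_driven_params.intervals = slots;

        return reservation_create(TABLE_DRIVEN, &config);
}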
1617
1618/* mc2_reservation_create - reservation_create system call backend
1619 */
1620static long mc2_reservation_create(int res_type, void* __user _config)
1621{
1622 long ret = -EINVAL;
1623 struct reservation_config config;
1624
1625 TRACE("Attempt to create reservation (%d)\n", res_type);
1626
1627 if (copy_from_user(&config, _config, sizeof(config)))
1628 return -EFAULT;
1629
1630 if (config.cpu != -1) {
1631 if (config.cpu < 0 || !cpu_online(config.cpu)) {
1632 printk(KERN_ERR "invalid polling reservation (%u): "
1633 "CPU %d offline\n", config.id, config.cpu);
1634 return -EINVAL;
1635 }
1636 }
1637
1638 switch (res_type) {
1639 case PERIODIC_POLLING:
1640 case SPORADIC_POLLING:
1641 ret = create_polling_reservation(res_type, &config);
1642 break;
1643
1644 case TABLE_DRIVEN:
1645 ret = create_table_driven_reservation(&config);
1646 break;
1647
1648 default:
1649 return -EINVAL;
1650 }
1651
1652 return ret;
1653}
1654
1655static struct domain_proc_info mc2_domain_proc_info;
1656
1657static long mc2_get_domain_proc_info(struct domain_proc_info **ret)
1658{
1659 *ret = &mc2_domain_proc_info;
1660 return 0;
1661}
1662
1663static void mc2_setup_domain_proc(void)
1664{
1665 int i, cpu;
1666 int num_rt_cpus = num_online_cpus();
1667
1668 struct cd_mapping *cpu_map, *domain_map;
1669
1670 memset(&mc2_domain_proc_info, 0, sizeof(mc2_domain_proc_info));
1671 init_domain_proc_info(&mc2_domain_proc_info, num_rt_cpus, num_rt_cpus);
1672 mc2_domain_proc_info.num_cpus = num_rt_cpus;
1673 mc2_domain_proc_info.num_domains = num_rt_cpus;
1674
1675 i = 0;
1676 for_each_online_cpu(cpu) {
1677 cpu_map = &mc2_domain_proc_info.cpu_to_domains[i];
1678 domain_map = &mc2_domain_proc_info.domain_to_cpus[i];
1679
1680 cpu_map->id = cpu;
1681 domain_map->id = i;
1682 cpumask_set_cpu(i, cpu_map->mask);
1683 cpumask_set_cpu(cpu, domain_map->mask);
1684 ++i;
1685 }
1686}
1687
1688static long mc2_activate_plugin(void)
1689{
1690 int cpu, lv;
1691 struct mc2_cpu_state *state;
1692 struct cpu_entry *ce;
1693
1694 gmp_init(&_global_env);
1695 raw_spin_lock_init(&_lowest_prio_cpu.lock);
1696
1697 for_each_online_cpu(cpu) {
1698 TRACE("Initializing CPU%d...\n", cpu);
1699
1700 resched_cpu[cpu] = 0;
1701 state = cpu_state_for(cpu);
1702 ce = &_lowest_prio_cpu.cpu_entries[cpu];
1703
1704 ce->cpu = cpu;
1705 ce->scheduled = NULL;
1706 ce->deadline = ULLONG_MAX;
1707 ce->lv = NUM_CRIT_LEVELS;
1708 ce->will_schedule = false;
1709
1710 raw_spin_lock_init(&state->lock);
1711 state->cpu = cpu;
1712 state->scheduled = NULL;
1713 for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) {
1714 struct crit_entry *cr_entry = &state->crit_entries[lv];
1715 cr_entry->level = lv;
1716 cr_entry->running = NULL;
1717 }
1718 sup_init(&state->sup_env);
1719
1720 hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1721 state->timer.function = on_scheduling_timer;
1722 }
1723
1724 mc2_setup_domain_proc();
1725
1726 return 0;
1727}
1728
1729static void mc2_finish_switch(struct task_struct *prev)
1730{
1731 struct mc2_cpu_state *state = local_cpu_state();
1732
1733 state->scheduled = is_realtime(current) ? current : NULL;
1734}
1735
1736static long mc2_deactivate_plugin(void)
1737{
1738 int cpu;
1739 struct mc2_cpu_state *state;
1740 struct reservation *res;
1741 struct next_timer_event *event;
1742 struct cpu_entry *ce;
1743
1744 for_each_online_cpu(cpu) {
1745 state = cpu_state_for(cpu);
1746 raw_spin_lock(&state->lock);
1747
1748 hrtimer_cancel(&state->timer);
1749
1750 ce = &_lowest_prio_cpu.cpu_entries[cpu];
1751
1752 ce->cpu = cpu;
1753 ce->scheduled = NULL;
1754 ce->deadline = ULLONG_MAX;
1755 ce->lv = NUM_CRIT_LEVELS;
1756 ce->will_schedule = false;
1757
1758 /* Delete all reservations --- assumes struct reservation
1759 * is prefix of containing struct. */
1760
1761 while (!list_empty(&state->sup_env.active_reservations)) {
1762 res = list_first_entry(
1763 &state->sup_env.active_reservations,
1764 struct reservation, list);
1765 list_del(&res->list);
1766 kfree(res);
1767 }
1768
1769 while (!list_empty(&state->sup_env.inactive_reservations)) {
1770 res = list_first_entry(
1771 &state->sup_env.inactive_reservations,
1772 struct reservation, list);
1773 list_del(&res->list);
1774 kfree(res);
1775 }
1776
1777 while (!list_empty(&state->sup_env.depleted_reservations)) {
1778 res = list_first_entry(
1779 &state->sup_env.depleted_reservations,
1780 struct reservation, list);
1781 list_del(&res->list);
1782 kfree(res);
1783 }
1784
1785 raw_spin_unlock(&state->lock);
1786 }
1787
1788 raw_spin_lock(&_global_env.lock);
1789
1790 while (!list_empty(&_global_env.active_reservations)) {
1791 res = list_first_entry(
1792 &_global_env.active_reservations,
1793 struct reservation, list);
1794 list_del(&res->list);
1795 kfree(res);
1796 }
1797
1798 while (!list_empty(&_global_env.inactive_reservations)) {
1799 res = list_first_entry(
1800 &_global_env.inactive_reservations,
1801 struct reservation, list);
1802 list_del(&res->list);
1803 kfree(res);
1804 }
1805
1806 while (!list_empty(&_global_env.depleted_reservations)) {
1807 res = list_first_entry(
1808 &_global_env.depleted_reservations,
1809 struct reservation, list);
1810 list_del(&res->list);
1811 kfree(res);
1812 }
1813
1814 while (!list_empty(&_global_env.next_events)) {
1815 event = list_first_entry(
1816 &_global_env.next_events,
1817 struct next_timer_event, list);
1818 list_del(&event->list);
1819 kfree(event);
1820 }
1821
1822 raw_spin_unlock(&_global_env.lock);
1823
1824 destroy_domain_proc_info(&mc2_domain_proc_info);
1825 return 0;
1826}
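/* mc2_deactivate_plugin() drains six reservation lists with the same
 * while-loop. A sketch of a hypothetical helper for that step
 * (mc2_drain_reservations is not part of this patch), relying on the same
 * assumption stated above that struct reservation is a prefix of the
 * containing struct: */
static void mc2_drain_reservations(struct list_head *head)
{
        struct reservation *res;

        while (!list_empty(head)) {
                res = list_first_entry(head, struct reservation, list);
                list_del(&res->list);
                kfree(res);
        }
}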
1827
1828static struct sched_plugin mc2_plugin = {
1829 .plugin_name = "MC2",
1830 .schedule = mc2_schedule,
1831 .finish_switch = mc2_finish_switch,
1832 .task_wake_up = mc2_task_resume,
1833 .admit_task = mc2_admit_task,
1834 .task_new = mc2_task_new,
1835 .task_exit = mc2_task_exit,
1836 .complete_job = mc2_complete_job,
1837 .get_domain_proc_info = mc2_get_domain_proc_info,
1838 .activate_plugin = mc2_activate_plugin,
1839 .deactivate_plugin = mc2_deactivate_plugin,
1840 .reservation_create = mc2_reservation_create,
1841 .reservation_destroy = mc2_reservation_destroy,
1842};
1843
1844static int __init init_mc2(void)
1845{
1846 return register_sched_plugin(&mc2_plugin);
1847}
1848
1849module_init(init_mc2);