aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore3
-rw-r--r--Makefile22
-rw-r--r--bin/color.c50
-rw-r--r--bin/colorbench.c460
-rw-r--r--bin/colortest.c94
-rw-r--r--bin/perfcounters.c197
-rw-r--r--bin/rtspin.c33
-rw-r--r--bin/testcounters.c401
-rw-r--r--include/color.h9
-rw-r--r--include/internal.h5
-rw-r--r--include/litmus.h1
-rw-r--r--include/perfcounters.h36
-rw-r--r--src/kernel_iface.c15
-rw-r--r--src/syscalls.c7
14 files changed, 1284 insertions, 49 deletions
diff --git a/.gitignore b/.gitignore
index 7f419d0..07d6c61 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,6 +22,9 @@ showst
22rtspin 22rtspin
23cycles 23cycles
24measure_syscall 24measure_syscall
25colortest
26colorbench
27testcounters
25 28
26# build system files 29# build system files
27.config 30.config
diff --git a/Makefile b/Makefile
index 6b1feed..5527862 100644
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ host-arch := $(shell uname -m | \
12ARCH ?= ${host-arch} 12ARCH ?= ${host-arch}
13 13
14# LITMUS_KERNEL -- where to find the litmus kernel? 14# LITMUS_KERNEL -- where to find the litmus kernel?
15LITMUS_KERNEL ?= ../litmus-rt 15LITMUS_KERNEL ?= ../backup-litmus-rt
16 16
17 17
18# ############################################################################## 18# ##############################################################################
@@ -71,8 +71,8 @@ AR := ${CROSS_COMPILE}${AR}
71# Targets 71# Targets
72 72
73all = lib ${rt-apps} 73all = lib ${rt-apps}
74rt-apps = cycles base_task rt_launch locktest rtspin rtspin.ovh rtspin.beta release_ts \ 74rt-apps = cycles base_task rt_launch rtspin release_ts measure_syscall \
75 measure_syscall base_mt_task runtests bespin 75 base_mt_task runtests colortest colorbench testcounters bespin
76 76
77.PHONY: all lib clean dump-config TAGS tags cscope help 77.PHONY: all lib clean dump-config TAGS tags cscope help
78 78
@@ -155,7 +155,7 @@ arch/${include-${ARCH}}/include/asm/%.h: \
155 cp $< $@ 155 cp $< $@
156 156
157litmus-headers = include/litmus/rt_param.h include/litmus/unistd_32.h \ 157litmus-headers = include/litmus/rt_param.h include/litmus/unistd_32.h \
158 include/litmus/unistd_64.h include/litmus/sched_mc.h include/litmus/color.h 158 include/litmus/unistd_64.h include/litmus/color.h
159 159
160unistd-headers = \ 160unistd-headers = \
161 $(foreach file,${unistd-${ARCH}},arch/${include-${ARCH}}/include/asm/$(file)) 161 $(foreach file,${unistd-${ARCH}},arch/${include-${ARCH}}/include/asm/$(file))
@@ -230,12 +230,21 @@ lib-locktest = -lrt -pthread
230obj-measure_syscall = null_call.o 230obj-measure_syscall = null_call.o
231lib-measure_syscall = -lm 231lib-measure_syscall = -lm
232 232
233obj-colortest = colortest.o color.o
234lib-colortest = -static
235
236obj-colorbench = colorbench.o color.o perfcounters.o common.o
237lib-colorbench = -lpthread -lrt -lgsl -lgslcblas
238
239obj-testcounters = testcounters.o
240lib-testcounters =
241
233# ############################################################################## 242# ##############################################################################
234# Build everything that depends on liblitmus. 243# Build everything that depends on liblitmus.
235 244
236.SECONDEXPANSION: 245.SECONDEXPANSION:
237${rt-apps}: $${obj-$$@} liblitmus.a 246${rt-apps}: $${obj-$$@} liblitmus.a
238 $(CC) -o $@ $(LDFLAGS) ${ldf-$@} $(filter-out liblitmus.a,$+) $(LOADLIBS) $(LDLIBS) ${lib-$@} ${liblitmus-flags} 247 $(CC) -o $@ $(LDFLAGS) ${ldf-$@} $(filter-out liblitmus.a,$+) $(LOADLIBS) $(LDLIBS) ${liblitmus-flags} ${lib-$@}
239 248
240# ############################################################################## 249# ##############################################################################
241# Dependency resolution. 250# Dependency resolution.
@@ -273,7 +282,7 @@ $(error Cannot build without access to the LITMUS^RT kernel source)
273endif 282endif
274 283
275kernel-unistd-hdrs := $(foreach file,${unistd-headers},${LITMUS_KERNEL}/$(file)) 284kernel-unistd-hdrs := $(foreach file,${unistd-headers},${LITMUS_KERNEL}/$(file))
276hdr-ok := $(shell egrep '\#include ["<]litmus/unistd' ${kernel-unistd-hdrs} ) 285hdr-ok := $(shell egrep '\#include .*litmus/unistd' ${kernel-unistd-hdrs} )
277ifeq ($(strip $(hdr-ok)),) 286ifeq ($(strip $(hdr-ok)),)
278$(info (!!) Could not find LITMUS^RT system calls in ${kernel-unistd-hdrs}.) 287$(info (!!) Could not find LITMUS^RT system calls in ${kernel-unistd-hdrs}.)
279$(error Your kernel headers do not seem to be LITMUS^RT headers) 288$(error Your kernel headers do not seem to be LITMUS^RT headers)
@@ -290,4 +299,3 @@ $(error Cannot build without access to the architecture-specific files)
290endif 299endif
291 300
292endif 301endif
293
diff --git a/bin/color.c b/bin/color.c
new file mode 100644
index 0000000..2ec97a4
--- /dev/null
+++ b/bin/color.c
@@ -0,0 +1,50 @@
1#include <stdlib.h>
2#include <stdint.h>
3#include <sys/mman.h>
4#include <sys/fcntl.h>
5#include <unistd.h>
6
7#include <litmus/rt_param.h>
8
9#include "color.h"
10
11#define LITMUS_COLOR_ALLOC "/dev/litmus/color_alloc"
12#define LITMUS_COLOR_CTRL "/dev/litmus/color_ctrl"
13
14static int map_file(const char* filename, void **addr, size_t size)
15{
16 int error = 0;
17 int fd;
18
19 if (size > 0) {
20 fd = open(filename, O_RDWR);
21 if (fd >= 0) {
22 *addr = mmap(NULL, size,
23 PROT_READ | PROT_WRITE,
24 MAP_PRIVATE,
25 fd, 0);
26 if (*addr == MAP_FAILED)
27 error = -1;
28 close(fd);
29 } else
30 error = fd;
31 } else
32 *addr = NULL;
33 return error;
34}
35
36int map_color_ctrl(void **addr)
37{
38 return map_file(LITMUS_COLOR_CTRL, addr, PAGE_SIZE);
39}
40
41void* color_malloc(size_t size)
42{
43 int err;
44 void *mem;
45
46 err = map_file(LITMUS_COLOR_ALLOC, &mem, size);
47 if (err)
48 mem = NULL;
49 return mem;
50}
diff --git a/bin/colorbench.c b/bin/colorbench.c
new file mode 100644
index 0000000..c91b731
--- /dev/null
+++ b/bin/colorbench.c
@@ -0,0 +1,460 @@
1#include <stdint.h> /* rt_param needs uint types */
2#include <stdlib.h>
3#include <limits.h>
4#include <pthread.h>
5#include <sched.h>
6#include <errno.h>
7#include <stdio.h>
8#include <unistd.h>
9#include <sys/mman.h> /* mlockall */
10#include <sys/ioctl.h>
11
12#include <litmus/rt_param.h>
13
14#include "perfcounters.h"
15#include "color.h"
16#include "litmus.h"
17
18#define DEBUG 1
19#define NR_LOOPS 10
20
21/* Pound */
22#define NR_CPUS 4
23#define CACHE_SIZE_MB 8
24#define ASSOC 16
25#define LINE_SIZE 64
26
27#define CACHE_SIZE (CACHE_SIZE_MB * 1024 * 1024)
28#define TOTAL_COLORS (CACHE_SIZE / ASSOC / PAGE_SIZE)
29
30/* number of colors we actually use */
31#define USE_COLORS (TOTAL_COLORS >> color_shift)
32
33/* how many adjacent pages of the same color we need to allocate */
34#define CONTIG_COLORS (ARENA_PAGES / USE_COLORS)
35
36/* number of pages in arena */
37#define ARENA_PAGES (arena_size / PAGE_SIZE)
38
39/* page offset bit mask */
40#define PAGE_LOWER ((PAGE_SIZE - 1))
41
42/* number of integers in arena */
43#define ARENA_INTS (arena_size / sizeof(int))
44
45/* number of pages in arena */
46#define ARENA_PAGES (arena_size / PAGE_SIZE)
47
48/* number of cache lines in arena */
49#define ARENA_LINES (arena_size / LINE_SIZE)
50
51/* number of cache lines per page */
52#define PAGE_LINES (PAGE_SIZE / LINE_SIZE)
53
54/* number of integers in a page */
55#define PAGE_INTS (PAGE_SIZE / sizeof(int))
56
57/* number of integers in a cache line */
58#define LINE_INTS (LINE_SIZE / sizeof(int))
59
60/* convert page number and cache line number to an integer index */
61#define PAGE_AND_LINE_TO_IDX(page, line) \
62 (((page) * PAGE_INTS) + ((line) * LINE_INTS))
63
64/* what CPU a thread should run on */
65#define THREAD_CPU(t) (t * (NR_CPUS / nr_threads))
66
67struct pthread_state {
68 pthread_t thread;
69 int tid;
70 int retval;
71 struct color_ctrl_page *color_ctrl;
72 int *arena;
73};
74
75static pthread_barrier_t barrier;
76static int nr_threads;
77static int arena_size;
78static int color_shift;
79//static int *page_line_order;
80static int *arena_line_order;
81static struct perf_counter perf_counters[NR_CPUS * NR_PERF_COUNTERS];
82
83#ifdef DEBUG
84#define debug_print(fmt, args...) do { \
85 fprintf(stderr, fmt, ##args); \
86 } while (0)
87#define debug_print_thread(ts, fmt, args...) do { \
88 debug_print("T%d: " fmt, ts->tid, ##args); \
89 } while (0)
90#else
91#define debug_print(fmt, args...) do {} while (0)
92#define debug_print_thread(ts, fmt, args...) do {} while (0)
93#endif
94
95/*
96 * Get a random number in [0, max). Not really a good way to do this.
97 */
98inline int randrange(const int max)
99{
100 return (rand() / (RAND_MAX / max + 1));
101}
102
103/*
104 * Write 1, 2, ..., n - 1, 0 into items.
105 */
106void sequential(int *items, const int len)
107{
108 int i;
109 for (i = 0; i < len; i++)
110 items[i] = (i + 1) % len;
111}
112
113/*
114 * Sattolo's algorithm makes a random cycle that includes all the elements
115 * in the items array.
116 */
117void sattolo(int *items, const int len)
118{
119 int i;
120 /* first set up 0, 1, ..., n - 1 */
121 for (i = 0; i < len; i++)
122 items[i] = i;
123 /* note: i is now n */
124 while (1 < i--) {
125 /* 0 <= j < i */
126 int t, j = randrange(i);
127 t = items[i];
128 items[i] = items[j];
129 items[j] = t;
130 }
131}
132
133/*
134 * Write the order to read the arena into the arena. Each page in the arena is
135 * read back, but the page is read in a random order to prevent the prefetcher
136 * from working.
137 *
138 * Starting at position 0 in the page_line_order means the cycle ends with 0.
139 * We use 0 in the arena to signify that we are done reading.
140 */
141#if 0
142static void init_arena_page_line_order(int *arena)
143{
144 int cur_page;
145 for (cur_page = 0; cur_page < ARENA_PAGES; cur_page++) {
146 /* for each page in the arena */
147 int cur_line;
148 for (cur_line = 0; cur_line < PAGE_LINES; cur_line++) {
149 /* for each line in the page */
150 const int idx = PAGE_AND_LINE_TO_IDX(cur_page,
151 cur_line);
152 const int next_line = page_line_order[cur_line];
153 int next_idx = PAGE_AND_LINE_TO_IDX(cur_page,
154 next_line);
155
156 if (0 == next_line) {
157 /* special case: cycle end */
158 if (cur_page < ARENA_PAGES - 1) {
159 /* arena has more pages: go to next */
160 next_idx = PAGE_AND_LINE_TO_IDX(
161 (cur_page + 1), 0);
162 } else {
163 /* the very last element */
164 next_idx = 0;
165 }
166 }
167 arena[idx] = next_idx;
168 }
169 }
170}
171#endif
172static void init_arena_line_order(int *arena)
173{
174 int cur_line;
175 for (cur_line = 0; cur_line < ARENA_LINES; cur_line++) {
176 const int idx = LINE_INTS * cur_line;
177 const int next_line = arena_line_order[cur_line];
178 int next_idx = LINE_INTS * next_line;
179
180 if (0 == next_line) {
181 /* special case: cycle end */
182 next_idx = 0;
183 }
184 arena[idx] = next_idx;
185 }
186}
187
188static void setup_colors(struct pthread_state *state)
189{
190 int color, i;
191 for (color = 0; color < USE_COLORS; color++) {
192 /* what color do we use */
193 for (i = 0; i < CONTIG_COLORS; i++) {
194 /* how many times do we use it */
195 const int idx = CONTIG_COLORS * color + i;
196 state->color_ctrl->colors[idx] = color;
197 }
198 }
199#if 0
200 for (i = 0; i < ARENA_PAGES; i++)
201 printf("%d: %2d\n", i, state->color_ctrl->colors[i]);
202#endif
203}
204
205static int loop_once(struct pthread_state *state)
206{
207 int i = 0, j;
208 do {
209 i = state->arena[i];
210 j = i;
211 } while (i);
212 return j;
213}
214
215static void print_perf_counters(void)
216{
217 uint64_t val;
218 int err, cpu, i;
219 const char *name;
220
221 for (cpu = 0; cpu < NR_CPUS; cpu++) {
222 printf("CPU %d\n", cpu);
223 for (i = 0; i < NR_PERF_COUNTERS; i++) {
224 const int idx = cpu * NR_PERF_COUNTERS + i;
225 name = get_perf_name(&perf_counters[idx]);
226 err = read_perf_counter(&perf_counters[idx], &val);
227 if (err)
228 printf("%50s: ERROR\n", name);
229 else
230#if 0
231 printf("%50s: %10.3f\n", name,
232 ((double)val) / NR_LOOPS);
233#endif
234 printf("%50s: %lu\n", name, val);
235 }
236 }
237}
238
239int thread_init(struct pthread_state *state)
240{
241 const int cpu = THREAD_CPU(state->tid);
242 int err = 0;
243
244 err = be_migrate_to(cpu);
245 if (err) {
246 debug_print_thread(state, "set affinity failed\n");
247 goto out;
248 }
249
250 err = map_color_ctrl((void**)&state->color_ctrl);
251 if (err) {
252 debug_print_thread(state, "mapping control device failed\n");
253 goto out;
254 }
255
256 setup_colors(state);
257
258 state->arena = color_malloc(arena_size);
259 if (!state->arena) {
260 debug_print_thread(state, "alloc arena failed\n");
261 err = 1;
262 goto out;
263 }
264
265 //init_arena_page_line_order(state->arena);
266 init_arena_line_order(state->arena);
267
268 err = mlockall(MCL_CURRENT|MCL_FUTURE);
269 if (err)
270 {
271 debug_print_thread(state, "mlockall failed\n");
272 goto out;
273 }
274out:
275 return err;
276}
277
278static void change_counters(const int request)
279{
280 int cpu;
281 for (cpu = 0; cpu < NR_CPUS; cpu++) {
282 const int ctr_idx = cpu * NR_PERF_COUNTERS;
283 ioctl(perf_counters[ctr_idx].fd, request);
284 }
285}
286
287static void reset_counters(void)
288{
289 int cpu, counter;
290 for (cpu = 0; cpu < NR_CPUS; cpu++) {
291 for (counter = 0; counter < NR_PERF_COUNTERS; counter++) {
292 const int ctr_idx = cpu * NR_PERF_COUNTERS + counter;
293 ioctl(perf_counters[ctr_idx].fd, PERF_EVENT_IOC_RESET);
294 }
295 }
296}
297
298void * thread_start(void *data)
299{
300 struct pthread_state *state = (struct pthread_state*) data;
301 int i;
302
303 state->retval = thread_init(state);
304 if (state->retval)
305 goto out;
306
307 pthread_barrier_wait(&barrier);
308
309#if 0
310 if (0 == state->tid)
311 change_counters(PERF_EVENT_IOC_ENABLE);
312
313 pthread_barrier_wait(&barrier);
314#endif
315
316 change_counters(PERF_EVENT_IOC_ENABLE);
317
318 for (i = 0; i < NR_LOOPS; ++i) {
319 loop_once(state);
320 //pthread_barrier_wait(&barrier);
321 print_perf_counters();
322 reset_counters();
323 }
324
325 if (0 == state->tid)
326 change_counters(PERF_EVENT_IOC_DISABLE);
327
328out:
329 pthread_exit(&state->retval);
330}
331
332int xstrtol(char *str, long int *conv)
333{
334 char *endptr;
335 long int val;
336 int err = 0;
337
338 errno = 0;
339 val = strtol(str, &endptr, 0);
340 if (0 == val && str == endptr) {
341 err = 1;
342 goto out;
343 }
344 if (ERANGE == errno && (LONG_MIN == val || LONG_MAX == val)) {
345 err = 1;
346 goto out;
347 }
348 *conv = val;
349out:
350 return err;
351}
352
353static int setup_perf_counters(void)
354{
355 int cpu, ret = 0;
356 for (cpu = 0; cpu < NR_CPUS; cpu++) {
357 const int idx = cpu * NR_PERF_COUNTERS;
358 const int group_leader = -1;
359 ret = setup_cpu_perf(cpu, group_leader, &perf_counters[idx]);
360 if (ret)
361 goto out;
362 }
363out:
364 return ret;
365}
366
367#define CHECK(fun, ...) { \
368 int err = fun(__VA_ARGS__); \
369 if (err) { \
370 debug_print(#fun " failed\n"); \
371 return err; \
372 } \
373}
374
375int main(int argc, char **argv)
376{
377 int ret = 0, err, i;
378 long int strtol_val;
379
380 struct pthread_state *pthread_state;
381 pthread_attr_t attr;
382
383 if (argc < 3) {
384 debug_print("usage: %s <threads> <arena-size> <color-shift>\n",
385 argv[0]);
386 ret = 1;
387 goto out;
388 }
389
390 err = xstrtol(argv[1], &strtol_val);
391 nr_threads = strtol_val;
392 err |= xstrtol(argv[2], &strtol_val);
393 arena_size = strtol_val;
394 err |= xstrtol(argv[3], &strtol_val);
395 color_shift = strtol_val;
396 if (err) {
397 debug_print("non-integer argument?\n");
398 ret = 1;
399 goto out;
400 }
401 if (arena_size & PAGE_LOWER) {
402 debug_print("arena size not page multiple\n");
403 ret = 1;
404 goto out;
405 }
406
407 pthread_state = malloc(nr_threads * sizeof(*pthread_state));
408 //page_line_order = malloc(PAGE_LINES * sizeof(*page_line_order));
409 arena_line_order = malloc(ARENA_LINES * sizeof(*arena_line_order));
410 //if (!pthread_state || !page_line_order) {
411 if (!pthread_state || !arena_line_order) {
412 debug_print("could not malloc\n");
413 ret = 1;
414 goto out;
415 }
416
417 CHECK(setup_perf_counters);
418
419 CHECK(pthread_attr_init, &attr);
420 CHECK(pthread_attr_setdetachstate, &attr, PTHREAD_CREATE_JOINABLE);
421 CHECK(pthread_barrier_init, &barrier, NULL, nr_threads);
422
423 //sattolo(page_line_order, PAGE_LINES);
424 sattolo(arena_line_order, ARENA_LINES);
425 //sequential(page_line_order, PAGE_LINES);
426
427 for (i = 0; i < nr_threads; i++) {
428 pthread_state[i].tid = i;
429 CHECK(pthread_create, &pthread_state[i].thread, &attr, thread_start,
430 (void*)&pthread_state[i]);
431 }
432
433 for (i = 0; i < nr_threads; i++) {
434 int *retval;
435 err = pthread_join(pthread_state[i].thread, (void**)&retval);
436 if (err) {
437 debug_print("pthread_join failed\n");
438 ret = err;
439 goto out;
440 }
441 if (PTHREAD_CANCELED == retval || *retval) {
442 debug_print("bad retval T%d\n", i);
443 ret = 1;
444 }
445 }
446
447 CHECK(pthread_barrier_destroy, &barrier);
448 CHECK(pthread_attr_destroy, &attr);
449
450 printf("arena size: %d\n", arena_size);
451 printf("color shift: %d\n", color_shift);
452 printf("arena pages: %d\n", ARENA_PAGES);
453 printf("arena lines: %d\n", ARENA_LINES);
454 printf("use colors: %d\n", USE_COLORS);
455 printf("contiguous colors: %d\n", CONTIG_COLORS);
456 //print_perf_counters();
457
458out:
459 return ret;
460}
diff --git a/bin/colortest.c b/bin/colortest.c
new file mode 100644
index 0000000..02a9169
--- /dev/null
+++ b/bin/colortest.c
@@ -0,0 +1,94 @@
1#include <stdlib.h>
2#include <stdio.h>
3#include <stdint.h>
4
5#include <litmus/rt_param.h>
6
7#include "color.h"
8
9#define PAGES_TO_ALLOC 5
10
11static void* color_malloc_or_exit(size_t size)
12{
13 void *mem;
14 mem = color_malloc(size);
15 if (!mem) {
16 fprintf(stderr, "could not allocate memory.\n");
17 exit(EXIT_FAILURE);
18 }
19 return mem;
20}
21
22static void check_memory(const char *start, const size_t len, const char expect)
23{
24 const char *cur = start;
25 int i;
26
27 for (i = 0; i < len; i++, cur++) {
28 if (expect != *cur) {
29 fprintf(stderr, "inconsistent memory: "
30 "start:0x%p i:%d cur:0x%p "
31 "expect:0x%x have:0x%x\n",
32 start, i, cur,
33 expect, *cur);
34 exit(EXIT_FAILURE);
35 }
36 }
37}
38
39void setup_pages(struct color_ctrl_page *ctrl, uint16_t start,
40 uint16_t nr_pages, uint16_t stride)
41{
42 int i;
43 for (i = 0; i < nr_pages; i++, start += stride)
44 ctrl->colors[i] = start;
45}
46
47int main(int argc, char **argv)
48{
49 const char VAL1 = 0x0e; /* 0b00001110 */
50 const char VAL2 = 0x3c; /* 0b00111100 */
51 struct color_ctrl_page *color_ctrl;
52 char *mem1, *mem2;
53 int err, i;
54
55 err = map_color_ctrl((void**)&color_ctrl);
56 if (err) {
57 fprintf(stderr, "Could not map color control interface.\n");
58 exit(EXIT_FAILURE);
59 }
60
61 setup_pages(color_ctrl, 0, PAGES_TO_ALLOC, 1);
62 mem1 = color_malloc_or_exit(PAGE_SIZE * PAGES_TO_ALLOC);
63 setup_pages(color_ctrl, PAGES_TO_ALLOC, PAGES_TO_ALLOC, 2);
64 mem2 = color_malloc_or_exit(PAGE_SIZE * PAGES_TO_ALLOC);
65
66 /* zero it */
67 for (i = 0; i < PAGE_SIZE * PAGES_TO_ALLOC; i++) {
68 mem1[i] = mem2[i] = 0;
69 }
70
71 printf("checking both arenas are zero\n");
72 check_memory(mem1, PAGE_SIZE * PAGES_TO_ALLOC, 0);
73 check_memory(mem2, PAGE_SIZE * PAGES_TO_ALLOC, 0);
74
75 printf("writing to mem1\n");
76 for (i = 0; i < PAGE_SIZE * PAGES_TO_ALLOC; i++) {
77 mem1[i] = VAL1;
78 }
79
80 printf("checking mem1 for value and mem2 for zero\n");
81 check_memory(mem1, PAGE_SIZE * PAGES_TO_ALLOC, VAL1);
82 check_memory(mem2, PAGE_SIZE * PAGES_TO_ALLOC, 0);
83
84 printf("writing to mem2\n");
85 for (i = 0; i < PAGE_SIZE * PAGES_TO_ALLOC; i++) {
86 mem2[i] = VAL2;
87 }
88
89 printf("checking mem1 and mem2 for their values\n");
90 check_memory(mem1, PAGE_SIZE * PAGES_TO_ALLOC, VAL1);
91 check_memory(mem2, PAGE_SIZE * PAGES_TO_ALLOC, VAL2);
92
93 exit(EXIT_SUCCESS);
94}
diff --git a/bin/perfcounters.c b/bin/perfcounters.c
new file mode 100644
index 0000000..6302164
--- /dev/null
+++ b/bin/perfcounters.c
@@ -0,0 +1,197 @@
1#include "asm/unistd.h" /* from kernel source tree */
2#include <unistd.h> /* for syscall */
3
4#include <sys/ioctl.h>
5
6#include "perfcounters.h"
7
8#define C(x) (PERF_COUNT_HW_CACHE_##x)
9#define ATTR_CONFIG_CACHE(cache, op, result) \
10 (((C(cache) & 0xffULL) << 0) | \
11 ((C(op) & 0xffULL) << 8) | \
12 ((C(result) & 0xffULL) << 16))
13
14#define ATTR_CONFIG(event, umask) \
15 ((((event) & 0xffULL) << 0) | \
16 (((umask) & 0xffULL) << 8))
17
18static struct perf_event_attr perf_event_attr = {
19 .type = 0, /* set per initilized event */
20 .size = 0, /* set later */
21 .config = 0, /* set per initilized event */
22 { .sample_period = 0, }, /* is a counter, so no period */
23 .disabled = 0, /* event is enabled */
24 .inherit = 0, /* children don't inherit */
25 .pinned = 0, /* set per initilized event */
26 .exclusive = 0, /* set per initilized event */
27 .exclude_user = 0, /* don't count user (when set) */
28 .exclude_kernel = 0, /* ditto kernel */
29 .exclude_hv = 0, /* ditto hypervisor */
30 .exclude_idle = 0, /* don't count when idle */
31 .mmap = 0, /* include mmap data */
32 .comm = 0, /* include comm data */
33};
34
35struct perf_counter_setup {
36 char *name;
37 enum perf_type_id type;
38 uint64_t config;
39};
40
41#if 0
42/* these events are always zero */
43static struct perf_fd perf_fds[] = {
44 {
45 .fd = -1,
46 .name = "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
47 .type = PERF_TYPE_RAW,
48 .config = ATTR_CONFIG(0x0f, 0x08),
49 .exclusive = 0,
50 .pinned = 0,
51 },
52 {
53 .fd = -1,
54 .name = "MEM_UNCORE_RETIRED.REMOTE_DRAM",
55 .type = PERF_TYPE_RAW,
56 .config = ATTR_CONFIG(0x0f, 0x10),
57 .exclusive = 0, /* child events cannot be exclusive */
58 .pinned = 0, /* child events cannot be pinned */
59 },
60 { },
61};
62#endif
63
64static struct perf_counter_setup perf_setup[NR_PERF_COUNTERS] = {
65#if 0
66 {
67 .name = "MEM_UNCORE_RETIRED.LOCAL_DRAM",
68 .type = PERF_TYPE_RAW,
69 .config = ATTR_CONFIG(0x0f, 0x20),
70 },
71 {
72 .name = "L2_RQSTS.PREFETCH_HIT",
73 .type = PERF_TYPE_RAW,
74 .config = ATTR_CONFIG(0x24, 0x40),
75 },
76 {
77 .name = "L2_RQSTS.PREFETCH_MISS",
78 .type = PERF_TYPE_RAW,
79 .config = ATTR_CONFIG(0x24, 0x80),
80 },
81#endif
82 {
83 .name = "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
84 .type = PERF_TYPE_RAW,
85 .config = ATTR_CONFIG(0xcb, 0x08),
86 },
87 {
88 .name = "MEM_LOAD_RETIRED.L3_UNSHARED_HIT",
89 .type = PERF_TYPE_RAW,
90 .config = ATTR_CONFIG(0xcb, 0x04),
91 },
92 {
93 .name = "MEM_LOAD_RETIRED.L3_MISS",
94 .type = PERF_TYPE_RAW,
95 .config = ATTR_CONFIG(0xcb, 0x10),
96 },
97 {
98 .name = "Off Core Response Counter",
99 .type = PERF_TYPE_HW_CACHE,
100 .config = ATTR_CONFIG_CACHE(LL, OP_READ, RESULT_MISS),
101#if 0
102 /* read misses */
103 .config = ATTR_CONFIG_CACHE(LL, OP_READ, RESULT_MISS),
104 /* write misses */
105 .config = ATTR_CONFIG_CACHE(LL, OP_WRITE, RESULT_MISS),
106 /* prefetch misses */
107 .config = ATTR_CONFIG_CACHE(LL, OP_PREFETCH, RESULT_MISS),
108#endif
109 },
110};
111
112
113/* from kernel tools/perf/perf.h */
114int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
115 int cpu, int group_fd, unsigned long flags)
116{
117 attr->size = sizeof(*attr);
118 return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
119}
120
121/* make the temporary attributes shadow those in the perf_fd temporarially */
122static void write_global_perf_attr(const struct perf_counter_setup *p)
123{
124 perf_event_attr.type = p->type;
125 perf_event_attr.config = p->config;
126 perf_event_attr.pinned = 0;
127 perf_event_attr.exclusive = 0;
128}
129
130int setup_cpu_perf(const int cpu, const int group_leader,
131 struct perf_counter *perf_counters)
132{
133 const int perf_pid = -1; /* -1: all tasks */
134 int err = 0, i;
135
136 if (-1 == group_leader) {
137 /* first element determines the group for all others */
138 perf_counters->fd = -1;
139 }
140
141 for (i = 0; i < NR_PERF_COUNTERS; i++) {
142 int perf_group;
143
144 /* setup the attributes to pass in */
145 write_global_perf_attr(&perf_setup[i]);
146
147 if (0 == i && -1 == group_leader) {
148 /* but group leader is pinned and exclusive */
149 perf_event_attr.exclusive = 1;
150 perf_event_attr.pinned = 1;
151 perf_group = -1;
152 } else if (-1 == group_leader) {
153 /* not first counter, but no group passed in */
154 perf_group = perf_counters[0].fd;
155 }
156
157 perf_counters[i].fd = sys_perf_event_open(&perf_event_attr,
158 perf_pid, cpu, perf_group, 0);
159
160 if (0 > perf_counters[i].fd) {
161 err = -1;
162 goto out;
163 }
164
165 /* save the attributes in the user-visible configuration */
166 perf_counters[i].type = perf_setup[i].type;
167 perf_counters[i].config = perf_setup[i].config;
168 }
169out:
170 return err;
171}
172
173static inline int perf_setup_match(const struct perf_counter_setup* ps,
174 const struct perf_counter *pc)
175{
176 return (ps->type == pc->type && ps->config == pc->config);
177}
178
179const char* get_perf_name(const struct perf_counter* perf_counter)
180{
181 char *ret = NULL;
182 int i;
183
184 for (i = 0; i < NR_PERF_COUNTERS; i++) {
185 if (perf_setup_match(&perf_setup[i], perf_counter)) {
186 ret = perf_setup[i].name;
187 break;
188 }
189 }
190 return ret;
191}
192
193int read_perf_counter(const struct perf_counter* perf_counter, uint64_t *val)
194{
195 ssize_t ret = read(perf_counter->fd, val, sizeof(*val));
196 return (ret <= 0);
197}
diff --git a/bin/rtspin.c b/bin/rtspin.c
index c680529..a19637c 100644
--- a/bin/rtspin.c
+++ b/bin/rtspin.c
@@ -10,9 +10,6 @@
10 10
11#include "litmus.h" 11#include "litmus.h"
12#include "common.h" 12#include "common.h"
13/* #include "color.h" */
14
15#include <litmus/sched_mc.h>
16 13
17static void usage(char *error) { 14static void usage(char *error) {
18 fprintf(stderr, "Error: %s\n", error); 15 fprintf(stderr, "Error: %s\n", error);
@@ -23,7 +20,6 @@ static void usage(char *error) {
23 " rt_spin -l\n" 20 " rt_spin -l\n"
24 "\n" 21 "\n"
25 "COMMON-OPTS = [-w] [-p PARTITION] [-c CLASS] [-s SCALE]\n" 22 "COMMON-OPTS = [-w] [-p PARTITION] [-c CLASS] [-s SCALE]\n"
26 " [-r CRITICALITY = [a|b|c|d]] [-i MC-LVL-A-ID]\n"
27 " [-h NUM-COLORS,AVG-WAYS]\n" 23 " [-h NUM-COLORS,AVG-WAYS]\n"
28 "\n" 24 "\n"
29 "WCET and PERIOD are milliseconds, DURATION is seconds.\n"); 25 "WCET and PERIOD are milliseconds, DURATION is seconds.\n");
@@ -165,19 +161,7 @@ static int job(double exec_time, double program_end)
165 } 161 }
166} 162}
167 163
168enum crit_level str2crit(const char* str) 164#define OPTSTR "p:c:wlveo:f:s:h:"
169{
170 if (0 == strncasecmp("a", str, 1))
171 return CRIT_LEVEL_A;
172 else if (0 == strncasecmp("b", str, 1))
173 return CRIT_LEVEL_B;
174 else if (0 == strncasecmp("c", str, 1))
175 return CRIT_LEVEL_C;
176 /* failure */
177 return NUM_CRIT_LEVELS;
178}
179
180#define OPTSTR "p:c:wlveo:f:s:r:i:h:"
181 165
182int main(int argc, char** argv) 166int main(int argc, char** argv)
183{ 167{
@@ -200,7 +184,6 @@ int main(int argc, char** argv)
200 int cur_job, num_jobs; 184 int cur_job, num_jobs;
201 int task_colors = 0; 185 int task_colors = 0;
202 int avg_ways = 0; 186 int avg_ways = 0;
203 struct mc_task mc_task = { .crit = NUM_CRIT_LEVELS, .lvl_a_id = -1 };
204 187
205 progname = argv[0]; 188 progname = argv[0];
206 189
@@ -233,14 +216,6 @@ int main(int argc, char** argv)
233 case 's': 216 case 's':
234 scale = atof(optarg); 217 scale = atof(optarg);
235 break; 218 break;
236 case 'r':
237 mc_task.crit = str2crit(optarg);
238 if (NUM_CRIT_LEVELS == mc_task.crit)
239 usage("Bad crit level.");
240 break;
241 case 'i':
242 mc_task.lvl_a_id = atoi(optarg);
243 break;
244 case 'h': 219 case 'h':
245 sscanf(optarg, "%d,%d", &task_colors, &avg_ways); 220 sscanf(optarg, "%d,%d", &task_colors, &avg_ways);
246 break; 221 break;
@@ -311,12 +286,6 @@ int main(int argc, char** argv)
311 if (ret < 0) 286 if (ret < 0)
312 bail_out("could not setup rt task params"); 287 bail_out("could not setup rt task params");
313 288
314 if (NUM_CRIT_LEVELS != mc_task.crit) {
315 ret = set_rt_task_mc_param(gettid(), &mc_task);
316 if (ret < 0)
317 bail_out("could not setup rt mixed criticality params");
318 }
319
320 init_litmus(); 289 init_litmus();
321 290
322 request_resources(task_colors, avg_ways); 291 request_resources(task_colors, avg_ways);
diff --git a/bin/testcounters.c b/bin/testcounters.c
new file mode 100644
index 0000000..207851a
--- /dev/null
+++ b/bin/testcounters.c
@@ -0,0 +1,401 @@
1#if 0
2/* done in Makefile */
3#define _GNU_SOURCE /* or _BSD_SOURCE or _SVID_SOURCE */
4#endif
5
6#include "asm/unistd.h" /* from kernel source tree */
7#include <unistd.h> /* for syscall */
8
9#include <sys/ioctl.h>
10#include <stdio.h>
11#include <stdlib.h>
12#include <sched.h>
13#include <stdint.h> /* rt_param needs uint32 */
14
15#include "../../litmus-rt/include/linux/perf_event.h"
16
17#include <litmus/rt_param.h> /* page size macro */
18
19/* from kernel tools/perf/perf.h */
20static inline int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
21 int cpu, int group_fd, unsigned long flags)
22{
23 attr->size = sizeof(*attr);
24 return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
25}
26
27#define C(x) (PERF_COUNT_HW_CACHE_##x)
28#define ATTR_CONFIG_CACHE(cache, op, result) \
29 (((C(cache) & 0xffULL) << 0) | \
30 ((C(op) & 0xffULL) << 8) | \
31 ((C(result) & 0xffULL) << 16))
32
33#define ATTR_CONFIG(event, umask) \
34 ((((event) & 0xffULL) << 0) | \
35 (((umask) & 0xffULL) << 8))
36
37struct perf_event_attr perf_event_attr = {
38	.type = 0, /* set per initialized event */
39 .size = 0, /* set later */
40	.config = 0, /* set per initialized event */
41 { .sample_period = 0, }, /* is a counter, so no period */
42 .disabled = 0, /* event is enabled */
43 .inherit = 0, /* children don't inherit */
44	.pinned = 0, /* set per initialized event */
45	.exclusive = 0, /* set per initialized event */
46 .exclude_user = 0, /* don't count user */
47 .exclude_kernel = 0, /* ditto kernel */
48 .exclude_hv = 0, /* ditto hypervisor */
49 .exclude_idle = 0, /* don't count when idle */
50 .mmap = 0, /* include mmap data */
51 .comm = 0, /* include comm data */
52};
53
54/* Pound */
55#define NR_CPUS 4
56#define CACHE_SIZE_MB 8
57#define ASSOC 16
58#define LINE_SIZE 64
59#define CACHE_SIZE (CACHE_SIZE_MB * 1024 * 1024)
60
61/* arena size in bytes */
62//#define ARENA_SIZE (CACHE_SIZE * 14 / 16)
63#define ARENA_SIZE (CACHE_SIZE * 1)
64
65/* number of pages in arena */
66#define ARENA_PAGES (ARENA_SIZE / PAGE_SIZE)
67
68/* number of cache lines per page */
69#define PAGE_LINES (PAGE_SIZE / LINE_SIZE)
70
71/* number of cache lines in arena */
72#define ARENA_LINES (ARENA_SIZE / LINE_SIZE)
73
74/* number of integers in arena */
75#define ARENA_INTS (ARENA_SIZE / sizeof(int))
76
77/* number of integers in a page */
78#define PAGE_INTS (PAGE_SIZE / sizeof(int))
79
80/* number of integers in a cache line */
81#define LINE_INTS (LINE_SIZE / sizeof(int))
82
83/* convert page number and cache line number to an integer index */
84#define PAGE_AND_LINE_TO_IDX(page, line) \
85 (((page) * PAGE_INTS) + ((line) * LINE_INTS))
86
87
88/* not really a good way to do this */
89inline int randrange(const int max)
90{
91 return (rand() / (RAND_MAX / max + 1));
92}
93
94void sequential(int *items, const int len)
95{
96 int i;
97 for (i = 0; i < len; i++)
98 items[i] = (i + 1) % len;
99}
100
101/* Sattolo's algorithm makes a cycle. */
102void sattolo(int *items, const int len)
103{
104 int i;
105 for (i = 0; i < len; i++)
106 items[i] = i;
107 while (1 < i--) {
108 /* 0 <= j < i */
109 int t, j = randrange(i);
110 t = items[i];
111 items[i] = items[j];
112 items[j] = t;
113 }
114}
115
116/*
117 * Write the order to read the arena into the arena. Each page in the arena is
118 * read back, but the page is read in a random order to prevent the prefetcher
119 * from working.
120 */
121static void init_arena_page_line_order(int *arena, int *page_line_order)
122{
123 int cur_page;
124 for (cur_page = 0; cur_page < ARENA_PAGES; cur_page++) {
125 /* for each page in the arena */
126 int cur_line;
127 for (cur_line = 0; cur_line < PAGE_LINES; cur_line++) {
128 /* for each line in the page */
129 const int idx = PAGE_AND_LINE_TO_IDX(cur_page,
130 cur_line);
131 const int next_line = page_line_order[cur_line];
132 int next_idx = PAGE_AND_LINE_TO_IDX(cur_page,
133 next_line);
134
135 if (!next_line) {
136 /* special case: last line in the page */
137 if (cur_page < ARENA_PAGES - 1) {
138 /* arena has more pages: go to next */
139 next_idx = PAGE_AND_LINE_TO_IDX(
140 (cur_page + 1), 0);
141 } else {
142 /* the very last element */
143 next_idx = 0;
144 }
145 }
146 arena[idx] = next_idx;
147 }
148 }
149}
150
151static int loop_once(const int perf_fd, int *arena)
152{
153 int i = 0, j;
154 do {
155 i = arena[i];
156 j = i;
157 } while (i);
158 return j;
159}
160
161static int set_affinity(int cpu)
162{
163 cpu_set_t cpu_set;
164 CPU_ZERO(&cpu_set);
165 CPU_SET(cpu, &cpu_set);
166 return sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set);
167}
168
169struct perf_fd {
170 int fd;
171 char *name;
172 enum perf_type_id type;
173 __u64 config;
174 __u64 exclusive : 1,
175 pinned : 1,
176 __reserved_1 : 62;
177};
178
179#define PERF_FD_EMPTY(p) \
180 ((p)->fd == 0 && (p)->name == NULL && \
181 (p)->type == 0 && (p)->config == 0)
182#define PERF_FD_NON_EMPTY(p) (!PERF_FD_EMPTY(p))
183
184
185#if 0
186/* these events are always zero */
187static struct perf_fd perf_fds[] = {
188 {
189 .fd = -1,
190 .name = "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
191 .type = PERF_TYPE_RAW,
192 .config = ATTR_CONFIG(0x0f, 0x08),
193 .exclusive = 0,
194 .pinned = 0,
195 },
196 {
197 .fd = -1,
198 .name = "MEM_UNCORE_RETIRED.REMOTE_DRAM",
199 .type = PERF_TYPE_RAW,
200 .config = ATTR_CONFIG(0x0f, 0x10),
201 .exclusive = 0, /* child events cannot be exclusive */
202 .pinned = 0, /* child events cannot be pinned */
203 },
204 { },
205};
206#endif
207
208static struct perf_fd perf_fds[] = {
209 /* first element is assumed to be group leader */
210#if 0
211 {
212 .fd = -1,
213 .name = "MEM_UNCORE_RETIRED.LOCAL_DRAM",
214 .type = PERF_TYPE_RAW,
215 .config = ATTR_CONFIG(0x0f, 0x20),
216 .exclusive = 1, /* group leader is scheduled exclusively */
217	.pinned = 1, /* group leader is pinned to CPU (always on) */
218 },
219#endif
220 {
221 .fd = -1,
222 .name = "L2_RQSTS.PREFETCH_HIT",
223 .type = PERF_TYPE_RAW,
224 .config = ATTR_CONFIG(0x24, 0x40),
225#if 0
226 .exclusive = 0,
227 .pinned = 0,
228#endif
229 .exclusive = 1, /* group leader is scheduled exclusively */
230	.pinned = 1, /* group leader is pinned to CPU (always on) */
231 },
232 {
233 .fd = -1,
234 .name = "L2_RQSTS.PREFETCH_MISS",
235 .type = PERF_TYPE_RAW,
236 .config = ATTR_CONFIG(0x24, 0x80),
237 .exclusive = 0,
238 .pinned = 0,
239 },
240 {
241 .fd = -1,
242 .name = "MEM_LOAD_RETIRED.L3_MISS",
243 .type = PERF_TYPE_RAW,
244 .config = ATTR_CONFIG(0xcb, 0x10),
245 .exclusive = 0,
246 .pinned = 0,
247 },
248 {
249 .fd = -1,
250 .name = "Off Core Response Counter",
251 .type = PERF_TYPE_HW_CACHE,
252 .config = ATTR_CONFIG_CACHE(LL, OP_READ, RESULT_MISS),
253#if 0
254 /* read misses */
255 .config = ATTR_CONFIG_CACHE(LL, OP_READ, RESULT_MISS),
256 /* write misses */
257 .config = ATTR_CONFIG_CACHE(LL, OP_WRITE, RESULT_MISS),
258 /* prefetch misses */
259 .config = ATTR_CONFIG_CACHE(LL, OP_PREFETCH, RESULT_MISS),
260#endif
261 .exclusive = 0,
262 .pinned = 0,
263 },
264 { },
265};
266
267
268static inline void events_ioctl(const int request)
269{
270 ioctl(perf_fds[0].fd, request);
271}
272
273static void do_read(double divide)
274{
275 struct perf_fd *perf_fd;
276 for (perf_fd = perf_fds; PERF_FD_NON_EMPTY(perf_fd); perf_fd++) {
277 __u64 perf_val;
278 ssize_t ret;
279 ret = read(perf_fd->fd, &perf_val, sizeof(perf_val));
280 if (0 >= ret)
281 printf("%50s: ERROR\n", perf_fd->name);
282 else
283 printf("%50s: %10.3f\n",
284 perf_fd->name, (perf_val / divide));
285 ioctl(perf_fd->fd, PERF_EVENT_IOC_RESET);
286 }
287}
288
289static void write_global_perf_attr(struct perf_fd *perf_fd)
290{
291 perf_event_attr.type = perf_fd->type;
292 perf_event_attr.config = perf_fd->config;
293 perf_event_attr.exclusive = perf_fd->exclusive;
294 perf_event_attr.pinned = perf_fd->pinned;
295}
296
297#define CPU 0
298static int setup_perf(void)
299{
300 /* cannot have pid == -1 and cpu == -1 */
301 const int perf_pid = -1; /* -1: all tasks, 0: this task */
302 const int perf_cpu = CPU; /* -1: all CPUs (follow task) */
303 struct perf_fd *perf_fd;
304 int err = 0;
305
306 for (perf_fd = perf_fds; PERF_FD_NON_EMPTY(perf_fd); perf_fd++) {
307 /* make a group whose leader is the zeroth element */
308 const int perf_group = perf_fds[0].fd;
309
310 /* setup the attributes to pass in */
311 write_global_perf_attr(perf_fd);
312
313 perf_fd->fd = sys_perf_event_open(&perf_event_attr, perf_pid,
314 perf_cpu, perf_group, 0);
315
316 if (0 > perf_fd->fd) {
317 fprintf(stderr, "could not setup %s\n", perf_fd->name);
318 err = -1;
319 goto out;
320 }
321 }
322out:
323 return err;
324}
325
326int main(int argc, char **argv)
327{
328
329 const int task_cpu = CPU;
330 int ret = 0, i;
331 int *arena, *page_line_order;
332
333 if (set_affinity(task_cpu)) {
334 fprintf(stderr, "could not set affinity\n");
335 ret = -1;
336 goto out;
337 }
338
339 arena = malloc(ARENA_SIZE);
340 if (!arena) {
341 fprintf(stderr, "could not allocate memory\n");
342 ret = -1;
343 goto out;
344 }
345
346 page_line_order = malloc(PAGE_LINES * sizeof(*page_line_order));
347 if (!page_line_order) {
348 fprintf(stderr, "could not allocate memory\n");
349 ret = -1;
350 goto out;
351 }
352
353 sattolo(page_line_order, PAGE_LINES);
354 //sequential(page_line_order, PAGE_LINES);
355 init_arena_page_line_order(arena, page_line_order);
356
357 if (setup_perf()) {
358 ret = -1;
359 goto out;
360 }
361
362 printf("arena_size: %d\n", ARENA_SIZE);
363 printf("arena_lines: %d\n", ARENA_LINES);
364
365 printf("initially\n");
366 do_read(1.0);
367
368 events_ioctl(PERF_EVENT_IOC_ENABLE);
369 loop_once(perf_fds[0].fd, arena);
370 events_ioctl(PERF_EVENT_IOC_DISABLE);
371 printf("after a loop\n");
372 do_read(1.0);
373
374 events_ioctl(PERF_EVENT_IOC_ENABLE);
375 loop_once(perf_fds[0].fd, arena);
376 events_ioctl(PERF_EVENT_IOC_DISABLE);
377 printf("after another loop\n");
378 do_read(1.0);
379
380 events_ioctl(PERF_EVENT_IOC_ENABLE);
381 loop_once(perf_fds[0].fd, arena);
382 events_ioctl(PERF_EVENT_IOC_DISABLE);
383 printf("after another loop\n");
384 do_read(1.0);
385
386 events_ioctl(PERF_EVENT_IOC_ENABLE);
387 loop_once(perf_fds[0].fd, arena);
388 events_ioctl(PERF_EVENT_IOC_DISABLE);
389 printf("after another loop\n");
390 do_read(1.0);
391
392 events_ioctl(PERF_EVENT_IOC_ENABLE);
393 for (i = 0; i < 100; i++)
394 loop_once(perf_fds[0].fd, arena);
395 events_ioctl(PERF_EVENT_IOC_DISABLE);
396 printf("after 100 loops\n");
397 do_read(100.0);
398
399out:
400 return ret;
401}
diff --git a/include/color.h b/include/color.h
new file mode 100644
index 0000000..dc1b1cc
--- /dev/null
+++ b/include/color.h
@@ -0,0 +1,9 @@
1#ifndef COLOR_H
2#define COLOR_H
3
4#include <stddef.h> /* for size_t */
5
6int map_color_ctrl(void **);
7void* color_malloc(size_t);
8
9#endif
diff --git a/include/internal.h b/include/internal.h
index 07253b7..f30ce61 100644
--- a/include/internal.h
+++ b/include/internal.h
@@ -18,8 +18,13 @@ int __launch_rt_task(rt_fn_t rt_prog, void *rt_arg,
18 } 18 }
19 19
20 20
21/* taken from the kernel */
22
21#define likely(x) __builtin_expect((x), 1) 23#define likely(x) __builtin_expect((x), 1)
22#define unlikely(x) __builtin_expect((x), 0) 24#define unlikely(x) __builtin_expect((x), 0)
23 25
26#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
27#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
28
24#endif 29#endif
25 30
diff --git a/include/litmus.h b/include/litmus.h
index 045579e..82e2ba2 100644
--- a/include/litmus.h
+++ b/include/litmus.h
@@ -99,6 +99,7 @@ task_class_t str2class(const char* str);
99/* non-preemptive section support */ 99/* non-preemptive section support */
100void enter_np(void); 100void enter_np(void);
101void exit_np(void); 101void exit_np(void);
102int requested_to_preempt(void);
102 103
103/* task system support */ 104/* task system support */
104int wait_for_ts_release(void); 105int wait_for_ts_release(void);
diff --git a/include/perfcounters.h b/include/perfcounters.h
new file mode 100644
index 0000000..03f94fb
--- /dev/null
+++ b/include/perfcounters.h
@@ -0,0 +1,36 @@
1#ifndef PERFCOUNTERS_H
2#define PERFCOUNTERS_H
3
4#include <stdint.h>
5
6#include "../../litmus-rt/include/linux/perf_event.h"
7
8#define NR_PERF_COUNTERS 4
9
10/*
11 * Retain this information with a performance counter file descriptor.
12 */
13struct perf_counter {
14 int fd;
15 enum perf_type_id type;
16 uint64_t config;
17};
18
19
20/*
21 * Initialize a set of counters for a CPU.
22 *
23 * This is NOT thread safe!
24 *
25 * @cpu CPU
26 * @group_leader group leader PID or -1 make a new group
27 * @perf_counter array
28 * @return 0 or error
29 */
30int setup_cpu_perf(const int, const int, struct perf_counter*);
31
32const char* get_perf_name(const struct perf_counter*);
33
34int read_perf_counter(const struct perf_counter*, uint64_t*);
35
36#endif
diff --git a/src/kernel_iface.c b/src/kernel_iface.c
index 6286810..bd47d5c 100644
--- a/src/kernel_iface.c
+++ b/src/kernel_iface.c
@@ -43,6 +43,8 @@ int init_kernel_iface(void)
43 int err = 0; 43 int err = 0;
44 long page_size = sysconf(_SC_PAGESIZE); 44 long page_size = sysconf(_SC_PAGESIZE);
45 45
46 BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t));
47
46 err = map_file(LITMUS_CTRL_DEVICE, (void**) &ctrl_page, CTRL_PAGES * page_size); 48 err = map_file(LITMUS_CTRL_DEVICE, (void**) &ctrl_page, CTRL_PAGES * page_size);
47 if (err) { 49 if (err) {
48 fprintf(stderr, "%s: cannot open LITMUS^RT control page (%m)\n", 50 fprintf(stderr, "%s: cannot open LITMUS^RT control page (%m)\n",
@@ -55,7 +57,7 @@ int init_kernel_iface(void)
55void enter_np(void) 57void enter_np(void)
56{ 58{
57 if (likely(ctrl_page != NULL) || init_kernel_iface() == 0) 59 if (likely(ctrl_page != NULL) || init_kernel_iface() == 0)
58 ctrl_page->np_flag++; 60 ctrl_page->sched.np.flag++;
59 else 61 else
60 fprintf(stderr, "enter_np: control page not mapped!\n"); 62 fprintf(stderr, "enter_np: control page not mapped!\n");
61} 63}
@@ -63,10 +65,12 @@ void enter_np(void)
63 65
64void exit_np(void) 66void exit_np(void)
65{ 67{
66 if (likely(ctrl_page != NULL) && --ctrl_page->np_flag == 0) { 68 if (likely(ctrl_page != NULL) &&
69 ctrl_page->sched.np.flag &&
70 !(--ctrl_page->sched.np.flag)) {
67 /* became preemptive, let's check for delayed preemptions */ 71 /* became preemptive, let's check for delayed preemptions */
68 __sync_synchronize(); 72 __sync_synchronize();
69 if (ctrl_page->delayed_preemption) 73 if (ctrl_page->sched.np.preempt)
70 sched_yield(); 74 sched_yield();
71 } 75 }
72} 76}
@@ -78,6 +82,11 @@ void request_resource(int resource, int replicas)
78 } 82 }
79} 83}
80 84
85int requested_to_preempt(void)
86{
87 return (likely(ctrl_page != NULL) && ctrl_page->sched.np.preempt);
88}
89
81/* init and return a ptr to the control page for 90/* init and return a ptr to the control page for
82 * preemption and migration overhead analysis 91 * preemption and migration overhead analysis
83 * 92 *
diff --git a/src/syscalls.c b/src/syscalls.c
index ef1ea17..c68f15b 100644
--- a/src/syscalls.c
+++ b/src/syscalls.c
@@ -8,8 +8,6 @@
8/* for syscall() */ 8/* for syscall() */
9#include <unistd.h> 9#include <unistd.h>
10 10
11//#include <sys/types.h>
12
13#include "litmus.h" 11#include "litmus.h"
14 12
15/* Syscall stub for setting RT mode and scheduling options */ 13/* Syscall stub for setting RT mode and scheduling options */
@@ -88,8 +86,3 @@ int null_call(cycles_t *timestamp)
88{ 86{
89 return syscall(__NR_null_call, timestamp); 87 return syscall(__NR_null_call, timestamp);
90} 88}
91
92int set_rt_task_mc_param(pid_t pid, struct mc_task *param)
93{
94 return syscall(__NR_set_rt_task_mc_param, pid, param);
95}