aboutsummaryrefslogtreecommitdiffstats
path: root/include/cache_common.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/cache_common.h')
-rw-r--r--include/cache_common.h140
1 files changed, 140 insertions, 0 deletions
diff --git a/include/cache_common.h b/include/cache_common.h
new file mode 100644
index 0000000..8239233
--- /dev/null
+++ b/include/cache_common.h
@@ -0,0 +1,140 @@
1#ifndef __CACHE_COMMON_H__
2#define __CACHE_COMMON_H__
3
4#include <stdio.h>
5#include <stdlib.h>
6#include <time.h>
7#include <string.h>
8#include <assert.h>
9
10#include <signal.h>
11#include <sys/mman.h>
12#include <sys/types.h>
13#include <sys/stat.h>
14#include <fcntl.h>
15#include <unistd.h>
16
17#include <sys/io.h>
18#include <sys/utsname.h>
19
20#include <sched.h>
21#include <sys/time.h>
22#include <sys/resource.h>
23
24#include "litmus.h"
25#include "asm/cycles.h"
26
27#if defined(__i386__) || defined(__x86_64__)
28#include "asm/irq.h"
29#endif
30
31
32#define UNCACHE_DEV "/dev/litmus/uncache"
33
/*
 * Print an error message to stderr (glibc's %m appends strerror(errno))
 * and terminate the process with a failure status.  Never returns.
 *
 * Takes const char* — callers only ever pass string literals, and the
 * message is never modified.
 */
static void die(const char *error)
{
	fprintf(stderr, "Error: %s (errno: %m)\n",
		error);
	exit(EXIT_FAILURE);
}
40
/*
 * Pin all of the process's current and future pages into RAM so that
 * page faults cannot perturb cache measurements.
 * Returns 0 on success, -1 on error (see mlockall(2)).
 */
static int lock_memory(void)
{
	const int what = MCL_CURRENT | MCL_FUTURE;
	return mlockall(what);
}
45
46/* define CACHELINE_SIZE if not provided by compiler args */
47#ifndef CACHELINE_SIZE
48#if defined(__i386__) || defined(__x86_64__)
49/* recent intel cpus */
50#define CACHELINE_SIZE 64
51#elif defined(__arm__)
52/* at least with Cortex-A9 cpus ("8 words") */
53#define CACHELINE_SIZE 32
54#else
55#error "Could not determine cacheline size!"
56#endif
57#endif
58
/* Number of int slots that fit in a single cache line. */
#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
/* One hardware cache line's worth of ints; the alignment attribute
 * guarantees each cacheline_t occupies exactly one cache line, so an
 * array of them never shares lines between elements. */
typedef struct cacheline
{
	int line[INTS_IN_CACHELINE];
} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;

/* How many cacheline_t elements fit in 1 KiB. */
#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
66
67
68static cacheline_t* alloc_arena(size_t size, int use_huge_pages, int use_uncache_pages)
69{
70 int flags = MAP_PRIVATE | MAP_POPULATE;
71 cacheline_t* arena = NULL;
72 int fd;
73
74 if(use_huge_pages)
75 flags |= MAP_HUGETLB;
76
77 if(use_uncache_pages) {
78 fd = open(UNCACHE_DEV, O_RDWR);
79 if (fd == -1)
80 die("Failed to open uncache device. Are you running the LITMUS^RT kernel?");
81 }
82 else {
83 fd = -1;
84 flags |= MAP_ANONYMOUS;
85 }
86
87 arena = mmap(0, size, PROT_READ | PROT_WRITE, flags, fd, 0);
88
89 if(use_uncache_pages)
90 close(fd);
91
92 assert(arena);
93
94 return arena;
95}
96
97static void dealloc_arena(cacheline_t* arena, size_t size)
98{
99 int ret = munmap((void*)arena, size);
100 if(ret != 0)
101 die("munmap() error");
102}
103
/*
 * Return a uniformly distributed random number in [min, max).
 *
 * Rejection sampling: rand()/divisor maps onto [0, limit], and the
 * rare top value `limit` is re-drawn, so every bucket is equally
 * likely (no modulo skew).  Requires max > min.
 */
static int randrange(int min, int max)
{
	int limit = max - min;
	int divisor;     /* was misspelled "devisor" */
	int retval;

	/* BUG FIX: an empty or inverted range made RAND_MAX/limit a
	 * division by zero (or negative); fail loudly instead. */
	assert(limit > 0);
	divisor = RAND_MAX/limit;

	do {
		retval = rand() / divisor;
	} while (retval == limit);

	return retval + min;
}
118
119static void init_arena(cacheline_t* arena, size_t size)
120{
121 int i;
122 size_t num_arena_elem = size / sizeof(cacheline_t);
123
124 /* Generate a cycle among the cache lines using Sattolo's algorithm.
125 Every int in the cache line points to the same cache line.
126 Note: Sequential walk doesn't care about these values. */
127 for (i = 0; i < num_arena_elem; i++) {
128 int j;
129 for(j = 0; j < INTS_IN_CACHELINE; ++j)
130 arena[i].line[j] = i;
131 }
132 while(1 < i--) {
133 int j = randrange(0, i);
134 cacheline_t temp = arena[j];
135 arena[j] = arena[i];
136 arena[i] = temp;
137 }
138}
139
140#endif