From 103b615bbee63502d8a6a3acb408ea1e1cf0bc20 Mon Sep 17 00:00:00 2001
From: Joshua Bakita
Date: Mon, 8 Mar 2021 21:49:53 -0500
Subject: Fix the random_walk benchmark so that it builds when LITMUS is enabled

---
 dis/random_walk.c | 74 +++++++++++++++++++++++++++----------------------------
 1 file changed, 37 insertions(+), 37 deletions(-)

diff --git a/dis/random_walk.c b/dis/random_walk.c
index 9f907bb..6282487 100644
--- a/dis/random_walk.c
+++ b/dis/random_walk.c
@@ -16,7 +16,7 @@
 #include "common.h"
 */
 /* CPU time consumed so far in seconds */
-double cputime(void)
+double rw_cputime(void)
 {
 	struct timespec ts;
 	int err;
@@ -27,7 +27,7 @@ double cputime(void)
 }
 
 /* wall-clock time in seconds */
-double wctime(void)
+double rw_wctime(void)
 {
 	struct timeval tv;
 	gettimeofday(&tv, NULL);
@@ -43,25 +43,25 @@ void bail_out(const char* msg)
 #include "extra.h"
 
 #define PAGE_SIZE (4096)
-#define CACHELINE_SIZE 64
-#define INTS_IN_CACHELINE (CACHELINE_SIZE/sizeof(int))
-#define CACHELINES_IN_1KB (1024 / sizeof(cacheline_t))
+#define RW_CACHELINE_SIZE 64
+#define INTS_IN_RW_CACHELINE (RW_CACHELINE_SIZE/sizeof(int))
+#define RW_CACHELINES_IN_1KB (1024 / sizeof(rw_cacheline_t))
 #define INTS_IN_1KB (1024 / sizeof(int))
 
-typedef struct cacheline
+typedef struct rw_cacheline
 {
-	int line[INTS_IN_CACHELINE];
-} __attribute__((aligned(CACHELINE_SIZE))) cacheline_t;
+	int line[INTS_IN_RW_CACHELINE];
+} __attribute__((aligned(RW_CACHELINE_SIZE))) rw_cacheline_t;
 
-static volatile cacheline_t* arena = NULL;
+static volatile rw_cacheline_t* arena = NULL;
 
 #define UNCACHE_DEV "/dev/litmus/uncache"
 #define FAKE_DEV "/dev/litmus/fakedev0"
 
-static cacheline_t* alloc_arena(size_t size, int use_huge_pages, int use_uncache_pages)
+static rw_cacheline_t* alloc_arena(size_t size, int use_huge_pages, int use_uncache_pages)
 {
 	int flags = MAP_PRIVATE | MAP_POPULATE;
-	cacheline_t* arena = NULL;
+	rw_cacheline_t* arena = NULL;
 	int fd;
 
 	if(use_huge_pages)
@@ -80,7 +80,7 @@ static cacheline_t* alloc_arena(size_t size, int use_huge_pages, int use_uncache
 		flags |= MAP_ANONYMOUS;
 	}
 
-	arena = (cacheline_t*)mmap(0, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+	arena = (rw_cacheline_t*)mmap(0, size, PROT_READ | PROT_WRITE, flags, fd, 0);
 
 	if(use_uncache_pages)
 		close(fd);
@@ -90,7 +90,7 @@ static cacheline_t* alloc_arena(size_t size, int use_huge_pages, int use_uncache
 	return arena;
 }
 
-static void dealloc_arena(cacheline_t* arena, size_t size)
+static void dealloc_arena(rw_cacheline_t* arena, size_t size)
 {
 	int ret = munmap((void*)arena, size);
 	if(ret != 0)
@@ -116,17 +116,17 @@ static int randrange(int min, int max)
 	return retval;*/
 }
 
-static void init_arena(volatile cacheline_t* arena, size_t size)
+static void init_arena(volatile rw_cacheline_t* arena, size_t size)
 {
 	int i;
-	size_t num_arena_elem = size / sizeof(cacheline_t);
+	size_t num_arena_elem = size / sizeof(rw_cacheline_t);
 	/* Generate a cycle among the cache lines using Sattolo's algorithm.
 	   Every int in the cache line points to the same cache line.
 	   Note: Sequential walk doesn't care about these values.
 	*/
 	for (i = 0; i < num_arena_elem; i++) {
 		int j;
-		for(j = 0; j < INTS_IN_CACHELINE; j++)
+		for(j = 0; j < INTS_IN_RW_CACHELINE; j++)
 			arena[i].line[j] = i;
 		arena[i].line[1] = 0;
 	}
@@ -138,7 +138,7 @@ static void init_arena(volatile cacheline_t* arena, size_t size)
 	}*/
 	for (int j = 0; j < num_arena_elem-1; j++) {
 		int k = randrange(j+1, num_arena_elem);
-		cacheline_t temp = arena[j];
+		rw_cacheline_t temp = arena[j];
 		arena[j] = arena[k];
 		arena[k] = temp;
 	}
@@ -147,14 +147,14 @@ static void init_arena(volatile cacheline_t* arena, size_t size)
 
 /* Random walk around the arena in cacheline-sized chunks.
    Cacheline-sized chucks ensures the same utilization of each
    hit line as sequential read. (Otherwise, our utilization
-   would only be 1/INTS_IN_CACHELINE.) */
-static int random_walk(volatile cacheline_t *mem, int wss, int write_cycle)
+   would only be 1/INTS_IN_RW_CACHELINE.) */
+static int random_walk(volatile rw_cacheline_t *mem, int wss, int write_cycle)
 {
 	/* a random cycle among the cache lines was set up by init_arena(). */
 	int sum, i, next;
 
 	// Always do the same number of hops
-	int numlines = 33554432 / CACHELINE_SIZE;//wss * CACHELINES_IN_1KB;
+	int numlines = 33554432 / RW_CACHELINE_SIZE;//wss * RW_CACHELINES_IN_1KB;
 
 	sum = 0;
@@ -167,7 +167,7 @@ static int random_walk(volatile cacheline_t *mem, int wss, int write_cycle)
 			next = arena[next].line[0];
 			arena[next].line[1] = 1; // Record that we touched this line
 			sum += next;
-			for (int j = 2; j < INTS_IN_CACHELINE; j++)
+			for (int j = 2; j < INTS_IN_RW_CACHELINE; j++)
 				arena[next].line[j]++;
 		}
 	} else {
@@ -187,9 +187,9 @@ static int random_walk(volatile cacheline_t *mem, int wss, int write_cycle)
 	return sum;
 }
 
-volatile static cacheline_t* random_start(int wss)
+volatile static rw_cacheline_t* random_start(int wss)
 {
-	return arena + randrange(0, ((wss * 1024)/sizeof(cacheline_t)));
+	return arena + randrange(0, ((wss * 1024)/sizeof(rw_cacheline_t)));
 }
 
 /* static int sequential_walk(cacheline_t *_mem, int wss, int write_cycle)
@@ -217,7 +217,7 @@ static cacheline_t* sequential_start(int wss)
 {
 	static int pos = 0;
 
-	int num_cachelines = wss * CACHELINES_IN_1KB;
+	int num_cachelines = wss * RW_CACHELINES_IN_1KB;
 
 	cacheline_t *mem;
 
@@ -245,7 +245,7 @@ static volatile int dont_optimize_me = 0;
 #define RANDOM_WALK 1
 static int loop_once(int wss, int write)
 {
-	volatile cacheline_t *mem;
+	volatile rw_cacheline_t *mem;
 	int temp;
 
 #ifdef RANDOM_WALK
@@ -275,7 +275,7 @@ int main(int argc, char** argv) {
 	}
 	printf("random_walk: Using parameters: %ld, %lf, %d\n", wss, duration, write);
 	wss /= 1024;
-	program_end = duration + wctime();
+	program_end = duration + rw_wctime();
 	// Initialize memory
 	size_t arena_sz = wss*1024;
 	arena = alloc_arena(arena_sz*2, 0, 0);
@@ -285,23 +285,23 @@ int main(int argc, char** argv) {
 
 	while (1) {
 		double emergency_exit = program_end + 1;
-		if (wctime() > program_end) {
+		if (rw_wctime() > program_end) {
 			break;
 		} else {
 			double last_loop = 0, loop_start;
 			int tmp = 0;
 
-			double start = cputime();
-			double now = cputime();
+			double start = rw_cputime();
+			double now = rw_cputime();
 			//while (now + last_loop < start) {// + exec_time) {
 				loop_start = now;
 				START_LOOP
 				tmp += loop_once(wss, write);
 				STOP_LOOP
-				now = cputime();
+				now = rw_cputime();
 				last_loop = now - loop_start;
 
-				if (emergency_exit && wctime() > emergency_exit) {
+				if (emergency_exit && rw_wctime() > emergency_exit) {
					/* Oops --- this should only be possible if the execution time tracking
					 * is broken in the LITMUS^RT kernel. */
					fprintf(stderr, "!!! rtspin/%d emergency exit!\n", getpid());
@@ -310,11 +310,11 @@ int main(int argc, char** argv) {
 		}
 		long sum = 0;
 		// Verify that we actually traversed the whole wss
-		for (volatile cacheline_t* loc = arena; loc < arena + (wss*1024)/sizeof(cacheline_t); loc++) {
+		for (volatile rw_cacheline_t* loc = arena; loc < arena + (wss*1024)/sizeof(rw_cacheline_t); loc++) {
 			sum += loc->line[1];
 		}
-		if (sum != wss * CACHELINES_IN_1KB) {
-			fprintf(stderr, "We hopped the wrong number of times! Hops: %ld, should have been: %ld\n", sum, wss * CACHELINES_IN_1KB);
+		if (sum != wss * RW_CACHELINES_IN_1KB) {
+			fprintf(stderr, "We hopped the wrong number of times! Hops: %ld, should have been: %ld\n", sum, wss * RW_CACHELINES_IN_1KB);
 		}
 	//}
 
@@ -498,7 +498,7 @@ int main(int argc, char** argv)
 	else
 		set_page_color(config.cpu);
 
-	start = wctime();
+	start = rw_wctime();
 	ret = task_mode(LITMUS_RT_TASK);
 	if (ret != 0)
 		bail_out("could not become RT task");
@@ -507,7 +507,7 @@ int main(int argc, char** argv)
 		ret = wait_for_ts_release();
 		if (ret != 0)
 			bail_out("wait_for_ts_release()");
-		start = wctime();
+		start = rw_wctime();
 	}
 
 	cp = get_ctrl_page();
@@ -515,7 +515,7 @@ int main(int argc, char** argv)
 	if (verbose) {
 		get_job_no(&job_no);
 		fprintf(stderr, "rtspin/%d:%u @ %.4fms\n", gettid(),
-			job_no, (wctime() - start) * 1000);
+			job_no, (rw_wctime() - start) * 1000);
 		if (cp) {
 			double deadline, current, release;
 			lt_t now = litmus_clock();
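
Note on the change: every hunk above is a mechanical rename -- cputime()/wctime() become rw_cputime()/rw_wctime(), and cacheline_t plus its size macros gain an rw_/RW_ prefix -- presumably so the benchmark's local helpers no longer collide with identically named symbols pulled in when LITMUS support is compiled in (that reading is inferred from the subject line; the patch itself does not name the conflicting symbols). Below is a minimal, self-contained sketch of what the two renamed timing helpers plausibly do, based only on the struct timespec / struct timeval usage visible in the context lines. The clock ID, the error handling, and the small main() driver are assumptions added to make the sketch runnable; they are not code from this repository.

/* sketch.c -- illustrative only, not part of the patch */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

/* CPU time consumed so far in seconds; the clock ID is an assumption. */
static double rw_cputime(void)
{
	struct timespec ts;
	int err = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
	if (err != 0)
		perror("clock_gettime");
	return ts.tv_sec + 1E-9 * ts.tv_nsec;
}

/* wall-clock time in seconds */
static double rw_wctime(void)
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec + 1E-6 * tv.tv_usec;
}

/* Tiny driver: spin briefly and report both clocks. */
int main(void)
{
	double wc0 = rw_wctime(), cpu0 = rw_cputime();
	volatile unsigned long sink = 0;
	for (unsigned long i = 0; i < 100000000UL; i++)
		sink += i;
	printf("wall: %.6f s, cpu: %.6f s (sink=%lu)\n",
	       rw_wctime() - wc0, rw_cputime() - cpu0, (unsigned long)sink);
	return 0;
}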