#ifndef __UM_CACHE_H
#define __UM_CACHE_H

#include <linux/config.h>

#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
# define L1_CACHE_SHIFT		(CONFIG_X86_L1_CACHE_SHIFT)
#elif defined(CONFIG_UML_X86) /* 64-bit */
# define L1_CACHE_SHIFT		6 /* Should be 7 on Intel */
#else
/* XXX: this was taken from x86, now it's completely random. Luckily only
 * affects SMP padding. */
# define L1_CACHE_SHIFT		5
#endif

/* XXX: this is valid for x86 and x86_64. */
#define L1_CACHE_SHIFT_MAX	7	/* largest L1 which this arch supports */

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#endif /* __UM_CACHE_H */
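
/*
 * Illustrative sketch, not part of the header above: one common way
 * L1_CACHE_BYTES is consumed for the "SMP padding" the comments mention,
 * i.e. padding and aligning per-CPU data so counters owned by different
 * CPUs fall on separate cache lines and do not false-share. The struct,
 * the main() driver, and the fallback value are hypothetical, only so the
 * example builds on its own outside the kernel tree.
 */
#include <stdio.h>

#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES 32	/* assumed stand-in; the real value comes from cache.h */
#endif

/* One counter per CPU, each occupying (at least) a full cache line. */
struct padded_counter {
	unsigned long count;
	char pad[L1_CACHE_BYTES - sizeof(unsigned long)];
} __attribute__((aligned(L1_CACHE_BYTES)));

int main(void)
{
	struct padded_counter counters[4] = { { 0 } };

	/* Each array element starts on its own cache-line boundary. */
	printf("sizeof(struct padded_counter) = %zu\n", sizeof(counters[0]));
	printf("stride between CPU 0 and CPU 1 = %zu\n",
	       (size_t)((char *)&counters[1] - (char *)&counters[0]));
	return 0;
}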