author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-05-03 07:20:29 -0400
committer	Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-05-03 07:20:29 -0400
commit		5c3073e691b56dabbdec60dda4258b4e50d64872 (patch)
tree		073b46c9cb83d8e2a8e73e18849e79d193108ad2 /include
parent		5cd0c3442021fbf39c7152b341a952aa24054be9 (diff)
[PATCH] ARM: cleanup vmalloc start/offset macros
VMALLOC_START and VMALLOC_OFFSET are common between all ARM machine
classes. Move them into include/asm-arm/pgtable.h, but allow a machine
class to override them if required.

Signed-off-by: Russell King <rmk@arm.linux.org.uk>
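Under this arrangement, include/asm-arm/pgtable.h supplies the default VMALLOC_OFFSET and VMALLOC_START only when a machine class has not already defined VMALLOC_START in its <asm/arch/vmalloc.h>; every machine class must still supply VMALLOC_END. A minimal sketch of an overriding platform header under the new scheme (the "arch-foo" name and the 16MB offset are hypothetical, purely for illustration; no platform touched by this patch actually overrides the default):

/* include/asm-arm/arch-foo/vmalloc.h -- hypothetical machine class header.
 * Defining VMALLOC_START here means the generic fallback added to
 * include/asm-arm/pgtable.h (guarded by #ifndef VMALLOC_START) is skipped.
 * VMALLOC_END must always be provided by the machine class. */
#define VMALLOC_OFFSET	(16*1024*1024)	/* platform-chosen hole above lowmem */
#define VMALLOC_START	(((unsigned long)high_memory + VMALLOC_OFFSET) & \
			 ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	(PAGE_OFFSET + 0x10000000)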
Diffstat (limited to 'include')
-rw-r--r--	include/asm-arm/arch-cl7500/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-clps711x/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-ebsa110/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-ebsa285/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-epxa10db/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-h720x/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-imx/vmalloc.h		| 12
-rw-r--r--	include/asm-arm/arch-integrator/vmalloc.h	| 11
-rw-r--r--	include/asm-arm/arch-iop3xx/vmalloc.h		| 3
-rw-r--r--	include/asm-arm/arch-ixp2000/vmalloc.h		| 3
-rw-r--r--	include/asm-arm/arch-ixp4xx/vmalloc.h		| 12
-rw-r--r--	include/asm-arm/arch-l7200/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-lh7a40x/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-omap/vmalloc.h		| 12
-rw-r--r--	include/asm-arm/arch-pxa/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-rpc/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-s3c2410/vmalloc.h		| 12
-rw-r--r--	include/asm-arm/arch-sa1100/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-shark/vmalloc.h		| 11
-rw-r--r--	include/asm-arm/arch-versatile/vmalloc.h	| 12
-rw-r--r--	include/asm-arm/pgtable.h			| 17
21 files changed, 17 insertions(+), 209 deletions(-)
diff --git a/include/asm-arm/arch-cl7500/vmalloc.h b/include/asm-arm/arch-cl7500/vmalloc.h
index 91883def4889..ba8d7a84456a 100644
--- a/include/asm-arm/arch-cl7500/vmalloc.h
+++ b/include/asm-arm/arch-cl7500/vmalloc.h
@@ -1,15 +1,4 @@
 /*
  * linux/include/asm-arm/arch-cl7500/vmalloc.h
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x1c000000)
diff --git a/include/asm-arm/arch-clps711x/vmalloc.h b/include/asm-arm/arch-clps711x/vmalloc.h
index 42571ed5e493..a5dfe96abc96 100644
--- a/include/asm-arm/arch-clps711x/vmalloc.h
+++ b/include/asm-arm/arch-clps711x/vmalloc.h
@@ -17,15 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
diff --git a/include/asm-arm/arch-ebsa110/vmalloc.h b/include/asm-arm/arch-ebsa110/vmalloc.h
index 759659be109f..26674ba4683c 100644
--- a/include/asm-arm/arch-ebsa110/vmalloc.h
+++ b/include/asm-arm/arch-ebsa110/vmalloc.h
@@ -7,15 +7,4 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x1f000000)
diff --git a/include/asm-arm/arch-ebsa285/vmalloc.h b/include/asm-arm/arch-ebsa285/vmalloc.h
index def705a3c209..d1ca955ce434 100644
--- a/include/asm-arm/arch-ebsa285/vmalloc.h
+++ b/include/asm-arm/arch-ebsa285/vmalloc.h
@@ -8,17 +8,6 @@
 
 #include <linux/config.h>
 
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-
 #ifdef CONFIG_ARCH_FOOTBRIDGE
 #define VMALLOC_END (PAGE_OFFSET + 0x30000000)
 #else
diff --git a/include/asm-arm/arch-epxa10db/vmalloc.h b/include/asm-arm/arch-epxa10db/vmalloc.h
index d31ef8584760..546fb7d2b6ad 100644
--- a/include/asm-arm/arch-epxa10db/vmalloc.h
+++ b/include/asm-arm/arch-epxa10db/vmalloc.h
@@ -17,15 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
diff --git a/include/asm-arm/arch-h720x/vmalloc.h b/include/asm-arm/arch-h720x/vmalloc.h
index 4af523a5e189..b4693cb821ef 100644
--- a/include/asm-arm/arch-h720x/vmalloc.h
+++ b/include/asm-arm/arch-h720x/vmalloc.h
@@ -5,17 +5,6 @@
 #ifndef __ARCH_ARM_VMALLOC_H
 #define __ARCH_ARM_VMALLOC_H
 
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
 
 #endif
diff --git a/include/asm-arm/arch-imx/vmalloc.h b/include/asm-arm/arch-imx/vmalloc.h
index 252038f48163..cb6169127068 100644
--- a/include/asm-arm/arch-imx/vmalloc.h
+++ b/include/asm-arm/arch-imx/vmalloc.h
@@ -17,16 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
diff --git a/include/asm-arm/arch-integrator/vmalloc.h b/include/asm-arm/arch-integrator/vmalloc.h
index 50e9aee79486..170cccece523 100644
--- a/include/asm-arm/arch-integrator/vmalloc.h
+++ b/include/asm-arm/arch-integrator/vmalloc.h
@@ -17,15 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
diff --git a/include/asm-arm/arch-iop3xx/vmalloc.h b/include/asm-arm/arch-iop3xx/vmalloc.h
index dc1d2a957164..0f2f6847f93c 100644
--- a/include/asm-arm/arch-iop3xx/vmalloc.h
+++ b/include/asm-arm/arch-iop3xx/vmalloc.h
@@ -10,9 +10,6 @@
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 //#define VMALLOC_END (0xe8000000)
 /* increase usable physical RAM to ~992M per RMK */
 #define VMALLOC_END (0xfe000000)
diff --git a/include/asm-arm/arch-ixp2000/vmalloc.h b/include/asm-arm/arch-ixp2000/vmalloc.h
index 2e4bcbcf31f0..473dff4ec561 100644
--- a/include/asm-arm/arch-ixp2000/vmalloc.h
+++ b/include/asm-arm/arch-ixp2000/vmalloc.h
@@ -17,7 +17,4 @@
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END 0xfaffefff
diff --git a/include/asm-arm/arch-ixp4xx/vmalloc.h b/include/asm-arm/arch-ixp4xx/vmalloc.h
index da46e560ad6f..050d46e6b126 100644
--- a/include/asm-arm/arch-ixp4xx/vmalloc.h
+++ b/include/asm-arm/arch-ixp4xx/vmalloc.h
@@ -1,17 +1,5 @@
 /*
  * linux/include/asm-arm/arch-ixp4xx/vmalloc.h
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END (0xFF000000)
 
diff --git a/include/asm-arm/arch-l7200/vmalloc.h b/include/asm-arm/arch-l7200/vmalloc.h
index edeaebe1f14a..816231eedaac 100644
--- a/include/asm-arm/arch-l7200/vmalloc.h
+++ b/include/asm-arm/arch-l7200/vmalloc.h
@@ -1,15 +1,4 @@
 /*
  * linux/include/asm-arm/arch-l7200/vmalloc.h
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
diff --git a/include/asm-arm/arch-lh7a40x/vmalloc.h b/include/asm-arm/arch-lh7a40x/vmalloc.h
index 5ac607925bea..8163e45109b9 100644
--- a/include/asm-arm/arch-lh7a40x/vmalloc.h
+++ b/include/asm-arm/arch-lh7a40x/vmalloc.h
@@ -7,15 +7,4 @@
  * version 2 as published by the Free Software Foundation.
  *
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after
- * the physical memory until the kernel virtual memory starts. That
- * means that any out-of-bounds memory accesses will hopefully be
- * caught. The vmalloc() routines leaves a hole of 4kB (one page)
- * between each vmalloced area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (0xe8000000)
diff --git a/include/asm-arm/arch-omap/vmalloc.h b/include/asm-arm/arch-omap/vmalloc.h
index c6a83581a2fc..5b8bd8dae8be 100644
--- a/include/asm-arm/arch-omap/vmalloc.h
+++ b/include/asm-arm/arch-omap/vmalloc.h
@@ -17,17 +17,5 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
 
diff --git a/include/asm-arm/arch-pxa/vmalloc.h b/include/asm-arm/arch-pxa/vmalloc.h
index 3381af6ddb0d..5bb450c7aa2c 100644
--- a/include/asm-arm/arch-pxa/vmalloc.h
+++ b/include/asm-arm/arch-pxa/vmalloc.h
@@ -8,15 +8,4 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (0xe8000000)
diff --git a/include/asm-arm/arch-rpc/vmalloc.h b/include/asm-arm/arch-rpc/vmalloc.h
index a13c27f37d71..077046bb2f36 100644
--- a/include/asm-arm/arch-rpc/vmalloc.h
+++ b/include/asm-arm/arch-rpc/vmalloc.h
@@ -7,15 +7,4 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x1c000000)
diff --git a/include/asm-arm/arch-s3c2410/vmalloc.h b/include/asm-arm/arch-s3c2410/vmalloc.h
index 5fe72ad70904..33963cd5461b 100644
--- a/include/asm-arm/arch-s3c2410/vmalloc.h
+++ b/include/asm-arm/arch-s3c2410/vmalloc.h
@@ -19,18 +19,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END (0xE0000000)
 
 #endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/include/asm-arm/arch-sa1100/vmalloc.h b/include/asm-arm/arch-sa1100/vmalloc.h
index 135bc9493c06..2fb1c6f3aa1b 100644
--- a/include/asm-arm/arch-sa1100/vmalloc.h
+++ b/include/asm-arm/arch-sa1100/vmalloc.h
@@ -1,15 +1,4 @@
 /*
  * linux/include/asm-arm/arch-sa1100/vmalloc.h
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (0xe8000000)
diff --git a/include/asm-arm/arch-shark/vmalloc.h b/include/asm-arm/arch-shark/vmalloc.h
index 1cc20098f690..10db5d188231 100644
--- a/include/asm-arm/arch-shark/vmalloc.h
+++ b/include/asm-arm/arch-shark/vmalloc.h
@@ -1,15 +1,4 @@
 /*
  * linux/include/asm-arm/arch-rpc/vmalloc.h
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END (PAGE_OFFSET + 0x10000000)
diff --git a/include/asm-arm/arch-versatile/vmalloc.h b/include/asm-arm/arch-versatile/vmalloc.h
index adfb34829bfc..ac780df62156 100644
--- a/include/asm-arm/arch-versatile/vmalloc.h
+++ b/include/asm-arm/arch-versatile/vmalloc.h
@@ -18,16 +18,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
 #define VMALLOC_END (PAGE_OFFSET + 0x18000000)
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 2df4eacf4fa9..a9892eb42a23 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -17,6 +17,23 @@
 #include <asm/arch/vmalloc.h>
 
 /*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+#ifndef VMALLOC_START
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#endif
+
+/*
  * Hardware-wise, we have a two level page table structure, where the first
  * level has 4096 entries, and the second level has 256 entries. Each entry
  * is one 32-bit word. Most of the bits in the second level entry are used
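
As a worked illustration of the fallback arithmetic added above (the high_memory value is an assumption made only for this example): the masking expression rounds high_memory + VMALLOC_OFFSET down to an 8MB boundary, which is the next 8MB boundary strictly above the end of the kernel's direct mapping.

#include <stdio.h>

int main(void)
{
	/* Assumed end-of-lowmem address, for illustration only; the real
	 * high_memory is set up by the ARM mm code from the boot-time
	 * memory layout. */
	unsigned long high_memory = 0xc8000000UL;
	unsigned long vmalloc_offset = 8UL * 1024 * 1024;

	/* Same expression as the VMALLOC_START fallback in pgtable.h. */
	unsigned long vmalloc_start =
		(high_memory + vmalloc_offset) & ~(vmalloc_offset - 1);

	printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);	/* 0xc8800000 */
	return 0;
}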