author    Linus Torvalds <torvalds@linux-foundation.org>  2009-06-12 16:15:17 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-06-12 16:15:17 -0400
commit    02a99ed6207e9a1d787bb360ef97de023c7edf4a (patch)
tree      f5818df7dd3f3741d02afbdd4271deed48c41f3d /arch/microblaze/include
parent    2b10dc45d15150434d7f206264e912eacbff734b (diff)
parent    3447ef29a7f3b1fd0d8d58376950e695e04f6f8b (diff)
Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze
* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (55 commits)
  microblaze: Don't use access_ok for unaligned
  microblaze: remove unused flat_stack_align() definition
  microblaze: Fix problem with early_printk in startup
  microblaze_mmu_v2: Makefiles
  microblaze_mmu_v2: Kconfig update
  microblaze_mmu_v2: stat.h MMU update
  microblaze_mmu_v2: Elf update
  microblaze_mmu_v2: Update dma.h for MMU
  microblaze_mmu_v2: Update cacheflush.h
  microblaze_mmu_v2: Update signal returning address
  microblaze_mmu_v2: Traps MMU update
  microblaze_mmu_v2: Enable fork syscall for MMU and add fork as vfork for noMMU
  microblaze_mmu_v2: Update linker script for MMU
  microblaze_mmu_v2: Add MMU related exceptions handling
  microblaze_mmu_v2: uaccess MMU update
  microblaze_mmu_v2: Update exception handling - MMU exception
  microblaze_mmu_v2: entry.S, entry.h
  microblaze_mmu_v2: Add CURRENT_TASK for entry.S
  microblaze_mmu_v2: MMU asm offset update
  microblaze_mmu_v2: Update tlb.h and tlbflush.h
  ...
Diffstat (limited to 'arch/microblaze/include')
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 25
-rw-r--r--  arch/microblaze/include/asm/cacheflush.h | 20
-rw-r--r--  arch/microblaze/include/asm/checksum.h | 14
-rw-r--r--  arch/microblaze/include/asm/current.h | 8
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h | 130
-rw-r--r--  arch/microblaze/include/asm/dma.h | 5
-rw-r--r--  arch/microblaze/include/asm/elf.h | 93
-rw-r--r--  arch/microblaze/include/asm/entry.h | 37
-rw-r--r--  arch/microblaze/include/asm/exceptions.h | 24
-rw-r--r--  arch/microblaze/include/asm/flat.h | 1
-rw-r--r--  arch/microblaze/include/asm/gpio.h | 6
-rw-r--r--  arch/microblaze/include/asm/io.h | 31
-rw-r--r--  arch/microblaze/include/asm/mmu.h | 104
-rw-r--r--  arch/microblaze/include/asm/mmu_context.h | 26
-rw-r--r--  arch/microblaze/include/asm/mmu_context_mm.h | 140
-rw-r--r--  arch/microblaze/include/asm/mmu_context_no.h | 23
-rw-r--r--  arch/microblaze/include/asm/page.h | 166
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 191
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 538
-rw-r--r--  arch/microblaze/include/asm/posix_types.h | 2
-rw-r--r--  arch/microblaze/include/asm/processor.h | 95
-rw-r--r--  arch/microblaze/include/asm/ptrace.h | 1
-rw-r--r--  arch/microblaze/include/asm/registers.h | 21
-rw-r--r--  arch/microblaze/include/asm/sections.h | 3
-rw-r--r--  arch/microblaze/include/asm/segment.h | 20
-rw-r--r--  arch/microblaze/include/asm/setup.h | 10
-rw-r--r--  arch/microblaze/include/asm/stat.h | 77
-rw-r--r--  arch/microblaze/include/asm/string.h | 2
-rw-r--r--  arch/microblaze/include/asm/syscalls.h | 3
-rw-r--r--  arch/microblaze/include/asm/thread_info.h | 20
-rw-r--r--  arch/microblaze/include/asm/tlb.h | 8
-rw-r--r--  arch/microblaze/include/asm/tlbflush.h | 48
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 305
-rw-r--r--  arch/microblaze/include/asm/unaligned.h | 3
34 files changed, 1836 insertions, 364 deletions
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 31820dfef56b..db5294c30caf 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,26 +1,3 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += auxvec.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += linkage.h
-header-y += msgbuf.h
-header-y += poll.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += socket.h
-header-y += sockios.h
-header-y += statfs.h
-header-y += stat.h
-header-y += termbits.h
-header-y += ucontext.h
-
-unifdef-y += cputable.h
-unifdef-y += elf.h
-unifdef-y += termios.h
+header-y += elf.h
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 3300b785049b..f989d6aad648 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2007 PetaLogix
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
  * based on v850 version which was
  * Copyright (C) 2001,02,03 NEC Electronics Corporation
@@ -43,6 +44,23 @@
 #define flush_icache_range(start, len) __invalidate_icache_range(start, len)
 #define flush_icache_page(vma, pg) do { } while (0)
 
+#ifndef CONFIG_MMU
+# define flush_icache_user_range(start, len) do { } while (0)
+#else
+# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
+
+# define flush_page_to_ram(page) do { } while (0)
+
+# define flush_icache() __invalidate_icache_all()
+# define flush_cache_sigtramp(vaddr) \
+                __invalidate_icache_range(vaddr, vaddr + 8)
+
+# define flush_dcache_mmap_lock(mapping) do { } while (0)
+# define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+# define flush_cache_dup_mm(mm) do { } while (0)
+#endif
+
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
 
diff --git a/arch/microblaze/include/asm/checksum.h b/arch/microblaze/include/asm/checksum.h
index 92b30762ce59..97ea46b5cf80 100644
--- a/arch/microblaze/include/asm/checksum.h
+++ b/arch/microblaze/include/asm/checksum.h
@@ -51,7 +51,8 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
+extern __wsum csum_partial_copy(const void *src, void *dst, int len,
+                                __wsum sum);
 
 /*
  * the same as csum_partial_copy, but copies from user space.
@@ -59,8 +60,8 @@ extern __wsum csum_partial_copy(const char *src, char *dst, int len, int sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_from_user(const char *src, char *dst,
-                                int len, int sum, int *csum_err);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+                                int len, __wsum sum, int *csum_err);
 
 #define csum_partial_copy_nocheck(src, dst, len, sum) \
         csum_partial_copy((src), (dst), (len), (sum))
@@ -75,11 +76,12 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 /*
  * Fold a partial checksum
  */
-static inline __sum16 csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+        u32 sum = (__force u32)csum;
         sum = (sum & 0xffff) + (sum >> 16);
         sum = (sum & 0xffff) + (sum >> 16);
-        return ~sum;
+        return (__force __sum16)~sum;
 }
 
 static inline __sum16
@@ -93,6 +95,6 @@ csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-extern __sum16 ip_compute_csum(const unsigned char *buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #endif /* _ASM_MICROBLAZE_CHECKSUM_H */
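For readers tracing the csum_fold() change above: the folding arithmetic is unchanged, only the types become __wsum/__sum16. Below is a minimal stand-alone sketch of the same fold in plain C types so it compiles outside the kernel; the sample input value is arbitrary and not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Collapse a 32-bit partial one's-complement sum into 16 bits and invert
 * it, mirroring the folding done by csum_fold() in the hunk above. */
static uint16_t fold_csum(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the high half into the low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* a second pass absorbs the last carry */
        return (uint16_t)~sum;
}

int main(void)
{
        /* 0x0001ffff folds to 0x0001, which inverts to 0xfffe. */
        printf("folded checksum: 0x%04x\n", fold_csum(0x0001ffffu));
        return 0;
}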
diff --git a/arch/microblaze/include/asm/current.h b/arch/microblaze/include/asm/current.h
index 8375ea991e26..29303ed825cc 100644
--- a/arch/microblaze/include/asm/current.h
+++ b/arch/microblaze/include/asm/current.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,12 @@
 #ifndef _ASM_MICROBLAZE_CURRENT_H
 #define _ASM_MICROBLAZE_CURRENT_H
 
+/*
+ * Register used to hold the current task pointer while in the kernel.
+ * Any `call clobbered' register without a special meaning should be OK,
+ * but check asm/microblaze/kernel/entry.S to be sure.
+ */
+#define CURRENT_TASK r31
 # ifndef __ASSEMBLY__
 /*
  * Dedicate r31 to keeping the current task pointer
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 17336252a9b8..d00e40099165 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -1,129 +1 @@
1/* #include <asm-generic/dma-mapping-broken.h>
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
10#define _ASM_MICROBLAZE_DMA_MAPPING_H
11
12#include <asm/cacheflush.h>
13#include <linux/io.h>
14#include <linux/bug.h>
15
16struct scatterlist;
17
18#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
19#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
20
21/* FIXME */
22static inline int
23dma_supported(struct device *dev, u64 mask)
24{
25 return 1;
26}
27
28static inline dma_addr_t
29dma_map_page(struct device *dev, struct page *page,
30 unsigned long offset, size_t size,
31 enum dma_data_direction direction)
32{
33 BUG();
34 return 0;
35}
36
37static inline void
38dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
39 enum dma_data_direction direction)
40{
41 BUG();
42}
43
44static inline int
45dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
46 enum dma_data_direction direction)
47{
48 BUG();
49 return 0;
50}
51
52static inline void
53dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
54 enum dma_data_direction direction)
55{
56 BUG();
57}
58
59static inline void
60dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
61 enum dma_data_direction direction)
62{
63 BUG();
64}
65
66static inline void
67dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
68 size_t size, enum dma_data_direction direction)
69{
70 BUG();
71}
72
73static inline void
74dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
75 enum dma_data_direction direction)
76{
77 BUG();
78}
79
80static inline void
81dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
82 enum dma_data_direction direction)
83{
84 BUG();
85}
86
87static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
88{
89 return 0;
90}
91
92static inline void *dma_alloc_coherent(struct device *dev, size_t size,
93 dma_addr_t *dma_handle, int flag)
94{
95 return NULL; /* consistent_alloc(flag, size, dma_handle); */
96}
97
98static inline void dma_free_coherent(struct device *dev, size_t size,
99 void *vaddr, dma_addr_t dma_handle)
100{
101 BUG();
102}
103
104static inline dma_addr_t
105dma_map_single(struct device *dev, void *ptr, size_t size,
106 enum dma_data_direction direction)
107{
108 BUG_ON(direction == DMA_NONE);
109
110 return virt_to_bus(ptr);
111}
112
113static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
114 size_t size,
115 enum dma_data_direction direction)
116{
117 switch (direction) {
118 case DMA_FROM_DEVICE:
119 flush_dcache_range((unsigned)dma_addr,
120 (unsigned)dma_addr + size);
121 /* Fall through */
122 case DMA_TO_DEVICE:
123 break;
124 default:
125 BUG();
126 }
127}
128
129#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/include/asm/dma.h b/arch/microblaze/include/asm/dma.h
index 0967fa04fc5e..08c073badf19 100644
--- a/arch/microblaze/include/asm/dma.h
+++ b/arch/microblaze/include/asm/dma.h
@@ -9,8 +9,13 @@
 #ifndef _ASM_MICROBLAZE_DMA_H
 #define _ASM_MICROBLAZE_DMA_H
 
+#ifndef CONFIG_MMU
 /* we don't have dma address limit. define it as zero to be
  * unlimited. */
 #define MAX_DMA_ADDRESS (0)
+#else
+/* Virtual address corresponding to last available physical memory address. */
+#define MAX_DMA_ADDRESS (CONFIG_KERNEL_START + memory_size - 1)
+#endif
 
 #endif /* _ASM_MICROBLAZE_DMA_H */
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index 81337f241347..f92fc0dda006 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -27,4 +29,95 @@
27 */ 29 */
28#define ELF_CLASS ELFCLASS32 30#define ELF_CLASS ELFCLASS32
29 31
32#ifndef __uClinux__
33
34/*
35 * ELF register definitions..
36 */
37
38#include <asm/ptrace.h>
39#include <asm/byteorder.h>
40
41#ifndef ELF_GREG_T
42#define ELF_GREG_T
43typedef unsigned long elf_greg_t;
44#endif
45
46#ifndef ELF_NGREG
47#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
48#endif
49
50#ifndef ELF_GREGSET_T
51#define ELF_GREGSET_T
52typedef elf_greg_t elf_gregset_t[ELF_NGREG];
53#endif
54
55#ifndef ELF_FPREGSET_T
56#define ELF_FPREGSET_T
57
58/* TBD */
59#define ELF_NFPREG 33 /* includes fsr */
60typedef unsigned long elf_fpreg_t;
61typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
62
63/* typedef struct user_fpu_struct elf_fpregset_t; */
64#endif
65
66/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
67 * use of this is to invoke "./ld.so someprog" to test out a new version of
68 * the loader. We need to make sure that it is out of the way of the program
69 * that it will "exec", and that there is sufficient room for the brk.
70 */
71
72#define ELF_ET_DYN_BASE (0x08000000)
73
74#ifdef __LITTLE_ENDIAN__
75#define ELF_DATA ELFDATA2LSB
76#else
77#define ELF_DATA ELFDATA2MSB
78#endif
79
80#define USE_ELF_CORE_DUMP
81#define ELF_EXEC_PAGESIZE 4096
82
83
84#define ELF_CORE_COPY_REGS(_dest, _regs) \
85 memcpy((char *) &_dest, (char *) _regs, \
86 sizeof(struct pt_regs));
87
88/* This yields a mask that user programs can use to figure out what
89 * instruction set this CPU supports. This could be done in user space,
90 * but it's not easy, and we've already done it here.
91 */
92#define ELF_HWCAP (0)
93
94/* This yields a string that ld.so will use to load implementation
95 * specific libraries for optimization. This is more specific in
96 * intent than poking at uname or /proc/cpuinfo.
97
98 * For the moment, we have only optimizations for the Intel generations,
99 * but that could change...
100 */
101#define ELF_PLATFORM (NULL)
102
103/* Added _f parameter. Is this definition correct: TBD */
104#define ELF_PLAT_INIT(_r, _f) \
105do { \
106 _r->r1 = _r->r1 = _r->r2 = _r->r3 = \
107 _r->r4 = _r->r5 = _r->r6 = _r->r7 = \
108 _r->r8 = _r->r9 = _r->r10 = _r->r11 = \
109 _r->r12 = _r->r13 = _r->r14 = _r->r15 = \
110 _r->r16 = _r->r17 = _r->r18 = _r->r19 = \
111 _r->r20 = _r->r21 = _r->r22 = _r->r23 = \
112 _r->r24 = _r->r25 = _r->r26 = _r->r27 = \
113 _r->r28 = _r->r29 = _r->r30 = _r->r31 = \
114 0; \
115} while (0)
116
117#ifdef __KERNEL__
118#define SET_PERSONALITY(ex) set_personality(PER_LINUX_32BIT)
119#endif
120
121#endif /* __uClinux__ */
122
30#endif /* _ASM_MICROBLAZE_ELF_H */ 123#endif /* _ASM_MICROBLAZE_ELF_H */
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index e4c3aef884df..61abbd232640 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -1,8 +1,8 @@
 /*
  * Definitions used by low-level trap handlers
  *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2007 - 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
  *
  * This file is subject to the terms and conditions of the GNU General
@@ -31,7 +31,40 @@ DECLARE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
 DECLARE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
 # endif /* __ASSEMBLY__ */
 
+#ifndef CONFIG_MMU
+
 /* noMMU hasn't any space for args */
 # define STATE_SAVE_ARG_SPACE (0)
 
+#else /* CONFIG_MMU */
+
+/* If true, system calls save and restore all registers (except result
+ * registers, of course). If false, then `call clobbered' registers
+ * will not be preserved, on the theory that system calls are basically
+ * function calls anyway, and the caller should be able to deal with it.
+ * This is a security risk, of course, as `internal' values may leak out
+ * after a system call, but that certainly doesn't matter very much for
+ * a processor with no MMU protection! For a protected-mode kernel, it
+ * would be faster to just zero those registers before returning.
+ *
+ * I can not rely on the glibc implementation. If you turn it off make
+ * sure that r11/r12 is saved in user-space. --KAA
+ *
+ * These are special variables using by the kernel trap/interrupt code
+ * to save registers in, at a time when there are no spare registers we
+ * can use to do so, and we can't depend on the value of the stack
+ * pointer. This means that they must be within a signed 16-bit
+ * displacement of 0x00000000.
+ */
+
+/* A `state save frame' is a struct pt_regs preceded by some extra space
+ * suitable for a function call stack frame. */
+
+/* Amount of room on the stack reserved for arguments and to satisfy the
+ * C calling conventions, in addition to the space used by the struct
+ * pt_regs that actually holds saved values. */
+#define STATE_SAVE_ARG_SPACE (6*4) /* Up to six arguments */
+
+#endif /* CONFIG_MMU */
+
 #endif /* _ASM_MICROBLAZE_ENTRY_H */
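As a reading aid for the state-save-frame comment above: on the MMU kernel the frame is a struct pt_regs preceded by STATE_SAVE_ARG_SPACE bytes of argument room. The sketch below only illustrates that arithmetic; the register struct is a placeholder, not the real MicroBlaze pt_regs.

#include <stdio.h>

#define STATE_SAVE_ARG_SPACE    (6 * 4)         /* up to six word-sized arguments, as above */

struct example_regs {                           /* placeholder for struct pt_regs */
        unsigned long r[32];
        unsigned long pc, msr, ear, esr, fsr;
};

int main(void)
{
        printf("argument area : %d bytes\n", STATE_SAVE_ARG_SPACE);
        printf("register frame: %zu bytes (example layout)\n", sizeof(struct example_regs));
        printf("total frame   : %zu bytes\n",
               STATE_SAVE_ARG_SPACE + sizeof(struct example_regs));
        return 0;
}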
diff --git a/arch/microblaze/include/asm/exceptions.h b/arch/microblaze/include/asm/exceptions.h
index 24ca540e77c0..90731df9e574 100644
--- a/arch/microblaze/include/asm/exceptions.h
+++ b/arch/microblaze/include/asm/exceptions.h
@@ -1,8 +1,8 @@
 /*
  * Preliminary support for HW exception handing for Microblaze
  *
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
  *
  * This file is subject to the terms and conditions of the GNU General
@@ -64,21 +64,13 @@ asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
 void die(const char *str, struct pt_regs *fp, long err);
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
 
-#if defined(CONFIG_XMON)
-extern void xmon(struct pt_regs *regs);
-extern int xmon_bpt(struct pt_regs *regs);
-extern int xmon_sstep(struct pt_regs *regs);
-extern int xmon_iabr_match(struct pt_regs *regs);
-extern int xmon_dabr_match(struct pt_regs *regs);
-extern void (*xmon_fault_handler)(struct pt_regs *regs);
+#ifdef CONFIG_MMU
+void __bug(const char *file, int line, void *data);
+int bad_trap(int trap_num, struct pt_regs *regs);
+int debug_trap(struct pt_regs *regs);
+#endif /* CONFIG_MMU */
 
-void (*debugger)(struct pt_regs *regs) = xmon;
-int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
-int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
-int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
-int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
-void (*debugger_fault_handler)(struct pt_regs *regs);
-#elif defined(CONFIG_KGDB)
+#if defined(CONFIG_KGDB)
 void (*debugger)(struct pt_regs *regs);
 int (*debugger_bpt)(struct pt_regs *regs);
 int (*debugger_sstep)(struct pt_regs *regs);
diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h
index acf0da543ef1..6847c1512c7b 100644
--- a/arch/microblaze/include/asm/flat.h
+++ b/arch/microblaze/include/asm/flat.h
@@ -13,7 +13,6 @@
 
 #include <asm/unaligned.h>
 
-#define flat_stack_align(sp) /* nothing needed */
 #define flat_argvp_envp_on_stack() 0
 #define flat_old_ram_flag(flags) (flags)
 #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/microblaze/include/asm/gpio.h b/arch/microblaze/include/asm/gpio.h
index ea04632399d8..2345ac354d9b 100644
--- a/arch/microblaze/include/asm/gpio.h
+++ b/arch/microblaze/include/asm/gpio.h
@@ -11,8 +11,8 @@
  * (at your option) any later version.
  */
 
-#ifndef __ASM_POWERPC_GPIO_H
-#define __ASM_POWERPC_GPIO_H
+#ifndef _ASM_MICROBLAZE_GPIO_H
+#define _ASM_MICROBLAZE_GPIO_H
 
 #include <linux/errno.h>
 #include <asm-generic/gpio.h>
@@ -53,4 +53,4 @@ static inline int irq_to_gpio(unsigned int irq)
 
 #endif /* CONFIG_GPIOLIB */
 
-#endif /* __ASM_POWERPC_GPIO_H */
+#endif /* _ASM_MICROBLAZE_GPIO_H */
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 8b5853ee6b5c..5c173424d074 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -12,6 +14,9 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/mm.h> /* Get struct page {...} */
+
 
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
@@ -112,6 +117,30 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
 #define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
 
+#ifdef CONFIG_MMU
+
+#define mm_ptov(addr) ((void *)__phys_to_virt(addr))
+#define mm_vtop(addr) ((unsigned long)__virt_to_phys(addr))
+#define phys_to_virt(addr) ((void *)__phys_to_virt(addr))
+#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
+#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
+
+#define __page_address(page) \
+                (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
+#define page_to_bus(page) (page_to_phys(page))
+#define bus_to_virt(addr) (phys_to_virt(addr))
+
+extern void iounmap(void *addr);
+/*extern void *__ioremap(phys_addr_t address, unsigned long size,
+                unsigned long flags);*/
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+#define ioremap_writethrough(addr, size) ioremap((addr), (size))
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_fullcache(addr, size) ioremap((addr), (size))
+
+#else /* CONFIG_MMU */
+
 /**
  * virt_to_phys - map virtual addresses to physical
  * @address: address to remap
@@ -160,6 +189,8 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define iounmap(addr) ((void)0)
 #define ioremap_nocache(physaddr, size) ioremap(physaddr, size)
 
+#endif /* CONFIG_MMU */
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
diff --git a/arch/microblaze/include/asm/mmu.h b/arch/microblaze/include/asm/mmu.h
index 0e0431d61635..66cad6a99d77 100644
--- a/arch/microblaze/include/asm/mmu.h
+++ b/arch/microblaze/include/asm/mmu.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,11 +11,109 @@
9#ifndef _ASM_MICROBLAZE_MMU_H 11#ifndef _ASM_MICROBLAZE_MMU_H
10#define _ASM_MICROBLAZE_MMU_H 12#define _ASM_MICROBLAZE_MMU_H
11 13
12#ifndef __ASSEMBLY__ 14# ifndef CONFIG_MMU
15# ifndef __ASSEMBLY__
13typedef struct { 16typedef struct {
14 struct vm_list_struct *vmlist; 17 struct vm_list_struct *vmlist;
15 unsigned long end_brk; 18 unsigned long end_brk;
16} mm_context_t; 19} mm_context_t;
17#endif /* __ASSEMBLY__ */ 20# endif /* __ASSEMBLY__ */
21# else /* CONFIG_MMU */
22# ifdef __KERNEL__
23# ifndef __ASSEMBLY__
18 24
25/* Default "unsigned long" context */
26typedef unsigned long mm_context_t;
27
28/* Hardware Page Table Entry */
29typedef struct _PTE {
30 unsigned long v:1; /* Entry is valid */
31 unsigned long vsid:24; /* Virtual segment identifier */
32 unsigned long h:1; /* Hash algorithm indicator */
33 unsigned long api:6; /* Abbreviated page index */
34 unsigned long rpn:20; /* Real (physical) page number */
35 unsigned long :3; /* Unused */
36 unsigned long r:1; /* Referenced */
37 unsigned long c:1; /* Changed */
38 unsigned long w:1; /* Write-thru cache mode */
39 unsigned long i:1; /* Cache inhibited */
40 unsigned long m:1; /* Memory coherence */
41 unsigned long g:1; /* Guarded */
42 unsigned long :1; /* Unused */
43 unsigned long pp:2; /* Page protection */
44} PTE;
45
46/* Values for PP (assumes Ks=0, Kp=1) */
47# define PP_RWXX 0 /* Supervisor read/write, User none */
48# define PP_RWRX 1 /* Supervisor read/write, User read */
49# define PP_RWRW 2 /* Supervisor read/write, User read/write */
50# define PP_RXRX 3 /* Supervisor read, User read */
51
52/* Segment Register */
53typedef struct _SEGREG {
54 unsigned long t:1; /* Normal or I/O type */
55 unsigned long ks:1; /* Supervisor 'key' (normally 0) */
56 unsigned long kp:1; /* User 'key' (normally 1) */
57 unsigned long n:1; /* No-execute */
58 unsigned long :4; /* Unused */
59 unsigned long vsid:24; /* Virtual Segment Identifier */
60} SEGREG;
61
62extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
63extern void _tlbia(void); /* invalidate all TLB entries */
64# endif /* __ASSEMBLY__ */
65
66/*
67 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
68 * instruction and data sides share a unified, 64-entry, semi-associative
69 * TLB which is maintained totally under software control. In addition, the
70 * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
71 * TLB which serves as a first level to the shared TLB. These two TLBs are
72 * known as the UTLB and ITLB, respectively.
73 */
74
75# define MICROBLAZE_TLB_SIZE 64
76
77/*
78 * TLB entries are defined by a "high" tag portion and a "low" data
79 * portion. The data portion is 32-bits.
80 *
81 * TLB entries are managed entirely under software control by reading,
82 * writing, and searching using the MTS and MFS instructions.
83 */
84
85# define TLB_LO 1
86# define TLB_HI 0
87# define TLB_DATA TLB_LO
88# define TLB_TAG TLB_HI
89
90/* Tag portion */
91# define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
92# define TLB_PAGESZ_MASK 0x00000380
93# define TLB_PAGESZ(x) (((x) & 0x7) << 7)
94# define PAGESZ_1K 0
95# define PAGESZ_4K 1
96# define PAGESZ_16K 2
97# define PAGESZ_64K 3
98# define PAGESZ_256K 4
99# define PAGESZ_1M 5
100# define PAGESZ_4M 6
101# define PAGESZ_16M 7
102# define TLB_VALID 0x00000040 /* Entry is valid */
103
104/* Data portion */
105# define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
106# define TLB_PERM_MASK 0x00000300
107# define TLB_EX 0x00000200 /* Instruction execution allowed */
108# define TLB_WR 0x00000100 /* Writes permitted */
109# define TLB_ZSEL_MASK 0x000000F0
110# define TLB_ZSEL(x) (((x) & 0xF) << 4)
111# define TLB_ATTR_MASK 0x0000000F
112# define TLB_W 0x00000008 /* Caching is write-through */
113# define TLB_I 0x00000004 /* Caching is inhibited */
114# define TLB_M 0x00000002 /* Memory is coherent */
115# define TLB_G 0x00000001 /* Memory is guarded from prefetch */
116
117# endif /* __KERNEL__ */
118# endif /* CONFIG_MMU */
19#endif /* _ASM_MICROBLAZE_MMU_H */ 119#endif /* _ASM_MICROBLAZE_MMU_H */
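As a reading aid for the TLB_* macros added to mmu.h above, here is a hedged sketch of how the tag and data words could be composed from those bit fields. The addresses are placeholders, and the actual TLB-load sequence lives in the kernel's MMU exception code, not in this snippet.

#include <stdio.h>

/* Field definitions copied from the mmu.h hunk above. */
#define TLB_EPN_MASK    0xFFFFFC00UL    /* Effective Page Number (tag word) */
#define TLB_PAGESZ(x)   (((x) & 0x7UL) << 7)
#define PAGESZ_4K       1
#define TLB_VALID       0x00000040UL

#define TLB_RPN_MASK    0xFFFFFC00UL    /* Real Page Number (data word) */
#define TLB_EX          0x00000200UL    /* instruction execution allowed */
#define TLB_WR          0x00000100UL    /* writes permitted */

int main(void)
{
        unsigned long va = 0xC0001000UL;        /* placeholder virtual address */
        unsigned long pa = 0x10001000UL;        /* placeholder physical address */

        /* Tag word: effective page number, page size field, valid bit. */
        unsigned long tag  = (va & TLB_EPN_MASK) | TLB_PAGESZ(PAGESZ_4K) | TLB_VALID;
        /* Data word: real page number plus permission bits. */
        unsigned long data = (pa & TLB_RPN_MASK) | TLB_EX | TLB_WR;

        printf("TLB_TAG  = 0x%08lx\n", tag);
        printf("TLB_DATA = 0x%08lx\n", data);
        return 0;
}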
diff --git a/arch/microblaze/include/asm/mmu_context.h b/arch/microblaze/include/asm/mmu_context.h
index 150ca01b74ba..385fed16bbfb 100644
--- a/arch/microblaze/include/asm/mmu_context.h
+++ b/arch/microblaze/include/asm/mmu_context.h
@@ -1,21 +1,5 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
-#define _ASM_MICROBLAZE_MMU_CONTEXT_H
-
-# define init_new_context(tsk, mm) ({ 0; })
-
-# define enter_lazy_tlb(mm, tsk) do {} while (0)
-# define change_mm_context(old, ctx, _pml4) do {} while (0)
-# define destroy_context(mm) do {} while (0)
-# define deactivate_mm(tsk, mm) do {} while (0)
-# define switch_mm(prev, next, tsk) do {} while (0)
-# define activate_mm(prev, next) do {} while (0)
-
-#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
+#ifdef CONFIG_MMU
+# include "mmu_context_mm.h"
+#else
+# include "mmu_context_no.h"
+#endif
diff --git a/arch/microblaze/include/asm/mmu_context_mm.h b/arch/microblaze/include/asm/mmu_context_mm.h
new file mode 100644
index 000000000000..3e5c254e8d1c
--- /dev/null
+++ b/arch/microblaze/include/asm/mmu_context_mm.h
@@ -0,0 +1,140 @@
1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
12#define _ASM_MICROBLAZE_MMU_CONTEXT_H
13
14#include <asm/atomic.h>
15#include <asm/bitops.h>
16#include <asm/mmu.h>
17#include <asm-generic/mm_hooks.h>
18
19# ifdef __KERNEL__
20/*
21 * This function defines the mapping from contexts to VSIDs (virtual
22 * segment IDs). We use a skew on both the context and the high 4 bits
23 * of the 32-bit virtual address (the "effective segment ID") in order
24 * to spread out the entries in the MMU hash table.
25 */
26# define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
27 & 0xffffff)
28
29/*
30 MicroBlaze has 256 contexts, so we can just rotate through these
31 as a way of "switching" contexts. If the TID of the TLB is zero,
32 the PID/TID comparison is disabled, so we can use a TID of zero
33 to represent all kernel pages as shared among all contexts.
34 */
35
36static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
37{
38}
39
40# define NO_CONTEXT 256
41# define LAST_CONTEXT 255
42# define FIRST_CONTEXT 1
43
44/*
45 * Set the current MMU context.
46 * This is done byloading up the segment registers for the user part of the
47 * address space.
48 *
49 * Since the PGD is immediately available, it is much faster to simply
50 * pass this along as a second parameter, which is required for 8xx and
51 * can be used for debugging on all processors (if you happen to have
52 * an Abatron).
53 */
54extern void set_context(mm_context_t context, pgd_t *pgd);
55
56/*
57 * Bitmap of contexts in use.
58 * The size of this bitmap is LAST_CONTEXT + 1 bits.
59 */
60extern unsigned long context_map[];
61
62/*
63 * This caches the next context number that we expect to be free.
64 * Its use is an optimization only, we can't rely on this context
65 * number to be free, but it usually will be.
66 */
67extern mm_context_t next_mmu_context;
68
69/*
70 * Since we don't have sufficient contexts to give one to every task
71 * that could be in the system, we need to be able to steal contexts.
72 * These variables support that.
73 */
74extern atomic_t nr_free_contexts;
75extern struct mm_struct *context_mm[LAST_CONTEXT+1];
76extern void steal_context(void);
77
78/*
79 * Get a new mmu context for the address space described by `mm'.
80 */
81static inline void get_mmu_context(struct mm_struct *mm)
82{
83 mm_context_t ctx;
84
85 if (mm->context != NO_CONTEXT)
86 return;
87 while (atomic_dec_if_positive(&nr_free_contexts) < 0)
88 steal_context();
89 ctx = next_mmu_context;
90 while (test_and_set_bit(ctx, context_map)) {
91 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
92 if (ctx > LAST_CONTEXT)
93 ctx = 0;
94 }
95 next_mmu_context = (ctx + 1) & LAST_CONTEXT;
96 mm->context = ctx;
97 context_mm[ctx] = mm;
98}
99
100/*
101 * Set up the context for a new address space.
102 */
103# define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
104
105/*
106 * We're finished using the context for an address space.
107 */
108static inline void destroy_context(struct mm_struct *mm)
109{
110 if (mm->context != NO_CONTEXT) {
111 clear_bit(mm->context, context_map);
112 mm->context = NO_CONTEXT;
113 atomic_inc(&nr_free_contexts);
114 }
115}
116
117static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
118 struct task_struct *tsk)
119{
120 tsk->thread.pgdir = next->pgd;
121 get_mmu_context(next);
122 set_context(next->context, next->pgd);
123}
124
125/*
126 * After we have set current->mm to a new value, this activates
127 * the context for the new mm so we see the new mappings.
128 */
129static inline void activate_mm(struct mm_struct *active_mm,
130 struct mm_struct *mm)
131{
132 current->thread.pgdir = mm->pgd;
133 get_mmu_context(mm);
134 set_context(mm->context, mm->pgd);
135}
136
137extern void mmu_context_init(void);
138
139# endif /* __KERNEL__ */
140#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
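The context-allocation scheme documented in mmu_context_mm.h above (256 contexts, a bitmap of contexts in use, a rotating next_mmu_context hint) can be modelled in a few lines of user-space C. This is only an illustration of the rotation idea under those assumptions; context stealing and the kernel's atomic bookkeeping are deliberately omitted.

#include <stdio.h>

#define LAST_CONTEXT    255
#define FIRST_CONTEXT   1

static unsigned char context_in_use[LAST_CONTEXT + 1];
static unsigned int next_mmu_context = FIRST_CONTEXT;

/* Hand out the next free context, rotating through the usable ones.
 * Context stealing (what the kernel does when none are free) is omitted,
 * so this toy loops forever if every context is taken. */
static unsigned int get_context(void)
{
        unsigned int ctx = next_mmu_context;

        /* Skip contexts already in use; context 0 stays reserved here,
         * mirroring the "TID 0 = shared kernel pages" note above. */
        while (ctx < FIRST_CONTEXT || context_in_use[ctx])
                ctx = (ctx + 1) & LAST_CONTEXT;

        context_in_use[ctx] = 1;
        next_mmu_context = (ctx + 1) & LAST_CONTEXT;
        return ctx;
}

int main(void)
{
        printf("first task gets context %u\n", get_context());  /* 1 */
        printf("second task gets context %u\n", get_context()); /* 2 */
        return 0;
}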
diff --git a/arch/microblaze/include/asm/mmu_context_no.h b/arch/microblaze/include/asm/mmu_context_no.h
new file mode 100644
index 000000000000..ba5567190154
--- /dev/null
+++ b/arch/microblaze/include/asm/mmu_context_no.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
+#define _ASM_MICROBLAZE_MMU_CONTEXT_H
+
+# define init_new_context(tsk, mm) ({ 0; })
+
+# define enter_lazy_tlb(mm, tsk) do {} while (0)
+# define change_mm_context(old, ctx, _pml4) do {} while (0)
+# define destroy_context(mm) do {} while (0)
+# define deactivate_mm(tsk, mm) do {} while (0)
+# define switch_mm(prev, next, tsk) do {} while (0)
+# define activate_mm(prev, next) do {} while (0)
+
+#endif /* _ASM_MICROBLAZE_MMU_CONTEXT_H */
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 7238dcfcc517..210e584974f7 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -1,6 +1,8 @@
1/* 1/*
2 * Copyright (C) 2008 Michal Simek 2 * VM ops
3 * Copyright (C) 2008 PetaLogix 3 *
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 6 * Copyright (C) 2006 Atmark Techno, Inc.
5 * Changes for MMU support: 7 * Changes for MMU support:
6 * Copyright (C) 2007 Xilinx, Inc. All rights reserved. 8 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
@@ -15,14 +17,15 @@
15 17
16#include <linux/pfn.h> 18#include <linux/pfn.h>
17#include <asm/setup.h> 19#include <asm/setup.h>
20#include <linux/const.h>
21
22#ifdef __KERNEL__
18 23
19/* PAGE_SHIFT determines the page size */ 24/* PAGE_SHIFT determines the page size */
20#define PAGE_SHIFT (12) 25#define PAGE_SHIFT (12)
21#define PAGE_SIZE (1UL << PAGE_SHIFT) 26#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
22#define PAGE_MASK (~(PAGE_SIZE-1)) 27#define PAGE_MASK (~(PAGE_SIZE-1))
23 28
24#ifdef __KERNEL__
25
26#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
27 30
28#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1))) 31#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
@@ -35,6 +38,7 @@
35/* align addr on a size boundary - adjust address up if needed */ 38/* align addr on a size boundary - adjust address up if needed */
36#define _ALIGN(addr, size) _ALIGN_UP(addr, size) 39#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
37 40
41#ifndef CONFIG_MMU
38/* 42/*
39 * PAGE_OFFSET -- the first address of the first page of memory. When not 43 * PAGE_OFFSET -- the first address of the first page of memory. When not
40 * using MMU this corresponds to the first free page in physical memory (aligned 44 * using MMU this corresponds to the first free page in physical memory (aligned
@@ -43,15 +47,44 @@
43extern unsigned int __page_offset; 47extern unsigned int __page_offset;
44#define PAGE_OFFSET __page_offset 48#define PAGE_OFFSET __page_offset
45 49
46#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 50#else /* CONFIG_MMU */
47#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
48#define free_user_page(page, addr) free_page(addr)
49 51
50#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) 52/*
53 * PAGE_OFFSET -- the first address of the first page of memory. With MMU
54 * it is set to the kernel start address (aligned on a page boundary).
55 *
56 * CONFIG_KERNEL_START is defined in arch/microblaze/config.in and used
57 * in arch/microblaze/Makefile.
58 */
59#define PAGE_OFFSET CONFIG_KERNEL_START
51 60
61/*
62 * MAP_NR -- given an address, calculate the index of the page struct which
63 * points to the address's page.
64 */
65#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
52 66
53#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) 67/*
54#define copy_user_page(vto, vfrom, vaddr, topg) \ 68 * The basic type of a PTE - 32 bit physical addressing.
69 */
70typedef unsigned long pte_basic_t;
71#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
72#define PTE_FMT "%.8lx"
73
74#endif /* CONFIG_MMU */
75
76# ifndef CONFIG_MMU
77# define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
78# define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
79# define free_user_page(page, addr) free_page(addr)
80# else /* CONFIG_MMU */
81extern void copy_page(void *to, void *from);
82# endif /* CONFIG_MMU */
83
84# define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
85
86# define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
87# define copy_user_page(vto, vfrom, vaddr, topg) \
55 memcpy((vto), (vfrom), PAGE_SIZE) 88 memcpy((vto), (vfrom), PAGE_SIZE)
56 89
57/* 90/*
@@ -60,21 +93,32 @@ extern unsigned int __page_offset;
60typedef struct page *pgtable_t; 93typedef struct page *pgtable_t;
61typedef struct { unsigned long pte; } pte_t; 94typedef struct { unsigned long pte; } pte_t;
62typedef struct { unsigned long pgprot; } pgprot_t; 95typedef struct { unsigned long pgprot; } pgprot_t;
96/* FIXME this can depend on linux kernel version */
97# ifdef CONFIG_MMU
98typedef struct { unsigned long pmd; } pmd_t;
99typedef struct { unsigned long pgd; } pgd_t;
100# else /* CONFIG_MMU */
63typedef struct { unsigned long ste[64]; } pmd_t; 101typedef struct { unsigned long ste[64]; } pmd_t;
64typedef struct { pmd_t pue[1]; } pud_t; 102typedef struct { pmd_t pue[1]; } pud_t;
65typedef struct { pud_t pge[1]; } pgd_t; 103typedef struct { pud_t pge[1]; } pgd_t;
104# endif /* CONFIG_MMU */
66 105
106# define pte_val(x) ((x).pte)
107# define pgprot_val(x) ((x).pgprot)
67 108
68#define pte_val(x) ((x).pte) 109# ifdef CONFIG_MMU
69#define pgprot_val(x) ((x).pgprot) 110# define pmd_val(x) ((x).pmd)
70#define pmd_val(x) ((x).ste[0]) 111# define pgd_val(x) ((x).pgd)
71#define pud_val(x) ((x).pue[0]) 112# else /* CONFIG_MMU */
72#define pgd_val(x) ((x).pge[0]) 113# define pmd_val(x) ((x).ste[0])
114# define pud_val(x) ((x).pue[0])
115# define pgd_val(x) ((x).pge[0])
116# endif /* CONFIG_MMU */
73 117
74#define __pte(x) ((pte_t) { (x) }) 118# define __pte(x) ((pte_t) { (x) })
75#define __pmd(x) ((pmd_t) { (x) }) 119# define __pmd(x) ((pmd_t) { (x) })
76#define __pgd(x) ((pgd_t) { (x) }) 120# define __pgd(x) ((pgd_t) { (x) })
77#define __pgprot(x) ((pgprot_t) { (x) }) 121# define __pgprot(x) ((pgprot_t) { (x) })
78 122
79/** 123/**
80 * Conversions for virtual address, physical address, pfn, and struct 124 * Conversions for virtual address, physical address, pfn, and struct
@@ -94,44 +138,80 @@ extern unsigned long max_low_pfn;
94extern unsigned long min_low_pfn; 138extern unsigned long min_low_pfn;
95extern unsigned long max_pfn; 139extern unsigned long max_pfn;
96 140
97#define __pa(vaddr) ((unsigned long) (vaddr)) 141extern unsigned long memory_start;
98#define __va(paddr) ((void *) (paddr)) 142extern unsigned long memory_end;
143extern unsigned long memory_size;
99 144
100#define phys_to_pfn(phys) (PFN_DOWN(phys)) 145extern int page_is_ram(unsigned long pfn);
101#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
102 146
103#define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr)))) 147# define phys_to_pfn(phys) (PFN_DOWN(phys))
104#define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) 148# define pfn_to_phys(pfn) (PFN_PHYS(pfn))
105 149
106#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) 150# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
107#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) 151# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
108 152
109#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) 153# ifdef CONFIG_MMU
110#define page_to_bus(page) (page_to_phys(page)) 154# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
111#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) 155# else /* CONFIG_MMU */
156# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
157# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
158# define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
159# define page_to_bus(page) (page_to_phys(page))
160# define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
161# endif /* CONFIG_MMU */
112 162
113extern unsigned int memory_start; 163# ifndef CONFIG_MMU
114extern unsigned int memory_end; 164# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
115extern unsigned int memory_size; 165# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
166# else /* CONFIG_MMU */
167# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
168# define pfn_valid(pfn) ((pfn) < (max_mapnr + ARCH_PFN_OFFSET))
169# define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
170# endif /* CONFIG_MMU */
116 171
117#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr) 172# endif /* __ASSEMBLY__ */
118 173
119#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 174#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
120 175
121#else
122#define tophys(rd, rs) (addik rd, rs, 0)
123#define tovirt(rd, rs) (addik rd, rs, 0)
124#endif /* __ASSEMBLY__ */
125 176
126#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr))) 177# ifndef CONFIG_MMU
178# define __pa(vaddr) ((unsigned long) (vaddr))
179# define __va(paddr) ((void *) (paddr))
180# else /* CONFIG_MMU */
181# define __pa(x) __virt_to_phys((unsigned long)(x))
182# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
183# endif /* CONFIG_MMU */
184
127 185
128/* Convert between virtual and physical address for MMU. */ 186/* Convert between virtual and physical address for MMU. */
129/* Handle MicroBlaze processor with virtual memory. */ 187/* Handle MicroBlaze processor with virtual memory. */
188#ifndef CONFIG_MMU
130#define __virt_to_phys(addr) addr 189#define __virt_to_phys(addr) addr
131#define __phys_to_virt(addr) addr 190#define __phys_to_virt(addr) addr
191#define tophys(rd, rs) addik rd, rs, 0
192#define tovirt(rd, rs) addik rd, rs, 0
193#else
194#define __virt_to_phys(addr) \
195 ((addr) + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
196#define __phys_to_virt(addr) \
197 ((addr) + CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
198#define tophys(rd, rs) \
199 addik rd, rs, (CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START)
200#define tovirt(rd, rs) \
201 addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
202#endif /* CONFIG_MMU */
132 203
133#define TOPHYS(addr) __virt_to_phys(addr) 204#define TOPHYS(addr) __virt_to_phys(addr)
134 205
206#ifdef CONFIG_MMU
207#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
208#define WANT_PAGE_VIRTUAL 1 /* page alloc 2 relies on this */
209#endif
210
211#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
212 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
213#endif /* CONFIG_MMU */
214
135#endif /* __KERNEL__ */ 215#endif /* __KERNEL__ */
136 216
137#include <asm-generic/memory_model.h> 217#include <asm-generic/memory_model.h>
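The page.h hunk above switches the MMU kernel to a fixed linear offset between kernel virtual and physical addresses (CONFIG_KERNEL_START vs. CONFIG_KERNEL_BASE_ADDR). Below is a small stand-alone model of that arithmetic; the base addresses are placeholders chosen for the example, not values taken from the patch.

#include <stdio.h>

/* Placeholder bases: a kernel linked at virtual 0xC0000000 and loaded at
 * physical 0x10000000. The real values come from CONFIG_KERNEL_START and
 * CONFIG_KERNEL_BASE_ADDR at build time. */
#define EXAMPLE_KERNEL_START            0xC0000000UL
#define EXAMPLE_KERNEL_BASE_ADDR        0x10000000UL

static unsigned long model_virt_to_phys(unsigned long vaddr)
{
        return vaddr + EXAMPLE_KERNEL_BASE_ADDR - EXAMPLE_KERNEL_START;
}

static unsigned long model_phys_to_virt(unsigned long paddr)
{
        return paddr + EXAMPLE_KERNEL_START - EXAMPLE_KERNEL_BASE_ADDR;
}

int main(void)
{
        unsigned long v = 0xC0123456UL;
        unsigned long p = model_virt_to_phys(v);

        printf("virt 0x%08lx -> phys 0x%08lx\n", v, p);
        printf("round trip back to virt: 0x%08lx\n", model_phys_to_virt(p));
        return 0;
}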
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 2a4b35484010..59a757e46ba5 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,195 @@
9#ifndef _ASM_MICROBLAZE_PGALLOC_H 11#ifndef _ASM_MICROBLAZE_PGALLOC_H
10#define _ASM_MICROBLAZE_PGALLOC_H 12#define _ASM_MICROBLAZE_PGALLOC_H
11 13
14#ifdef CONFIG_MMU
15
16#include <linux/kernel.h> /* For min/max macros */
17#include <linux/highmem.h>
18#include <asm/setup.h>
19#include <asm/io.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22
23#define PGDIR_ORDER 0
24
25/*
26 * This is handled very differently on MicroBlaze since out page tables
27 * are all 0's and I want to be able to use these zero'd pages elsewhere
28 * as well - it gives us quite a speedup.
29 * -- Cort
30 */
31extern struct pgtable_cache_struct {
32 unsigned long *pgd_cache;
33 unsigned long *pte_cache;
34 unsigned long pgtable_cache_sz;
35} quicklists;
36
37#define pgd_quicklist (quicklists.pgd_cache)
38#define pmd_quicklist ((unsigned long *)0)
39#define pte_quicklist (quicklists.pte_cache)
40#define pgtable_cache_size (quicklists.pgtable_cache_sz)
41
42extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
43extern atomic_t zero_sz; /* # currently pre-zero'd pages */
44extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
45extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
46extern atomic_t zerototal; /* # pages zero'd over time */
47
48#define zero_quicklist (zero_cache)
49#define zero_cache_sz (zero_sz)
50#define zero_cache_calls (zeropage_calls)
51#define zero_cache_hits (zeropage_hits)
52#define zero_cache_total (zerototal)
53
54/*
55 * return a pre-zero'd page from the list,
56 * return NULL if none available -- Cort
57 */
58extern unsigned long get_zero_page_fast(void);
59
60extern void __bad_pte(pmd_t *pmd);
61
62extern inline pgd_t *get_pgd_slow(void)
63{
64 pgd_t *ret;
65
66 ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
67 if (ret != NULL)
68 clear_page(ret);
69 return ret;
70}
71
72extern inline pgd_t *get_pgd_fast(void)
73{
74 unsigned long *ret;
75
76 ret = pgd_quicklist;
77 if (ret != NULL) {
78 pgd_quicklist = (unsigned long *)(*ret);
79 ret[0] = 0;
80 pgtable_cache_size--;
81 } else
82 ret = (unsigned long *)get_pgd_slow();
83 return (pgd_t *)ret;
84}
85
86extern inline void free_pgd_fast(pgd_t *pgd)
87{
88 *(unsigned long **)pgd = pgd_quicklist;
89 pgd_quicklist = (unsigned long *) pgd;
90 pgtable_cache_size++;
91}
92
93extern inline void free_pgd_slow(pgd_t *pgd)
94{
95 free_page((unsigned long)pgd);
96}
97
98#define pgd_free(mm, pgd) free_pgd_fast(pgd)
99#define pgd_alloc(mm) get_pgd_fast()
100
101#define pmd_pgtable(pmd) pmd_page(pmd)
102
103/*
104 * We don't have any real pmd's, and this code never triggers because
105 * the pgd will always be present..
106 */
107#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
108#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
109/* FIXME two definition - look below */
110#define pmd_free(mm, x) do { } while (0)
111#define pgd_populate(mm, pmd, pte) BUG()
112
113static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
114 unsigned long address)
115{
116 pte_t *pte;
117 extern int mem_init_done;
118 extern void *early_get_page(void);
119 if (mem_init_done) {
120 pte = (pte_t *)__get_free_page(GFP_KERNEL |
121 __GFP_REPEAT | __GFP_ZERO);
122 } else {
123 pte = (pte_t *)early_get_page();
124 if (pte)
125 clear_page(pte);
126 }
127 return pte;
128}
129
130static inline struct page *pte_alloc_one(struct mm_struct *mm,
131 unsigned long address)
132{
133 struct page *ptepage;
134
135#ifdef CONFIG_HIGHPTE
136 int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
137#else
138 int flags = GFP_KERNEL | __GFP_REPEAT;
139#endif
140
141 ptepage = alloc_pages(flags, 0);
142 if (ptepage)
143 clear_highpage(ptepage);
144 return ptepage;
145}
146
147static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
148 unsigned long address)
149{
150 unsigned long *ret;
151
152 ret = pte_quicklist;
153 if (ret != NULL) {
154 pte_quicklist = (unsigned long *)(*ret);
155 ret[0] = 0;
156 pgtable_cache_size--;
157 }
158 return (pte_t *)ret;
159}
160
161extern inline void pte_free_fast(pte_t *pte)
162{
163 *(unsigned long **)pte = pte_quicklist;
164 pte_quicklist = (unsigned long *) pte;
165 pgtable_cache_size++;
166}
167
168extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
169{
170 free_page((unsigned long)pte);
171}
172
173extern inline void pte_free_slow(struct page *ptepage)
174{
175 __free_page(ptepage);
176}
177
178extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
179{
180 __free_page(ptepage);
181}
182
183#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
184
185#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
186
187#define pmd_populate_kernel(mm, pmd, pte) \
188 (pmd_val(*(pmd)) = (unsigned long) (pte))
189
190/*
191 * We don't have any real pmd's, and this code never triggers because
192 * the pgd will always be present..
193 */
194#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
195/*#define pmd_free(mm, x) do { } while (0)*/
196#define __pmd_free_tlb(tlb, x) do { } while (0)
197#define pgd_populate(mm, pmd, pte) BUG()
198
199extern int do_check_pgt_cache(int, int);
200
201#endif /* CONFIG_MMU */
202
12#define check_pgt_cache() do {} while (0) 203#define check_pgt_cache() do {} while (0)
13 204
14#endif /* _ASM_MICROBLAZE_PGALLOC_H */ 205#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4df31e46568e..4c57a586a989 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2008-2009 PetaLogix
2 * Copyright (C) 2006 Atmark Techno, Inc. 4 * Copyright (C) 2006 Atmark Techno, Inc.
3 * 5 *
4 * This file is subject to the terms and conditions of the GNU General Public 6 * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,8 @@
14#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 16#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
15 remap_pfn_range(vma, vaddr, pfn, size, prot) 17 remap_pfn_range(vma, vaddr, pfn, size, prot)
16 18
19#ifndef CONFIG_MMU
20
17#define pgd_present(pgd) (1) /* pages are always present on non MMU */ 21#define pgd_present(pgd) (1) /* pages are always present on non MMU */
18#define pgd_none(pgd) (0) 22#define pgd_none(pgd) (0)
19#define pgd_bad(pgd) (0) 23#define pgd_bad(pgd) (0)
@@ -27,6 +31,8 @@
27#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */ 31#define PAGE_READONLY __pgprot(0) /* these mean nothing to non MMU */
28#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */ 32#define PAGE_KERNEL __pgprot(0) /* these mean nothing to non MMU */
29 33
34#define pgprot_noncached(x) (x)
35
30#define __swp_type(x) (0) 36#define __swp_type(x) (0)
31#define __swp_offset(x) (0) 37#define __swp_offset(x) (0)
32#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) 38#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
@@ -45,6 +51,538 @@ static inline int pte_file(pte_t pte) { return 0; }
45 51
46#define arch_enter_lazy_cpu_mode() do {} while (0) 52#define arch_enter_lazy_cpu_mode() do {} while (0)
47 53
54#else /* CONFIG_MMU */
55
56#include <asm-generic/4level-fixup.h>
57
58#ifdef __KERNEL__
59#ifndef __ASSEMBLY__
60
61#include <linux/sched.h>
62#include <linux/threads.h>
63#include <asm/processor.h> /* For TASK_SIZE */
64#include <asm/mmu.h>
65#include <asm/page.h>
66
67#define FIRST_USER_ADDRESS 0
68
69extern unsigned long va_to_phys(unsigned long address);
70extern pte_t *va_to_pte(unsigned long address);
71extern unsigned long ioremap_bot, ioremap_base;
72
73/*
74 * The following only work if pte_present() is true.
75 * Undefined behaviour if not..
76 */
77
78static inline int pte_special(pte_t pte) { return 0; }
79
80static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
81
82/* Start and end of the vmalloc area. */
83/* Make sure to map the vmalloc area above the pinned kernel memory area
84 of 32Mb. */
85#define VMALLOC_START (CONFIG_KERNEL_START + \
86 max(32 * 1024 * 1024UL, memory_size))
87#define VMALLOC_END ioremap_bot
88#define VMALLOC_VMADDR(x) ((unsigned long)(x))
89
90#endif /* __ASSEMBLY__ */
91
92/*
93 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
94 * table containing PTEs, together with a set of 16 segment registers, to
95 * define the virtual to physical address mapping.
96 *
97 * We use the hash table as an extended TLB, i.e. a cache of currently
98 * active mappings. We maintain a two-level page table tree, much
99 * like that used by the i386, for the sake of the Linux memory
100 * management code. Low-level assembler code in hashtable.S
101 * (procedure hash_page) is responsible for extracting ptes from the
102 * tree and putting them into the hash table when necessary, and
103 * updating the accessed and modified bits in the page table tree.
104 */
105
106/*
107 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
108 * instruction and data sides share a unified, 64-entry, semi-associative
109 * TLB which is maintained totally under software control. In addition, the
110 * instruction side has a hardware-managed, 2,4, or 8-entry, fully-associative
111 * TLB which serves as a first level to the shared TLB. These two TLBs are
112 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
113 */
114
115/*
116 * The normal case is that PTEs are 32-bits and we have a 1-page
117 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
118 *
119 */
120
121/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
122#define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT)
123#define PMD_SIZE (1UL << PMD_SHIFT)
124#define PMD_MASK (~(PMD_SIZE-1))
125
126/* PGDIR_SHIFT determines what a top-level page table entry can map */
127#define PGDIR_SHIFT PMD_SHIFT
128#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
129#define PGDIR_MASK (~(PGDIR_SIZE-1))
130
131/*
132 * entries per page directory level: our page-table tree is two-level, so
133 * we don't really have any PMD directory.
134 */
135#define PTRS_PER_PTE (1 << PTE_SHIFT)
136#define PTRS_PER_PMD 1
137#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
138
139#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
140#define FIRST_USER_PGD_NR 0
141
142#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
143#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
144
145#define pte_ERROR(e) \
146 printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
147 __FILE__, __LINE__, pte_val(e))
148#define pmd_ERROR(e) \
149 printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
150 __FILE__, __LINE__, pmd_val(e))
151#define pgd_ERROR(e) \
152 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
153 __FILE__, __LINE__, pgd_val(e))
154
155/*
156 * Bits in a linux-style PTE. These match the bits in the
157 * (hardware-defined) PTE as closely as possible.
158 */
159
160/* There are several potential gotchas here. The hardware TLBLO
161 * field looks like this:
162 *
163 * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
164 * RPN..................... 0 0 EX WR ZSEL....... W I M G
165 *
166 * Where possible we make the Linux PTE bits match up with this
167 *
168 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
169 * support down to 1k pages), this is done in the TLBMiss exception
170 * handler.
171 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
172 * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
173 * miss handler. Bit 27 is PAGE_USER, thus selecting the correct
174 * zone.
175 * - PRESENT *must* be in the bottom two bits because swap cache
176 * entries use the top 30 bits. Because 4xx doesn't support SMP
177 * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
178 * is cleared in the TLB miss handler before the TLB entry is loaded.
179 * - All other bits of the PTE are loaded into TLBLO without
180 * * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
181 * software PTE bits. We actually use use bits 21, 24, 25, and
182 * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
183 * PRESENT.
184 */
185
186/* Definitions for MicroBlaze. */
187#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
188#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
189#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
190#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
191#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
192#define _PAGE_RW 0x040 /* software: Writes permitted */
193#define _PAGE_DIRTY 0x080 /* software: dirty page */
194#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
195#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
196#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
197#define _PMD_PRESENT PAGE_MASK
198
199/*
200 * Some bits are unused...
201 */
202#ifndef _PAGE_HASHPTE
203#define _PAGE_HASHPTE 0
204#endif
205#ifndef _PTE_NONE_MASK
206#define _PTE_NONE_MASK 0
207#endif
208#ifndef _PAGE_SHARED
209#define _PAGE_SHARED 0
210#endif
211#ifndef _PAGE_HWWRITE
212#define _PAGE_HWWRITE 0
213#endif
214#ifndef _PAGE_HWEXEC
215#define _PAGE_HWEXEC 0
216#endif
217#ifndef _PAGE_EXEC
218#define _PAGE_EXEC 0
219#endif
220
221#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
222
223/*
224 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
225 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
226 * to have it in the Linux PTE, and in fact the bit could be reused for
227 * another purpose. -- paulus.
228 */
229#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
230#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
231
232#define _PAGE_KERNEL \
233 (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)
234
235#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
236
237#define PAGE_NONE __pgprot(_PAGE_BASE)
238#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
239#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
240#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
241#define PAGE_SHARED_X \
242 __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
243#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
244#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
245
246#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
247#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
248#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)
249
250/*
251 * We consider execute permission the same as read.
252 * Also, write permissions imply read permissions.
253 */
254#define __P000 PAGE_NONE
255#define __P001 PAGE_READONLY_X
256#define __P010 PAGE_COPY
257#define __P011 PAGE_COPY_X
258#define __P100 PAGE_READONLY
259#define __P101 PAGE_READONLY_X
260#define __P110 PAGE_COPY
261#define __P111 PAGE_COPY_X
262
263#define __S000 PAGE_NONE
264#define __S001 PAGE_READONLY_X
265#define __S010 PAGE_SHARED
266#define __S011 PAGE_SHARED_X
267#define __S100 PAGE_READONLY
268#define __S101 PAGE_READONLY_X
269#define __S110 PAGE_SHARED
270#define __S111 PAGE_SHARED_X
271
272#ifndef __ASSEMBLY__
273/*
274 * ZERO_PAGE is a global shared page that is always zero: used
275 * for zero-mapped memory areas etc..
276 */
277extern unsigned long empty_zero_page[1024];
278#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
279
280#endif /* __ASSEMBLY__ */
281
282#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
283#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
284#define pte_clear(mm, addr, ptep) \
285 do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
286
287#define pmd_none(pmd) (!pmd_val(pmd))
288#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
289#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
290#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
291
292#define pte_page(x) (mem_map + (unsigned long) \
293 ((pte_val(x) - memory_start) >> PAGE_SHIFT))
294#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
295
296#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
297
298#define pfn_pte(pfn, prot) \
299 __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
300
301#ifndef __ASSEMBLY__
302/*
303 * The "pgd_xxx()" functions here are trivial for a folded two-level
304 * setup: the pgd is never bad, and a pmd always exists (as it's folded
305 * into the pgd entry)
306 */
307static inline int pgd_none(pgd_t pgd) { return 0; }
308static inline int pgd_bad(pgd_t pgd) { return 0; }
309static inline int pgd_present(pgd_t pgd) { return 1; }
310#define pgd_clear(xp) do { } while (0)
311#define pgd_page(pgd) \
312 ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
313
314/*
315 * The following only work if pte_present() is true.
316 * Undefined behaviour if not..
317 */
318static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
319static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
320static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
321static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
322static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
323/* FIXME */
324static inline int pte_file(pte_t pte) { return 0; }
325
326static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
327static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
328
329static inline pte_t pte_rdprotect(pte_t pte) \
330 { pte_val(pte) &= ~_PAGE_USER; return pte; }
331static inline pte_t pte_wrprotect(pte_t pte) \
332 { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
333static inline pte_t pte_exprotect(pte_t pte) \
334 { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
335static inline pte_t pte_mkclean(pte_t pte) \
336 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
337static inline pte_t pte_mkold(pte_t pte) \
338 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
339
340static inline pte_t pte_mkread(pte_t pte) \
341 { pte_val(pte) |= _PAGE_USER; return pte; }
342static inline pte_t pte_mkexec(pte_t pte) \
343 { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
344static inline pte_t pte_mkwrite(pte_t pte) \
345 { pte_val(pte) |= _PAGE_RW; return pte; }
346static inline pte_t pte_mkdirty(pte_t pte) \
347 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
348static inline pte_t pte_mkyoung(pte_t pte) \
349 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
350
351/*
352 * Conversion functions: convert a page and protection to a page entry,
353 * and a page entry and page directory to the page they refer to.
354 */
355
356static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
357{
358 pte_t pte;
359 pte_val(pte) = physpage | pgprot_val(pgprot);
360 return pte;
361}
362
363#define mk_pte(page, pgprot) \
364({ \
365 pte_t pte; \
366 pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
367 pgprot_val(pgprot); \
368 pte; \
369})
370
371static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
372{
373 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
374 return pte;
375}
376
377/*
378 * Atomic PTE updates.
379 *
380 * pte_update clears and sets bit atomically, and returns
381 * the old pte value.
382 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
383 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
384 */
385static inline unsigned long pte_update(pte_t *p, unsigned long clr,
386 unsigned long set)
387{
388 unsigned long old, tmp, msr;
389
390 __asm__ __volatile__("\
391 msrclr %2, 0x2\n\
392 nop\n\
393 lw %0, %4, r0\n\
394 andn %1, %0, %5\n\
395 or %1, %1, %6\n\
396 sw %1, %4, r0\n\
397 mts rmsr, %2\n\
398 nop"
399 : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
400 : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
401 : "cc");
402
403 return old;
404}
405
406/*
407 * set_pte stores a linux PTE into the linux page table.
408 */
409static inline void set_pte(struct mm_struct *mm, unsigned long addr,
410 pte_t *ptep, pte_t pte)
411{
412 *ptep = pte;
413}
414
415static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
416 pte_t *ptep, pte_t pte)
417{
418 *ptep = pte;
419}
420
421static inline int ptep_test_and_clear_young(struct mm_struct *mm,
422 unsigned long addr, pte_t *ptep)
423{
424 return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
425}
426
427static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
428 unsigned long addr, pte_t *ptep)
429{
430 return (pte_update(ptep, \
431 (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
432}
433
434static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
435 unsigned long addr, pte_t *ptep)
436{
437 return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
438}
439
440/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
441 unsigned long addr, pte_t *ptep)
442{
443 pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
444}*/
445
446static inline void ptep_mkdirty(struct mm_struct *mm,
447 unsigned long addr, pte_t *ptep)
448{
449 pte_update(ptep, 0, _PAGE_DIRTY);
450}
451
452/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/
453
454/* Convert pmd entry to page */
455/* our pmd entry is an effective address of pte table*/
456/* returns effective address of the pmd entry*/
457#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
458
459/* returns struct *page of the pmd entry*/
460#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
461
462/* to find an entry in a kernel page-table-directory */
463#define pgd_offset_k(address) pgd_offset(&init_mm, address)
464
465/* to find an entry in a page-table-directory */
466#define pgd_index(address) ((address) >> PGDIR_SHIFT)
467#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
468
469/* Find an entry in the second-level page table.. */
470static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
471{
472 return (pmd_t *) dir;
473}
474
475/* Find an entry in the third-level page table.. */
476#define pte_index(address) \
477 (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
478#define pte_offset_kernel(dir, addr) \
479 ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
480#define pte_offset_map(dir, addr) \
481 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
482#define pte_offset_map_nested(dir, addr) \
483 ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
484
485#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
486#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
487
488/* Encode and decode a nonlinear file mapping entry */
489#define PTE_FILE_MAX_BITS 29
490#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
491#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) })
492
493extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
494
495/*
496 * When flushing the tlb entry for a page, we also need to flush the hash
497 * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
498 */
499extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);
500
501/* Add an HPTE to the hash table */
502extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);
503
504/*
505 * Encode and decode a swap entry.
506 * Note that the bits we use in a PTE for representing a swap entry
507 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
508 * (if used). -- paulus
509 */
510#define __swp_type(entry) ((entry).val & 0x3f)
511#define __swp_offset(entry) ((entry).val >> 6)
512#define __swp_entry(type, offset) \
513 ((swp_entry_t) { (type) | ((offset) << 6) })
514#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
515#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
516
517
518/* CONFIG_APUS */
519/* For virtual address to physical address conversion */
520extern void cache_clear(__u32 addr, int length);
521extern void cache_push(__u32 addr, int length);
522extern int mm_end_of_chunk(unsigned long addr, int len);
523extern unsigned long iopa(unsigned long addr);
524/* extern unsigned long mm_ptov(unsigned long addr) \
525 __attribute__ ((const)); TBD */
526
527/* Values for nocacheflag and cmode */
528/* These are not used by the APUS kernel_map, but prevents
529 * compilation errors.
530 */
531#define IOMAP_FULL_CACHING 0
532#define IOMAP_NOCACHE_SER 1
533#define IOMAP_NOCACHE_NONSER 2
534#define IOMAP_NO_COPYBACK 3
535
536/*
537 * Map some physical address range into the kernel address space.
538 */
539extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
540 int nocacheflag, unsigned long *memavailp);
541
542/*
543 * Set cache mode of (kernel space) address range.
544 */
545extern void kernel_set_cachemode(unsigned long address, unsigned long size,
546 unsigned int cmode);
547
548/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
549#define kern_addr_valid(addr) (1)
550
551#define io_remap_page_range remap_page_range
552
553/*
554 * No page table caches to initialise
555 */
556#define pgtable_cache_init() do { } while (0)
557
558void do_page_fault(struct pt_regs *regs, unsigned long address,
559 unsigned long error_code);
560
561void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
562 unsigned int size, int flags);
563
564void __init adjust_total_lowmem(void);
565void mapin_ram(void);
566int map_page(unsigned long va, phys_addr_t pa, int flags);
567
568extern int mem_init_done;
569extern unsigned long ioremap_base;
570extern unsigned long ioremap_bot;
571
572asmlinkage void __init mmu_init(void);
573
574void __init *early_get_page(void);
575
576void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
577void consistent_free(void *vaddr);
578void consistent_sync(void *vaddr, size_t size, int direction);
579void consistent_sync_page(struct page *page, unsigned long offset,
580 size_t size, int direction);
581#endif /* __ASSEMBLY__ */
582#endif /* __KERNEL__ */
583
584#endif /* CONFIG_MMU */
585
 #ifndef __ASSEMBLY__
 #include <asm-generic/pgtable.h>
 
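The MMU variant above folds the pmd level away, so a virtual address is split only by PGDIR_SHIFT and PAGE_SHIFT. A minimal user-space sketch of that index arithmetic follows; it is illustrative only and assumes PAGE_SHIFT = 12 and PTE_SHIFT = 10 (PTE_SHIFT is defined in asm/page.h, outside this diff).

/*
 * Illustration only -- not part of the patch. Models the two-level index
 * arithmetic from pgtable.h above, assuming PAGE_SHIFT = 12 and PTE_SHIFT = 10.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PTE_SHIFT       10                      /* assumed: 1024 PTEs per page */
#define PMD_SHIFT       (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SHIFT     PMD_SHIFT
#define PTRS_PER_PTE    (1 << PTE_SHIFT)

#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

int main(void)
{
        unsigned long addr = 0x10123456UL;      /* arbitrary user address */

        /* top 10 bits pick the pgd slot, the next 10 bits pick the pte slot,
         * and the low 12 bits are the offset inside the 4K page */
        printf("pgd_index = %lu\n", pgd_index(addr));
        printf("pte_index = %lu\n", pte_index(addr));
        printf("offset    = 0x%lx\n", addr & ((1UL << PAGE_SHIFT) - 1));
        return 0;
}

With those values both the pgd and each pte table hold 1024 entries, matching the "1-page 1024-entry pgdir" comment in the hunk above.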
diff --git a/arch/microblaze/include/asm/posix_types.h b/arch/microblaze/include/asm/posix_types.h
index b4df41c5dde2..8c758b231f37 100644
--- a/arch/microblaze/include/asm/posix_types.h
+++ b/arch/microblaze/include/asm/posix_types.h
@@ -16,7 +16,7 @@
  */
 
 typedef unsigned long __kernel_ino_t;
-typedef unsigned int __kernel_mode_t;
+typedef unsigned short __kernel_mode_t;
 typedef unsigned int __kernel_nlink_t;
 typedef long __kernel_off_t;
 typedef int __kernel_pid_t;
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 9329029d2614..563c6b9453f0 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -26,14 +26,15 @@ extern const struct seq_operations cpuinfo_op;
 # define cpu_sleep() do {} while (0)
 # define prepare_to_copy(tsk) do {} while (0)
 
-# endif /* __ASSEMBLY__ */
-
 #define task_pt_regs(tsk) \
         (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
 
 /* Do necessary setup to start up a newly executed thread. */
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp);
 
+# endif /* __ASSEMBLY__ */
+
+# ifndef CONFIG_MMU
 /*
  * User space process size: memory size
  *
@@ -85,4 +86,90 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 # define KSTK_EIP(tsk) (0)
 # define KSTK_ESP(tsk) (0)
 
89# else /* CONFIG_MMU */
90
91/*
92 * This is used to define STACK_TOP, and with MMU it must be below
93 * kernel base to select the correct PGD when handling MMU exceptions.
94 */
95# define TASK_SIZE (CONFIG_KERNEL_START)
96
97/*
98 * This decides where the kernel will search for a free chunk of vm
99 * space during mmap's.
100 */
101# define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
102
103# define THREAD_KSP 0
104
105# ifndef __ASSEMBLY__
106
107/*
108 * Default implementation of macro that returns current
109 * instruction pointer ("program counter").
110 */
111# define current_text_addr() ({ __label__ _l; _l: &&_l; })
112
113/* If you change this, you must change the associated assembly-languages
114 * constants defined below, THREAD_*.
115 */
116struct thread_struct {
117 /* kernel stack pointer (must be first field in structure) */
118 unsigned long ksp;
119 unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
120 void *pgdir; /* root of page-table tree */
121 struct pt_regs *regs; /* Pointer to saved register state */
122};
123
124# define INIT_THREAD { \
125 .ksp = sizeof init_stack + (unsigned long)init_stack, \
126 .pgdir = swapper_pg_dir, \
127}
128
129/* Do necessary setup to start up a newly executed thread. */
130void start_thread(struct pt_regs *regs,
131 unsigned long pc, unsigned long usp);
132
133/* Free all resources held by a thread. */
134extern inline void release_thread(struct task_struct *dead_task)
135{
136}
137
138extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
139
140/* Free current thread data structures etc. */
141static inline void exit_thread(void)
142{
143}
144
145/* Return saved (kernel) PC of a blocked thread. */
146# define thread_saved_pc(tsk) \
147 ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
148
149unsigned long get_wchan(struct task_struct *p);
150
151/* The size allocated for kernel stacks. This _must_ be a power of two! */
152# define KERNEL_STACK_SIZE 0x2000
153
154/* Return some info about the user process TASK. */
155# define task_tos(task) ((unsigned long)(task) + KERNEL_STACK_SIZE)
156# define task_regs(task) ((struct pt_regs *)task_tos(task) - 1)
157
158# define task_pt_regs_plus_args(tsk) \
159 (((void *)task_pt_regs(tsk)) - STATE_SAVE_ARG_SPACE)
160
161# define task_sp(task) (task_regs(task)->r1)
162# define task_pc(task) (task_regs(task)->pc)
163/* Grotty old names for some. */
164# define KSTK_EIP(task) (task_pc(task))
165# define KSTK_ESP(task) (task_sp(task))
166
167/* FIXME */
168# define deactivate_mm(tsk, mm) do { } while (0)
169
170# define STACK_TOP TASK_SIZE
171# define STACK_TOP_MAX STACK_TOP
172
173# endif /* __ASSEMBLY__ */
174# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_PROCESSOR_H */
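The MMU half of processor.h pins TASK_SIZE to CONFIG_KERNEL_START so that exception code can tell user from kernel addresses with a single compare. A rough sketch of the resulting constants, assuming the usual CONFIG_KERNEL_START of 0xC0000000 (the real value comes from Kconfig):

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define CONFIG_KERNEL_START     0xC0000000UL    /* assumed Kconfig value */
#define TASK_SIZE               (CONFIG_KERNEL_START)
#define TASK_UNMAPPED_BASE      (TASK_SIZE / 8 * 3)
#define STACK_TOP               TASK_SIZE

int main(void)
{
        /* user space ends where the kernel mapping begins, so MMU exception
         * code can pick the correct pgd by comparing against TASK_SIZE */
        printf("TASK_SIZE          = 0x%08lx\n", TASK_SIZE);
        printf("TASK_UNMAPPED_BASE = 0x%08lx\n", TASK_UNMAPPED_BASE);
        printf("STACK_TOP          = 0x%08lx\n", STACK_TOP);
        return 0;
}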
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h
index 55015bce5e47..a917dc517736 100644
--- a/arch/microblaze/include/asm/ptrace.h
+++ b/arch/microblaze/include/asm/ptrace.h
@@ -10,7 +10,6 @@
 #define _ASM_MICROBLAZE_PTRACE_H
 
 #ifndef __ASSEMBLY__
-#include <linux/types.h>
 
 typedef unsigned long microblaze_reg_t;
 
diff --git a/arch/microblaze/include/asm/registers.h b/arch/microblaze/include/asm/registers.h
index 834142d9356f..68c3afb73877 100644
--- a/arch/microblaze/include/asm/registers.h
+++ b/arch/microblaze/include/asm/registers.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -30,4 +30,21 @@
 #define FSR_UF (1<<1) /* Underflow */
 #define FSR_DO (1<<0) /* Denormalized operand error */
 
33# ifdef CONFIG_MMU
34/* Machine State Register (MSR) Fields */
35# define MSR_UM (1<<11) /* User Mode */
36# define MSR_UMS (1<<12) /* User Mode Save */
37# define MSR_VM (1<<13) /* Virtual Mode */
38# define MSR_VMS (1<<14) /* Virtual Mode Save */
39
40# define MSR_KERNEL (MSR_EE | MSR_VM)
41/* # define MSR_USER (MSR_KERNEL | MSR_UM | MSR_IE) */
42# define MSR_KERNEL_VMS (MSR_EE | MSR_VMS)
43/* # define MSR_USER_VMS (MSR_KERNEL_VMS | MSR_UMS | MSR_IE) */
44
45/* Exception State Register (ESR) Fields */
46# define ESR_DIZ (1<<11) /* Zone Protection */
47# define ESR_S (1<<10) /* Store instruction */
48
49# endif /* CONFIG_MMU */
 #endif /* _ASM_MICROBLAZE_REGISTERS_H */
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 8434a43e5421..4487e150b455 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -14,6 +16,7 @@
 # ifndef __ASSEMBLY__
 extern char _ssbss[], _esbss[];
 extern unsigned long __ivt_start[], __ivt_end[];
+extern char _etext[], _stext[];
 
 # ifdef CONFIG_MTD_UCLINUX
 extern char *_ebss;
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
index 7f5dcc56eea1..0e7102c3fb11 100644
--- a/arch/microblaze/include/asm/segment.h
+++ b/arch/microblaze/include/asm/segment.h
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 Michal Simek
- * Copyright (C) 2008 PetaLogix
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -11,7 +11,7 @@
 #ifndef _ASM_MICROBLAZE_SEGMENT_H
 #define _ASM_MICROBLAZE_SEGMENT_H
 
-#ifndef __ASSEMBLY__
+# ifndef __ASSEMBLY__
 
 typedef struct {
         unsigned long seg;
@@ -29,15 +29,21 @@ typedef struct {
  *
  * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
  */
-# define KERNEL_DS ((mm_segment_t){0})
+# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+# ifndef CONFIG_MMU
+# define KERNEL_DS MAKE_MM_SEG(0)
 # define USER_DS KERNEL_DS
+# else
+# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+# endif
 
 # define get_ds() (KERNEL_DS)
 # define get_fs() (current_thread_info()->addr_limit)
-# define set_fs(x) \
-        do { current_thread_info()->addr_limit = (x); } while (0)
+# define set_fs(val) (current_thread_info()->addr_limit = (val))
 
 # define segment_eq(a, b) ((a).seg == (b).seg)
 
 # endif /* __ASSEMBLY__ */
 #endif /* _ASM_MICROBLAZE_SEGMENT_H */
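With the MMU enabled, KERNEL_DS and USER_DS become genuinely different address limits rather than the shared dummy segment of the noMMU build. The following stand-alone sketch replicates the definitions above just to show the two limits; TASK_SIZE is assumed to be 0xC0000000 here (in the kernel it comes from processor.h).

/* Illustration only -- not part of the patch. */
#include <stdio.h>

typedef struct {
        unsigned long seg;
} mm_segment_t;

#define TASK_SIZE       0xC0000000UL            /* assumed */
#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE - 1)
#define segment_eq(a, b) ((a).seg == (b).seg)

int main(void)
{
        mm_segment_t fs = USER_DS;

        printf("USER_DS limit   = 0x%08lx\n", fs.seg);
        printf("KERNEL_DS limit = 0x%08lx\n", KERNEL_DS.seg);
        printf("running with KERNEL_DS? %s\n",
               segment_eq(fs, KERNEL_DS) ? "yes" : "no");
        return 0;
}

In the kernel itself, code that must feed kernel buffers to the *_user helpers temporarily widens the limit with set_fs(KERNEL_DS) and restores the previous value afterwards.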
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 9b98e8e6abae..27f8dafd8c34 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -18,7 +19,6 @@
 extern unsigned int boot_cpuid; /* move to smp.h */
 
 extern char cmd_line[COMMAND_LINE_SIZE];
-# endif/* __KERNEL__ */
 
 void early_printk(const char *fmt, ...);
 
@@ -30,6 +30,11 @@ void setup_heartbeat(void);
 
 unsigned long long sched_clock(void);
 
+# ifdef CONFIG_MMU
+extern void mmu_reset(void);
+extern void early_console_reg_tlb_alloc(unsigned int addr);
+# endif /* CONFIG_MMU */
+
 void time_init(void);
 void init_IRQ(void);
 void machine_early_init(const char *cmdline, unsigned int ram,
@@ -40,5 +45,6 @@ void machine_shutdown(void);
 void machine_halt(void);
 void machine_power_off(void);
 
+# endif/* __KERNEL__ */
 # endif /* __ASSEMBLY__ */
 #endif /* _ASM_MICROBLAZE_SETUP_H */
diff --git a/arch/microblaze/include/asm/stat.h b/arch/microblaze/include/asm/stat.h
index 5f18b8aed220..a15f77520bfd 100644
--- a/arch/microblaze/include/asm/stat.h
+++ b/arch/microblaze/include/asm/stat.h
@@ -16,58 +16,53 @@
 
 #include <linux/posix_types.h>
 
+#define STAT_HAVE_NSEC 1
+
 struct stat {
-        unsigned int st_dev;
+        unsigned long st_dev;
         unsigned long st_ino;
         unsigned int st_mode;
         unsigned int st_nlink;
         unsigned int st_uid;
         unsigned int st_gid;
-        unsigned int st_rdev;
-        unsigned long st_size;
-        unsigned long st_blksize;
-        unsigned long st_blocks;
-        unsigned long st_atime;
-        unsigned long __unused1; /* unsigned long st_atime_nsec */
-        unsigned long st_mtime;
-        unsigned long __unused2; /* unsigned long st_mtime_nsec */
-        unsigned long st_ctime;
-        unsigned long __unused3; /* unsigned long st_ctime_nsec */
+        unsigned long st_rdev;
+        unsigned long __pad1;
+        long st_size;
+        int st_blksize;
+        int __pad2;
+        long st_blocks;
+        int st_atime;
+        unsigned int st_atime_nsec;
+        int st_mtime;
+        unsigned int st_mtime_nsec;
+        int st_ctime;
+        unsigned int st_ctime_nsec;
         unsigned long __unused4;
         unsigned long __unused5;
 };
 
 struct stat64 {
-        unsigned long long st_dev;
-        unsigned long __unused1;
-
-        unsigned long long st_ino;
-
-        unsigned int st_mode;
-        unsigned int st_nlink;
-
-        unsigned int st_uid;
-        unsigned int st_gid;
-
-        unsigned long long st_rdev;
-        unsigned long __unused3;
-
-        long long st_size;
-        unsigned long st_blksize;
-
-        unsigned long st_blocks; /* No. of 512-byte blocks allocated */
-        unsigned long __unused4; /* future possible st_blocks high bits */
-
-        unsigned long st_atime;
-        unsigned long st_atime_nsec;
-
-        unsigned long st_mtime;
-        unsigned long st_mtime_nsec;
-
-        unsigned long st_ctime;
-        unsigned long st_ctime_nsec;
-
-        unsigned long __unused8;
+        unsigned long long st_dev; /* Device. */
+        unsigned long long st_ino; /* File serial number. */
+        unsigned int st_mode; /* File mode. */
+        unsigned int st_nlink; /* Link count. */
+        unsigned int st_uid; /* User ID of the file's owner. */
+        unsigned int st_gid; /* Group ID of the file's group. */
+        unsigned long long st_rdev; /* Device number, if device. */
+        unsigned long long __pad1;
+        long long st_size; /* Size of file, in bytes. */
+        int st_blksize; /* Optimal block size for I/O. */
+        int __pad2;
+        long long st_blocks; /* Number 512-byte blocks allocated. */
+        int st_atime; /* Time of last access. */
+        unsigned int st_atime_nsec;
+        int st_mtime; /* Time of last modification. */
+        unsigned int st_mtime_nsec;
+        int st_ctime; /* Time of last status change. */
+        unsigned int st_ctime_nsec;
+        unsigned int __unused4;
+        unsigned int __unused5;
 };
 
 #endif /* _ASM_MICROBLAZE_STAT_H */
+
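The new layout finally gives the nanosecond timestamps and the wider device numbers their own fields instead of unused pad words. A small illustrative program that copies the new struct stat verbatim and prints a couple of offsets; the printed sizes depend on the host ABI, and on 32-bit microblaze unsigned long is 4 bytes:

/* Illustration only -- not part of the patch. */
#include <stdio.h>
#include <stddef.h>

struct mb_stat {        /* renamed to avoid clashing with <sys/stat.h> */
        unsigned long st_dev;
        unsigned long st_ino;
        unsigned int st_mode;
        unsigned int st_nlink;
        unsigned int st_uid;
        unsigned int st_gid;
        unsigned long st_rdev;
        unsigned long __pad1;
        long st_size;
        int st_blksize;
        int __pad2;
        long st_blocks;
        int st_atime;
        unsigned int st_atime_nsec;
        int st_mtime;
        unsigned int st_mtime_nsec;
        int st_ctime;
        unsigned int st_ctime_nsec;
        unsigned long __unused4;
        unsigned long __unused5;
};

int main(void)
{
        printf("sizeof(struct mb_stat) = %zu\n", sizeof(struct mb_stat));
        printf("offsetof st_atime_nsec = %zu\n",
               offsetof(struct mb_stat, st_atime_nsec));
        printf("offsetof st_mtime_nsec = %zu\n",
               offsetof(struct mb_stat, st_mtime_nsec));
        return 0;
}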
diff --git a/arch/microblaze/include/asm/string.h b/arch/microblaze/include/asm/string.h
index f7728c90fc18..aec2f59298b8 100644
--- a/arch/microblaze/include/asm/string.h
+++ b/arch/microblaze/include/asm/string.h
@@ -9,7 +9,7 @@
 #ifndef _ASM_MICROBLAZE_STRING_H
 #define _ASM_MICROBLAZE_STRING_H
 
-#ifndef __KERNEL__
+#ifdef __KERNEL__
 
 #define __HAVE_ARCH_MEMSET
 #define __HAVE_ARCH_MEMCPY
diff --git a/arch/microblaze/include/asm/syscalls.h b/arch/microblaze/include/asm/syscalls.h
index 9cb4ff0edeb2..ddea9eb31f8d 100644
--- a/arch/microblaze/include/asm/syscalls.h
+++ b/arch/microblaze/include/asm/syscalls.h
@@ -34,6 +34,9 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
 asmlinkage int sys_sigaction(int sig, const struct old_sigaction *act,
         struct old_sigaction *oact);
 
+asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
+        struct sigaction __user *oact, size_t sigsetsize);
+
 asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
         struct pt_regs *regs);
 
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 4c3943e3f403..7fac44498445 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -122,6 +122,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SINGLESTEP 4
 #define TIF_IRET 5 /* return with iret */
 #define TIF_MEMDIE 6
+#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
+#define TIF_SECCOMP 10 /* secure computing */
 #define TIF_FREEZE 14 /* Freezing for suspend */
 
 /* FIXME change in entry.S */
@@ -138,10 +140,17 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_IRET (1<<TIF_IRET)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE (1<<TIF_FREEZE)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
 #define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE)
 
+/* work to do in syscall trace */
+#define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
+                                _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK 0x0000FFFE
+
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK 0x0000FFFF
 
@@ -154,6 +163,17 @@ static inline struct thread_info *current_thread_info(void)
  */
 /* FPU was used by this task this quantum (SMP) */
 #define TS_USEDFPU 0x0001
+#define TS_RESTORE_SIGMASK 0x0002
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK 1
+static inline void set_restore_sigmask(void)
+{
+        struct thread_info *ti = current_thread_info();
+        ti->status |= TS_RESTORE_SIGMASK;
+        set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+}
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_MICROBLAZE_THREAD_INFO_H */
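TIF_SYSCALL_AUDIT and TIF_SECCOMP feed the new _TIF_WORK_SYSCALL_MASK, which entry code tests to decide whether a syscall needs the slow, traced path. An illustrative sketch of the bit arithmetic follows; TIF_SYSCALL_TRACE is defined earlier in the header (outside this hunk) and is assumed to be bit 0 here.

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define TIF_SYSCALL_TRACE       0       /* assumed, not part of this hunk */
#define TIF_SINGLESTEP          4
#define TIF_SYSCALL_AUDIT       9
#define TIF_SECCOMP             10

#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SINGLESTEP         (1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)

#define _TIF_WORK_SYSCALL_MASK  (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
                                 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)

int main(void)
{
        unsigned long flags = _TIF_SECCOMP;     /* pretend seccomp is enabled */

        /* the syscall entry path only diverts to the slow, traced variant
         * when one of these bits is set in thread_info->flags */
        printf("_TIF_WORK_SYSCALL_MASK = 0x%04lx\n",
               (unsigned long)_TIF_WORK_SYSCALL_MASK);
        printf("slow path needed: %s\n",
               (flags & _TIF_WORK_SYSCALL_MASK) ? "yes" : "no");
        return 0;
}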
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index d1dfe3791127..c472d2801132 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -13,4 +15,10 @@
 
 #include <asm-generic/tlb.h>
 
+#ifdef CONFIG_MMU
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+#endif
+
 #endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index d7fe7629001b..eb31a0e8a772 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,6 +11,50 @@
 #ifndef _ASM_MICROBLAZE_TLBFLUSH_H
 #define _ASM_MICROBLAZE_TLBFLUSH_H
 
14#ifdef CONFIG_MMU
15
16#include <linux/sched.h>
17#include <linux/threads.h>
18#include <asm/processor.h> /* For TASK_SIZE */
19#include <asm/mmu.h>
20#include <asm/page.h>
21#include <asm/pgalloc.h>
22
23extern void _tlbie(unsigned long address);
24extern void _tlbia(void);
25
26#define __tlbia() _tlbia()
27
28static inline void local_flush_tlb_all(void)
29 { __tlbia(); }
30static inline void local_flush_tlb_mm(struct mm_struct *mm)
31 { __tlbia(); }
32static inline void local_flush_tlb_page(struct vm_area_struct *vma,
33 unsigned long vmaddr)
34 { _tlbie(vmaddr); }
35static inline void local_flush_tlb_range(struct vm_area_struct *vma,
36 unsigned long start, unsigned long end)
37 { __tlbia(); }
38
39#define flush_tlb_kernel_range(start, end) do { } while (0)
40
41#define update_mmu_cache(vma, addr, pte) do { } while (0)
42
43#define flush_tlb_all local_flush_tlb_all
44#define flush_tlb_mm local_flush_tlb_mm
45#define flush_tlb_page local_flush_tlb_page
46#define flush_tlb_range local_flush_tlb_range
47
48/*
49 * This is called in munmap when we have freed up some page-table
50 * pages. We don't need to do anything here, there's nothing special
51 * about our page-table pages. -- paulus
52 */
53static inline void flush_tlb_pgtables(struct mm_struct *mm,
54 unsigned long start, unsigned long end) { }
55
56#else /* CONFIG_MMU */
57
 #define flush_tlb() BUG()
 #define flush_tlb_all() BUG()
 #define flush_tlb_mm(mm) BUG()
@@ -17,4 +63,6 @@
 #define flush_tlb_pgtables(mm, start, end) BUG()
 #define flush_tlb_kernel_range(start, end) BUG()
 
+#endif /* CONFIG_MMU */
+
 #endif /* _ASM_MICROBLAZE_TLBFLUSH_H */
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 5a3ffc308e12..65adad61e7e9 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -1,4 +1,6 @@
 /*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
  * Copyright (C) 2006 Atmark Techno, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -26,6 +28,10 @@
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
 
+#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
+
+#ifndef CONFIG_MMU
+
 extern int ___range_ok(unsigned long addr, unsigned long size);
 
 #define __range_ok(addr, size) \
@@ -34,68 +40,68 @@ extern int ___range_ok(unsigned long addr, unsigned long size);
 #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
 #define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
 
-extern inline int bad_user_access_length(void)
-{
-        return 0;
-}
+/* Undefined function to trigger linker error */
+extern int bad_user_access_length(void);
+
 /* FIXME this is function for optimalization -> memcpy */
 #define __get_user(var, ptr) \
-        ({ \
+({ \
         int __gu_err = 0; \
         switch (sizeof(*(ptr))) { \
         case 1: \
         case 2: \
         case 4: \
                 (var) = *(ptr); \
                 break; \
         case 8: \
                 memcpy((void *) &(var), (ptr), 8); \
                 break; \
         default: \
                 (var) = 0; \
                 __gu_err = __get_user_bad(); \
                 break; \
         } \
         __gu_err; \
-        })
+})
 
 #define __get_user_bad() (bad_user_access_length(), (-EFAULT))
 
+/* FIXME is not there defined __pu_val */
 #define __put_user(var, ptr) \
-        ({ \
+({ \
         int __pu_err = 0; \
         switch (sizeof(*(ptr))) { \
         case 1: \
         case 2: \
         case 4: \
                 *(ptr) = (var); \
                 break; \
         case 8: { \
-                typeof(*(ptr)) __pu_val = var; \
-                memcpy(ptr, &__pu_val, sizeof(__pu_val));\
+                typeof(*(ptr)) __pu_val = (var); \
+                memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
         } \
                 break; \
         default: \
                 __pu_err = __put_user_bad(); \
                 break; \
         } \
         __pu_err; \
-        })
+})
 
 #define __put_user_bad() (bad_user_access_length(), (-EFAULT))
 
-#define put_user(x, ptr) __put_user(x, ptr)
-#define get_user(x, ptr) __get_user(x, ptr)
+#define put_user(x, ptr) __put_user((x), (ptr))
+#define get_user(x, ptr) __get_user((x), (ptr))
 
-#define copy_to_user(to, from, n) (memcpy(to, from, n), 0)
-#define copy_from_user(to, from, n) (memcpy(to, from, n), 0)
+#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0)
+#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0)
 
-#define __copy_to_user(to, from, n) (copy_to_user(to, from, n))
-#define __copy_from_user(to, from, n) (copy_from_user(to, from, n))
-#define __copy_to_user_inatomic(to, from, n) (__copy_to_user(to, from, n))
-#define __copy_from_user_inatomic(to, from, n) (__copy_from_user(to, from, n))
-
-#define __clear_user(addr, n) (memset((void *)addr, 0, n), 0)
+#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n)))
+#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
+#define __copy_to_user_inatomic(to, from, n) \
+        (__copy_to_user((to), (from), (n)))
+#define __copy_from_user_inatomic(to, from, n) \
+        (__copy_from_user((to), (from), (n)))
 
 static inline unsigned long clear_user(void *addr, unsigned long size)
 {
@@ -104,13 +110,200 @@ static inline unsigned long clear_user(void *addr, unsigned long size)
         return size;
 }
 
 /* Returns 0 if exception not found and fixup otherwise. */
 extern unsigned long search_exception_table(unsigned long);
 
+extern long strncpy_from_user(char *dst, const char *src, long count);
+extern long strnlen_user(const char *src, long count);
+
119#else /* CONFIG_MMU */
120
121/*
122 * Address is valid if:
123 * - "addr", "addr + size" and "size" are all below the limit
124 */
125#define access_ok(type, addr, size) \
126 (get_fs().seg > (((unsigned long)(addr)) | \
127 (size) | ((unsigned long)(addr) + (size))))
128
129/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
130 type?"WRITE":"READ",addr,size,get_fs().seg)) */
131
132/*
133 * All the __XXX versions macros/functions below do not perform
134 * access checking. It is assumed that the necessary checks have been
135 * already performed before the finction (macro) is called.
136 */
137
138#define get_user(x, ptr) \
139({ \
140 access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
141 ? __get_user((x), (ptr)) : -EFAULT; \
142})
143
144#define put_user(x, ptr) \
145({ \
146 access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
147 ? __put_user((x), (ptr)) : -EFAULT; \
148})
149
150#define __get_user(x, ptr) \
151({ \
152 unsigned long __gu_val; \
153 /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
154 long __gu_err; \
155 switch (sizeof(*(ptr))) { \
156 case 1: \
157 __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
158 break; \
159 case 2: \
160 __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
161 break; \
162 case 4: \
163 __get_user_asm("lw", (ptr), __gu_val, __gu_err); \
164 break; \
165 default: \
166 __gu_val = 0; __gu_err = -EINVAL; \
167 } \
168 x = (__typeof__(*(ptr))) __gu_val; \
169 __gu_err; \
170})
171
172#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
173({ \
174 __asm__ __volatile__ ( \
175 "1:" insn " %1, %2, r0; \
176 addk %0, r0, r0; \
177 2: \
178 .section .fixup,\"ax\"; \
179 3: brid 2b; \
180 addik %0, r0, %3; \
181 .previous; \
182 .section __ex_table,\"a\"; \
183 .word 1b,3b; \
184 .previous;" \
185 : "=r"(__gu_err), "=r"(__gu_val) \
186 : "r"(__gu_ptr), "i"(-EFAULT) \
187 ); \
188})
189
190#define __put_user(x, ptr) \
191({ \
192 __typeof__(*(ptr)) __gu_val = x; \
193 long __gu_err = 0; \
194 switch (sizeof(__gu_val)) { \
195 case 1: \
196 __put_user_asm("sb", (ptr), __gu_val, __gu_err); \
197 break; \
198 case 2: \
199 __put_user_asm("sh", (ptr), __gu_val, __gu_err); \
200 break; \
201 case 4: \
202 __put_user_asm("sw", (ptr), __gu_val, __gu_err); \
203 break; \
204 case 8: \
205 __put_user_asm_8((ptr), __gu_val, __gu_err); \
206 break; \
207 default: \
208 __gu_err = -EINVAL; \
209 } \
210 __gu_err; \
211})
212
213#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
214({ \
215__asm__ __volatile__ (" lwi %0, %1, 0; \
216 1: swi %0, %2, 0; \
217 lwi %0, %1, 4; \
218 2: swi %0, %2, 4; \
219 addk %0,r0,r0; \
220 3: \
221 .section .fixup,\"ax\"; \
222 4: brid 3b; \
223 addik %0, r0, %3; \
224 .previous; \
225 .section __ex_table,\"a\"; \
226 .word 1b,4b,2b,4b; \
227 .previous;" \
228 : "=&r"(__gu_err) \
229 : "r"(&__gu_val), \
230 "r"(__gu_ptr), "i"(-EFAULT) \
231 ); \
232})
233
234#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
235({ \
236 __asm__ __volatile__ ( \
237 "1:" insn " %1, %2, r0; \
238 addk %0, r0, r0; \
239 2: \
240 .section .fixup,\"ax\"; \
241 3: brid 2b; \
242 addik %0, r0, %3; \
243 .previous; \
244 .section __ex_table,\"a\"; \
245 .word 1b,3b; \
246 .previous;" \
247 : "=r"(__gu_err) \
248 : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
249 ); \
250})
251
252/*
253 * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
254 */
255static inline int clear_user(char *to, int size)
256{
257 if (size && access_ok(VERIFY_WRITE, to, size)) {
258 __asm__ __volatile__ (" \
259 1: \
260 sb r0, %2, r0; \
261 addik %0, %0, -1; \
262 bneid %0, 1b; \
263 addik %2, %2, 1; \
264 2: \
265 .section __ex_table,\"a\"; \
266 .word 1b,2b; \
267 .section .text;" \
268 : "=r"(size) \
269 : "0"(size), "r"(to)
270 );
271 }
272 return size;
273}
274
275extern unsigned long __copy_tofrom_user(void __user *to,
276 const void __user *from, unsigned long size);
277
278#define copy_to_user(to, from, n) \
279 (access_ok(VERIFY_WRITE, (to), (n)) ? \
280 __copy_tofrom_user((void __user *)(to), \
281 (__force const void __user *)(from), (n)) \
282 : -EFAULT)
283
284#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
285#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
286
287#define copy_from_user(to, from, n) \
288 (access_ok(VERIFY_READ, (from), (n)) ? \
289 __copy_tofrom_user((__force void __user *)(to), \
290 (void __user *)(from), (n)) \
291 : -EFAULT)
292
293#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
294#define __copy_from_user_inatomic(to, from, n) \
295 copy_from_user((to), (from), (n))
296
297extern int __strncpy_user(char *to, const char __user *from, int len);
298extern int __strnlen_user(const char __user *sstr, int len);
299
300#define strncpy_from_user(to, from, len) \
301 (access_ok(VERIFY_READ, from, 1) ? \
302 __strncpy_user(to, from, len) : -EFAULT)
303#define strnlen_user(str, len) \
304 (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
 
-extern long strncpy_from_user(char *dst, const char __user *src, long count);
-extern long strnlen_user(const char __user *src, long count);
-extern long __strncpy_from_user(char *dst, const char __user *src, long count);
+#endif /* CONFIG_MMU */
 
 /*
  * The exception table consists of pairs of addresses: the first is the
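The MMU access_ok() above accepts a range only when get_fs().seg is strictly greater than addr | size | (addr + size). A small model of that test (USER_DS is assumed to be TASK_SIZE - 1 with TASK_SIZE = 0xC0000000) shows why the OR also catches ranges that wrap past the top of the address space:

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define USER_LIMIT      0xBFFFFFFFUL    /* assumed USER_DS.seg */

static int model_access_ok(unsigned long addr, unsigned long size)
{
        /* the OR keeps the high bits of addr and size even if addr + size
         * wraps around, so a wrapping range can never look small */
        return USER_LIMIT > (addr | size | (addr + size));
}

int main(void)
{
        /* small buffer well inside user space: allowed */
        printf("%d\n", model_access_ok(0x10000000UL, 0x1000UL));
        /* range ending above the limit: rejected */
        printf("%d\n", model_access_ok(0xBFFFF000UL, 0x2000UL));
        /* range whose end wraps past 4GB: rejected thanks to the OR */
        printf("%d\n", model_access_ok(0x60000000UL, 0xB0000000UL));
        return 0;
}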
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h
index 9d66b640c910..3658d91ac0fb 100644
--- a/arch/microblaze/include/asm/unaligned.h
+++ b/arch/microblaze/include/asm/unaligned.h
@@ -12,7 +12,8 @@
 
 # ifdef __KERNEL__
 
-# include <linux/unaligned/access_ok.h>
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
 # include <linux/unaligned/generic.h>
 
 # define get_unaligned __get_unaligned_be
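Switching to the be_struct/le_byteshift helpers means unaligned accesses are assembled byte by byte instead of relying on an unaligned load the hardware may not support. A stand-alone sketch of what a big-endian unaligned 32-bit read boils down to (illustrative only, not the kernel helper itself):

/* Illustration only -- not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static uint32_t model_get_unaligned_be32(const unsigned char *p)
{
        /* assemble MSB-first, one byte at a time, so alignment never matters */
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        unsigned char buf[] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

        /* read starts at offset 1, i.e. a 4-byte value that is not
         * naturally aligned */
        printf("0x%08x\n", model_get_unaligned_be32(buf + 1)); /* 0x12345678 */
        return 0;
}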