aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-arm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2006-06-28 19:20:49 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-28 19:20:49 -0400
commit27d68a36c4f1ca2fc6be82620843493462c08c51 (patch)
treea06b451e19c25a77595c918ca81bbb30f0ec9ebf /include/asm-arm
parent76a22271fd14e3fe7660f8646db12f0780fa4fd2 (diff)
parent583e7f5d36547f0d84caf71d43b71f0530a47766 (diff)
Merge branch 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm: [ARM] nommu: backtrace code must not reference a discarded section [ARM] nommu: Initial uCLinux support for MMU-based CPUs [ARM] nommu: prevent Xscale-based machines being selected [ARM] nommu: export flush_dcache_page() [ARM] nommu: remove fault-armv, mmap and mm-armv files from nommu build [ARM] Remove TABLE_SIZE, and several unused function prototypes [ARM] nommu: Provide a simple flush_dcache_page implementation [ARM] nommu: add arch/arm/Kconfig-nommu to Kconfig files [ARM] nommu: add stubs for ioremap and friends [ARM] nommu: avoid selecting TLB and CPU specific copy code [ARM] nommu: uaccess tweaks [ARM] nommu: adjust headers for !MMU ARM systems [ARM] nommu: we need the TLS register emulation for nommu mode
Diffstat (limited to 'include/asm-arm')
-rw-r--r--include/asm-arm/bugs.h4
-rw-r--r--include/asm-arm/domain.h7
-rw-r--r--include/asm-arm/mach/map.h9
-rw-r--r--include/asm-arm/memory.h75
-rw-r--r--include/asm-arm/mmu.h16
-rw-r--r--include/asm-arm/mmu_context.h2
-rw-r--r--include/asm-arm/page-nommu.h51
-rw-r--r--include/asm-arm/page.h8
-rw-r--r--include/asm-arm/pgalloc.h8
-rw-r--r--include/asm-arm/pgtable-nommu.h123
-rw-r--r--include/asm-arm/pgtable.h10
-rw-r--r--include/asm-arm/proc-fns.h4
-rw-r--r--include/asm-arm/uaccess.h139
13 files changed, 378 insertions, 78 deletions
diff --git a/include/asm-arm/bugs.h b/include/asm-arm/bugs.h
index 4c80ec519d45..ca54eb0f12d7 100644
--- a/include/asm-arm/bugs.h
+++ b/include/asm-arm/bugs.h
@@ -10,8 +10,12 @@
10#ifndef __ASM_BUGS_H 10#ifndef __ASM_BUGS_H
11#define __ASM_BUGS_H 11#define __ASM_BUGS_H
12 12
13#ifdef CONFIG_MMU
13extern void check_writebuffer_bugs(void); 14extern void check_writebuffer_bugs(void);
14 15
15#define check_bugs() check_writebuffer_bugs() 16#define check_bugs() check_writebuffer_bugs()
17#else
18#define check_bugs() do { } while (0)
19#endif
16 20
17#endif 21#endif
diff --git a/include/asm-arm/domain.h b/include/asm-arm/domain.h
index f8ea2de4848e..4c2885abbe6c 100644
--- a/include/asm-arm/domain.h
+++ b/include/asm-arm/domain.h
@@ -50,6 +50,8 @@
50#define domain_val(dom,type) ((type) << (2*(dom))) 50#define domain_val(dom,type) ((type) << (2*(dom)))
51 51
52#ifndef __ASSEMBLY__ 52#ifndef __ASSEMBLY__
53
54#ifdef CONFIG_MMU
53#define set_domain(x) \ 55#define set_domain(x) \
54 do { \ 56 do { \
55 __asm__ __volatile__( \ 57 __asm__ __volatile__( \
@@ -66,5 +68,10 @@
66 set_domain(thread->cpu_domain); \ 68 set_domain(thread->cpu_domain); \
67 } while (0) 69 } while (0)
68 70
71#else
72#define set_domain(x) do { } while (0)
73#define modify_domain(dom,type) do { } while (0)
74#endif
75
69#endif 76#endif
70#endif /* !__ASSEMBLY__ */ 77#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/mach/map.h b/include/asm-arm/mach/map.h
index e8ea67c97c73..cef5364ed5fe 100644
--- a/include/asm-arm/mach/map.h
+++ b/include/asm-arm/mach/map.h
@@ -16,8 +16,6 @@ struct map_desc {
16 unsigned int type; 16 unsigned int type;
17}; 17};
18 18
19struct meminfo;
20
21#define MT_DEVICE 0 19#define MT_DEVICE 0
22#define MT_CACHECLEAN 1 20#define MT_CACHECLEAN 1
23#define MT_MINICLEAN 2 21#define MT_MINICLEAN 2
@@ -28,7 +26,8 @@ struct meminfo;
28#define MT_IXP2000_DEVICE 7 26#define MT_IXP2000_DEVICE 7
29#define MT_NONSHARED_DEVICE 8 27#define MT_NONSHARED_DEVICE 8
30 28
31extern void create_memmap_holes(struct meminfo *); 29#ifdef CONFIG_MMU
32extern void memtable_init(struct meminfo *);
33extern void iotable_init(struct map_desc *, int); 30extern void iotable_init(struct map_desc *, int);
34extern void setup_io_desc(void); 31#else
32#define iotable_init(map,num) do { } while (0)
33#endif
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 731e321a57d1..94f973b704f1 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -2,6 +2,7 @@
2 * linux/include/asm-arm/memory.h 2 * linux/include/asm-arm/memory.h
3 * 3 *
4 * Copyright (C) 2000-2002 Russell King 4 * Copyright (C) 2000-2002 Russell King
5 * modification for nommu, Hyok S. Choi, 2004
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,8 @@
26#include <asm/arch/memory.h> 27#include <asm/arch/memory.h>
27#include <asm/sizes.h> 28#include <asm/sizes.h>
28 29
30#ifdef CONFIG_MMU
31
29#ifndef TASK_SIZE 32#ifndef TASK_SIZE
30/* 33/*
31 * TASK_SIZE - the maximum size of a user space task. 34 * TASK_SIZE - the maximum size of a user space task.
@@ -48,6 +51,60 @@
48#endif 51#endif
49 52
50/* 53/*
54 * The module space lives between the addresses given by TASK_SIZE
55 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
56 */
57#define MODULE_END (PAGE_OFFSET)
58#define MODULE_START (MODULE_END - 16*1048576)
59
60#if TASK_SIZE > MODULE_START
61#error Top of user space clashes with start of module space
62#endif
63
64/*
65 * The XIP kernel gets mapped at the bottom of the module vm area.
66 * Since we use sections to map it, this macro replaces the physical address
67 * with its virtual address while keeping offset from the base section.
68 */
69#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))
70
71#else /* CONFIG_MMU */
72
73/*
74 * The user task size limit can grow up to the end of the free RAM region.
75 * It is difficult to define TASK_SIZE here in a way that preserves the
76 * original meaning this define was intended to have.
77 * Fortunately, there is no reference to it in noMMU mode, for now.
78 */
79#ifndef TASK_SIZE
80#define TASK_SIZE (CONFIG_DRAM_SIZE)
81#endif
82
83#ifndef TASK_UNMAPPED_BASE
84#define TASK_UNMAPPED_BASE UL(0x00000000)
85#endif
86
87#ifndef PHYS_OFFSET
88#define PHYS_OFFSET (CONFIG_DRAM_BASE)
89#endif
90
91#ifndef END_MEM
92#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
93#endif
94
95#ifndef PAGE_OFFSET
96#define PAGE_OFFSET (PHYS_OFFSET)
97#endif
98
99/*
100 * Modules can be placed anywhere in RAM in noMMU mode.
101 */
102#define MODULE_END (END_MEM)
103#define MODULE_START (PHYS_OFFSET)
104
105#endif /* !CONFIG_MMU */
106
107/*
51 * Size of DMA-consistent memory region. Must be multiple of 2M, 108 * Size of DMA-consistent memory region. Must be multiple of 2M,
52 * between 2MB and 14MB inclusive. 109 * between 2MB and 14MB inclusive.
53 */ 110 */
@@ -71,24 +128,6 @@
71#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT) 128#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
72#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) 129#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
73 130
74/*
75 * The module space lives between the addresses given by TASK_SIZE
76 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
77 */
78#define MODULE_END (PAGE_OFFSET)
79#define MODULE_START (MODULE_END - 16*1048576)
80
81#if TASK_SIZE > MODULE_START
82#error Top of user space clashes with start of module space
83#endif
84
85/*
86 * The XIP kernel gets mapped at the bottom of the module vm area.
87 * Since we use sections to map it, this macro replaces the physical address
88 * with its virtual address while keeping offset from the base section.
89 */
90#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))
91
92#ifndef __ASSEMBLY__ 131#ifndef __ASSEMBLY__
93 132
94/* 133/*
diff --git a/include/asm-arm/mmu.h b/include/asm-arm/mmu.h
index a457cb71984f..23dde52e0945 100644
--- a/include/asm-arm/mmu.h
+++ b/include/asm-arm/mmu.h
@@ -1,6 +1,8 @@
1#ifndef __ARM_MMU_H 1#ifndef __ARM_MMU_H
2#define __ARM_MMU_H 2#define __ARM_MMU_H
3 3
4#ifdef CONFIG_MMU
5
4typedef struct { 6typedef struct {
5#if __LINUX_ARM_ARCH__ >= 6 7#if __LINUX_ARM_ARCH__ >= 6
6 unsigned int id; 8 unsigned int id;
@@ -13,4 +15,18 @@ typedef struct {
13#define ASID(mm) (0) 15#define ASID(mm) (0)
14#endif 16#endif
15 17
18#else
19
20/*
21 * From nommu.h:
22 * Copyright (C) 2002, David McCullough <davidm@snapgear.com>
23 * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
24 */
25typedef struct {
26 struct vm_list_struct *vmlist;
27 unsigned long end_brk;
28} mm_context_t;
29
30#endif
31
16#endif 32#endif
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 81c59facea3b..9fadb01e030d 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -82,6 +82,7 @@ static inline void
82switch_mm(struct mm_struct *prev, struct mm_struct *next, 82switch_mm(struct mm_struct *prev, struct mm_struct *next,
83 struct task_struct *tsk) 83 struct task_struct *tsk)
84{ 84{
85#ifdef CONFIG_MMU
85 unsigned int cpu = smp_processor_id(); 86 unsigned int cpu = smp_processor_id();
86 87
87 if (prev != next) { 88 if (prev != next) {
@@ -91,6 +92,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
91 if (cache_is_vivt()) 92 if (cache_is_vivt())
92 cpu_clear(cpu, prev->cpu_vm_mask); 93 cpu_clear(cpu, prev->cpu_vm_mask);
93 } 94 }
95#endif
94} 96}
95 97
96#define deactivate_mm(tsk,mm) do { } while (0) 98#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/include/asm-arm/page-nommu.h b/include/asm-arm/page-nommu.h
new file mode 100644
index 000000000000..a1bcad060480
--- /dev/null
+++ b/include/asm-arm/page-nommu.h
@@ -0,0 +1,51 @@
1/*
2 * linux/include/asm-arm/page-nommu.h
3 *
4 * Copyright (C) 2004 Hyok S. Choi
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef _ASMARM_PAGE_NOMMU_H
11#define _ASMARM_PAGE_NOMMU_H
12
13#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
14#define KTHREAD_SIZE (8192)
15#else
16#define KTHREAD_SIZE PAGE_SIZE
17#endif
18
19#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
20#define free_user_page(page, addr) free_page(addr)
21
22#define clear_page(page) memset((page), 0, PAGE_SIZE)
23#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
24
25#define clear_user_page(page, vaddr, pg) clear_page(page)
26#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
27
28/*
29 * These are used to make use of C type-checking..
30 */
31typedef unsigned long pte_t;
32typedef unsigned long pmd_t;
33typedef unsigned long pgd_t[2];
34typedef unsigned long pgprot_t;
35
36#define pte_val(x) (x)
37#define pmd_val(x) (x)
38#define pgd_val(x) ((x)[0])
39#define pgprot_val(x) (x)
40
41#define __pte(x) (x)
42#define __pmd(x) (x)
43#define __pgprot(x) (x)
44
45/* to align the pointer to the (next) page boundary */
46#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
47
48extern unsigned long memory_start;
49extern unsigned long memory_end;
50
51#endif
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 66cfeb5290ea..63d12f0244c5 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -23,6 +23,12 @@
23 23
24#ifndef __ASSEMBLY__ 24#ifndef __ASSEMBLY__
25 25
26#ifndef CONFIG_MMU
27
28#include "page-nommu.h"
29
30#else
31
26#include <asm/glue.h> 32#include <asm/glue.h>
27 33
28/* 34/*
@@ -171,6 +177,8 @@ typedef unsigned long pgprot_t;
171/* the upper-most page table pointer */ 177/* the upper-most page table pointer */
172extern pmd_t *top_pmd; 178extern pmd_t *top_pmd;
173 179
180#endif /* CONFIG_MMU */
181
174#include <asm/memory.h> 182#include <asm/memory.h>
175 183
176#endif /* !__ASSEMBLY__ */ 184#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index c4ac2e67768d..4d4394552911 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -16,6 +16,10 @@
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18 18
19#define check_pgt_cache() do { } while (0)
20
21#ifdef CONFIG_MMU
22
19#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) 23#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
20#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) 24#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
21 25
@@ -32,8 +36,6 @@ extern void free_pgd_slow(pgd_t *pgd);
32#define pgd_alloc(mm) get_pgd_slow(mm) 36#define pgd_alloc(mm) get_pgd_slow(mm)
33#define pgd_free(pgd) free_pgd_slow(pgd) 37#define pgd_free(pgd) free_pgd_slow(pgd)
34 38
35#define check_pgt_cache() do { } while (0)
36
37/* 39/*
38 * Allocate one PTE table. 40 * Allocate one PTE table.
39 * 41 *
@@ -126,4 +128,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
126 __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); 128 __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
127} 129}
128 130
131#endif /* CONFIG_MMU */
132
129#endif 133#endif
diff --git a/include/asm-arm/pgtable-nommu.h b/include/asm-arm/pgtable-nommu.h
new file mode 100644
index 000000000000..b13322dccf41
--- /dev/null
+++ b/include/asm-arm/pgtable-nommu.h
@@ -0,0 +1,123 @@
1/*
2 * linux/include/asm-arm/pgtable-nommu.h
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Copyright (C) 2004 Hyok S. Choi
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef _ASMARM_PGTABLE_NOMMU_H
12#define _ASMARM_PGTABLE_NOMMU_H
13
14#ifndef __ASSEMBLY__
15
16#include <linux/config.h>
17#include <linux/slab.h>
18#include <asm/processor.h>
19#include <asm/page.h>
20#include <asm/io.h>
21
22/*
23 * Trivial page table functions.
24 */
25#define pgd_present(pgd) (1)
26#define pgd_none(pgd) (0)
27#define pgd_bad(pgd) (0)
28#define pgd_clear(pgdp)
29#define kern_addr_valid(addr) (1)
30#define pmd_offset(a, b) ((void *)0)
31/* FIXME */
32/*
33 * PMD_SHIFT determines the size of the area a second-level page table can map
34 * PGDIR_SHIFT determines what a third-level page table entry can map
35 */
36#define PGDIR_SHIFT 21
37
38#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
39#define PGDIR_MASK (~(PGDIR_SIZE-1))
40/* FIXME */
41
42#define PAGE_NONE __pgprot(0)
43#define PAGE_SHARED __pgprot(0)
44#define PAGE_COPY __pgprot(0)
45#define PAGE_READONLY __pgprot(0)
46#define PAGE_KERNEL __pgprot(0)
47
48//extern void paging_init(struct meminfo *, struct machine_desc *);
49#define swapper_pg_dir ((pgd_t *) 0)
50
51#define __swp_type(x) (0)
52#define __swp_offset(x) (0)
53#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
54#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
55#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
56
57
58typedef pte_t *pte_addr_t;
59
60static inline int pte_file(pte_t pte) { return 0; }
61
62/*
63 * ZERO_PAGE is a global shared page that is always zero: used
64 * for zero-mapped memory areas etc..
65 */
66#define ZERO_PAGE(vaddr) (virt_to_page(0))
67
68/*
69 * Mark the prot value as uncacheable and unbufferable.
70 */
71#define pgprot_noncached(prot) __pgprot(0)
72#define pgprot_writecombine(prot) __pgprot(0)
73
74
75/*
76 * These would be in other places but having them here reduces the diffs.
77 */
78extern unsigned int kobjsize(const void *objp);
79extern int is_in_rom(unsigned long);
80
81/*
82 * No page table caches to initialise.
83 */
84#define pgtable_cache_init() do { } while (0)
85#define io_remap_page_range remap_page_range
86#define io_remap_pfn_range remap_pfn_range
87
88#define MK_IOSPACE_PFN(space, pfn) (pfn)
89#define GET_IOSPACE(pfn) 0
90#define GET_PFN(pfn) (pfn)
91
92
93/*
94 * All 32bit addresses are effectively valid for vmalloc...
95 * Sort of meaningless for non-VM targets.
96 */
97#define VMALLOC_START 0
98#define VMALLOC_END 0xffffffff
99
100#define FIRST_USER_ADDRESS (0)
101
102#else
103
104/*
105 * dummy tlb and user structures.
106 */
107#define v3_tlb_fns (0)
108#define v4_tlb_fns (0)
109#define v4wb_tlb_fns (0)
110#define v4wbi_tlb_fns (0)
111#define v6_tlb_fns (0)
112
113#define v3_user_fns (0)
114#define v4_user_fns (0)
115#define v4_mc_user_fns (0)
116#define v4wb_user_fns (0)
117#define v4wt_user_fns (0)
118#define v6_user_fns (0)
119#define xscale_mc_user_fns (0)
120
121#endif /*__ASSEMBLY__*/
122
123#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index e85c08d78dda..8d3919c6458c 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -11,9 +11,15 @@
11#define _ASMARM_PGTABLE_H 11#define _ASMARM_PGTABLE_H
12 12
13#include <asm-generic/4level-fixup.h> 13#include <asm-generic/4level-fixup.h>
14#include <asm/proc-fns.h>
15
16#ifndef CONFIG_MMU
17
18#include "pgtable-nommu.h"
19
20#else
14 21
15#include <asm/memory.h> 22#include <asm/memory.h>
16#include <asm/proc-fns.h>
17#include <asm/arch/vmalloc.h> 23#include <asm/arch/vmalloc.h>
18 24
19/* 25/*
@@ -378,4 +384,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
378 384
379#endif /* !__ASSEMBLY__ */ 385#endif /* !__ASSEMBLY__ */
380 386
387#endif /* CONFIG_MMU */
388
381#endif /* _ASMARM_PGTABLE_H */ 389#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h
index e9310895e79d..1bde92cdaebd 100644
--- a/include/asm-arm/proc-fns.h
+++ b/include/asm-arm/proc-fns.h
@@ -165,6 +165,8 @@
165 165
166#include <asm/memory.h> 166#include <asm/memory.h>
167 167
168#ifdef CONFIG_MMU
169
168#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) 170#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
169 171
170#define cpu_get_pgd() \ 172#define cpu_get_pgd() \
@@ -176,6 +178,8 @@
176 (pgd_t *)phys_to_virt(pg); \ 178 (pgd_t *)phys_to_virt(pg); \
177 }) 179 })
178 180
181#endif
182
179#endif /* __ASSEMBLY__ */ 183#endif /* __ASSEMBLY__ */
180#endif /* __KERNEL__ */ 184#endif /* __KERNEL__ */
181#endif /* __ASM_PROCFNS_H */ 185#endif /* __ASM_PROCFNS_H */
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index f909dc75301a..87aba57a66c4 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -41,15 +41,24 @@ struct exception_table_entry
41extern int fixup_exception(struct pt_regs *regs); 41extern int fixup_exception(struct pt_regs *regs);
42 42
43/* 43/*
44 * These two are intentionally not defined anywhere - if the kernel
45 * code generates any references to them, that's a bug.
46 */
47extern int __get_user_bad(void);
48extern int __put_user_bad(void);
49
50/*
44 * Note that this is actually 0x1,0000,0000 51 * Note that this is actually 0x1,0000,0000
45 */ 52 */
46#define KERNEL_DS 0x00000000 53#define KERNEL_DS 0x00000000
47#define USER_DS TASK_SIZE
48
49#define get_ds() (KERNEL_DS) 54#define get_ds() (KERNEL_DS)
55
56#ifdef CONFIG_MMU
57
58#define USER_DS TASK_SIZE
50#define get_fs() (current_thread_info()->addr_limit) 59#define get_fs() (current_thread_info()->addr_limit)
51 60
52static inline void set_fs (mm_segment_t fs) 61static inline void set_fs(mm_segment_t fs)
53{ 62{
54 current_thread_info()->addr_limit = fs; 63 current_thread_info()->addr_limit = fs;
55 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); 64 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
75 : "cc"); \ 84 : "cc"); \
76 flag; }) 85 flag; })
77 86
78#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
79
80/* 87/*
81 * Single-value transfer routines. They automatically use the right 88 * Single-value transfer routines. They automatically use the right
82 * size if we just have the right pointer type. Note that the functions 89 * size if we just have the right pointer type. Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
87 * fixup code, but there are a few places where it intrudes on the 94 * fixup code, but there are a few places where it intrudes on the
88 * main code path. When we only write to user space, there is no 95 * main code path. When we only write to user space, there is no
89 * problem. 96 * problem.
90 *
91 * The "__xxx" versions of the user access functions do not verify the
92 * address space - it must have been done previously with a separate
93 * "access_ok()" call.
94 *
95 * The "xxx_error" versions set the third argument to EFAULT if an
96 * error occurs, and leave it unchanged on success. Note that these
97 * versions are void (ie, don't return a value as such).
98 */ 97 */
99
100extern int __get_user_1(void *); 98extern int __get_user_1(void *);
101extern int __get_user_2(void *); 99extern int __get_user_2(void *);
102extern int __get_user_4(void *); 100extern int __get_user_4(void *);
103extern int __get_user_bad(void);
104 101
105#define __get_user_x(__r2,__p,__e,__s,__i...) \ 102#define __get_user_x(__r2,__p,__e,__s,__i...) \
106 __asm__ __volatile__ ( \ 103 __asm__ __volatile__ ( \
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
131 __e; \ 128 __e; \
132 }) 129 })
133 130
131extern int __put_user_1(void *, unsigned int);
132extern int __put_user_2(void *, unsigned int);
133extern int __put_user_4(void *, unsigned int);
134extern int __put_user_8(void *, unsigned long long);
135
136#define __put_user_x(__r2,__p,__e,__s) \
137 __asm__ __volatile__ ( \
138 __asmeq("%0", "r0") __asmeq("%2", "r2") \
139 "bl __put_user_" #__s \
140 : "=&r" (__e) \
141 : "0" (__p), "r" (__r2) \
142 : "ip", "lr", "cc")
143
144#define put_user(x,p) \
145 ({ \
146 const register typeof(*(p)) __r2 asm("r2") = (x); \
147 const register typeof(*(p)) __user *__p asm("r0") = (p);\
148 register int __e asm("r0"); \
149 switch (sizeof(*(__p))) { \
150 case 1: \
151 __put_user_x(__r2, __p, __e, 1); \
152 break; \
153 case 2: \
154 __put_user_x(__r2, __p, __e, 2); \
155 break; \
156 case 4: \
157 __put_user_x(__r2, __p, __e, 4); \
158 break; \
159 case 8: \
160 __put_user_x(__r2, __p, __e, 8); \
161 break; \
162 default: __e = __put_user_bad(); break; \
163 } \
164 __e; \
165 })
166
167#else /* CONFIG_MMU */
168
169/*
170 * uClinux has only one addr space, so has simplified address limits.
171 */
172#define USER_DS KERNEL_DS
173
174#define segment_eq(a,b) (1)
175#define __addr_ok(addr) (1)
176#define __range_ok(addr,size) (0)
177#define get_fs() (KERNEL_DS)
178
179static inline void set_fs(mm_segment_t fs)
180{
181}
182
183#define get_user(x,p) __get_user(x,p)
184#define put_user(x,p) __put_user(x,p)
185
186#endif /* CONFIG_MMU */
187
188#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
189
190/*
191 * The "__xxx" versions of the user access functions do not verify the
192 * address space - it must have been done previously with a separate
193 * "access_ok()" call.
194 *
195 * The "xxx_error" versions set the third argument to EFAULT if an
196 * error occurs, and leave it unchanged on success. Note that these
197 * versions are void (ie, don't return a value as such).
198 */
134#define __get_user(x,ptr) \ 199#define __get_user(x,ptr) \
135({ \ 200({ \
136 long __gu_err = 0; \ 201 long __gu_err = 0; \
@@ -212,43 +277,6 @@ do { \
212 : "r" (addr), "i" (-EFAULT) \ 277 : "r" (addr), "i" (-EFAULT) \
213 : "cc") 278 : "cc")
214 279
215extern int __put_user_1(void *, unsigned int);
216extern int __put_user_2(void *, unsigned int);
217extern int __put_user_4(void *, unsigned int);
218extern int __put_user_8(void *, unsigned long long);
219extern int __put_user_bad(void);
220
221#define __put_user_x(__r2,__p,__e,__s) \
222 __asm__ __volatile__ ( \
223 __asmeq("%0", "r0") __asmeq("%2", "r2") \
224 "bl __put_user_" #__s \
225 : "=&r" (__e) \
226 : "0" (__p), "r" (__r2) \
227 : "ip", "lr", "cc")
228
229#define put_user(x,p) \
230 ({ \
231 const register typeof(*(p)) __r2 asm("r2") = (x); \
232 const register typeof(*(p)) __user *__p asm("r0") = (p);\
233 register int __e asm("r0"); \
234 switch (sizeof(*(__p))) { \
235 case 1: \
236 __put_user_x(__r2, __p, __e, 1); \
237 break; \
238 case 2: \
239 __put_user_x(__r2, __p, __e, 2); \
240 break; \
241 case 4: \
242 __put_user_x(__r2, __p, __e, 4); \
243 break; \
244 case 8: \
245 __put_user_x(__r2, __p, __e, 8); \
246 break; \
247 default: __e = __put_user_bad(); break; \
248 } \
249 __e; \
250 })
251
252#define __put_user(x,ptr) \ 280#define __put_user(x,ptr) \
253({ \ 281({ \
254 long __pu_err = 0; \ 282 long __pu_err = 0; \
@@ -354,9 +382,16 @@ do { \
354 : "cc") 382 : "cc")
355 383
356 384
385#ifdef CONFIG_MMU
357extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n); 386extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
358extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n); 387extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
359extern unsigned long __clear_user(void __user *addr, unsigned long n); 388extern unsigned long __clear_user(void __user *addr, unsigned long n);
389#else
390#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
391#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
392#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
393#endif
394
360extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count); 395extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
361extern unsigned long __strnlen_user(const char __user *s, long n); 396extern unsigned long __strnlen_user(const char __user *s, long n);
362 397