Diffstat (limited to 'include/asm-um')
-rw-r--r--   include/asm-um/a.out.h              |    4
-rw-r--r--   include/asm-um/current.h            |   23
-rw-r--r--   include/asm-um/elf-i386.h           |   28
-rw-r--r--   include/asm-um/elf-x86_64.h         |    8
-rw-r--r--   include/asm-um/fixmap.h             |    6
-rw-r--r--   include/asm-um/ldt.h                |    4
-rw-r--r--   include/asm-um/linkage.h            |    6
-rw-r--r--   include/asm-um/mmu_context.h        |    7
-rw-r--r--   include/asm-um/page.h               |    6
-rw-r--r--   include/asm-um/param.h              |    2
-rw-r--r--   include/asm-um/pgalloc.h            |    8
-rw-r--r--   include/asm-um/pgtable-2level.h     |    3
-rw-r--r--   include/asm-um/pgtable-3level.h     |   35
-rw-r--r--   include/asm-um/pgtable.h            |  100
-rw-r--r--   include/asm-um/processor-generic.h  |   13
-rw-r--r--   include/asm-um/processor-i386.h     |    1
-rw-r--r--   include/asm-um/thread_info.h        |   11
-rw-r--r--   include/asm-um/tlb.h                |  122
-rw-r--r--   include/asm-um/uaccess.h            |   10
19 files changed, 206 insertions, 191 deletions
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h
index 9281dd8eb334..f42ff14577fa 100644
--- a/include/asm-um/a.out.h
+++ b/include/asm-um/a.out.h
@@ -13,11 +13,9 @@
 
 extern unsigned long stacksizelim;
 
-extern unsigned long host_task_size;
-
 #define STACK_ROOM (stacksizelim)
 
-#define STACK_TOP task_size
+#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
 
 #define STACK_TOP_MAX STACK_TOP
 
diff --git a/include/asm-um/current.h b/include/asm-um/current.h
index 8fd72f69ce65..c2191d9aa03d 100644
--- a/include/asm-um/current.h
+++ b/include/asm-um/current.h
@@ -1,32 +1,13 @@
 /*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 
 #ifndef __UM_CURRENT_H
 #define __UM_CURRENT_H
 
-#ifndef __ASSEMBLY__
-
-#include "asm/page.h"
 #include "linux/thread_info.h"
 
 #define current (current_thread_info()->task)
 
-/*Backward compatibility - it's used inside arch/um.*/
-#define current_thread current_thread_info()
-
-#endif /* __ASSEMBLY__ */
-
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/elf-i386.h b/include/asm-um/elf-i386.h
index ca94a136dfe8..23d6893e8617 100644
--- a/include/asm-um/elf-i386.h
+++ b/include/asm-um/elf-i386.h
@@ -1,11 +1,11 @@
 /*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 #ifndef __UM_ELF_I386_H
 #define __UM_ELF_I386_H
 
-#include <linux/sched.h>
+#include <asm/user.h>
 #include "skas.h"
 
 #define R_386_NONE 0
@@ -46,7 +46,7 @@ typedef struct user_i387_struct elf_fpregset_t;
 	PT_REGS_EDI(regs) = 0; \
 	PT_REGS_EBP(regs) = 0; \
 	PT_REGS_EAX(regs) = 0; \
-} while(0)
+} while (0)
 
 #define USE_ELF_CORE_DUMP
 #define ELF_EXEC_PAGESIZE 4096
@@ -74,14 +74,9 @@ typedef struct user_i387_struct elf_fpregset_t;
 	pr_reg[14] = PT_REGS_EFLAGS(regs); \
 	pr_reg[15] = PT_REGS_SP(regs); \
 	pr_reg[16] = PT_REGS_SS(regs); \
-} while(0);
+} while (0);
 
-static inline int elf_core_copy_fpregs(struct task_struct *t,
-				       elf_fpregset_t *fpu)
-{
-	int cpu = ((struct thread_info *) t->stack)->cpu;
-	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
-}
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
 
 #define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
 
@@ -91,7 +86,7 @@ extern long elf_aux_hwcap;
 extern char * elf_aux_platform;
 #define ELF_PLATFORM (elf_aux_platform)
 
-#define SET_PERSONALITY(ex, ibcs2) do ; while(0)
+#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
 
 extern unsigned long vsyscall_ehdr;
 extern unsigned long vsyscall_end;
@@ -166,14 +161,3 @@ if ( vsyscall_ehdr ) { \
 }
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/elf-x86_64.h b/include/asm-um/elf-x86_64.h
index 3c9d543eb61e..3b2d5224a7e1 100644
--- a/include/asm-um/elf-x86_64.h
+++ b/include/asm-um/elf-x86_64.h
@@ -7,7 +7,6 @@
 #ifndef __UM_ELF_X86_64_H
 #define __UM_ELF_X86_64_H
 
-#include <linux/sched.h>
 #include <asm/user.h>
 #include "skas.h"
 
@@ -96,12 +95,7 @@ typedef struct user_i387_struct elf_fpregset_t;
 	(pr_reg)[25] = 0; \
 	(pr_reg)[26] = 0;
 
-static inline int elf_core_copy_fpregs(struct task_struct *t,
-				       elf_fpregset_t *fpu)
-{
-	int cpu = current_thread->cpu;
-	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
-}
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
 
 #define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
 
diff --git a/include/asm-um/fixmap.h b/include/asm-um/fixmap.h
index d352a35cfafb..89a87c18b927 100644
--- a/include/asm-um/fixmap.h
+++ b/include/asm-um/fixmap.h
@@ -1,9 +1,10 @@
 #ifndef __UM_FIXMAP_H
 #define __UM_FIXMAP_H
 
+#include <asm/system.h>
 #include <asm/kmap_types.h>
 #include <asm/archparam.h>
-#include <asm/elf.h>
+#include <asm/page.h>
 
 /*
  * Here we define all the compile-time 'special' virtual
@@ -55,9 +56,8 @@ extern void __set_fixmap (enum fixed_addresses idx,
  * the start of the fixmap, and leave one page empty
  * at the top of mem..
  */
-extern unsigned long get_kmem_end(void);
 
-#define FIXADDR_TOP (get_kmem_end() - 0x2000)
+#define FIXADDR_TOP (CONFIG_TOP_ADDR - 2 * PAGE_SIZE)
 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
 
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
index b2553f3e87eb..52af512f5e7d 100644
--- a/include/asm-um/ldt.h
+++ b/include/asm-um/ldt.h
@@ -8,7 +8,7 @@
 #ifndef __ASM_LDT_H
 #define __ASM_LDT_H
 
-#include "asm/semaphore.h"
+#include <linux/mutex.h>
 #include "asm/host_ldt.h"
 
 extern void ldt_host_info(void);
@@ -27,7 +27,7 @@ struct ldt_entry {
 
 typedef struct uml_ldt {
 	int entry_count;
-	struct semaphore semaphore;
+	struct mutex lock;
 	union {
 		struct ldt_entry * pages[LDT_PAGES_MAX];
 		struct ldt_entry entries[LDT_DIRECT_ENTRIES];
diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h
index cdb3024a699a..7dfce37adc8b 100644
--- a/include/asm-um/linkage.h
+++ b/include/asm-um/linkage.h
@@ -3,10 +3,4 @@
 
 #include "asm/arch/linkage.h"
 
-
-/* <linux/linkage.h> will pick sane defaults */
-#ifdef CONFIG_GPROF
-#undef fastcall
-#endif
-
 #endif
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 5f3b863aef9a..6686fc524ca1 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -6,11 +6,12 @@
 #ifndef __UM_MMU_CONTEXT_H
 #define __UM_MMU_CONTEXT_H
 
-#include <asm-generic/mm_hooks.h>
-
 #include "linux/sched.h"
 #include "um_mmu.h"
 
+extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+extern void arch_exit_mmap(struct mm_struct *mm);
+
 #define get_mmu_context(task) do ; while(0)
 #define activate_context(tsk) do ; while(0)
 
@@ -30,6 +31,8 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 */
 	if (old != new && (current->flags & PF_BORROWED_MM))
 		__switch_mm(&new->context.id);
+
+	arch_dup_mmap(old, new);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 4b424c75fca5..fe2374d705d1 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -30,7 +30,7 @@ struct page;
 #if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
 
 typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
 
@@ -106,8 +106,8 @@ extern unsigned long uml_physmem;
 #define __pa(virt) to_phys((void *) (unsigned long) (virt))
 #define __va(phys) to_virt((unsigned long) (phys))
 
-#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
-#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
+#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
 
 #define pfn_valid(pfn) ((pfn) < max_mapnr)
 #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
diff --git a/include/asm-um/param.h b/include/asm-um/param.h
index f914e7d67b01..4cd4a226f8c1 100644
--- a/include/asm-um/param.h
+++ b/include/asm-um/param.h
@@ -10,7 +10,7 @@
 #define MAXHOSTNAMELEN 64 /* max length of hostname */
 
 #ifdef __KERNEL__
-#define HZ 100
+#define HZ CONFIG_HZ
 #define USER_HZ 100 /* .. some user interfaces are in "ticks" */
 #define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
 #endif
diff --git a/include/asm-um/pgalloc.h b/include/asm-um/pgalloc.h
index 14904876e8fb..4f3e62b02861 100644
--- a/include/asm-um/pgalloc.h
+++ b/include/asm-um/pgalloc.h
@@ -23,17 +23,17 @@
  * Allocate and free page tables.
  */
 extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
 extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
 
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
 	free_page((unsigned long) pte);
 }
 
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
 	__free_page(pte);
 }
@@ -42,7 +42,7 @@ static inline void pte_free(struct page *pte)
 
 #ifdef CONFIG_3_LEVEL_PGTABLES
 
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h
index 172a75fde512..f534b73e753e 100644
--- a/include/asm-um/pgtable-2level.h
+++ b/include/asm-um/pgtable-2level.h
@@ -41,9 +41,6 @@ static inline void pgd_mkuptodate(pgd_t pgd) { }
 #define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
 
-#define pmd_page_vaddr(pmd) \
-	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
 /*
  * Bits 0 through 4 are taken
  */
diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h
index 3ebafbaacb24..0446f456b428 100644
--- a/include/asm-um/pgtable-3level.h
+++ b/include/asm-um/pgtable-3level.h
@@ -11,7 +11,11 @@
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 
+#ifdef CONFIG_64BIT
 #define PGDIR_SHIFT 30
+#else
+#define PGDIR_SHIFT 31
+#endif
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 
@@ -28,9 +32,15 @@
  */
 
 #define PTRS_PER_PTE 512
+#ifdef CONFIG_64BIT
 #define PTRS_PER_PMD 512
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
 #define PTRS_PER_PGD 512
+#else
+#define PTRS_PER_PMD 1024
+#define PTRS_PER_PGD 1024
+#endif
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
 #define FIRST_USER_ADDRESS 0
 
 #define pte_ERROR(e) \
@@ -49,7 +59,12 @@
 #define pud_populate(mm, pud, pmd) \
 	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
 
+#ifdef CONFIG_64BIT
 #define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
+#else
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
 static inline int pgd_newpage(pgd_t pgd)
 {
 	return(pgd_val(pgd) & _PAGE_NEWPAGE);
@@ -57,17 +72,14 @@ static inline int pgd_newpage(pgd_t pgd)
 
 static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
 
+#ifdef CONFIG_64BIT
 #define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
-
-	if(pmd)
-		memset(pmd, 0, PAGE_SIZE);
-
-	return pmd;
-}
+struct mm_struct;
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
 
 static inline void pud_clear (pud_t *pud)
 {
@@ -75,8 +87,7 @@ static inline void pud_clear (pud_t *pud)
 }
 
 #define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) \
-	((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
 
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 830fc6e5d49d..4102b443e925 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Copyright 2003 PathScale, Inc.
  * Derived from include/asm-i386/pgtable.h
  * Licensed under the GPL
@@ -8,11 +8,7 @@
 #ifndef __UM_PGTABLE_H
 #define __UM_PGTABLE_H
 
-#include "linux/sched.h"
-#include "linux/linkage.h"
-#include "asm/processor.h"
-#include "asm/page.h"
-#include "asm/fixmap.h"
+#include <asm/fixmap.h>
 
 #define _PAGE_PRESENT 0x001
 #define _PAGE_NEWPAGE 0x002
@@ -34,22 +30,11 @@
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
-			     pte_t *pte_out);
-
 /* zero page used for uninitialized stuff */
 extern unsigned long *empty_zero_page;
 
 #define pgtable_cache_init() do ; while (0)
 
-/*
- * pgd entries used up by user/kernel:
- */
-
-#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#ifndef __ASSEMBLY__
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
@@ -62,16 +47,12 @@ extern unsigned long end_iomem;
 
 #define VMALLOC_OFFSET (__va_space)
 #define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
 #else
 # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
 #endif
 
-#define REGION_SHIFT (sizeof(pte_t) * 8 - 4)
-#define REGION_MASK (((unsigned long) 0xf) << REGION_SHIFT)
-
 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -81,11 +62,12 @@ extern unsigned long end_iomem;
 #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
 
 /*
- * The i386 can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+ * Also, write permissions imply read permissions. This is the closest we can
+ * get..
  */
 #define __P000 PAGE_NONE
 #define __P001 PAGE_READONLY
@@ -106,40 +88,16 @@ extern unsigned long end_iomem;
 #define __S111 PAGE_SHARED
 
 /*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok(VERIFY_WRITE,..)'
- */
-#undef TEST_VERIFY_AREA
-
-/* page table for 0-4MB for everybody */
-extern unsigned long pg0[1024];
-
-/*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-
 #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
 
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 3
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-
 #define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
 
 #define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
 #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
 #define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
 
@@ -149,14 +107,9 @@ extern unsigned long pg0[1024];
 #define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
 #define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
 
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
 
 #define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
-#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
-#define phys_addr(p) ((p) & ~REGION_MASK)
 
 #define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
 
@@ -309,7 +262,8 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 
 #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
 #define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
 
 #define mk_pte(page, pgprot) \
 	({ pte_t pte; \
@@ -325,8 +279,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return pte;
 }
 
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
 /*
  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  *
@@ -335,8 +287,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 
-#define pgd_index_k(addr) pgd_index(addr)
-
 /*
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
@@ -355,8 +305,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * this macro returns the index of the entry in the pmd page which would
  * control the given virtual address
  */
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 
+#define pmd_page_vaddr(pmd) \
+	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
 /*
  * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  *
@@ -372,6 +326,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
 
+struct mm_struct;
+extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+
 #define update_mmu_cache(vma,address,pte) do ; while (0)
 
 /* Encode and de-code a swap entry */
@@ -388,29 +345,4 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #include <asm-generic/pgtable.h>
 
-#include <asm-generic/pgtable-nopud.h>
-
-#ifdef CONFIG_HIGHMEM
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
-	pte_clear(&init_mm, vaddr, ptep); \
-	__flush_tlb_one(vaddr); \
-} while (0)
 #endif
-
-#endif
-#endif
-
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
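
A side note on the VMALLOC_START line kept as context above: it is the usual align-up-to-a-power-of-two idiom, pushing the vmalloc area past end_iomem to the next 8MB boundary (the "8MB hole" from the comment in this file). The following user-space sketch shows only that arithmetic; the 8MB value for __va_space and the sample end_iomem are assumptions for illustration, not values taken from the kernel.

/* sketch: the align-up idiom behind VMALLOC_START (assumed 8MB __va_space) */
#include <stdio.h>

#define VMALLOC_OFFSET (8UL << 20)	/* stand-in for __va_space */

int main(void)
{
	unsigned long end_iomem = 0x12345678UL;	/* arbitrary example value */
	unsigned long vmalloc_start =
		(end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1);

	/* advances past end_iomem to the next 8MB-aligned address */
	printf("end_iomem=%#lx -> VMALLOC_START=%#lx\n", end_iomem, vmalloc_start);
	return 0;
}
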
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
index 78c0599cc80c..b7d9a16a7451 100644
--- a/include/asm-um/processor-generic.h
+++ b/include/asm-um/processor-generic.h
@@ -11,6 +11,7 @@ struct pt_regs;
 struct task_struct;
 
 #include "asm/ptrace.h"
+#include "asm/pgtable.h"
 #include "registers.h"
 #include "sysdep/archsetjmp.h"
 
@@ -26,7 +27,6 @@ struct thread_struct {
 	 * as of 2.6.11).
 	 */
 	int forking;
-	int nsyscalls;
 	struct pt_regs regs;
 	int singlestep_syscall;
 	void *fault_addr;
@@ -58,7 +58,6 @@ struct thread_struct {
 #define INIT_THREAD \
 { \
 	.forking = 0, \
-	.nsyscalls = 0, \
 	.regs = EMPTY_REGS, \
 	.fault_addr = NULL, \
 	.prev_sched = NULL, \
@@ -68,10 +67,6 @@ struct thread_struct {
 	.request = { 0 } \
 }
 
-typedef struct {
-	unsigned long seg;
-} mm_segment_t;
-
 extern struct task_struct *alloc_task_struct(void);
 
 static inline void release_thread(struct task_struct *task)
@@ -97,9 +92,7 @@ static inline void mm_copy_segments(struct mm_struct *from_mm,
 /*
  * User space process size: 3GB (default).
  */
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
+#define TASK_SIZE (CONFIG_TOP_ADDR & PGDIR_MASK)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
@@ -128,6 +121,6 @@ extern struct cpuinfo_um cpu_data[];
 
 
 #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-#define get_wchan(p) (0)
+extern unsigned long get_wchan(struct task_struct *p);
 
 #endif
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
index 595f1c3e1e40..a2b7fe13fe1e 100644
--- a/include/asm-um/processor-i386.h
+++ b/include/asm-um/processor-i386.h
@@ -10,7 +10,6 @@
 #include "asm/host_ldt.h"
 #include "asm/segment.h"
 
-extern int host_has_xmm;
 extern int host_has_cmov;
 
 /* include faultinfo structure */
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 6e5fd5c892d0..356b83e2c22e 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 
@@ -8,8 +8,9 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm/processor.h>
 #include <asm/types.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
 
 struct thread_info {
 	struct task_struct *task;	/* main task structure */
@@ -75,8 +76,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_TRACE 0	/* syscall trace active */
 #define TIF_SIGPENDING 1	/* signal pending */
 #define TIF_NEED_RESCHED 2	/* rescheduling necessary */
-#define TIF_POLLING_NRFLAG 3	/* true if poll_idle() is polling
-				 * TIF_NEED_RESCHED
-				 */
+#define TIF_POLLING_NRFLAG 3	/* true if poll_idle() is polling
+				 * TIF_NEED_RESCHED
+				 */
 #define TIF_RESTART_BLOCK 4
 #define TIF_MEMDIE 5
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
index c640033bc1fd..39fc475df6c9 100644
--- a/include/asm-um/tlb.h
+++ b/include/asm-um/tlb.h
@@ -1,6 +1,126 @@
 #ifndef __UM_TLB_H
 #define __UM_TLB_H
 
-#include <asm/arch/tlb.h>
+#include <linux/swap.h>
+#include <asm/percpu.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+	struct mm_struct *mm;
+	unsigned int need_flush; /* Really unmapped some ptes? */
+	unsigned long start;
+	unsigned long end;
+	unsigned int fullmm; /* non-zero means full mm flush */
+};
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+					  unsigned long address)
+{
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + PAGE_SIZE)
+		tlb->end = address + PAGE_SIZE;
+}
+
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+	tlb->need_flush = 0;
+
+	tlb->start = TASK_SIZE;
+	tlb->end = 0;
+
+	if (tlb->fullmm) {
+		tlb->start = 0;
+		tlb->end = TASK_SIZE;
+	}
+}
+
+/* tlb_gather_mmu
+ *	Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+	tlb->mm = mm;
+	tlb->fullmm = full_mm_flush;
+
+	init_tlb_gather(tlb);
+
+	return tlb;
+}
+
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			       unsigned long end);
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	if (!tlb->need_flush)
+		return;
+
+	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+	init_tlb_gather(tlb);
+}
+
+/* tlb_finish_mmu
+ *	Called at the end of the shootdown operation to free up any resources
+ *	that were required.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	tlb_flush_mmu(tlb, start, end);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	put_cpu_var(mmu_gathers);
+}
+
+/* tlb_remove_page
+ *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
+ *	while handling the additional races in SMP caused by other CPUs
+ *	caching valid mappings in their TLBs.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	tlb->need_flush = 1;
+	free_page_and_swap_cache(page);
+	return;
+}
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that pte's were really umapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate. This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address)		\
+	do {							\
+		tlb->need_flush = 1;				\
+		__tlb_remove_tlb_entry(tlb, ptep, address);	\
+	} while (0)
+
+#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+
+#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+
+#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+
+#define tlb_migrate_finish(mm) do {} while (0)
 
 #endif
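
For orientation, the mmu_gather added above is driven by the generic mm unmap path roughly as tlb_gather_mmu() -> tlb_remove_tlb_entry()/tlb_remove_page() -> tlb_finish_mmu(), and the point of the start/end fields is to shrink the eventual flush_tlb_mm_range() call to just the touched range. The user-space sketch below mirrors only that range-tracking logic; the struct layout, TASK_SIZE and PAGE_SIZE values here are stand-ins for illustration, not the kernel definitions.

/* sketch: flush-range accumulation as in init_tlb_gather()/__tlb_remove_tlb_entry() */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE 0xc0000000UL	/* stand-in; the real value comes from CONFIG_TOP_ADDR */

struct mmu_gather {
	unsigned long start;	/* lowest unmapped address seen so far */
	unsigned long end;	/* one page past the highest unmapped address */
	int fullmm;		/* non-zero means flush the whole mm */
	int need_flush;
};

static void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;
	tlb->start = TASK_SIZE;		/* empty range: start > end */
	tlb->end = 0;
	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static void tlb_remove_tlb_entry(struct mmu_gather *tlb, unsigned long address)
{
	tlb->need_flush = 1;
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

int main(void)
{
	struct mmu_gather tlb = { .fullmm = 0 };

	init_tlb_gather(&tlb);
	tlb_remove_tlb_entry(&tlb, 0x400000UL);
	tlb_remove_tlb_entry(&tlb, 0x7ff000UL);

	/* the kernel would now issue one flush_tlb_mm_range(mm, start, end) */
	if (tlb.need_flush)
		printf("flush range: %#lx - %#lx\n", tlb.start, tlb.end);
	return 0;
}
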
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 077032d4fc47..b9a895d6fa1d 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -6,7 +6,15 @@
 #ifndef __UM_UACCESS_H
 #define __UM_UACCESS_H
 
-#include "linux/sched.h"
+#include <asm/errno.h>
+#include <asm/processor.h>
+
+/* thread_info has a mm_segment_t in it, so put the definition up here */
+typedef struct {
+	unsigned long seg;
+} mm_segment_t;
+
+#include "linux/thread_info.h"
 
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1