aboutsummaryrefslogtreecommitdiffstats
path: root/arch/um
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2005-10-29 21:16:39 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-10-30 00:40:42 -0400
commitb38c6845b695141259019e2b7c0fe6c32a6e720d (patch)
treee950aa393f0514e976fe9b46b3462607b016b1db /arch/um
parent8f5cd76c185a4c8aeb5fe1e560e3612bfc050c35 (diff)
[PATCH] mm: uml kill unused
In worrying over the various pte operations in different architectures, I came across some unused functions in UML: remove mprotect_kernel_vm, protect_vm_page and addr_pte.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um')
-rw-r--r--arch/um/include/tlb.h1
-rw-r--r--arch/um/kernel/tt/tlb.c36
2 files changed, 0 insertions, 37 deletions
diff --git a/arch/um/include/tlb.h b/arch/um/include/tlb.h
index 45d7da6c3b2c..8efc1e0f1b84 100644
--- a/arch/um/include/tlb.h
+++ b/arch/um/include/tlb.h
@@ -34,7 +34,6 @@ struct host_vm_op {
34 } u; 34 } u;
35}; 35};
36 36
37extern void mprotect_kernel_vm(int w);
38extern void force_flush_all(void); 37extern void force_flush_all(void);
39extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr, 38extern void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
40 unsigned long end_addr, int force, 39 unsigned long end_addr, int force,
diff --git a/arch/um/kernel/tt/tlb.c b/arch/um/kernel/tt/tlb.c
index f1d85dbb45b9..ae6217c86135 100644
--- a/arch/um/kernel/tt/tlb.c
+++ b/arch/um/kernel/tt/tlb.c
@@ -74,42 +74,6 @@ void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
74 atomic_inc(&vmchange_seq); 74 atomic_inc(&vmchange_seq);
75} 75}
76 76
77static void protect_vm_page(unsigned long addr, int w, int must_succeed)
78{
79 int err;
80
81 err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
82 if(err == 0) return;
83 else if((err == -EFAULT) || (err == -ENOMEM)){
84 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
85 protect_vm_page(addr, w, 1);
86 }
87 else panic("protect_vm_page : protect failed, errno = %d\n", err);
88}
89
90void mprotect_kernel_vm(int w)
91{
92 struct mm_struct *mm;
93 pgd_t *pgd;
94 pud_t *pud;
95 pmd_t *pmd;
96 pte_t *pte;
97 unsigned long addr;
98
99 mm = &init_mm;
100 for(addr = start_vm; addr < end_vm;){
101 pgd = pgd_offset(mm, addr);
102 pud = pud_offset(pgd, addr);
103 pmd = pmd_offset(pud, addr);
104 if(pmd_present(*pmd)){
105 pte = pte_offset_kernel(pmd, addr);
106 if(pte_present(*pte)) protect_vm_page(addr, w, 0);
107 addr += PAGE_SIZE;
108 }
109 else addr += PMD_SIZE;
110 }
111}
112
113void flush_tlb_kernel_vm_tt(void) 77void flush_tlb_kernel_vm_tt(void)
114{ 78{
115 flush_tlb_kernel_range(start_vm, end_vm); 79 flush_tlb_kernel_range(start_vm, end_vm);