| author | Hugh Dickins <hugh@veritas.com> | 2005-10-29 21:16:39 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:42 -0400 |
| commit | b38c6845b695141259019e2b7c0fe6c32a6e720d (patch) | |
| tree | e950aa393f0514e976fe9b46b3462607b016b1db /arch/um/kernel/tt | |
| parent | 8f5cd76c185a4c8aeb5fe1e560e3612bfc050c35 (diff) | |
[PATCH] mm: uml kill unused
In worrying over the various pte operations in different architectures, I came
across some unused functions in UML: remove mprotect_kernel_vm,
protect_vm_page and addr_pte.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um/kernel/tt')
-rw-r--r--  arch/um/kernel/tt/tlb.c | 36
1 file changed, 0 insertions, 36 deletions
diff --git a/arch/um/kernel/tt/tlb.c b/arch/um/kernel/tt/tlb.c
index f1d85dbb45b9..ae6217c86135 100644
--- a/arch/um/kernel/tt/tlb.c
+++ b/arch/um/kernel/tt/tlb.c
@@ -74,42 +74,6 @@ void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
 	atomic_inc(&vmchange_seq);
 }
 
-static void protect_vm_page(unsigned long addr, int w, int must_succeed)
-{
-	int err;
-
-	err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
-	if(err == 0) return;
-	else if((err == -EFAULT) || (err == -ENOMEM)){
-		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-		protect_vm_page(addr, w, 1);
-	}
-	else panic("protect_vm_page : protect failed, errno = %d\n", err);
-}
-
-void mprotect_kernel_vm(int w)
-{
-	struct mm_struct *mm;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	unsigned long addr;
-
-	mm = &init_mm;
-	for(addr = start_vm; addr < end_vm;){
-		pgd = pgd_offset(mm, addr);
-		pud = pud_offset(pgd, addr);
-		pmd = pmd_offset(pud, addr);
-		if(pmd_present(*pmd)){
-			pte = pte_offset_kernel(pmd, addr);
-			if(pte_present(*pte)) protect_vm_page(addr, w, 0);
-			addr += PAGE_SIZE;
-		}
-		else addr += PMD_SIZE;
-	}
-}
-
 void flush_tlb_kernel_vm_tt(void)
 {
 	flush_tlb_kernel_range(start_vm, end_vm);
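For readers unfamiliar with what the removed helpers did: protect_vm_page() flipped a single kernel page between read-only and read-write via UML's protect_memory(), retrying once after a TLB flush on -EFAULT or -ENOMEM, and mprotect_kernel_vm() walked the kernel page tables to apply that to every present page between start_vm and end_vm. The underlying idea is close to what plain mprotect(2) does to a page of a user-space process. The following is a minimal, self-contained user-space sketch of that protection flip; protect_page() is a hypothetical stand-in for illustration, not UML's internal API.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical stand-in for the removed protect_vm_page(): flip one page
 * between read-only and read-write using the host's mprotect(2). */
static void protect_page(void *addr, int writable)
{
	long page = sysconf(_SC_PAGESIZE);
	int prot = PROT_READ | (writable ? PROT_WRITE : 0);

	if (mprotect(addr, page, prot) != 0) {
		fprintf(stderr, "mprotect: %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf;

	/* One anonymous, page-aligned page to experiment on. */
	buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	buf[0] = 'x';		/* page is writable here */
	protect_page(buf, 0);	/* make it read-only ... */
	protect_page(buf, 1);	/* ... and writable again */
	buf[1] = 'y';

	munmap(buf, page);
	return 0;
}
```

The sketch deliberately exits on any mprotect() failure instead of reproducing the kernel's flush-and-retry path, since a user-space program has no TLB range flush to fall back on.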