Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r-- | arch/um/kernel/tlb.c | 42
1 file changed, 14 insertions(+), 28 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 4a39d50d2d62..8a8d52851443 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -17,7 +17,7 @@
 #include "os.h"
 
 static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
-		    int r, int w, int x, struct host_vm_op *ops, int *index,
+		    unsigned int prot, struct host_vm_op *ops, int *index,
 		    int last_filled, union mm_context *mmu, void **flush,
 		    int (*do_ops)(union mm_context *, struct host_vm_op *,
 				  int, int, void **))
@@ -31,8 +31,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 		last = &ops[*index];
 		if((last->type == MMAP) &&
 		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
-		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
-		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
+		   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
 		   (last->u.mmap.offset + last->u.mmap.len == offset)){
 			last->u.mmap.len += len;
 			return 0;
@@ -48,9 +47,7 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 				.u = { .mmap = {
 					.addr = virt,
 					.len = len,
-					.r = r,
-					.w = w,
-					.x = x,
+					.prot = prot,
 					.fd = fd,
 					.offset = offset }
 			} });
@@ -87,8 +84,8 @@ static int add_munmap(unsigned long addr, unsigned long len,
 	return ret;
 }
 
-static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
-			int x, struct host_vm_op *ops, int *index,
+static int add_mprotect(unsigned long addr, unsigned long len,
+			unsigned int prot, struct host_vm_op *ops, int *index,
 			int last_filled, union mm_context *mmu, void **flush,
 			int (*do_ops)(union mm_context *, struct host_vm_op *,
 				      int, int, void **))
@@ -100,8 +97,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 		last = &ops[*index];
 		if((last->type == MPROTECT) &&
 		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
-		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
-		   (last->u.mprotect.x == x)){
+		   (last->u.mprotect.prot == prot)){
 			last->u.mprotect.len += len;
 			return 0;
 		}
@@ -116,9 +112,7 @@ static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
 				.u = { .mprotect = {
 					.addr = addr,
 					.len = len,
-					.r = r,
-					.w = w,
-					.x = x } } });
+					.prot = prot } } });
 	return ret;
 }
 
@@ -133,7 +127,7 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 				   void **))
 {
 	pte_t *pte;
-	int r, w, x, ret = 0;
+	int r, w, x, prot, ret = 0;
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
@@ -146,19 +140,19 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 		} else if (!pte_dirty(*pte)) {
 			w = 0;
 		}
+		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
+			(x ? UM_PROT_EXEC : 0));
 		if(force || pte_newpage(*pte)){
 			if(pte_present(*pte))
 				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
-					       PAGE_SIZE, r, w, x, ops,
-					       op_index, last_op, mmu, flush,
-					       do_ops);
+					       PAGE_SIZE, prot, ops, op_index,
+					       last_op, mmu, flush, do_ops);
 			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
 					      last_op, mmu, flush, do_ops);
 		}
 		else if(pte_newprot(*pte))
-			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
-					   op_index, last_op, mmu, flush,
-					   do_ops);
+			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
+					   last_op, mmu, flush, do_ops);
 		*pte = pte_mkuptodate(*pte);
 	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
 	return ret;
@@ -377,14 +371,6 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
 	return(pte_offset_map(pmd, addr));
 }
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-
-	CHOOSE_MODE(flush_tlb_range(vma, address, address + PAGE_SIZE),
-		    flush_tlb_page_skas(vma, address));
-}
-
 void flush_tlb_all(void)
 {
 	flush_tlb_mm(current->mm);
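The hunks above replace the separate r/w/x arguments with one protection bitmask built from the UM_PROT_* flags, exactly as the new lines in update_pte_range() show. As a minimal illustrative sketch only (the wrapper function below is hypothetical, not part of the patch; the UM_PROT_* names are taken from the hunk above):

	/* Hypothetical helper: combine page permission bits into the single
	 * "prot" value that add_mmap()/add_mprotect() now take. */
	static unsigned int pte_bits_to_prot(int r, int w, int x)
	{
		return (r ? UM_PROT_READ : 0) |
		       (w ? UM_PROT_WRITE : 0) |
		       (x ? UM_PROT_EXEC : 0);
	}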