path: root/drivers/char
author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-25 15:48:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-25 15:48:08 -0400
commit	bf16ae250999e76aff0491a362073a552db965fc (patch)
tree	9b012f0f4e9cc146648fe8914346452563f999d9 /drivers/char
parent	0b79dada976198cb1a4c043068e3b44d5cab2a5a (diff)
parent	1526a756fba5b1f2eb5001b8e8de2a0ea1bd2c66 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-pat
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-pat:
  generic: add ioremap_wc() interface wrapper
  /dev/mem: make promisc the default
  pat: cleanups
  x86: PAT use reserve free memtype in mmap of /dev/mem
  x86: PAT phys_mem_access_prot_allowed for dev/mem mmap
  x86: PAT avoid aliasing in /dev/mem read/write
  devmem: add range_is_allowed() check to mmap of /dev/mem
  x86: introduce /dev/mem restrictions with a config option
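The user-visible effect of the series: with CONFIG_NONPROMISC_DEVMEM enabled, read(), write() and mmap() on /dev/mem are gated page by page through devmem_is_allowed(). The following user-space probe is a rough sketch of the expected behaviour, not part of this diff; the offsets, and the assumption that the x86 policy still permits the low 1MB, are illustrative.

/*
 * Sketch only: probes /dev/mem under CONFIG_NONPROMISC_DEVMEM.
 * Assumes the x86 policy of allowing the low 1MB; offsets are
 * illustrative. Run as root.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char byte;
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* The legacy BIOS area below 1MB should still be readable. */
	if (pread(fd, &byte, 1, 0xF0000) == 1)
		printf("read at 0xF0000: ok\n");

	/* An arbitrary RAM page above 1MB should now be refused. */
	if (pread(fd, &byte, 1, 16UL << 20) < 0)
		printf("read at 16MB: %s (EPERM expected)\n",
		       strerror(errno));

	close(fd);
	return 0;
}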
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/mem.c	133
1 file changed, 101 insertions(+), 32 deletions(-)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 20070b7c573d..e83623ead441 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -41,36 +41,7 @@
  */
 static inline int uncached_access(struct file *file, unsigned long addr)
 {
-#if defined(__i386__) && !defined(__arch_um__)
-	/*
-	 * On the PPro and successors, the MTRRs are used to set
-	 * memory types for physical addresses outside main memory,
-	 * so blindly setting PCD or PWT on those pages is wrong.
-	 * For Pentiums and earlier, the surround logic should disable
-	 * caching for the high addresses through the KEN pin, but
-	 * we maintain the tradition of paranoia in this code.
-	 */
-	if (file->f_flags & O_SYNC)
-		return 1;
-	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
-	  && addr >= __pa(high_memory);
-#elif defined(__x86_64__) && !defined(__arch_um__)
-	/*
-	 * This is broken because it can generate memory type aliases,
-	 * which can cause cache corruptions
-	 * But it is only available for root and we have to be bug-to-bug
-	 * compatible with i386.
-	 */
-	if (file->f_flags & O_SYNC)
-		return 1;
-	/* same behaviour as i386. PAT always set to cached and MTRRs control the
-	   caching behaviour.
-	   Hopefully a full PAT implementation will fix that soon. */
-	return 0;
-#elif defined(CONFIG_IA64)
+#if defined(CONFIG_IA64)
 	/*
 	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
 	 */
@@ -108,6 +79,36 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 }
 #endif
 
+#ifdef CONFIG_NONPROMISC_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+	u64 from = ((u64)pfn) << PAGE_SHIFT;
+	u64 to = from + size;
+	u64 cursor = from;
+
+	while (cursor < to) {
+		if (!devmem_is_allowed(pfn)) {
+			printk(KERN_INFO
+		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+				current->comm, from, to);
+			return 0;
+		}
+		cursor += PAGE_SIZE;
+		pfn++;
+	}
+	return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+	return 1;
+}
+#endif
+
+void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+}
+
 /*
  * This funcion reads the *physical* memory. The f_pos points directly to the
  * memory location.
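The unxlate_dev_mem_ptr() stub added above is deliberately weak so an architecture whose xlate_dev_mem_ptr() builds a temporary mapping (the ia64-style uncached-alias case the comment mentions) can tear it down again. A hypothetical override, purely a sketch and not code from this series:

/*
 * Hypothetical arch override of the weak stub above; an assumption
 * for illustration only. If xlate_dev_mem_ptr() had to ioremap() a
 * non-RAM page to honour its memory attributes, the alias is dropped
 * here; direct-map pointers need no teardown.
 */
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;		/* linear-mapping pointer, nothing to undo */
	iounmap((void __iomem *)addr);
}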
@@ -150,15 +151,25 @@ static ssize_t read_mem(struct file * file, char __user * buf,
 
 		sz = min_t(unsigned long, sz, count);
 
+		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+			return -EPERM;
+
 		/*
 		 * On ia64 if a page has been mapped somewhere as
 		 * uncached, then it must also be accessed uncached
 		 * by the kernel or data corruption may occur
 		 */
 		ptr = xlate_dev_mem_ptr(p);
+		if (!ptr)
+			return -EFAULT;
 
-		if (copy_to_user(buf, ptr, sz))
+		if (copy_to_user(buf, ptr, sz)) {
+			unxlate_dev_mem_ptr(p, ptr);
 			return -EFAULT;
+		}
+
+		unxlate_dev_mem_ptr(p, ptr);
+
 		buf += sz;
 		p += sz;
 		count -= sz;
@@ -207,20 +218,32 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 
 		sz = min_t(unsigned long, sz, count);
 
+		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+			return -EPERM;
+
 		/*
 		 * On ia64 if a page has been mapped somewhere as
 		 * uncached, then it must also be accessed uncached
 		 * by the kernel or data corruption may occur
 		 */
 		ptr = xlate_dev_mem_ptr(p);
+		if (!ptr) {
+			if (written)
+				break;
+			return -EFAULT;
+		}
 
 		copied = copy_from_user(ptr, buf, sz);
 		if (copied) {
 			written += sz - copied;
+			unxlate_dev_mem_ptr(p, ptr);
 			if (written)
 				break;
 			return -EFAULT;
 		}
+
+		unxlate_dev_mem_ptr(p, ptr);
+
 		buf += sz;
 		p += sz;
 		count -= sz;
@@ -231,6 +254,12 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 	return written;
 }
 
+int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
+	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+{
+	return 1;
+}
+
 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot)
@@ -271,6 +300,35 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
 }
 #endif
 
+void __attribute__((weak))
+map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
+{
+	/* nothing. architectures can override. */
+}
+
+void __attribute__((weak))
+unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
+{
+	/* nothing. architectures can override. */
+}
+
+static void mmap_mem_open(struct vm_area_struct *vma)
+{
+	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
+			vma->vm_page_prot);
+}
+
+static void mmap_mem_close(struct vm_area_struct *vma)
+{
+	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
+			vma->vm_page_prot);
+}
+
+static struct vm_operations_struct mmap_mem_ops = {
+	.open = mmap_mem_open,
+	.close = mmap_mem_close
+};
+
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
 	size_t size = vma->vm_end - vma->vm_start;
@@ -281,17 +339,28 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 	if (!private_mapping_ok(vma))
 		return -ENOSYS;
 
+	if (!range_is_allowed(vma->vm_pgoff, size))
+		return -EPERM;
+
+	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
+						&vma->vm_page_prot))
+		return -EINVAL;
+
 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 						 size,
 						 vma->vm_page_prot);
 
+	vma->vm_ops = &mmap_mem_ops;
+
 	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
 	if (remap_pfn_range(vma,
 			    vma->vm_start,
 			    vma->vm_pgoff,
 			    size,
-			    vma->vm_page_prot))
+			    vma->vm_page_prot)) {
+		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
 		return -EAGAIN;
+	}
 	return 0;
 }
 
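Taken together with the hunks above, mmap_mem() now fails early with -EPERM for disallowed ranges and -EINVAL for conflicting memory types, before remap_pfn_range() touches the page tables, and the new vm_ops let PAT-aware architectures track the mapping's lifetime. A user-space sketch of the observable change; the 16MB offset is an assumed RAM address, not taken from this diff:

/*
 * Sketch only: demonstrates the new mmap_mem() failure mode from
 * user space. EPERM is expected at an ordinary RAM page when
 * CONFIG_NONPROMISC_DEVMEM is enabled.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	void *p;
	int fd = open("/dev/mem", O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/* Hits range_is_allowed() before the PAT prot checks. */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 16UL << 20);
	if (p == MAP_FAILED)
		perror("mmap at 16MB (EPERM expected)");
	else
		munmap(p, 4096);

	close(fd);
	return 0;
}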