author    Bjorn Helgaas <bjorn.helgaas@hp.com>    2006-01-08 04:04:13 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-08 23:14:02 -0500
commit    80851ef2a5a404e6054211ca96ecd5ac4b06d297 (patch)
tree      dcacd2a475adc28c540b6012b58f1af9783778c1 /drivers/char/mem.c
parent    44ac8413901167589226abf824d994aa57e4fd28 (diff)
[PATCH] /dev/mem: validate mmap requests
Add a hook so architectures can validate /dev/mem mmap requests. This is analogous to validation we already perform in the read/write paths.

The identity mapping scheme used on ia64 requires that each 16MB or 64MB granule be accessed with exactly one attribute (write-back or uncacheable). This avoids "attribute aliasing", which can cause a machine check.

Sample problem scenario:
  - Machine supports VGA, so it has uncacheable (UC) MMIO at 640K-768K
  - efi_memmap_init() discards any write-back (WB) memory in the first granule
  - Application (e.g., "hwinfo") mmaps /dev/mem, offset 0
  - hwinfo receives UC mapping (the default, since memmap says "no WB here")
  - Machine check abort (on chipsets that don't support UC access to WB memory, e.g., sx1000)

In the scenario above, the only choices are:
  - Use WB for hwinfo mmap. Can't do this because it causes attribute aliasing with the UC mapping for the VGA MMIO space.
  - Use UC for hwinfo mmap. Can't do this because the chipset may not support UC for that region.
  - Disallow the hwinfo mmap with -EINVAL. That's what this patch does.

Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
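For illustration only (not part of the patch): the minimal user-space sketch below shows how the new check surfaces to an application such as hwinfo. On an affected ia64 machine the mmap of /dev/mem at offset 0 now fails with EINVAL, while architectures that keep the default stub still get a mapping. Only standard libc calls are used; the 768K length is chosen just to cover the legacy VGA hole from the scenario above, and the program must run as root with access to /dev/mem.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mem", O_RDONLY);
        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }

        /* Offset 0, length 768K: covers the 640K-768K VGA MMIO hole. */
        void *p = mmap(NULL, 768 * 1024, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                int err = errno;
                printf("mmap failed: %s%s\n", strerror(err),
                       err == EINVAL ? " (request rejected by the kernel)" : "");
        } else {
                printf("mmap succeeded at %p\n", p);
                munmap(p, 768 * 1024);
        }

        close(fd);
        return 0;
}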
Diffstat (limited to 'drivers/char/mem.c')
-rw-r--r--  drivers/char/mem.c  14
1 file changed, 12 insertions, 2 deletions
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ce3ff8641191..5b2d18035073 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -101,6 +101,11 @@ static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
 
 	return 1;
 }
+
+static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size)
+{
+	return 1;
+}
 #endif
 
 /*
@@ -244,15 +249,20 @@ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
+	size_t size = vma->vm_end - vma->vm_start;
+
+	if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size))
+		return -EINVAL;
+
 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
-						 vma->vm_end - vma->vm_start,
+						 size,
 						 vma->vm_page_prot);
 
 	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
 	if (remap_pfn_range(vma,
 			    vma->vm_start,
 			    vma->vm_pgoff,
-			    vma->vm_end-vma->vm_start,
+			    size,
 			    vma->vm_page_prot))
 		return -EAGAIN;
 	return 0;
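As a rough sketch of the architecture side (an assumption, not code from this patch): a platform that needs the check would supply its own valid_mmap_phys_addr_range() in place of the inline stub added above, the same way valid_phys_addr_range() is already overridden for the read/write paths. The helper region_has_one_attribute() used here is hypothetical and stands in for whatever firmware memory-map query the platform really has.

int valid_mmap_phys_addr_range(unsigned long addr, size_t *size)
{
	/*
	 * Hypothetical helper: true if [addr, addr + *size) can be mapped
	 * with a single supported attribute (WB or UC) for its whole length.
	 */
	if (region_has_one_attribute(addr, *size))
		return 1;	/* safe: mmap_mem() proceeds as before */

	return 0;		/* unsafe: mmap_mem() returns -EINVAL */
}

Returning 0 here is what turns the machine-check scenario from the commit message into a plain -EINVAL for the caller.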