 arch/x86/mm/pat.c  | 128
 drivers/char/mem.c |  35
 2 files changed, 152 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 64cc0c18233e..1489aafbfa71 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/bootmem.h>
 
 #include <asm/msr.h>
 #include <asm/tlbflush.h>
@@ -21,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fcntl.h>
 #include <asm/mtrr.h>
+#include <asm/io.h>
 
 int pat_wc_enabled = 1;
 
@@ -190,6 +192,21 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
 	return 0;
 }
 
+/*
+ * req_type typically has one of:
+ * - _PAGE_CACHE_WB
+ * - _PAGE_CACHE_WC
+ * - _PAGE_CACHE_UC_MINUS
+ * - _PAGE_CACHE_UC
+ *
+ * req_type will have a special case value '-1', when the requester wants to
+ * inherit the memory type from mtrr (if WB) or from an existing PAT mapping,
+ * defaulting to UC_MINUS.
+ *
+ * If ret_type is NULL, the function returns an error if it cannot reserve the
+ * region with req_type. If ret_type is non-NULL, the function returns the
+ * available type in ret_type when there is no error. On any error it returns
+ * a negative value.
+ */
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			unsigned long *ret_type)
 {
@@ -200,9 +217,14 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	/* Only track when pat_wc_enabled */
 	if (!pat_wc_enabled) {
-		if (ret_type)
-			*ret_type = req_type;
-
+		/* This is identical to page table setting without PAT */
+		if (ret_type) {
+			if (req_type == -1) {
+				*ret_type = _PAGE_CACHE_WB;
+			} else {
+				*ret_type = req_type;
+			}
+		}
 		return 0;
 	}
 
@@ -214,8 +236,29 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return 0;
 	}
 
-	req_type &= _PAGE_CACHE_MASK;
-	err = pat_x_mtrr_type(start, end, req_type, &actual_type);
+	if (req_type == -1) {
+		/*
+		 * Special case where caller wants to inherit from mtrr or
+		 * existing pat mapping, defaulting to UC_MINUS in case of
+		 * no match.
+		 */
+		u8 mtrr_type = mtrr_type_lookup(start, end);
+		if (mtrr_type == 0xFE) { /* MTRR match error */
+			err = -1;
+		}
+
+		if (mtrr_type == MTRR_TYPE_WRBACK) {
+			req_type = _PAGE_CACHE_WB;
+			actual_type = _PAGE_CACHE_WB;
+		} else {
+			req_type = _PAGE_CACHE_UC_MINUS;
+			actual_type = _PAGE_CACHE_UC_MINUS;
+		}
+	} else {
+		req_type &= _PAGE_CACHE_MASK;
+		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
+	}
+
 	if (err) {
 		if (ret_type)
 			*ret_type = actual_type;
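
As a reading aid, not part of the patch: a minimal sketch of how a caller might use the reserve_memtype()/free_memtype() contract documented above, including the new '-1' inherit case. The wrapper function and its error handling are assumptions made for illustration only.

static int example_reserve(u64 start, u64 end)
{
	unsigned long got_type;
	int err;

	/* Ask for write-combining; give up if WC cannot be reserved. */
	err = reserve_memtype(start, end, _PAGE_CACHE_WC, NULL);
	if (err < 0)
		return err;
	free_memtype(start, end);

	/*
	 * Inherit from MTRR (if WB) or an existing PAT mapping, falling
	 * back to UC_MINUS; got_type reports what was actually reserved.
	 */
	err = reserve_memtype(start, end, -1, &got_type);
	if (err < 0)
		return err;

	/* ... map the range with got_type, then release it when done ... */
	free_memtype(start, end);
	return 0;
}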
@@ -420,7 +463,14 @@ int free_memtype(u64 start, u64 end)
 }
 
 
-/* /dev/mem interface. Use the previous mapping */
+/*
+ * /dev/mem mmap interface. The memtype used for mapping varies:
+ * - Use UC for mappings with the O_SYNC flag
+ * - Without the O_SYNC flag, if there is any conflict in reserve_memtype,
+ *   inherit the memtype from the existing mapping.
+ * - Else use the UC_MINUS memtype (for backward compatibility with existing
+ *   X drivers).
+ */
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
@@ -430,10 +480,13 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
+	u64 offset = ((u64) pfn) << PAGE_SHIFT;
+	unsigned long flags = _PAGE_CACHE_UC_MINUS;
+	unsigned long ret_flags;
+	int retval;
 
 	if (file->f_flags & O_SYNC) {
-		*vma_prot = pgprot_noncached(*vma_prot);
-		return 1;
+		flags = _PAGE_CACHE_UC;
 	}
 
 #ifdef CONFIG_X86_32
@@ -451,10 +504,65 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	    test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
 	    test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
 	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-		*vma_prot = pgprot_noncached(*vma_prot);
-		return 1;
+		flags = _PAGE_CACHE_UC;
 	}
 #endif
 
+	/*
+	 * With O_SYNC, we can only take a UC mapping. Fail if we cannot.
+	 * Without O_SYNC, we want to get
+	 * - WB for WB-able memory and no other conflicting mappings
+	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
+	 * - Inherit from conflicting mappings otherwise
+	 */
+	if (flags != _PAGE_CACHE_UC_MINUS) {
+		retval = reserve_memtype(offset, offset + size, flags, NULL);
+	} else {
+		retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
+	}
+
+	if (retval < 0)
+		return 0;
+
+	flags = ret_flags;
+
+	if (pfn <= max_pfn_mapped &&
+	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+		free_memtype(offset, offset + size);
+		printk(KERN_DEBUG
+			"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
+			current->comm, current->pid,
+			cattr_name(flags),
+			offset, offset + size);
+		return 0;
+	}
+
+	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
+			     flags);
 	return 1;
 }
+
+void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
+{
+	u64 addr = (u64)pfn << PAGE_SHIFT;
+	unsigned long flags;
+	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+
+	reserve_memtype(addr, addr + size, want_flags, &flags);
+	if (flags != want_flags) {
+		printk(KERN_DEBUG
+			"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
+			current->comm, current->pid,
+			cattr_name(want_flags),
+			addr, addr + size,
+			cattr_name(flags));
+	}
+}
+
+void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
+{
+	u64 addr = (u64)pfn << PAGE_SHIFT;
+
+	free_memtype(addr, addr + size);
+}
+
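
Before the mem.c half of the patch, a condensed sketch, not part of the diff, of the /dev/mem memtype policy that phys_mem_access_prot_allowed() now implements: UC for O_SYNC opens, otherwise inherit from MTRR/PAT with a UC_MINUS fallback. The helper name devmem_pick_memtype() and its 0/1 return convention are illustrative assumptions, not kernel API.

static int devmem_pick_memtype(struct file *file, u64 offset,
				unsigned long size, pgprot_t *prot)
{
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	unsigned long ret_flags;

	/* O_SYNC callers must get an uncached (UC) mapping or nothing. */
	if (file->f_flags & O_SYNC)
		flags = _PAGE_CACHE_UC;

	if (flags != _PAGE_CACHE_UC_MINUS) {
		if (reserve_memtype(offset, offset + size, flags, NULL) < 0)
			return 0;
	} else {
		/* Otherwise inherit from MTRR/PAT, defaulting to UC_MINUS. */
		if (reserve_memtype(offset, offset + size, -1, &ret_flags) < 0)
			return 0;
		flags = ret_flags;
	}

	/* Fold the reserved memtype into the page protection bits. */
	*prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) | flags);
	return 1;
}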
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 56b2fb4fbc93..e83623ead441 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -300,6 +300,35 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
 }
 #endif
 
+void __attribute__((weak))
+map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
+{
+	/* nothing. architectures can override. */
+}
+
+void __attribute__((weak))
+unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
+{
+	/* nothing. architectures can override. */
+}
+
+static void mmap_mem_open(struct vm_area_struct *vma)
+{
+	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
+			vma->vm_page_prot);
+}
+
+static void mmap_mem_close(struct vm_area_struct *vma)
+{
+	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
+			vma->vm_page_prot);
+}
+
+static struct vm_operations_struct mmap_mem_ops = {
+	.open  = mmap_mem_open,
+	.close = mmap_mem_close
+};
+
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
 	size_t size = vma->vm_end - vma->vm_start;
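
A short aside on the mechanism, not part of the patch: map_devmem()/unmap_devmem() rely on weak symbols, so generic code can call the hooks unconditionally while an architecture (here x86 PAT) supplies the real definitions. A self-contained sketch of that pattern with a hypothetical hook name:

/* generic.c: a weak default that links when no architecture overrides it */
void __attribute__((weak)) arch_track_mapping(unsigned long pfn,
						unsigned long len)
{
	/* nothing; architectures can override */
}

/* arch.c: a strong definition with the same signature wins at link time,
 * so callers in generic code need no #ifdef */
void arch_track_mapping(unsigned long pfn, unsigned long len)
{
	/* arch-specific bookkeeping, e.g. memory type tracking */
}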
@@ -321,13 +350,17 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 				size,
 				vma->vm_page_prot);
 
+	vma->vm_ops = &mmap_mem_ops;
+
 	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
 	if (remap_pfn_range(vma,
 			    vma->vm_start,
 			    vma->vm_pgoff,
 			    size,
-			    vma->vm_page_prot))
+			    vma->vm_page_prot)) {
+		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
 		return -EAGAIN;
+	}
 	return 0;
 }
 
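
Finally, a user-space illustration, outside the patch, of the two mapping flavours the new /dev/mem code distinguishes: an open with O_SYNC is reserved as UC, while a plain open is reserved as UC_MINUS or inherits the type of a conflicting existing mapping. The physical address and length below are arbitrary example values.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	off_t phys = 0xa0000;	/* example: legacy VGA memory, page aligned */
	size_t len = 4096;

	/* O_SYNC: the kernel reserves this range as UC (uncached). */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* ... uncached access to device memory ... */

	munmap(p, len);
	close(fd);

	/*
	 * The same mmap without O_SYNC would be reserved as UC_MINUS, or
	 * would inherit the memtype of an existing conflicting mapping.
	 */
	return 0;
}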