author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-25 15:48:08 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-25 15:48:08 -0400
commit    bf16ae250999e76aff0491a362073a552db965fc (patch)
tree      9b012f0f4e9cc146648fe8914346452563f999d9 /arch
parent    0b79dada976198cb1a4c043068e3b44d5cab2a5a (diff)
parent    1526a756fba5b1f2eb5001b8e8de2a0ea1bd2c66 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-pat
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-pat:
  generic: add ioremap_wc() interface wrapper
  /dev/mem: make promisc the default
  pat: cleanups
  x86: PAT use reserve free memtype in mmap of /dev/mem
  x86: PAT phys_mem_access_prot_allowed for dev/mem mmap
  x86: PAT avoid aliasing in /dev/mem read/write
  devmem: add range_is_allowed() check to mmap of /dev/mem
  x86: introduce /dev/mem restrictions with a config option
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/Kconfig.debug   11
-rw-r--r--  arch/x86/mm/init_32.c    19
-rw-r--r--  arch/x86/mm/init_64.c    20
-rw-r--r--  arch/x86/mm/ioremap.c    29
-rw-r--r--  arch/x86/mm/pat.c       175
5 files changed, 240 insertions(+), 14 deletions(-)
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 610aaecc19f8..239fd9fba0a5 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -5,6 +5,17 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
+config NONPROMISC_DEVMEM
+	bool "Disable promiscuous /dev/mem"
+	help
+	  The /dev/mem file by default only allows userspace access to PCI
+	  space and the BIOS code and data regions. This is sufficient for
+	  dosemu and X and all common users of /dev/mem. With this config
+	  option, you allow userspace access to all of memory, including
+	  kernel and userspace memory. Accidental access to this is
+	  obviously disastrous, but specific access can be used by people
+	  debugging the kernel.
+
 config EARLY_PRINTK
 	bool "Early printk" if EMBEDDED
 	default y
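
The option is a plain bool with no default line, so a kernel that wants the filtering must select it explicitly; left unset, /dev/mem keeps its historical promiscuous behavior. A minimal .config fragment, assuming an x86 build of this tree (the symbol was later renamed STRICT_DEVMEM, so it only matches kernels of this vintage):

	# .config fragment
	CONFIG_NONPROMISC_DEVMEM=y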
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 08aa1878fad4..baf7c4f643c8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -227,6 +227,25 @@ static inline int page_kills_ppro(unsigned long pagenr)
 	return 0;
 }
 
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ *
+ * On x86, access has to be given to the first megabyte of ram because that area
+ * contains bios code and data regions used by X and dosemu and similar apps.
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+	if (pagenr <= 256)
+		return 1;
+	if (!page_is_ram(pagenr))
+		return 1;
+	return 0;
+}
+
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
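
devmem_is_allowed() answers for a single page; the /dev/mem side of this series (drivers/char/mem.c, outside this arch-limited diffstat) has to vet whole read/write/mmap ranges. A sketch of a range_is_allowed()-style wrapper built on it, assuming a page-by-page walk (the name follows the commit title in the merge summary; the body here is illustrative, not the merged code):

	static inline int range_is_allowed(unsigned long pfn, unsigned long size)
	{
		u64 from = ((u64)pfn) << PAGE_SHIFT;
		u64 to = from + size;
		u64 cursor = from;

		/* Refuse the whole request if any page in it is off limits. */
		while (cursor < to) {
			if (!devmem_is_allowed(pfn))
				return 0;
			cursor += PAGE_SIZE;
			pfn++;
		}
		return 1;
	}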
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b798e7b92b17..0cca62663037 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -663,6 +663,26 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ *
+ * On x86, access has to be given to the first megabyte of ram because that area
+ * contains bios code and data regions used by X and dosemu and similar apps.
+ * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+ * mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+	if (pagenr <= 256)
+		return 1;
+	if (!page_is_ram(pagenr))
+		return 1;
+	return 0;
+}
+
+
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
 			 kcore_modules, kcore_vsyscall;
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 36a3f7ded626..d176b23110cc 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -336,6 +336,35 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+void *xlate_dev_mem_ptr(unsigned long phys)
+{
+	void *addr;
+	unsigned long start = phys & PAGE_MASK;
+
+	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+	if (page_is_ram(start >> PAGE_SHIFT))
+		return __va(phys);
+
+	addr = (void *)ioremap(start, PAGE_SIZE);
+	if (addr)
+		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+
+	return addr;
+}
+
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+	if (page_is_ram(phys >> PAGE_SHIFT))
+		return;
+
+	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+	return;
+}
+
 #ifdef CONFIG_X86_32
 
 int __initdata early_ioremap_debug;
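
The two helpers are meant to bracket a short-lived access to one page: the __va() case costs nothing to "unmap", while the non-RAM case holds a real single-page ioremap() that must be released. A hedged sketch of the calling pattern for a /dev/mem-style read, assuming count stays within the one mapped page (read_one_devmem_page() is a hypothetical helper, not part of this diff):

	static int read_one_devmem_page(unsigned long phys, char __user *buf,
					unsigned long count)
	{
		void *ptr;

		/* __va() for RAM, a transient single-page ioremap() otherwise */
		ptr = xlate_dev_mem_ptr(phys);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, count)) {
			unxlate_dev_mem_ptr(phys, ptr);
			return -EFAULT;
		}

		/* no-op for RAM, iounmap() for the ioremap case */
		unxlate_dev_mem_ptr(phys, ptr);
		return 0;
	}

Mapping and unmapping page by page like this is what lets /dev/mem avoid holding long-lived aliases with conflicting cache attributes.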
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 72c0f6097402..ef8b64b89c7d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/bootmem.h>
 
 #include <asm/msr.h>
 #include <asm/tlbflush.h>
@@ -21,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fcntl.h>
 #include <asm/mtrr.h>
+#include <asm/io.h>
 
 int pat_wc_enabled = 1;
 
@@ -190,6 +192,21 @@ static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
 	return 0;
 }
 
+/*
+ * req_type typically has one of the following:
+ * - _PAGE_CACHE_WB
+ * - _PAGE_CACHE_WC
+ * - _PAGE_CACHE_UC_MINUS
+ * - _PAGE_CACHE_UC
+ *
+ * req_type will have a special case value '-1', when the requester wants to
+ * inherit the memory type from mtrr (if WB) or existing PAT, defaulting to UC_MINUS.
+ *
+ * If ret_type is NULL, function will return an error if it cannot reserve the
+ * region with req_type. If ret_type is non-null, function will return the
+ * available type in ret_type in case of no error. In case of any error
+ * it will return a negative return value.
+ */
 int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		    unsigned long *ret_type)
 {
@@ -200,9 +217,14 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	/* Only track when pat_wc_enabled */
 	if (!pat_wc_enabled) {
-		if (ret_type)
-			*ret_type = req_type;
-
+		/* This is identical to page table setting without PAT */
+		if (ret_type) {
+			if (req_type == -1) {
+				*ret_type = _PAGE_CACHE_WB;
+			} else {
+				*ret_type = req_type;
+			}
+		}
 		return 0;
 	}
 
@@ -214,8 +236,29 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return 0;
 	}
 
-	req_type &= _PAGE_CACHE_MASK;
-	err = pat_x_mtrr_type(start, end, req_type, &actual_type);
+	if (req_type == -1) {
+		/*
+		 * Special case where caller wants to inherit from mtrr or
+		 * existing pat mapping, defaulting to UC_MINUS in case of
+		 * no match.
+		 */
+		u8 mtrr_type = mtrr_type_lookup(start, end);
+		if (mtrr_type == 0xFE) { /* MTRR match error */
+			err = -1;
+		}
+
+		if (mtrr_type == MTRR_TYPE_WRBACK) {
+			req_type = _PAGE_CACHE_WB;
+			actual_type = _PAGE_CACHE_WB;
+		} else {
+			req_type = _PAGE_CACHE_UC_MINUS;
+			actual_type = _PAGE_CACHE_UC_MINUS;
+		}
+	} else {
+		req_type &= _PAGE_CACHE_MASK;
+		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
+	}
+
 	if (err) {
 		if (ret_type)
 			*ret_type = actual_type;
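
The '-1' contract is easiest to see from a caller's perspective. A minimal sketch of both modes, assuming the _PAGE_CACHE_* constants and the static cattr_name() helper from this file (example_reserve() is a hypothetical caller, not merged code):

	static int example_reserve(u64 start, u64 end)
	{
		unsigned long got;
		int err;

		/* Strict: with ret_type == NULL, fail unless WC is honored. */
		err = reserve_memtype(start, end, _PAGE_CACHE_WC, NULL);
		if (err)
			return err;
		free_memtype(start, end);

		/* Inherit: -1 takes WB from MTRR or an existing PAT mapping,
		 * falling back to UC_MINUS; the result lands in 'got'. */
		err = reserve_memtype(start, end, -1, &got);
		if (!err) {
			pr_debug("inherited %s\n", cattr_name(got));
			free_memtype(start, end);
		}
		return err;
	}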
@@ -241,7 +284,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		struct memtype *saved_ptr;
 
 		if (parse->start >= end) {
-			printk("New Entry\n");
+			pr_debug("New Entry\n");
 			list_add(&new_entry->nd, parse->nd.prev);
 			new_entry = NULL;
 			break;
@@ -343,7 +386,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			break;
 		}
 
-		printk("Overlap at 0x%Lx-0x%Lx\n",
+		printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
 			saved_ptr->start, saved_ptr->end);
 		/* No conflict. Go ahead and add this new entry */
 		list_add(&new_entry->nd, &saved_ptr->nd);
@@ -353,7 +396,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	}
 
 	if (err) {
-		printk(
+		printk(KERN_INFO
 			"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
 			start, end, cattr_name(new_entry->type),
 			cattr_name(req_type));
@@ -365,16 +408,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_entry) {
 		/* No conflict. Not yet added to the list. Add to the tail */
 		list_add_tail(&new_entry->nd, &memtype_list);
-		printk("New Entry\n");
+		pr_debug("New Entry\n");
 	}
 
 	if (ret_type) {
-		printk(
+		pr_debug(
 			"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
 			start, end, cattr_name(actual_type),
 			cattr_name(req_type), cattr_name(*ret_type));
 	} else {
-		printk(
+		pr_debug(
 			"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
 			start, end, cattr_name(actual_type),
 			cattr_name(req_type));
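
A practical note on the conversions above: pr_debug() compiles to nothing unless the translation unit defines DEBUG (this merge predates dynamic debug), so the chatty "New Entry" and "reserve_memtype added" traces vanish from normal builds, while the overlap and failure messages stay visible at KERN_INFO. To get the traces back while debugging, the usual pattern is:

	/* at the top of arch/x86/mm/pat.c, before any #include */
	#define DEBUG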
@@ -411,11 +454,115 @@ int free_memtype(u64 start, u64 end)
 	spin_unlock(&memtype_lock);
 
 	if (err) {
-		printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n",
+		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
 			current->comm, current->pid, start, end);
 	}
 
-	printk( "free_memtype request 0x%Lx-0x%Lx\n", start, end);
+	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);
 	return err;
 }
 
+
+/*
+ * /dev/mem mmap interface. The memtype used for mapping varies:
+ * - Use UC for mappings with O_SYNC flag
+ * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
+ *   inherit the memtype from existing mapping.
+ * - Else use UC_MINUS memtype (for backward compatibility with existing
+ *   X drivers).
+ */
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				unsigned long size, pgprot_t vma_prot)
+{
+	return vma_prot;
+}
+
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+				unsigned long size, pgprot_t *vma_prot)
+{
+	u64 offset = ((u64) pfn) << PAGE_SHIFT;
+	unsigned long flags = _PAGE_CACHE_UC_MINUS;
+	unsigned long ret_flags;
+	int retval;
+
+	if (file->f_flags & O_SYNC) {
+		flags = _PAGE_CACHE_UC;
+	}
+
+#ifdef CONFIG_X86_32
+	/*
+	 * On the PPro and successors, the MTRRs are used to set
+	 * memory types for physical addresses outside main memory,
+	 * so blindly setting UC or PWT on those pages is wrong.
+	 * For Pentiums and earlier, the surround logic should disable
+	 * caching for the high addresses through the KEN pin, but
+	 * we maintain the tradition of paranoia in this code.
+	 */
+	if (!pat_wc_enabled &&
+	    ! ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+		test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+		test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+		test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
+	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
+		flags = _PAGE_CACHE_UC;
+	}
+#endif
+
+	/*
+	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+	 * Without O_SYNC, we want to get
+	 * - WB for WB-able memory and no other conflicting mappings
+	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
+	 * - Inherit from conflicting mappings otherwise
+	 */
+	if (flags != _PAGE_CACHE_UC_MINUS) {
+		retval = reserve_memtype(offset, offset + size, flags, NULL);
+	} else {
+		retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
+	}
+
+	if (retval < 0)
+		return 0;
+
+	flags = ret_flags;
+
+	if (pfn <= max_pfn_mapped &&
+	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+		free_memtype(offset, offset + size);
+		printk(KERN_INFO
+		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
+			current->comm, current->pid,
+			cattr_name(flags),
+			offset, offset + size);
+		return 0;
+	}
+
+	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
+			     flags);
+	return 1;
+}
+
+void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
+{
+	u64 addr = (u64)pfn << PAGE_SHIFT;
+	unsigned long flags;
+	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+
+	reserve_memtype(addr, addr + size, want_flags, &flags);
+	if (flags != want_flags) {
+		printk(KERN_INFO
+		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
+			current->comm, current->pid,
+			cattr_name(want_flags),
+			addr, addr + size,
+			cattr_name(flags));
+	}
+}
+
+void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
+{
+	u64 addr = (u64)pfn << PAGE_SHIFT;
+
+	free_memtype(addr, addr + size);
+}
+
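
Taken together, the new hooks give /dev/mem mmap() a memtype policy that userspace can steer with O_SYNC. A hedged userspace sketch of both paths (the legacy VGA window at 0xA0000 is just a convenient sub-1MB range that devmem_is_allowed() permits; error handling trimmed):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		/* O_SYNC: phys_mem_access_prot_allowed() reserves the range UC */
		int fd = open("/dev/mem", O_RDWR | O_SYNC);
		if (fd < 0)
			return 1;

		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0xA0000);
		if (p == MAP_FAILED)
			return 1;

		/*
		 * Without O_SYNC, the same call would go through the
		 * reserve_memtype(..., -1, &ret_flags) inherit path above:
		 * WB for conflict-free WB-able memory, UC_MINUS otherwise,
		 * or the type of an already existing conflicting mapping.
		 */
		munmap(p, 4096);
		close(fd);
		return 0;
	}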