author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-12-18 14:13:51 -0500
committer: Paul Mackerras <paulus@samba.org>                  2008-12-20 22:21:16 -0500
commit:    64b3d0e8122b422e879b23d42f9e0e8efbbf9744
tree:      b1fab3fc39fd3117d0c050b0a54d6fe09f3a2948  /arch/powerpc/kernel
parent:    77520351805cc19ba37394ae33f862ef6d3c2a23
powerpc/mm: Rework usage of _PAGE_COHERENT/NO_CACHE/GUARDED
Currently, we never set _PAGE_COHERENT in the PTEs; we just OR it in, in the hash code, based on some CPU feature bit. We also manipulate _PAGE_NO_CACHE and _PAGE_GUARDED by hand in all sorts of places.

This changes the logic so that instead, the PTE now contains _PAGE_COHERENT for all normal RAM pages that have I = 0 on platforms that need it. The hash code clears it if the feature bit is not set.

It also adds some clean accessors to set up the various valid combinations of access flags, and changes various bits of code to use them instead. This should help ensure that the PTE actually contains the bit combinations that we really want.

I also removed _PAGE_GUARDED from _PAGE_BASE on 44x and instead set it explicitly from the TLB miss handler. I will ultimately remove it completely, as it appears that it might not be needed after all, but in the meantime, having it in the TLB miss makes things a lot easier.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
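For reference, the pgprot accessors that the PCI code below is switched over to amount to roughly the following (a minimal sketch, assuming the classic powerpc PTE bits and a _PAGE_CACHE_CTL mask covering the cacheability bits; the exact definitions live in the powerpc pgtable headers and may differ in detail):

	/* Sketch only: non-cached + guarded for plain MMIO mappings,
	 * non-cached without the G bit when write combining is acceptable.
	 * _PAGE_CACHE_CTL is assumed to mask out all cacheability bits
	 * before the new attributes are ORed in.
	 */
	#define pgprot_noncached(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			  _PAGE_NO_CACHE | _PAGE_GUARDED))

	#define pgprot_noncached_wc(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			  _PAGE_NO_CACHE))

With accessors along these lines, callers such as __pci_mmap_set_pgprot() no longer need to flip _PAGE_NO_CACHE and _PAGE_GUARDED by hand.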
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/head_44x.S      1
-rw-r--r--  arch/powerpc/kernel/pci-common.c   24
2 files changed, 10 insertions, 15 deletions
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index f3a1ea9d7fe4..26237357a88c 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -570,6 +570,7 @@ finish_tlb_load:
 	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
 	and	r11,r12,r10		/* Mask PTE bits to keep */
 	andi.	r10,r12,_PAGE_USER	/* User page ? */
+	ori	r11,r11,_PAGE_GUARDED	/* 440 errata, needs G set */
 	beq	1f			/* nope, leave U bits empty */
 	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 1a32db331a5c..2538030954d8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -370,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 	}
 
 	/* XXX would be nice to have a way to ask for write-through */
-	prot |= _PAGE_NO_CACHE;
 	if (write_combine)
-		prot &= ~_PAGE_GUARDED;
+		return pgprot_noncached_wc(prot);
 	else
-		prot |= _PAGE_GUARDED;
-
-	return __pgprot(prot);
+		return pgprot_noncached(prot);
 }
 
 /*
@@ -387,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
 pgprot_t pci_phys_mem_access_prot(struct file *file,
 				  unsigned long pfn,
 				  unsigned long size,
-				  pgprot_t protection)
+				  pgprot_t prot)
 {
 	struct pci_dev *pdev = NULL;
 	struct resource *found = NULL;
-	unsigned long prot = pgprot_val(protection);
 	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
 	int i;
 
 	if (page_is_ram(pfn))
-		return __pgprot(prot);
-
-	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+		return prot;
 
+	prot = pgprot_noncached(prot);
 	for_each_pci_dev(pdev) {
 		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
 			struct resource *rp = &pdev->resource[i];
@@ -420,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 	}
 	if (found) {
 		if (found->flags & IORESOURCE_PREFETCH)
-			prot &= ~_PAGE_GUARDED;
+			prot = pgprot_noncached_wc(prot);
 		pci_dev_put(pdev);
 	}
 
 	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
-		 (unsigned long long)offset, prot);
+		 (unsigned long long)offset, pgprot_val(prot));
 
-	return __pgprot(prot);
+	return prot;
 }
 
 
@@ -583,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
 	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
 
 	vma->vm_pgoff = offset >> PAGE_SHIFT;
-	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 			       vma->vm_end - vma->vm_start,
 			       vma->vm_page_prot);