author     Christophe Leroy <christophe.leroy@c-s.fr>    2018-10-09 09:51:45 -0400
committer  Michael Ellerman <mpe@ellerman.id.au>         2018-10-14 03:04:09 -0400
commit     c766ee72235d09b0080f77474085fc17d6ae2fb1 (patch)
tree       10f9b09f5ad1a05d986d8dde9cf47c9d92441c07
parent     56f3c1413f5cce0c8f4d6f1ab79d790da5aa61af (diff)
powerpc: handover page flags with a pgprot_t parameter
In order to avoid multiple conversions, hand over a pgprot_t directly to map_kernel_page(), as is already done for radix. Do the same for __ioremap_caller() and __ioremap_at().

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
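For illustration only (not part of the patch): with the old prototypes a caller converted a pgprot_t to a raw value with pgprot_val() and the callee wrapped it back with __pgprot(); with the new prototypes the pgprot_t is handed through unchanged, as the dma-noncoherent.c hunk below shows.

    /* before: pgprot_t -> unsigned long -> pgprot_t round trip */
    map_kernel_page(vaddr, page_to_phys(page),
                    pgprot_val(pgprot_noncached(PAGE_KERNEL)));

    /* after: a single pgprot_t all the way down */
    map_kernel_page(vaddr, page_to_phys(page),
                    pgprot_noncached(PAGE_KERNEL));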
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h    |  2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h       |  3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h    |  7
-rw-r--r--  arch/powerpc/include/asm/fixmap.h               |  2
-rw-r--r--  arch/powerpc/include/asm/io.h                   |  4
-rw-r--r--  arch/powerpc/include/asm/machdep.h              |  2
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h    |  2
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h    |  3
-rw-r--r--  arch/powerpc/kernel/io-workarounds.c            |  4
-rw-r--r--  arch/powerpc/kernel/isa-bridge.c                |  6
-rw-r--r--  arch/powerpc/kernel/pci_64.c                    |  2
-rw-r--r--  arch/powerpc/lib/code-patching.c                |  3
-rw-r--r--  arch/powerpc/mm/8xx_mmu.c                       |  3
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c               |  2
-rw-r--r--  arch/powerpc/mm/mem.c                           |  4
-rw-r--r--  arch/powerpc/mm/pgtable-book3e.c                |  9
-rw-r--r--  arch/powerpc/mm/pgtable-hash64.c                |  7
-rw-r--r--  arch/powerpc/mm/pgtable_32.c                    | 37
-rw-r--r--  arch/powerpc/mm/pgtable_64.c                    | 37
-rw-r--r--  drivers/pcmcia/electra_cf.c                     |  2
20 files changed, 64 insertions(+), 77 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 751cf931bb3f..7a9f0ed599ff 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -292,7 +292,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index fcf8b10a209f..247aff9cc6ba 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,8 +201,7 @@ static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 
-extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
-			   unsigned long flags);
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
 extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
 					      unsigned long page_size,
 					      unsigned long phys);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c68cbbff3429..eae6e1030523 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1030,17 +1030,16 @@ extern struct page *pgd_page(pgd_t pgd);
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-static inline int map_kernel_page(unsigned long ea, unsigned long pa,
-				  unsigned long flags)
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	if (radix_enabled()) {
 #if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
 		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
 		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
 #endif
-		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+		return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
 	}
-	return hash__map_kernel_page(ea, pa, flags);
+	return hash__map_kernel_page(ea, pa, prot);
 }
 
 static inline int __meminit vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 41cc15c14eee..b9fbed84ddca 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -72,7 +72,7 @@ enum fixed_addresses {
 static inline void __set_fixmap(enum fixed_addresses idx,
 				phys_addr_t phys, pgprot_t flags)
 {
-	map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags));
+	map_kernel_page(fix_to_virt(idx), phys, flags);
 }
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index cdccab3938db..0a034519957d 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -786,12 +786,12 @@ extern void iounmap(volatile void __iomem *addr);
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
 			       unsigned long flags);
 extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
-				      unsigned long flags, void *caller);
+				      pgprot_t prot, void *caller);
 
 extern void __iounmap(volatile void __iomem *addr);
 
 extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
-				   unsigned long size, unsigned long flags);
+				   unsigned long size, pgprot_t prot);
 extern void __iounmap_at(void *ea, unsigned long size);
 
 /*
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index b4831f1338db..8311869005fa 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -35,7 +35,7 @@ struct machdep_calls {
 	char		*name;
 #ifdef CONFIG_PPC64
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
-				   unsigned long flags, void *caller);
+				   pgprot_t prot, void *caller);
 	void		(*iounmap)(volatile void __iomem *token);
 
 #ifdef CONFIG_PM
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index a507a65b0866..a7f44498ab6f 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -323,7 +323,7 @@ static inline int pte_young(pte_t pte)
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 7cd6809f4d33..513b6e9e62c6 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -327,8 +327,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		__pte((x).val)
 
-extern int map_kernel_page(unsigned long ea, unsigned long pa,
-			   unsigned long flags);
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
 extern int __meminit vmemmap_create_mapping(unsigned long start,
 					    unsigned long page_size,
 					    unsigned long phys);
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index aa9f1b8261db..7e89d02a84e1 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -153,10 +153,10 @@ static const struct ppc_pci_io iowa_pci_io = {
 
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
-				  unsigned long flags, void *caller)
+				  pgprot_t prot, void *caller)
 {
 	struct iowa_bus *bus;
-	void __iomem *res = __ioremap_caller(addr, size, flags, caller);
+	void __iomem *res = __ioremap_caller(addr, size, prot, caller);
 	int busno;
 
 	bus = iowa_pci_find(0, (unsigned long)addr);
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 072e384f8c86..fda3ae48480c 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -110,14 +110,14 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
 		size = 0x10000;
 
 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
-		     size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+		     size, pgprot_noncached(PAGE_KERNEL));
 	return;
 
 inval_range:
 	printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
 	       "mapping 64k\n");
 	__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
-		     0x10000, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+		     0x10000, pgprot_noncached(PAGE_KERNEL));
 }
 
 
@@ -253,7 +253,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
 	 */
 	isa_io_base = ISA_IO_BASE;
 	__ioremap_at(pbase, (void *)ISA_IO_BASE,
-		     size, pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+		     size, pgprot_noncached(PAGE_KERNEL));
 
 	pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
 }
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 64bb4dd2b8f1..9d8c10d55407 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -159,7 +159,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
 
 	/* Establish the mapping */
 	if (__ioremap_at(phys_page, area->addr, size_page,
-			 pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)
+			 pgprot_noncached(PAGE_KERNEL)) == NULL)
 		return -ENOMEM;
 
 	/* Fixup hose IO resource */
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 5ffee298745f..89502cbccb1b 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -98,8 +98,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr)
 	else
 		pfn = __pa_symbol(addr) >> PAGE_SHIFT;
 
-	err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT),
-				pgprot_val(PAGE_KERNEL));
+	err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
 
 	pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
 	if (err)
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index cf77d755246d..9137361d687d 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -91,11 +91,10 @@ static void __init mmu_mapin_immr(void)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 	unsigned long v = VIRT_IMMR_BASE;
-	unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
 	int offset;
 
 	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
-		map_kernel_page(v + offset, p + offset, f);
+		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
 
 /* Address of instructions to patch */
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 382528475433..b6e7b5952ab5 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
 	do {
 		SetPageReserved(page);
 		map_kernel_page(vaddr, page_to_phys(page),
-				pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+				pgprot_noncached(PAGE_KERNEL));
 		page++;
 		vaddr += PAGE_SIZE;
 	} while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 04ccb274a620..cb421aeb7674 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -309,11 +309,11 @@ void __init paging_init(void)
 	unsigned long end = __fix_to_virt(FIX_HOLE);
 
 	for (; v < end; v += PAGE_SIZE)
-		map_kernel_page(v, 0, 0);	/* XXX gross */
+		map_kernel_page(v, 0, __pgprot(0));	/* XXX gross */
 #endif
 
 #ifdef CONFIG_HIGHMEM
-	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
+	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
 
 	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
index a2298930f990..e0ccf36714b2 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -42,7 +42,7 @@ int __meminit vmemmap_create_mapping(unsigned long start,
 	 * thus must have the low bits clear
 	 */
 	for (i = 0; i < page_size; i += PAGE_SIZE)
-		BUG_ON(map_kernel_page(start + i, phys, flags));
+		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));
 
 	return 0;
 }
@@ -70,7 +70,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	pgd_t *pgdp;
 	pud_t *pudp;
@@ -89,8 +89,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
 		ptep = pte_alloc_kernel(pmdp, ea);
 		if (!ptep)
 			return -ENOMEM;
-		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-						       __pgprot(flags)));
 	} else {
 		pgdp = pgd_offset_k(ea);
 #ifndef __PAGETABLE_PUD_FOLDED
@@ -113,9 +111,8 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
 			pmd_populate_kernel(&init_mm, pmdp, ptep);
 		}
 		ptep = pte_offset_kernel(pmdp, ea);
-		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-						       __pgprot(flags)));
 	}
+	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
 
 	smp_wmb();
 	return 0;
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 692bfc9e372c..c08d49046a96 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -142,7 +142,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
  * map_kernel_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
 {
 	pgd_t *pgdp;
 	pud_t *pudp;
@@ -161,8 +161,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
 		ptep = pte_alloc_kernel(pmdp, ea);
 		if (!ptep)
 			return -ENOMEM;
-		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
-						       __pgprot(flags)));
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
 	} else {
 		/*
 		 * If the mm subsystem is not fully up, we cannot create a
@@ -170,7 +169,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
 		 * entry in the hardware page table.
 		 *
 		 */
-		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
 				      mmu_io_psize, mmu_kernel_ssize)) {
 			printk(KERN_ERR "Failed to do bolted mapping IO "
 			       "memory at %016lx !\n", pa);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6a81a2446c47..0bbc7b7d8a05 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -76,36 +76,36 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
+	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *
 ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
+	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
 
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *
 ioremap_wt(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_cached_wthru(PAGE_KERNEL));
+	pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
 
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wt);
 
 void __iomem *
 ioremap_coherent(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
+	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
 
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_coherent);
 
@@ -120,19 +120,18 @@ ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
 	flags |= _PAGE_PRIVILEGED;
 
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 
 void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
-		 void *caller)
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
 {
 	unsigned long v, i;
 	phys_addr_t p;
@@ -195,7 +194,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
 	err = 0;
 	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-		err = map_kernel_page(v+i, p+i, flags);
+		err = map_kernel_page(v + i, p + i, prot);
 	if (err) {
 		if (slab_is_available())
 			vunmap((void *)v);
@@ -221,7 +220,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
 	pmd_t *pd;
 	pte_t *pg;
@@ -237,9 +236,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
 		 * hash table
 		 */
 		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
-		       flags);
-		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
-						     __pgprot(flags)));
+		       pgprot_val(prot));
+		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
 	}
 	smp_wmb();
 	return err;
@@ -250,7 +248,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
  */
 static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
-	unsigned long v, s, f;
+	unsigned long v, s;
 	phys_addr_t p;
 	int ktext;
 
@@ -260,8 +258,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 	for (; s < top; s += PAGE_SIZE) {
 		ktext = ((char *)v >= _stext && (char *)v < etext) ||
 			((char *)v >= _sinittext && (char *)v < _einittext);
-		f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
-		map_kernel_page(v, p, f);
+		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
 #ifdef CONFIG_PPC_STD_MMU_32
 		if (ktext)
 			hash_preload(&init_mm, v, 0, 0x300);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 1f1bb40555a8..b0f4a4b4f62b 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,13 +113,12 @@ unsigned long ioremap_bot = IOREMAP_BASE;
  * __ioremap_at - Low level function to establish the page tables
  *                for an IO mapping
  */
-void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
-			    unsigned long flags)
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
 {
 	unsigned long i;
 
 	/* We don't support the 4K PFN hack with ioremap */
-	if (flags & H_PAGE_4K_PFN)
+	if (pgprot_val(prot) & H_PAGE_4K_PFN)
 		return NULL;
 
 	WARN_ON(pa & ~PAGE_MASK);
@@ -127,7 +126,7 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
 	WARN_ON(size & ~PAGE_MASK);
 
 	for (i = 0; i < size; i += PAGE_SIZE)
-		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
+		if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
 			return NULL;
 
 	return (void __iomem *)ea;
@@ -148,7 +147,7 @@ void __iounmap_at(void *ea, unsigned long size)
 }
 
 void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
-				unsigned long flags, void *caller)
+				pgprot_t prot, void *caller)
 {
 	phys_addr_t paligned;
 	void __iomem *ret;
@@ -178,11 +177,11 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
 			return NULL;
 
 		area->phys_addr = paligned;
-		ret = __ioremap_at(paligned, area->addr, size, flags);
+		ret = __ioremap_at(paligned, area->addr, size, prot);
 		if (!ret)
 			vunmap(area->addr);
 	} else {
-		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
+		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
 		if (ret)
 			ioremap_bot += size;
 	}
@@ -195,37 +194,37 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
 			 unsigned long flags)
 {
-	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 
 void __iomem * ioremap(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));
+	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
+		return ppc_md.ioremap(addr, size, prot, caller);
+	return __ioremap_caller(addr, size, prot, caller);
 }
 
 void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL));
+	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
 	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
+		return ppc_md.ioremap(addr, size, prot, caller);
+	return __ioremap_caller(addr, size, prot, caller);
 }
 
 void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
 {
-	unsigned long flags = pgprot_val(pgprot_cached(PAGE_KERNEL));
+	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
 	void *caller = __builtin_return_address(0);
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
+		return ppc_md.ioremap(addr, size, prot, caller);
+	return __ioremap_caller(addr, size, prot, caller);
 }
 
 void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
@@ -246,8 +245,8 @@ void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
 	flags |= _PAGE_PRIVILEGED;
 
 	if (ppc_md.ioremap)
-		return ppc_md.ioremap(addr, size, flags, caller);
-	return __ioremap_caller(addr, size, flags, caller);
+		return ppc_md.ioremap(addr, size, __pgprot(flags), caller);
+	return __ioremap_caller(addr, size, __pgprot(flags), caller);
 }
 
 
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 34d6c1a0971e..b31abe35ed2c 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -230,7 +230,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
 
 	if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
 	    (__ioremap_at(io.start, cf->io_virt, cf->io_size,
-		pgprot_val(pgprot_noncached(PAGE_KERNEL))) == NULL)) {
+		pgprot_noncached(PAGE_KERNEL)) == NULL)) {
 		dev_err(device, "can't ioremap ranges\n");
 		status = -ENOMEM;
 		goto fail1;