Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
 arch/powerpc/mm/pgtable_32.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 03b1a3b0fbd5..7692d1bb1bc6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -54,9 +54,6 @@ extern char etext[], _stext[];
 #ifdef HAVE_BATS
 extern phys_addr_t v_mapped_by_bats(unsigned long va);
 extern unsigned long p_mapped_by_bats(phys_addr_t pa);
-void setbat(int index, unsigned long virt, phys_addr_t phys,
-            unsigned int size, int flags);
-
 #else /* !HAVE_BATS */
 #define v_mapped_by_bats(x)     (0UL)
 #define p_mapped_by_bats(x)     (0UL)
@@ -110,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;
-        extern int mem_init_done;
 
-        if (mem_init_done) {
+        if (slab_is_available()) {
                 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         } else {
                 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
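The hunk above drops the file-local mem_init_done flag in favour of the generic slab_is_available() test when deciding how to allocate a kernel PTE page. A minimal sketch of that early-vs-late allocation pattern follows; it is illustrative only, the helper name alloc_pte_page() is invented for the example, and memblock_alloc() is assumed to return a physical address (as it does in this kernel version), hence the __va() conversion.

#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/pgtable.h>

/* Illustrative only: choose the allocator according to boot progress.
 * Before the slab allocator is up, only the memblock boot allocator
 * can hand out pages; afterwards the normal page allocator is used. */
static pte_t *alloc_pte_page(void)
{
        if (slab_is_available())
                return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
}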
@@ -192,7 +188,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
         /* Make sure we have the base flags */
         if ((flags & _PAGE_PRESENT) == 0)
-                flags |= PAGE_KERNEL;
+                flags |= pgprot_val(PAGE_KERNEL);
 
         /* Non-cacheable page cannot be coherent */
         if (flags & _PAGE_NO_CACHE)
@@ -219,9 +215,9 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
          * Don't allow anybody to remap normal RAM that we're using.
          * mem_init() sets high_memory so only do the check after that.
          */
-        if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+        if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
             !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
-                printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
+                printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
                        (unsigned long long)p, __builtin_return_address(0));
                 return NULL;
         }
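Besides the slab_is_available() conversion, this hunk also changes the printk format from %pf to %ps. Both specifiers resolve a code address to a symbol name; %ps takes the address as-is, which is what 32-bit powerpc (no function descriptors) needs for __builtin_return_address(0). A small usage sketch, with a function name invented for the example:

#include <linux/printk.h>

/* Sketch only: %ps turns a text address into the containing symbol's
 * name, so the message identifies the function that called us. */
static void report_caller(void)
{
        printk(KERN_INFO "called from %ps\n", __builtin_return_address(0));
}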
@@ -247,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
         if ((v = p_mapped_by_tlbcam(p)))
                 goto out;
 
-        if (mem_init_done) {
+        if (slab_is_available()) {
                 struct vm_struct *area;
                 area = get_vm_area_caller(size, VM_IOREMAP, caller);
                 if (area == 0)
@@ -266,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
         for (i = 0; i < size && err == 0; i += PAGE_SIZE)
                 err = map_page(v+i, p+i, flags);
         if (err) {
-                if (mem_init_done)
+                if (slab_is_available())
                         vunmap((void *)v);
                 return NULL;
         }
@@ -327,7 +323,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
         p = memstart_addr + s;
         for (; s < top; s += PAGE_SIZE) {
                 ktext = ((char *) v >= _stext && (char *) v < etext);
-                f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
+                f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
                 map_page(v, p, f);
 #ifdef CONFIG_PPC_STD_MMU_32
                 if (ktext)
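The PAGE_KERNEL hunks (here and in __ioremap_caller() above) wrap the protection constants in pgprot_val(). The flag variables in this file are plain unsigned long, while PAGE_KERNEL and PAGE_KERNEL_TEXT are pgprot_t values, which become a one-member struct when strict MM type checking is enabled; pgprot_val() extracts the raw bits so the OR works either way. A simplified sketch of the type relationship, using a placeholder bit pattern rather than the real powerpc definitions:

/* Simplified, assuming the strict-typecheck layout; 0x1234UL is a
 * placeholder, not the real PAGE_KERNEL bits. */
typedef struct { unsigned long pgprot; } pgprot_t;

#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) })

#define PAGE_KERNEL     __pgprot(0x1234UL)

static unsigned long add_base_flags(unsigned long flags)
{
        /* "flags |= PAGE_KERNEL;" would not compile here because
         * pgprot_t is a struct; pgprot_val() unwraps the bits first. */
        flags |= pgprot_val(PAGE_KERNEL);
        return flags;
}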