author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /arch/microblaze/mm
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'arch/microblaze/mm')

-rw-r--r--  arch/microblaze/mm/Makefile      |   2
-rw-r--r--  arch/microblaze/mm/consistent.c  | 255
-rw-r--r--  arch/microblaze/mm/fault.c       |  37
-rw-r--r--  arch/microblaze/mm/init.c        |  47
-rw-r--r--  arch/microblaze/mm/pgtable.c     |  66

5 files changed, 321 insertions, 86 deletions
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index 6c8a924d9e26..09c49ed87235 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile
 #
 
-obj-y := init.o
+obj-y := consistent.o init.o
 
 obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
new file mode 100644
index 000000000000..5a59dad62bd2
--- /dev/null
+++ b/arch/microblaze/mm/consistent.c
@@ -0,0 +1,255 @@
+/*
+ * Microblaze support for cache consistent memory.
+ * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2010 PetaLogix
+ * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
+ *
+ * Based on PowerPC version derived from arch/arm/mm/consistent.c
+ * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ * Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/gfp.h>
+
+#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/cpuinfo.h>
+#include <asm/tlbflush.h>
+
+#ifndef CONFIG_MMU
+/* I have to use dcache values because I can't rely on RAM size */
+# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+#endif
+
+/*
+ * Consistent memory allocators. Used for DMA devices that want to
+ * share uncached memory with the processor core.
+ * My crufty no-MMU approach is simple. In the HW platform we can optionally
+ * mirror the DDR up above the processor cacheable region. So, memory accessed
+ * in this mirror region will not be cached. It's allocated from the same
+ * pool as normal memory, but the handle we return is shifted up into the
+ * uncached region. This will no doubt cause big problems if memory allocated
+ * here is not also freed properly. -- JW
+ */
+void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
+{
+	unsigned long order, vaddr;
+	void *ret;
+	unsigned int i, err = 0;
+	struct page *page, *end;
+
+#ifdef CONFIG_MMU
+	phys_addr_t pa;
+	struct vm_struct *area;
+	unsigned long va;
+#endif
+
+	if (in_interrupt())
+		BUG();
+
+	/* Only allocate page size areas. */
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	vaddr = __get_free_pages(gfp, order);
+	if (!vaddr)
+		return NULL;
+
+	/*
+	 * we need to ensure that there are no cachelines in use,
+	 * or worse dirty in this area.
+	 */
+	flush_dcache_range(virt_to_phys((void *)vaddr),
+					virt_to_phys((void *)vaddr) + size);
+
+#ifndef CONFIG_MMU
+	ret = (void *)vaddr;
+	/*
+	 * Here's the magic! Note if the uncached shadow is not implemented,
+	 * it's up to the calling code to also test that condition and make
+	 * other arrangements, such as manually flushing the cache and so on.
+	 */
+# ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
+# endif
+	if ((unsigned int)ret > cpuinfo.dcache_base &&
+				(unsigned int)ret < cpuinfo.dcache_high)
+		printk(KERN_WARNING
+			"ERROR: Your cache coherent area is CACHED!!!\n");
+
+	/* dma_handle is same as physical (shadowed) address */
+	*dma_handle = (dma_addr_t)ret;
+#else
+	/* Allocate some common virtual space to map the new pages. */
+	area = get_vm_area(size, VM_ALLOC);
+	if (!area) {
+		free_pages(vaddr, order);
+		return NULL;
+	}
+	va = (unsigned long) area->addr;
+	ret = (void *)va;
+
+	/* This gives us the real physical address of the first page. */
+	*dma_handle = pa = virt_to_bus((void *)vaddr);
+#endif
+
+	/*
+	 * free wasted pages.  We skip the first page since we know
+	 * that it will have count = 1 and won't require freeing.
+	 * We also mark the pages in use as reserved so that
+	 * remap_page_range works.
+	 */
+	page = virt_to_page(vaddr);
+	end = page + (1 << order);
+
+	split_page(page, order);
+
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
+#ifdef CONFIG_MMU
+		/* MS: This is the whole magic - use cache inhibit pages */
+		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
+#endif
+
+		SetPageReserved(page);
+		page++;
+	}
+
+	/* Free the otherwise unused pages. */
+	while (page < end) {
+		__free_page(page);
+		page++;
+	}
+
+	if (err) {
+		free_pages(vaddr, order);
+		return NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(consistent_alloc);
+
+/*
+ * free page(s) as defined by the above mapping.
+ */
+void consistent_free(size_t size, void *vaddr)
+{
+	struct page *page;
+
+	if (in_interrupt())
+		BUG();
+
+	size = PAGE_ALIGN(size);
+
+#ifndef CONFIG_MMU
+	/* Clear SHADOW_MASK bit in address, and free as per usual */
+# ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
+# endif
+	page = virt_to_page(vaddr);
+
+	do {
+		ClearPageReserved(page);
+		__free_page(page);
+		page++;
+	} while (size -= PAGE_SIZE);
+#else
+	do {
+		pte_t *ptep;
+		unsigned long pfn;
+
+		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
+						(unsigned int)vaddr),
+					(unsigned int)vaddr),
+					(unsigned int)vaddr);
+		if (!pte_none(*ptep) && pte_present(*ptep)) {
+			pfn = pte_pfn(*ptep);
+			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
+			if (pfn_valid(pfn)) {
+				page = pfn_to_page(pfn);
+
+				ClearPageReserved(page);
+				__free_page(page);
+			}
+		}
+		vaddr += PAGE_SIZE;
+	} while (size -= PAGE_SIZE);
+
+	/* flush tlb */
+	flush_tlb_all();
+#endif
+}
+EXPORT_SYMBOL(consistent_free);
+
+/*
+ * make an area consistent.
+ */
+void consistent_sync(void *vaddr, size_t size, int direction)
+{
+	unsigned long start;
+	unsigned long end;
+
+	start = (unsigned long)vaddr;
+
+	/* Convert start address back down to unshadowed memory region */
+#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	start &= ~UNCACHED_SHADOW_MASK;
+#endif
+	end = start + size;
+
+	switch (direction) {
+	case PCI_DMA_NONE:
+		BUG();
+	case PCI_DMA_FROMDEVICE:	/* invalidate only */
+		invalidate_dcache_range(start, end);
+		break;
+	case PCI_DMA_TODEVICE:		/* writeback only */
+		flush_dcache_range(start, end);
+		break;
+	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
+		flush_dcache_range(start, end);
+		break;
+	}
+}
+EXPORT_SYMBOL(consistent_sync);
+
+/*
+ * consistent_sync_page makes memory consistent. Identical
+ * to consistent_sync, but takes a struct page instead of a
+ * virtual address
+ */
+void consistent_sync_page(struct page *page, unsigned long offset,
+	size_t size, int direction)
+{
+	unsigned long start = (unsigned long)page_address(page) + offset;
+	consistent_sync((void *)start, size, direction);
+}
+EXPORT_SYMBOL(consistent_sync_page);
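
The entry points exported above (consistent_alloc, consistent_free, consistent_sync, consistent_sync_page) form the whole consistent-memory API added by this file. As a quick orientation, here is a minimal driver-side sketch of the intended call pattern; it is not part of the patch, and the example_* names and buffer size are invented for illustration:

/* Hypothetical usage sketch -- not part of the patch. */
#include <linux/gfp.h>
#include <linux/pci.h>	/* PCI_DMA_* direction constants */

#define EXAMPLE_BUF_SIZE	4096	/* illustrative size */

static void *example_buf;
static dma_addr_t example_handle;

static int example_setup(void)
{
	/* consistent_alloc() BUG()s in interrupt context; call from process context */
	example_buf = consistent_alloc(GFP_KERNEL, EXAMPLE_BUF_SIZE,
				       &example_handle);
	if (!example_buf)
		return -ENOMEM;
	/* example_handle is the bus address to program into the device */
	return 0;
}

static void example_teardown(void)
{
	/* pass the same size back; consistent_free() walks it page by page */
	consistent_free(EXAMPLE_BUF_SIZE, example_buf);
}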
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff2..bab922993185 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	regs->esr = error_code;
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
 		printk(KERN_WARNING "kernel task_size exceed");
 		_exception(SIGSEGV, regs, code, address);
 	}
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 #endif /* CONFIG_KGDB */
 
-	if (in_atomic() || !mm) {
+	if (unlikely(in_atomic() || !mm)) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
 
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * source.  If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
 	 */
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
 			goto bad_area_nosemaphore;
 
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	vma = find_vma(mm, address);
-	if (!vma)
+	if (unlikely(!vma))
 		goto bad_area;
 
 	if (vma->vm_start <= address)
 		goto good_area;
 
-	if (!(vma->vm_flags & VM_GROWSDOWN))
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
 		goto bad_area;
 
-	if (!is_write)
+	if (unlikely(!is_write))
 		goto bad_area;
 
 	/*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * before setting the user r1.  Thus we allow the stack to
 	 * expand to 1MB without further checks.
 	 */
-	if (address + 0x100000 < vma->vm_end) {
+	if (unlikely(address + 0x100000 < vma->vm_end)) {
 
 		/* get user regs even if this fault is in kernel mode */
 		struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
 	code = SEGV_ACCERR;
 
 	/* a write */
-	if (is_write) {
-		if (!(vma->vm_flags & VM_WRITE))
+	if (unlikely(is_write)) {
+		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			goto bad_area;
 	/* a read */
 	} else {
 		/* protection fault */
-		if (error_code & 0x08000000)
+		if (unlikely(error_code & 0x08000000))
 			goto bad_area;
-		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
 			goto bad_area;
 	}
 
@@ -235,7 +235,7 @@ survive:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (unlikely(fault & VM_FAULT_MAJOR))
 		current->maj_flt++;
 	else
 		current->min_flt++;
@@ -273,16 +273,11 @@ bad_area_nosemaphore:
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	if (current->pid == 1) {
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
 	up_read(&mm->mmap_sem);
-	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
-	if (user_mode(regs))
-		do_exit(SIGKILL);
-	bad_page_fault(regs, address, SIGKILL);
+	if (!user_mode(regs))
+		bad_page_fault(regs, address, SIGKILL);
+	else
+		pagefault_out_of_memory();
 	return;
 
 do_sigbus:
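
Nearly every change in the fault.c hunks above is mechanical: conditions on error paths are wrapped in unlikely() so the compiler lays the common path out as the fall-through. For reference, the kernel's <linux/compiler.h> defines the annotations (in their basic, non-profiling form) as:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

The tests themselves are unchanged; __builtin_expect only steers block placement and static branch prediction.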
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a44892e7cd5b..cca3579d4268 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -15,6 +15,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/pfn.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 
 #include <asm/page.h>
@@ -23,6 +24,9 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
+/* Use for MMU and noMMU because of PCI generic code */
+int mem_init_done;
+
 #ifndef CONFIG_MMU
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
@@ -30,7 +34,6 @@ EXPORT_SYMBOL(__page_offset);
 #else
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int mem_init_done;
 static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 
@@ -41,8 +44,10 @@ char *klimit = _end;
  * have available.
  */
 unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
 unsigned long memory_end; /* due to mm/nommu.c */
 unsigned long memory_size;
+EXPORT_SYMBOL(memory_size);
 
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
@@ -162,7 +167,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -192,12 +196,6 @@
 			(unsigned long)(&__init_end));
 }
 
-/* FIXME from arch/powerpc/mm/mem.c*/
-void show_mem(void)
-{
-	printk(KERN_NOTICE "%s\n", __func__);
-}
-
 void __init mem_init(void)
 {
 	high_memory = (void *)__va(memory_end);
@@ -207,20 +205,14 @@ void __init mem_init(void)
 	printk(KERN_INFO "Memory: %luk/%luk available\n",
 			nr_free_pages() << (PAGE_SHIFT-10),
 			num_physpages << (PAGE_SHIFT-10));
-#ifdef CONFIG_MMU
 	mem_init_done = 1;
-#endif
 }
 
 #ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
+int page_is_ram(unsigned long pfn)
 {
-	return ((addr < memory_start) ||
-		((addr + size) > memory_end));
+	return __range_ok(pfn, 0);
 }
-EXPORT_SYMBOL(___range_ok);
-
 #else
 int page_is_ram(unsigned long pfn)
 {
@@ -348,4 +340,27 @@ void __init *early_get_page(void)
 	}
 	return p;
 }
+
 #endif /* CONFIG_MMU */
+
+void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
+{
+	if (mem_init_done)
+		return kmalloc(size, mask);
+	else
+		return alloc_bootmem(size);
+}
+
+void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+{
+	void *p;
+
+	if (mem_init_done)
+		p = kzalloc(size, mask);
+	else {
+		p = alloc_bootmem(size);
+		if (p)
+			memset(p, 0, size);
+	}
+	return p;
+}
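
The two helpers added at the end of init.c exist because their callers can run either before or after mem_init(); the mem_init_done flag introduced earlier in this diff selects between the bootmem allocator and the slab allocator. A hypothetical early-setup caller (the example_state name is invented for illustration) might use them like this:

/* Hypothetical caller sketch -- not part of the patch. */
struct example_state {
	unsigned long base;
	int irq;
};

static struct example_state *example_early_setup(void)
{
	/* returns zeroed memory whether or not the slab allocator is up yet */
	return zalloc_maybe_bootmem(sizeof(struct example_state), GFP_KERNEL);
}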
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 46c4ca5d15c5..59bf2335a4ce 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -42,6 +42,7 @@
 
 unsigned long ioremap_base;
 unsigned long ioremap_bot;
+EXPORT_SYMBOL(ioremap_bot);
 
 /* The maximum lowmem defaults to 768Mb, but this can be configured to
  * another value.
@@ -103,7 +104,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
 		area = get_vm_area(size, VM_IOREMAP);
 		if (area == NULL)
 			return NULL;
-		v = VMALLOC_VMADDR(area->addr);
+		v = (unsigned long) area->addr;
 	} else {
 		v = (ioremap_bot -= size);
 	}
@@ -144,7 +145,6 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 	pmd_t *pd;
 	pte_t *pg;
 	int err = -ENOMEM;
-	/* spin_lock(&init_mm.page_table_lock); */
 	/* Use upper 10 bits of VA to index the first level map */
 	pd = pmd_offset(pgd_offset_k(va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
@@ -155,39 +155,13 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		err = 0;
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 				__pgprot(flags)));
-		if (mem_init_done)
+		if (unlikely(mem_init_done))
 			flush_HPTE(0, va, pmd_val(*pd));
 			/* flush_HPTE(0, va, pg); */
-
 	}
-	/* spin_unlock(&init_mm.page_table_lock); */
 	return err;
 }
 
-void __init adjust_total_lowmem(void)
-{
-	/* TBD */
-#if 0
-	unsigned long max_low_mem = MAX_LOW_MEM;
-
-	if (total_lowmem > max_low_mem) {
-		total_lowmem = max_low_mem;
-#ifndef CONFIG_HIGHMEM
-		printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
-				"CONFIG_HIGHMEM to reach %ld Mb\n",
-				max_low_mem >> 20, total_memory >> 20);
-		total_memory = total_lowmem;
-#endif /* CONFIG_HIGHMEM */
-	}
-#endif
-}
-
-static void show_tmem(unsigned long tmem)
-{
-	volatile unsigned long a;
-	a = a + tmem;
-}
-
 /*
  * Map in all of physical memory starting at CONFIG_KERNEL_START.
  */
@@ -197,7 +171,6 @@ void __init mapin_ram(void)
 
 	v = CONFIG_KERNEL_START;
 	p = memory_start;
-	show_tmem(memory_size);
 	for (s = 0; s < memory_size; s += PAGE_SIZE) {
 		f = _PAGE_PRESENT | _PAGE_ACCESSED |
 				_PAGE_SHARED | _PAGE_HWEXEC;
@@ -216,24 +189,6 @@ void __init mapin_ram(void)
 /* is x a power of 2? */
 #define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
 
-/*
- * Set up a mapping for a block of I/O.
- * virt, phys, size must all be page-aligned.
- * This should only be called before ioremap is called.
- */
-void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
-			     unsigned int size, int flags)
-{
-	int i;
-
-	if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
-		ioremap_bot = ioremap_base = virt;
-
-	/* Put it in the page tables. */
-	for (i = 0; i < size; i += PAGE_SIZE)
-		map_page(virt + i, phys + i, flags);
-}
-
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise.  The pointer to
@@ -284,3 +239,18 @@ unsigned long iopa(unsigned long addr)
 
 	return pa;
 }
+
+__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+		unsigned long address)
+{
+	pte_t *pte;
+	if (mem_init_done) {
+		pte = (pte_t *)__get_free_page(GFP_KERNEL |
+					__GFP_REPEAT | __GFP_ZERO);
+	} else {
+		pte = (pte_t *)early_get_page();
+		if (pte)
+			clear_page(pte);
+	}
+	return pte;
+}
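
The new pte_alloc_one_kernel() above applies the same allocator gate as init.c's *_maybe_bootmem helpers: slab-backed pages once mem_init_done is set, the boot-time pool before that. Condensed into one hypothetical helper (a sketch reusing declarations from this tree, not code from the patch):

/* Condensed sketch of the allocation gate -- not part of the patch. */
extern int mem_init_done;
extern void __init *early_get_page(void);

static void *zeroed_kernel_page(void)
{
	void *p;

	if (mem_init_done)	/* page allocator is up; let it zero the page */
		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	p = early_get_page();	/* boot-time pool; does not zero */
	if (p)
		clear_page(p);
	return p;
}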
