Diffstat (limited to 'arch/arm/mm')
38 files changed, 341 insertions, 666 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 101105e5261..87ec141fcaa 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -717,17 +717,6 @@ config TLS_REG_EMUL
       a few prototypes like that in existence) and therefore access to
       that required register must be emulated.
 
-config HAS_TLS_REG
-    bool
-    depends on !TLS_REG_EMUL
-    default y if SMP || CPU_32v7
-    help
-      This selects support for the CP15 thread register.
-      It is defined to be available on some ARMv6 processors (including
-      all SMP capable ARMv6's) or later processors. User space may
-      assume directly accessing that register and always obtain the
-      expected value only on ARMv7 and above.
-
 config NEEDS_SYSCALL_FOR_CMPXCHG
     bool
     help
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index e8d34a80851..d63b6c41375 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -15,7 +15,6 @@ endif
 obj-$(CONFIG_MODULES) += proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
 obj-$(CONFIG_HIGHMEM) += highmem.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f98c358989..d073b64ae87 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -924,8 +924,20 @@ static int __init alignment_init(void)
         ai_usermode = UM_FIXUP;
     }
 
-    hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
-    hook_fault_code(3, do_alignment, SIGILL, "alignment exception");
+    hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+            "alignment exception");
+
+    /*
+     * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
+     * fault, not as alignment error.
+     *
+     * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
+     * needed.
+     */
+    if (cpu_architecture() <= CPU_ARCH_ARMv6) {
+        hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
+                "alignment exception");
+    }
 
     return 0;
 }
diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c
deleted file mode 100644
index c8c0c4b0f0a..00000000000
--- a/arch/arm/mm/discontig.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * linux/arch/arm/mm/discontig.c
- *
- * Discontiguous memory support.
- *
- * Initial code: Copyright (C) 1999-2000 Nicolas Pitre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/mmzone.h>
-#include <linux/bootmem.h>
-
-#if MAX_NUMNODES != 4 && MAX_NUMNODES != 16
-# error Fix Me Please
-#endif
-
-/*
- * Our node_data structure for discontiguous memory.
- */
-
-pg_data_t discontig_node_data[MAX_NUMNODES] = {
-    { .bdata = &bootmem_node_data[0] },
-    { .bdata = &bootmem_node_data[1] },
-    { .bdata = &bootmem_node_data[2] },
-    { .bdata = &bootmem_node_data[3] },
-#if MAX_NUMNODES == 16
-    { .bdata = &bootmem_node_data[4] },
-    { .bdata = &bootmem_node_data[5] },
-    { .bdata = &bootmem_node_data[6] },
-    { .bdata = &bootmem_node_data[7] },
-    { .bdata = &bootmem_node_data[8] },
-    { .bdata = &bootmem_node_data[9] },
-    { .bdata = &bootmem_node_data[10] },
-    { .bdata = &bootmem_node_data[11] },
-    { .bdata = &bootmem_node_data[12] },
-    { .bdata = &bootmem_node_data[13] },
-    { .bdata = &bootmem_node_data[14] },
-    { .bdata = &bootmem_node_data[15] },
-#endif
-};
-
-EXPORT_SYMBOL(discontig_node_data);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9e7742f0a10..c704eed63c5 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -183,6 +183,8 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 {
     struct arm_vmregion *c;
+    size_t align;
+    int bit;
 
     if (!consistent_pte[0]) {
         printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -191,9 +193,20 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
     }
 
     /*
+     * Align the virtual region allocation - maximum alignment is
+     * a section size, minimum is a page size. This helps reduce
+     * fragmentation of the DMA space, and also prevents allocations
+     * smaller than a section from crossing a section boundary.
+     */
+    bit = fls(size - 1) + 1;
+    if (bit > SECTION_SHIFT)
+        bit = SECTION_SHIFT;
+    align = 1 << bit;
+
+    /*
      * Allocate a virtual address in the consistent mapping region.
      */
-    c = arm_vmregion_alloc(&consistent_head, size,
+    c = arm_vmregion_alloc(&consistent_head, align, size,
                 gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
     if (c) {
         pte_t *pte;
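The hunk above derives the remap alignment purely from the allocation size and caps it at a section. As an illustration only (not kernel code; fls_demo() and the SECTION_SHIFT value are stand-ins for the kernel's fls() and ARM's 1 MB sections), a standalone sketch of the same rule:

    #include <stdio.h>

    /* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
    static int fls_demo(unsigned long x)
    {
        int bit = 0;
        while (x) {
            bit++;
            x >>= 1;
        }
        return bit;
    }

    #define SECTION_SHIFT 20    /* assumed: 1 MB sections */

    static unsigned long dma_remap_align(unsigned long size)
    {
        int bit = fls_demo(size - 1) + 1;   /* same rule as the hunk */

        if (bit > SECTION_SHIFT)
            bit = SECTION_SHIFT;            /* never align beyond a section */
        return 1UL << bit;
    }

    int main(void)
    {
        unsigned long sizes[] = { 4096, 12288, 65536, 1UL << 20, 4UL << 20 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("size %8lu -> align 0x%08lx\n",
                   sizes[i], dma_remap_align(sizes[i]));
        return 0;
    }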
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index cbfb2edcf7d..23b0b03af5e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -413,7 +413,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
     pmd_k = pmd_offset(pgd_k, addr);
     pmd = pmd_offset(pgd, addr);
 
-    if (pmd_none(*pmd_k))
+    /*
+     * On ARM one Linux PGD entry contains two hardware entries (see page
+     * tables layout in pgtable.h). We normally guarantee that we always
+     * fill both L1 entries. But create_mapping() doesn't follow the rule.
+     * It can create individual L1 entries, so here we have to call the
+     * pmd_none() check for the entry really corresponding to the address,
+     * not for the first entry of the pair.
+     */
+    index = (addr >> SECTION_SHIFT) & 1;
+    if (pmd_none(pmd_k[index]))
         goto bad_area;
 
     copy_pmd(pmd, pmd_k);
@@ -463,15 +472,10 @@ static struct fsr_info {
      * defines these to be "precise" aborts.
      */
     { do_bad, SIGSEGV, 0, "vector exception" },
-    { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
+    { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
     { do_bad, SIGKILL, 0, "terminal exception" },
-    { do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
-    /* Do we need runtime check ? */
-#if __LINUX_ARM_ARCH__ < 6
+    { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
     { do_bad, SIGBUS, 0, "external abort on linefetch" },
-#else
-    { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
-#endif
     { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
     { do_bad, SIGBUS, 0, "external abort on linefetch" },
     { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
@@ -508,13 +512,15 @@ static struct fsr_info {
 
 void __init
 hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
-        int sig, const char *name)
+        int sig, int code, const char *name)
 {
-    if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) {
-        fsr_info[nr].fn = fn;
-        fsr_info[nr].sig = sig;
-        fsr_info[nr].name = name;
-    }
+    if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
+        BUG();
+
+    fsr_info[nr].fn = fn;
+    fsr_info[nr].sig = sig;
+    fsr_info[nr].code = code;
+    fsr_info[nr].name = name;
 }
 
 /*
@@ -594,3 +600,25 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
     arm_notify_die("", regs, &info, ifsr, 0);
 }
 
+static int __init exceptions_init(void)
+{
+    if (cpu_architecture() >= CPU_ARCH_ARMv6) {
+        hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
+                "I-cache maintenance fault");
+    }
+
+    if (cpu_architecture() >= CPU_ARCH_ARMv7) {
+        /*
+         * TODO: Access flag faults introduced in ARMv6K.
+         * Runtime check for 'K' extension is needed
+         */
+        hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
+                "section access flag fault");
+        hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
+                "section access flag fault");
+    }
+
+    return 0;
+}
+
+arch_initcall(exceptions_init);
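The do_translation_fault() change above selects one of the two hardware L1 entries that back a single Linux PGD slot. A minimal standalone sketch of that index computation, assuming ARM's 1 MB sections (SECTION_SHIFT of 20) as in the hunk:

    #include <stdio.h>

    #define SECTION_SHIFT 20    /* assumed: ARM 1 MB sections */

    int main(void)
    {
        /* hypothetical faulting addresses within a 2 MB Linux PGD slot */
        unsigned long addrs[] = { 0xc0000000UL, 0xc0100000UL, 0xc0234567UL };
        unsigned int i;

        for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
            unsigned int index = (addrs[i] >> SECTION_SHIFT) & 1;
            printf("addr 0x%08lx -> hardware L1 entry %u of the pair\n",
                   addrs[i], index);
        }
        return 0;
    }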
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f6a99946532..7185b00650f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -79,38 +80,37 @@ struct meminfo meminfo;
 void show_mem(void)
 {
     int free = 0, total = 0, reserved = 0;
-    int shared = 0, cached = 0, slab = 0, node, i;
+    int shared = 0, cached = 0, slab = 0, i;
     struct meminfo * mi = &meminfo;
 
     printk("Mem-info:\n");
     show_free_areas();
-    for_each_online_node(node) {
-        for_each_nodebank (i,mi,node) {
+
+    for_each_bank (i, mi) {
         struct membank *bank = &mi->bank[i];
         unsigned int pfn1, pfn2;
         struct page *page, *end;
 
         pfn1 = bank_pfn_start(bank);
         pfn2 = bank_pfn_end(bank);
 
         page = pfn_to_page(pfn1);
         end = pfn_to_page(pfn2 - 1) + 1;
 
         do {
             total++;
             if (PageReserved(page))
                 reserved++;
             else if (PageSwapCache(page))
                 cached++;
             else if (PageSlab(page))
                 slab++;
             else if (!page_count(page))
                 free++;
             else
                 shared += page_count(page) - 1;
             page++;
         } while (page < end);
-        }
     }
 
     printk("%d pages of RAM\n", total);
@@ -121,7 +121,7 @@ void show_mem(void)
     printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_node_limits(int node, struct meminfo *mi,
+static void __init find_limits(struct meminfo *mi,
     unsigned long *min, unsigned long *max_low, unsigned long *max_high)
 {
     int i;
@@ -129,7 +129,7 @@ static void __init find_node_limits(int node, struct meminfo *mi,
     *min = -1UL;
     *max_low = *max_high = 0;
 
-    for_each_nodebank(i, mi, node) {
+    for_each_bank (i, mi) {
         struct membank *bank = &mi->bank[i];
         unsigned long start, end;
 
@@ -147,155 +147,64 @@ static void __init find_node_limits(int node, struct meminfo *mi,
     }
 }
 
-/*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd. Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
- */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
-{
-    unsigned int start_pfn, i, bootmap_pfn;
-
-    start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
-    bootmap_pfn = 0;
-
-    for_each_nodebank(i, mi, node) {
-        struct membank *bank = &mi->bank[i];
-        unsigned int start, end;
-
-        start = bank_pfn_start(bank);
-        end = bank_pfn_end(bank);
-
-        if (end < start_pfn)
-            continue;
-
-        if (start < start_pfn)
-            start = start_pfn;
-
-        if (end <= start)
-            continue;
-
-        if (end - start >= bootmap_pages) {
-            bootmap_pfn = start;
-            break;
-        }
-    }
-
-    if (bootmap_pfn == 0)
-        BUG();
-
-    return bootmap_pfn;
-}
-
-static int __init check_initrd(struct meminfo *mi)
-{
-    int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-    unsigned long end = phys_initrd_start + phys_initrd_size;
-
-    /*
-     * Make sure that the initrd is within a valid area of
-     * memory.
-     */
-    if (phys_initrd_size) {
-        unsigned int i;
-
-        initrd_node = -1;
-
-        for (i = 0; i < mi->nr_banks; i++) {
-            struct membank *bank = &mi->bank[i];
-            if (bank_phys_start(bank) <= phys_initrd_start &&
-                end <= bank_phys_end(bank))
-                initrd_node = bank->node;
-        }
-    }
-
-    if (initrd_node == -1) {
-        printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
-            "physical memory - disabling initrd\n",
-            phys_initrd_start, phys_initrd_size);
-        phys_initrd_start = phys_initrd_size = 0;
-    }
-#endif
-
-    return initrd_node;
-}
-
-static void __init bootmem_init_node(int node, struct meminfo *mi,
+static void __init arm_bootmem_init(struct meminfo *mi,
     unsigned long start_pfn, unsigned long end_pfn)
 {
-    unsigned long boot_pfn;
     unsigned int boot_pages;
+    phys_addr_t bitmap;
     pg_data_t *pgdat;
     int i;
 
     /*
-     * Allocate the bootmem bitmap page.
+     * Allocate the bootmem bitmap page. This must be in a region
+     * of memory which has already been mapped.
      */
     boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-    boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
+    bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+                __pfn_to_phys(end_pfn));
 
     /*
-     * Initialise the bootmem allocator for this node, handing the
+     * Initialise the bootmem allocator, handing the
      * memory banks over to bootmem.
      */
-    node_set_online(node);
-    pgdat = NODE_DATA(node);
-    init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
+    node_set_online(0);
+    pgdat = NODE_DATA(0);
+    init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
-    for_each_nodebank(i, mi, node) {
+    for_each_bank(i, mi) {
         struct membank *bank = &mi->bank[i];
         if (!bank->highmem)
-            free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+            free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
     }
 
     /*
-     * Reserve the bootmem bitmap for this node.
+     * Reserve the memblock reserved regions in bootmem.
      */
-    reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
-            boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-}
-
-static void __init bootmem_reserve_initrd(int node)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-    pg_data_t *pgdat = NODE_DATA(node);
-    int res;
-
-    res = reserve_bootmem_node(pgdat, phys_initrd_start,
-            phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-    if (res == 0) {
-        initrd_start = __phys_to_virt(phys_initrd_start);
-        initrd_end = initrd_start + phys_initrd_size;
-    } else {
-        printk(KERN_ERR
-            "INITRD: 0x%08lx+0x%08lx overlaps in-use "
-            "memory region - disabling initrd\n",
-            phys_initrd_start, phys_initrd_size);
+    for (i = 0; i < memblock.reserved.cnt; i++) {
+        phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
+        if (start >= start_pfn &&
+            memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+            reserve_bootmem_node(pgdat, __pfn_to_phys(start),
+                memblock_size_bytes(&memblock.reserved, i),
+                BOOTMEM_DEFAULT);
     }
-#endif
 }
 
-static void __init bootmem_free_node(int node, struct meminfo *mi)
+static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
+    unsigned long max_low, unsigned long max_high)
 {
     unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-    unsigned long min, max_low, max_high;
     int i;
 
-    find_node_limits(node, mi, &min, &max_low, &max_high);
-
     /*
-     * initialise the zones within this node.
+     * initialise the zones.
      */
     memset(zone_size, 0, sizeof(zone_size));
 
     /*
-     * The size of this node has already been determined. If we need
-     * to do anything fancy with the allocation of this memory to the
-     * zones, now is the time to do it.
+     * The memory size has already been determined. If we need
+     * to do anything fancy with the allocation of this memory
+     * to the zones, now is the time to do it.
      */
     zone_size[0] = max_low - min;
 #ifdef CONFIG_HIGHMEM
@@ -303,11 +212,11 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 #endif
 
     /*
-     * For each bank in this node, calculate the size of the holes.
-     *  holes = node_size - sum(bank_sizes_in_node)
+     * Calculate the size of the holes.
+     *  holes = node_size - sum(bank_sizes)
      */
     memcpy(zhole_size, zone_size, sizeof(zhole_size));
-    for_each_nodebank(i, mi, node) {
+    for_each_bank(i, mi) {
         int idx = 0;
 #ifdef CONFIG_HIGHMEM
         if (mi->bank[i].highmem)
@@ -320,24 +229,23 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
      * Adjust the sizes according to any special requirements for
      * this machine type.
      */
-    arch_adjust_zones(node, zone_size, zhole_size);
+    arch_adjust_zones(zone_size, zhole_size);
 
-    free_area_init_node(node, zone_size, min, zhole_size);
+    free_area_init_node(0, zone_size, min, zhole_size);
 }
 
 #ifndef CONFIG_SPARSEMEM
 int pfn_valid(unsigned long pfn)
 {
-    struct meminfo *mi = &meminfo;
-    unsigned int left = 0, right = mi->nr_banks;
+    struct memblock_region *mem = &memblock.memory;
+    unsigned int left = 0, right = mem->cnt;
 
     do {
         unsigned int mid = (right + left) / 2;
-        struct membank *bank = &mi->bank[mid];
 
-        if (pfn < bank_pfn_start(bank))
+        if (pfn < memblock_start_pfn(mem, mid))
             right = mid;
-        else if (pfn >= bank_pfn_end(bank))
+        else if (pfn >= memblock_end_pfn(mem, mid))
             left = mid + 1;
         else
             return 1;
@@ -346,73 +254,69 @@ int pfn_valid(unsigned long pfn)
 }
 EXPORT_SYMBOL(pfn_valid);
 
-static void arm_memory_present(struct meminfo *mi, int node)
+static void arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(struct meminfo *mi, int node)
+static void arm_memory_present(void)
 {
     int i;
-    for_each_nodebank(i, mi, node) {
-        struct membank *bank = &mi->bank[i];
-        memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
-    }
+    for (i = 0; i < memblock.memory.cnt; i++)
+        memory_present(0, memblock_start_pfn(&memblock.memory, i),
+                memblock_end_pfn(&memblock.memory, i));
 }
 #endif
 
-void __init bootmem_init(void)
+void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
-    struct meminfo *mi = &meminfo;
-    unsigned long min, max_low, max_high;
-    int node, initrd_node;
+    int i;
 
-    /*
-     * Locate which node contains the ramdisk image, if any.
-     */
-    initrd_node = check_initrd(mi);
+    memblock_init();
+    for (i = 0; i < mi->nr_banks; i++)
+        memblock_add(mi->bank[i].start, mi->bank[i].size);
 
-    max_low = max_high = 0;
+    /* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+    memblock_reserve(__pa(_data), _end - _data);
+#else
+    memblock_reserve(__pa(_stext), _end - _stext);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+    if (phys_initrd_size) {
+        memblock_reserve(phys_initrd_start, phys_initrd_size);
 
-    /*
-     * Run through each node initialising the bootmem allocator.
-     */
-    for_each_node(node) {
-        unsigned long node_low, node_high;
+        /* Now convert initrd to virtual addresses */
+        initrd_start = __phys_to_virt(phys_initrd_start);
+        initrd_end = initrd_start + phys_initrd_size;
+    }
+#endif
 
-        find_node_limits(node, mi, &min, &node_low, &node_high);
+    arm_mm_memblock_reserve();
 
-        if (node_low > max_low)
-            max_low = node_low;
-        if (node_high > max_high)
-            max_high = node_high;
+    /* reserve any platform specific memblock areas */
+    if (mdesc->reserve)
+        mdesc->reserve();
 
-        /*
-         * If there is no memory in this node, ignore it.
-         * (We can't have nodes which have no lowmem)
-         */
-        if (node_low == 0)
-            continue;
+    memblock_analyze();
+    memblock_dump_all();
+}
 
-        bootmem_init_node(node, mi, min, node_low);
+void __init bootmem_init(void)
+{
+    struct meminfo *mi = &meminfo;
+    unsigned long min, max_low, max_high;
 
-        /*
-         * Reserve any special node zero regions.
-         */
-        if (node == 0)
-            reserve_node_zero(NODE_DATA(node));
+    max_low = max_high = 0;
 
-        /*
-         * If the initrd is in this node, reserve its memory.
-         */
-        if (node == initrd_node)
-            bootmem_reserve_initrd(node);
+    find_limits(mi, &min, &max_low, &max_high);
 
-        /*
-         * Sparsemem tries to allocate bootmem in memory_present(),
-         * so must be done after the fixed reservations
-         */
-        arm_memory_present(mi, node);
-    }
+    arm_bootmem_init(mi, min, max_low);
+
+    /*
+     * Sparsemem tries to allocate bootmem in memory_present(),
+     * so must be done after the fixed reservations
+     */
+    arm_memory_present();
 
     /*
      * sparse_init() needs the bootmem allocator up and running.
@@ -420,12 +324,11 @@ void __init bootmem_init(void)
     sparse_init();
 
     /*
-     * Now free memory in each node - free_area_init_node needs
+     * Now free the memory - free_area_init_node needs
      * the sparse mem_map arrays initialized by sparse_init()
      * for memmap_init_zone(), otherwise all PFNs are invalid.
      */
-    for_each_node(node)
-        bootmem_free_node(node, mi);
+    arm_bootmem_free(mi, min, max_low, max_high);
 
     high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
 
@@ -460,7 +363,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
     struct page *start_pg, *end_pg;
     unsigned long pg, pgend;
@@ -483,40 +386,39 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
      * free the section of the memmap array.
      */
     if (pg < pgend)
-        free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+        free_bootmem(pg, pgend - pg);
 }
 
 /*
  * The mem_map array can get very big. Free the unused area of the memory map.
  */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(struct meminfo *mi)
 {
     unsigned long bank_start, prev_bank_end = 0;
     unsigned int i;
 
     /*
-     * [FIXME] This relies on each bank being in address order. This
-     * may not be the case, especially if the user has provided the
-     * information on the command line.
+     * This relies on each bank being in address order.
+     * The banks are sorted previously in bootmem_init().
      */
-    for_each_nodebank(i, mi, node) {
+    for_each_bank(i, mi) {
         struct membank *bank = &mi->bank[i];
 
         bank_start = bank_pfn_start(bank);
-        if (bank_start < prev_bank_end) {
-            printk(KERN_ERR "MEM: unordered memory banks. "
-                "Not freeing memmap.\n");
-            break;
-        }
 
         /*
         * If we had a previous bank, and there is a space
         * between the current bank and the previous, free it.
         */
-        if (prev_bank_end && prev_bank_end != bank_start)
-            free_memmap(node, prev_bank_end, bank_start);
+        if (prev_bank_end && prev_bank_end < bank_start)
+            free_memmap(prev_bank_end, bank_start);
 
-        prev_bank_end = bank_pfn_end(bank);
+        /*
+         * Align up here since the VM subsystem insists that the
+         * memmap entries are valid from the bank end aligned to
+         * MAX_ORDER_NR_PAGES.
+         */
+        prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
     }
 }
 
@@ -528,21 +430,19 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 void __init mem_init(void)
 {
     unsigned long reserved_pages, free_pages;
-    int i, node;
+    int i;
+#ifdef CONFIG_HAVE_TCM
+    /* These pointers are filled in on TCM detection */
+    extern u32 dtcm_end;
+    extern u32 itcm_end;
+#endif
 
-#ifndef CONFIG_DISCONTIGMEM
     max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
-#endif
 
     /* this will put all unused low memory onto the freelists */
-    for_each_online_node(node) {
-        pg_data_t *pgdat = NODE_DATA(node);
+    free_unused_memmap(&meminfo);
 
-        free_unused_memmap_node(node, &meminfo);
-
-        if (pgdat->node_spanned_pages != 0)
-            totalram_pages += free_all_bootmem_node(pgdat);
-    }
+    totalram_pages += free_all_bootmem();
 
 #ifdef CONFIG_SA1111
     /* now that our DMA memory is actually so designated, we can free it */
@@ -552,39 +452,35 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
     /* set highmem page free */
-    for_each_online_node(node) {
-        for_each_nodebank (i, &meminfo, node) {
-            unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-            unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-            if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-                totalhigh_pages += free_area(start, end, NULL);
-        }
+    for_each_bank (i, &meminfo) {
+        unsigned long start = bank_pfn_start(&meminfo.bank[i]);
+        unsigned long end = bank_pfn_end(&meminfo.bank[i]);
+        if (start >= max_low_pfn + PHYS_PFN_OFFSET)
+            totalhigh_pages += free_area(start, end, NULL);
     }
     totalram_pages += totalhigh_pages;
 #endif
 
     reserved_pages = free_pages = 0;
 
-    for_each_online_node(node) {
-        for_each_nodebank(i, &meminfo, node) {
-            struct membank *bank = &meminfo.bank[i];
-            unsigned int pfn1, pfn2;
-            struct page *page, *end;
-
-            pfn1 = bank_pfn_start(bank);
-            pfn2 = bank_pfn_end(bank);
-
-            page = pfn_to_page(pfn1);
-            end = pfn_to_page(pfn2 - 1) + 1;
-
-            do {
-                if (PageReserved(page))
-                    reserved_pages++;
-                else if (!page_count(page))
-                    free_pages++;
-                page++;
-            } while (page < end);
-        }
+    for_each_bank(i, &meminfo) {
+        struct membank *bank = &meminfo.bank[i];
+        unsigned int pfn1, pfn2;
+        struct page *page, *end;
+
+        pfn1 = bank_pfn_start(bank);
+        pfn2 = bank_pfn_end(bank);
+
+        page = pfn_to_page(pfn1);
+        end = pfn_to_page(pfn2 - 1) + 1;
+
+        do {
+            if (PageReserved(page))
+                reserved_pages++;
+            else if (!page_count(page))
+                free_pages++;
+            page++;
+        } while (page < end);
     }
 
     /*
@@ -611,6 +507,10 @@ void __init mem_init(void)
 
     printk(KERN_NOTICE "Virtual kernel memory layout:\n"
             " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
+#ifdef CONFIG_HAVE_TCM
+            " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
+            " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
+#endif
             " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
 #ifdef CONFIG_MMU
             " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n"
@@ -627,6 +527,10 @@ void __init mem_init(void)
 
             MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                 (PAGE_SIZE)),
+#ifdef CONFIG_HAVE_TCM
+            MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
+            MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+#endif
             MLK(FIXADDR_START, FIXADDR_TOP),
 #ifdef CONFIG_MMU
             MLM(CONSISTENT_BASE, CONSISTENT_END),
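The new pfn_valid() above performs a binary search over memblock's sorted, non-overlapping memory regions. A self-contained sketch of the same search, using a made-up region table in place of memblock.memory:

    #include <stdio.h>

    struct region {
        unsigned long start_pfn;
        unsigned long end_pfn;      /* exclusive */
    };

    /* hypothetical sorted, non-overlapping regions standing in for memblock.memory */
    static const struct region regions[] = {
        { 0x60000, 0x68000 },
        { 0x70000, 0x78000 },
        { 0x80000, 0x90000 },
    };

    static int pfn_valid_demo(unsigned long pfn)
    {
        unsigned int left = 0, right = sizeof(regions) / sizeof(regions[0]);

        do {
            unsigned int mid = (right + left) / 2;

            if (pfn < regions[mid].start_pfn)
                right = mid;                    /* search lower half */
            else if (pfn >= regions[mid].end_pfn)
                left = mid + 1;                 /* search upper half */
            else
                return 1;                       /* pfn falls inside a region */
        } while (left < right);
        return 0;
    }

    int main(void)
    {
        unsigned long pfns[] = { 0x60000, 0x69000, 0x77fff, 0x90000 };
        unsigned int i;

        for (i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
            printf("pfn 0x%lx valid: %d\n", pfns[i], pfn_valid_demo(pfns[i]));
        return 0;
    }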
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 28c8b950ef0..ab506272b2d 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -42,78 +42,11 @@
  */
 #define VM_ARM_SECTION_MAPPING 0x80000000
 
-static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
-        unsigned long phys_addr, const struct mem_type *type)
-{
-    pgprot_t prot = __pgprot(type->prot_pte);
-    pte_t *pte;
-
-    pte = pte_alloc_kernel(pmd, addr);
-    if (!pte)
-        return -ENOMEM;
-
-    do {
-        if (!pte_none(*pte))
-            goto bad;
-
-        set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
-        phys_addr += PAGE_SIZE;
-    } while (pte++, addr += PAGE_SIZE, addr != end);
-    return 0;
-
- bad:
-    printk(KERN_CRIT "remap_area_pte: page already exists\n");
-    BUG();
-}
-
-static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
-        unsigned long end, unsigned long phys_addr,
-        const struct mem_type *type)
-{
-    unsigned long next;
-    pmd_t *pmd;
-    int ret = 0;
-
-    pmd = pmd_alloc(&init_mm, pgd, addr);
-    if (!pmd)
-        return -ENOMEM;
-
-    do {
-        next = pmd_addr_end(addr, end);
-        ret = remap_area_pte(pmd, addr, next, phys_addr, type);
-        if (ret)
-            return ret;
-        phys_addr += next - addr;
-    } while (pmd++, addr = next, addr != end);
-    return ret;
-}
-
-static int remap_area_pages(unsigned long start, unsigned long pfn,
-        size_t size, const struct mem_type *type)
-{
-    unsigned long addr = start;
-    unsigned long next, end = start + size;
-    unsigned long phys_addr = __pfn_to_phys(pfn);
-    pgd_t *pgd;
-    int err = 0;
-
-    BUG_ON(addr >= end);
-    pgd = pgd_offset_k(addr);
-    do {
-        next = pgd_addr_end(addr, end);
-        err = remap_area_pmd(pgd, addr, next, phys_addr, type);
-        if (err)
-            break;
-        phys_addr += next - addr;
-    } while (pgd++, addr = next, addr != end);
-
-    return err;
-}
-
 int ioremap_page(unsigned long virt, unsigned long phys,
         const struct mem_type *mtype)
 {
-    return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+    return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+                __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
 
@@ -268,6 +201,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
     if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
         return NULL;
 
+    /*
+     * Don't allow RAM to be mapped - this causes problems with ARMv6+
+     */
+    if (WARN_ON(pfn_valid(pfn)))
+        return NULL;
+
     type = get_mem_type(mtype);
     if (!type)
         return NULL;
@@ -294,7 +233,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         err = remap_area_sections(addr, pfn, size, type);
     } else
 #endif
-        err = remap_area_pages(addr, pfn, size, type);
+        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+                __pgprot(type->prot_pte));
 
     if (err) {
         vunmap((void *)addr);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 815d08eecbb..6630620380a 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,7 +28,5 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 
 #endif
 
-struct pglist_data;
-
 void __init bootmem_init(void);
-void reserve_node_zero(struct pglist_data *pgdat);
+void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f5abc51c5a0..4f5b39687df 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/random.h>
 #include <asm/cputype.h>
 #include <asm/system.h>
 
@@ -80,6 +81,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
         start_addr = addr = TASK_UNMAPPED_BASE;
         mm->cached_hole_size = 0;
     }
+    /* 8 bits of randomness in 20 address space bits */
+    if (current->flags & PF_RANDOMIZE)
+        addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
     if (do_align)
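The mmap.c hunk adds 8 bits of page-granular randomness to the search start address, covering 20 bits of address space. A small sketch of the arithmetic, assuming 4 KB pages and an arbitrary stand-in random source in place of get_random_int():

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12   /* assumed: 4 KB pages */

    int main(void)
    {
        unsigned long base = 0x40000000UL;  /* hypothetical TASK_UNMAPPED_BASE */
        unsigned int i;

        srand(1);
        for (i = 0; i < 4; i++) {
            /* 8 random bits, shifted up by PAGE_SHIFT: bits 12..19 vary */
            unsigned long offset =
                ((unsigned long)(rand() % (1 << 8))) << PAGE_SHIFT;
            printf("randomized base: 0x%08lx (offset 0x%06lx)\n",
                   base + offset, offset);
        }
        return 0;
    }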
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 28589417118..6e1c4f6a2b3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -11,13 +11,12 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
+#include <linux/memblock.h>
 #include <linux/sort.h>
 
 #include <asm/cputype.h>
-#include <asm/mach-types.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
 #include <asm/setup.h>
@@ -258,6 +257,19 @@ static struct mem_type mem_types[] = {
         .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
         .domain = DOMAIN_KERNEL,
     },
+    [MT_MEMORY_DTCM] = {
+        .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG |
+                L_PTE_DIRTY | L_PTE_WRITE,
+        .prot_l1 = PMD_TYPE_TABLE,
+        .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+        .domain = DOMAIN_KERNEL,
+    },
+    [MT_MEMORY_ITCM] = {
+        .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                L_PTE_USER | L_PTE_EXEC,
+        .prot_l1 = PMD_TYPE_TABLE,
+        .domain = DOMAIN_IO,
+    },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
@@ -488,18 +500,28 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
-        unsigned long end, unsigned long pfn,
-        const struct mem_type *type)
+static void __init *early_alloc(unsigned long sz)
 {
-    pte_t *pte;
+    void *ptr = __va(memblock_alloc(sz, sz));
+    memset(ptr, 0, sz);
+    return ptr;
+}
 
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+{
     if (pmd_none(*pmd)) {
-        pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-        __pmd_populate(pmd, __pa(pte) | type->prot_l1);
+        pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+        __pmd_populate(pmd, __pa(pte) | prot);
     }
+    BUG_ON(pmd_bad(*pmd));
+    return pte_offset_kernel(pmd, addr);
+}
 
-    pte = pte_offset_kernel(pmd, addr);
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+        unsigned long end, unsigned long pfn,
+        const struct mem_type *type)
+{
+    pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
     do {
         set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
         pfn++;
@@ -668,7 +690,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
         create_mapping(io_desc + i);
 }
 
-static unsigned long __initdata vmalloc_reserve = SZ_128M;
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
@@ -677,7 +699,7 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
  */
 static int __init early_vmalloc(char *arg)
 {
-    vmalloc_reserve = memparse(arg, NULL);
+    unsigned long vmalloc_reserve = memparse(arg, NULL);
 
     if (vmalloc_reserve < SZ_16M) {
         vmalloc_reserve = SZ_16M;
@@ -692,22 +714,26 @@ static int __init early_vmalloc(char *arg)
             "vmalloc area is too big, limiting to %luMB\n",
             vmalloc_reserve >> 20);
     }
+
+    vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
     return 0;
 }
 early_param("vmalloc", early_vmalloc);
 
-#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
+phys_addr_t lowmem_end_addr;
 
 static void __init sanity_check_meminfo(void)
 {
     int i, j, highmem = 0;
 
+    lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+
     for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
         struct membank *bank = &meminfo.bank[j];
         *bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-        if (__va(bank->start) > VMALLOC_MIN ||
+        if (__va(bank->start) > vmalloc_min ||
             __va(bank->start) < (void *)PAGE_OFFSET)
             highmem = 1;
 
@@ -717,8 +743,8 @@ static void __init sanity_check_meminfo(void)
          * Split those memory banks which are partially overlapping
          * the vmalloc area greatly simplifying things later.
          */
-        if (__va(bank->start) < VMALLOC_MIN &&
-            bank->size > VMALLOC_MIN - __va(bank->start)) {
+        if (__va(bank->start) < vmalloc_min &&
+            bank->size > vmalloc_min - __va(bank->start)) {
             if (meminfo.nr_banks >= NR_BANKS) {
                 printk(KERN_CRIT "NR_BANKS too low, "
                     "ignoring high memory\n");
@@ -727,12 +753,12 @@ static void __init sanity_check_meminfo(void)
                     (meminfo.nr_banks - i) * sizeof(*bank));
                 meminfo.nr_banks++;
                 i++;
-                bank[1].size -= VMALLOC_MIN - __va(bank->start);
-                bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+                bank[1].size -= vmalloc_min - __va(bank->start);
+                bank[1].start = __pa(vmalloc_min - 1) + 1;
                 bank[1].highmem = highmem = 1;
                 j++;
             }
-            bank->size = VMALLOC_MIN - __va(bank->start);
+            bank->size = vmalloc_min - __va(bank->start);
         }
 #else
         bank->highmem = highmem;
@@ -741,7 +767,7 @@ static void __init sanity_check_meminfo(void)
          * Check whether this memory bank would entirely overlap
          * the vmalloc area.
          */
-        if (__va(bank->start) >= VMALLOC_MIN ||
+        if (__va(bank->start) >= vmalloc_min ||
             __va(bank->start) < (void *)PAGE_OFFSET) {
             printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
                 "(vmalloc region overlap).\n",
@@ -753,9 +779,9 @@ static void __init sanity_check_meminfo(void)
          * Check whether this memory bank would partially overlap
          * the vmalloc area.
          */
-        if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+        if (__va(bank->start + bank->size) > vmalloc_min ||
             __va(bank->start + bank->size) < __va(bank->start)) {
-            unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+            unsigned long newsize = vmalloc_min - __va(bank->start);
             printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
                 "to -%.8lx (vmalloc region overlap).\n",
                 bank->start, bank->start + bank->size - 1,
| @@ -827,101 +853,23 @@ static inline void prepare_page_table(void) | |||
| 827 | } | 853 | } |
| 828 | 854 | ||
| 829 | /* | 855 | /* |
| 830 | * Reserve the various regions of node 0 | 856 | * Reserve the special regions of memory |
| 831 | */ | 857 | */ |
| 832 | void __init reserve_node_zero(pg_data_t *pgdat) | 858 | void __init arm_mm_memblock_reserve(void) |
| 833 | { | 859 | { |
| 834 | unsigned long res_size = 0; | ||
| 835 | |||
| 836 | /* | ||
| 837 | * Register the kernel text and data with bootmem. | ||
| 838 | * Note that this can only be in node 0. | ||
| 839 | */ | ||
| 840 | #ifdef CONFIG_XIP_KERNEL | ||
| 841 | reserve_bootmem_node(pgdat, __pa(_data), _end - _data, | ||
| 842 | BOOTMEM_DEFAULT); | ||
| 843 | #else | ||
| 844 | reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, | ||
| 845 | BOOTMEM_DEFAULT); | ||
| 846 | #endif | ||
| 847 | |||
| 848 | /* | 860 | /* |
| 849 | * Reserve the page tables. These are already in use, | 861 | * Reserve the page tables. These are already in use, |
| 850 | * and can only be in node 0. | 862 | * and can only be in node 0. |
| 851 | */ | 863 | */ |
| 852 | reserve_bootmem_node(pgdat, __pa(swapper_pg_dir), | 864 | memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); |
| 853 | PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT); | ||
| 854 | |||
| 855 | /* | ||
| 856 | * Hmm... This should go elsewhere, but we really really need to | ||
| 857 | * stop things allocating the low memory; ideally we need a better | ||
| 858 | * implementation of GFP_DMA which does not assume that DMA-able | ||
| 859 | * memory starts at zero. | ||
| 860 | */ | ||
| 861 | if (machine_is_integrator() || machine_is_cintegrator()) | ||
| 862 | res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; | ||
| 863 | |||
| 864 | /* | ||
| 865 | * These should likewise go elsewhere. They pre-reserve the | ||
| 866 | * screen memory region at the start of main system memory. | ||
| 867 | */ | ||
| 868 | if (machine_is_edb7211()) | ||
| 869 | res_size = 0x00020000; | ||
| 870 | if (machine_is_p720t()) | ||
| 871 | res_size = 0x00014000; | ||
| 872 | |||
| 873 | /* H1940, RX3715 and RX1950 need to reserve this for suspend */ | ||
| 874 | |||
| 875 | if (machine_is_h1940() || machine_is_rx3715() | ||
| 876 | || machine_is_rx1950()) { | ||
| 877 | reserve_bootmem_node(pgdat, 0x30003000, 0x1000, | ||
| 878 | BOOTMEM_DEFAULT); | ||
| 879 | reserve_bootmem_node(pgdat, 0x30081000, 0x1000, | ||
| 880 | BOOTMEM_DEFAULT); | ||
| 881 | } | ||
| 882 | |||
| 883 | if (machine_is_palmld() || machine_is_palmtx()) { | ||
| 884 | reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, | ||
| 885 | BOOTMEM_EXCLUSIVE); | ||
| 886 | reserve_bootmem_node(pgdat, 0xa0200000, 0x1000, | ||
| 887 | BOOTMEM_EXCLUSIVE); | ||
| 888 | } | ||
| 889 | |||
| 890 | if (machine_is_treo680() || machine_is_centro()) { | ||
| 891 | reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, | ||
| 892 | BOOTMEM_EXCLUSIVE); | ||
| 893 | reserve_bootmem_node(pgdat, 0xa2000000, 0x1000, | ||
| 894 | BOOTMEM_EXCLUSIVE); | ||
| 895 | } | ||
| 896 | |||
| 897 | if (machine_is_palmt5()) | ||
| 898 | reserve_bootmem_node(pgdat, 0xa0200000, 0x1000, | ||
| 899 | BOOTMEM_EXCLUSIVE); | ||
| 900 | |||
| 901 | /* | ||
| 902 | * U300 - This platform family can share physical memory | ||
| 903 | * between two ARM cpus, one running Linux and the other | ||
| 904 | * running another OS. | ||
| 905 | */ | ||
| 906 | if (machine_is_u300()) { | ||
| 907 | #ifdef CONFIG_MACH_U300_SINGLE_RAM | ||
| 908 | #if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) && \ | ||
| 909 | CONFIG_MACH_U300_2MB_ALIGNMENT_FIX | ||
| 910 | res_size = 0x00100000; | ||
| 911 | #endif | ||
| 912 | #endif | ||
| 913 | } | ||
| 914 | 865 | ||
| 915 | #ifdef CONFIG_SA1111 | 866 | #ifdef CONFIG_SA1111 |
| 916 | /* | 867 | /* |
| 917 | * Because of the SA1111 DMA bug, we want to preserve our | 868 | * Because of the SA1111 DMA bug, we want to preserve our |
| 918 | * precious DMA-able memory... | 869 | * precious DMA-able memory... |
| 919 | */ | 870 | */ |
| 920 | res_size = __pa(swapper_pg_dir) - PHYS_OFFSET; | 871 | memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); |
| 921 | #endif | 872 | #endif |
| 922 | if (res_size) | ||
| 923 | reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size, | ||
| 924 | BOOTMEM_DEFAULT); | ||
| 925 | } | 873 | } |
| 926 | 874 | ||
| 927 | /* | 875 | /* |
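The conversion above is mechanical: each reserve_bootmem_node(pgdat, base, size, ...) becomes a memblock_reserve(base, size), and the per-machine quirks leave this file. As a minimal sketch of the new idiom, reusing the H1940 suspend page that the deleted code used to pin (the helper name is made up, not part of the patch):

    /* Sketch only: pinning a platform scratch page with memblock instead of
     * bootmem.  0x30003000/0x1000 is the H1940 suspend page covered by one of
     * the reserve_bootmem_node() calls removed above; the function name is
     * hypothetical. */
    #include <linux/init.h>
    #include <linux/memblock.h>

    static void __init example_reserve_h1940_suspend(void)
    {
            /* physical base and size, exactly as bootmem used to take them */
            memblock_reserve(0x30003000, 0x1000);
    }

Board-specific reservations like this now belong with the machine code (for example a machine-level reserve hook) rather than in the core ARM MM code.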
| @@ -940,7 +888,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
| 940 | /* | 888 | /* |
| 941 | * Allocate the vector page early. | 889 | * Allocate the vector page early. |
| 942 | */ | 890 | */ |
| 943 | vectors = alloc_bootmem_low_pages(PAGE_SIZE); | 891 | vectors = early_alloc(PAGE_SIZE); |
| 944 | 892 | ||
| 945 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) | 893 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) |
| 946 | pmd_clear(pmd_off_k(addr)); | 894 | pmd_clear(pmd_off_k(addr)); |
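early_alloc() is not defined in the hunks shown here; judging from its call sites (the vector page above, the zero page and kmap page tables later in this patch) it is presumably a small boot-time helper that takes memory from memblock and returns a zeroed virtual address, along the lines of the sketch below (inferred, not quoted from the source):

    /* Plausible shape of the helper, inferred from its uses in this patch. */
    #include <linux/memblock.h>
    #include <linux/string.h>
    #include <asm/memory.h>                 /* __va() */

    static void __init *early_alloc(unsigned long sz)
    {
            void *ptr = __va(memblock_alloc(sz, sz));   /* size-aligned allocation */

            memset(ptr, 0, sz);
            return ptr;
    }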
| @@ -1011,11 +959,8 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
| 1011 | static void __init kmap_init(void) | 959 | static void __init kmap_init(void) |
| 1012 | { | 960 | { |
| 1013 | #ifdef CONFIG_HIGHMEM | 961 | #ifdef CONFIG_HIGHMEM |
| 1014 | pmd_t *pmd = pmd_off_k(PKMAP_BASE); | 962 | pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), |
| 1015 | pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); | 963 | PKMAP_BASE, _PAGE_KERNEL_TABLE); |
| 1016 | BUG_ON(!pmd_none(*pmd) || !pte); | ||
| 1017 | __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE); | ||
| 1018 | pkmap_page_table = pte + PTRS_PER_PTE; | ||
| 1019 | #endif | 964 | #endif |
| 1020 | } | 965 | } |
| 1021 | 966 | ||
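For comparison, the open-coded sequence removed from kmap_init() corresponds to the following plain-C reading of the deleted lines; early_pte_alloc() now performs the same allocate-and-populate step and hands the PTE pointer back directly:

    #ifdef CONFIG_HIGHMEM
            pmd_t *pmd = pmd_off_k(PKMAP_BASE);
            /* one allocation holding two PTE tables' worth of entries */
            pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));

            BUG_ON(!pmd_none(*pmd) || !pte);
            __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
            pkmap_page_table = pte + PTRS_PER_PTE;  /* pkmap uses the second table */
    #endif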
| @@ -1066,17 +1011,16 @@ void __init paging_init(struct machine_desc *mdesc) | |||
| 1066 | sanity_check_meminfo(); | 1011 | sanity_check_meminfo(); |
| 1067 | prepare_page_table(); | 1012 | prepare_page_table(); |
| 1068 | map_lowmem(); | 1013 | map_lowmem(); |
| 1069 | bootmem_init(); | ||
| 1070 | devicemaps_init(mdesc); | 1014 | devicemaps_init(mdesc); |
| 1071 | kmap_init(); | 1015 | kmap_init(); |
| 1072 | 1016 | ||
| 1073 | top_pmd = pmd_off_k(0xffff0000); | 1017 | top_pmd = pmd_off_k(0xffff0000); |
| 1074 | 1018 | ||
| 1075 | /* | 1019 | /* allocate the zero page. */ |
| 1076 | * allocate the zero page. Note that this always succeeds and | 1020 | zero_page = early_alloc(PAGE_SIZE); |
| 1077 | * returns a zeroed result. | 1021 | |
| 1078 | */ | 1022 | bootmem_init(); |
| 1079 | zero_page = alloc_bootmem_low_pages(PAGE_SIZE); | 1023 | |
| 1080 | empty_zero_page = virt_to_page(zero_page); | 1024 | empty_zero_page = virt_to_page(zero_page); |
| 1081 | __flush_dcache_page(NULL, empty_zero_page); | 1025 | __flush_dcache_page(NULL, empty_zero_page); |
| 1082 | } | 1026 | } |
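Putting the new right-hand column together, paging_init() now reads as below: the zero page comes from early_alloc(), and bootmem_init() is deferred until after devicemaps_init()/kmap_init(), presumably so that every early allocation is taken from memblock before bootmem_init() builds the free-page map from it (lines before sanity_check_meminfo() are unchanged and omitted):

    void __init paging_init(struct machine_desc *mdesc)
    {
            void *zero_page;

            /* ... earlier setup not shown in this hunk ... */
            sanity_check_meminfo();
            prepare_page_table();
            map_lowmem();
            devicemaps_init(mdesc);
            kmap_init();

            top_pmd = pmd_off_k(0xffff0000);

            /* allocate the zero page. */
            zero_page = early_alloc(PAGE_SIZE);

            bootmem_init();

            empty_zero_page = virt_to_page(zero_page);
            __flush_dcache_page(NULL, empty_zero_page);
    }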
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 33b327379f0..687d02319a4 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
| @@ -6,8 +6,8 @@ | |||
| 6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
| 7 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
| 8 | #include <linux/pagemap.h> | 8 | #include <linux/pagemap.h> |
| 9 | #include <linux/bootmem.h> | ||
| 10 | #include <linux/io.h> | 9 | #include <linux/io.h> |
| 10 | #include <linux/memblock.h> | ||
| 11 | 11 | ||
| 12 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
| 13 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
| @@ -17,30 +17,14 @@ | |||
| 17 | 17 | ||
| 18 | #include "mm.h" | 18 | #include "mm.h" |
| 19 | 19 | ||
| 20 | /* | 20 | void __init arm_mm_memblock_reserve(void) |
| 21 | * Reserve the various regions of node 0 | ||
| 22 | */ | ||
| 23 | void __init reserve_node_zero(pg_data_t *pgdat) | ||
| 24 | { | 21 | { |
| 25 | /* | 22 | /* |
| 26 | * Register the kernel text and data with bootmem. | ||
| 27 | * Note that this can only be in node 0. | ||
| 28 | */ | ||
| 29 | #ifdef CONFIG_XIP_KERNEL | ||
| 30 | reserve_bootmem_node(pgdat, __pa(_data), _end - _data, | ||
| 31 | BOOTMEM_DEFAULT); | ||
| 32 | #else | ||
| 33 | reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, | ||
| 34 | BOOTMEM_DEFAULT); | ||
| 35 | #endif | ||
| 36 | |||
| 37 | /* | ||
| 38 | * Register the exception vector page. | 23 | * Register the exception vector page. |
| 39 | * On some platforms the exception vectors live in DRAM; if that page is | 24 | * On some platforms the exception vectors live in DRAM; if that page is |
| 40 | * not reserved, alloc_page can hand it out, returning it as "0". | 25 | * not reserved, alloc_page can hand it out, returning it as "0". |
| 41 | */ | 26 | */ |
| 42 | reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE, | 27 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); |
| 43 | BOOTMEM_DEFAULT); | ||
| 44 | } | 28 | } |
| 45 | 29 | ||
| 46 | /* | 30 | /* |
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 72507c630ce..203a4e944d9 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
| @@ -79,15 +79,11 @@ ENTRY(cpu_arm1020_proc_init) | |||
| 79 | * cpu_arm1020_proc_fin() | 79 | * cpu_arm1020_proc_fin() |
| 80 | */ | 80 | */ |
| 81 | ENTRY(cpu_arm1020_proc_fin) | 81 | ENTRY(cpu_arm1020_proc_fin) |
| 82 | stmfd sp!, {lr} | ||
| 83 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 84 | msr cpsr_c, ip | ||
| 85 | bl arm1020_flush_kern_cache_all | ||
| 86 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 82 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 87 | bic r0, r0, #0x1000 @ ...i............ | 83 | bic r0, r0, #0x1000 @ ...i............ |
| 88 | bic r0, r0, #0x000e @ ............wca. | 84 | bic r0, r0, #0x000e @ ............wca. |
| 89 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 85 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 90 | ldmfd sp!, {pc} | 86 | mov pc, lr |
| 91 | 87 | ||
| 92 | /* | 88 | /* |
| 93 | * cpu_arm1020_reset(loc) | 89 | * cpu_arm1020_reset(loc) |
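The same removal repeats for every proc-*.S file that follows: cpu_*_proc_fin no longer masks interrupts or flushes the caches itself (presumably the callers handle that now); it only clears the relevant enable bits in the CP15 control register. The two bic masks decode as bit 12 (I-cache) and bits 3-1 (write buffer, D-cache, alignment). A tiny stand-alone C check of that arithmetic, with a made-up starting SCTLR value:

    #include <stdio.h>

    int main(void)
    {
            unsigned int sctlr = 0x00053079;    /* made-up control register value */

            sctlr &= ~0x1000u;      /* ...i............  bit 12: I-cache enable   */
            sctlr &= ~0x000eu;      /* ............wca.  write buffer, D-cache, alignment */

            printf("SCTLR after cpu_*_proc_fin masking: %#010x\n", sctlr);
            return 0;
    }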
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index d2782980560..1a511e76590 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
| @@ -79,15 +79,11 @@ ENTRY(cpu_arm1020e_proc_init) | |||
| 79 | * cpu_arm1020e_proc_fin() | 79 | * cpu_arm1020e_proc_fin() |
| 80 | */ | 80 | */ |
| 81 | ENTRY(cpu_arm1020e_proc_fin) | 81 | ENTRY(cpu_arm1020e_proc_fin) |
| 82 | stmfd sp!, {lr} | ||
| 83 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 84 | msr cpsr_c, ip | ||
| 85 | bl arm1020e_flush_kern_cache_all | ||
| 86 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 82 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 87 | bic r0, r0, #0x1000 @ ...i............ | 83 | bic r0, r0, #0x1000 @ ...i............ |
| 88 | bic r0, r0, #0x000e @ ............wca. | 84 | bic r0, r0, #0x000e @ ............wca. |
| 89 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 85 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 90 | ldmfd sp!, {pc} | 86 | mov pc, lr |
| 91 | 87 | ||
| 92 | /* | 88 | /* |
| 93 | * cpu_arm1020e_reset(loc) | 89 | * cpu_arm1020e_reset(loc) |
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index ce13e4a827d..1ffa4eb9c34 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
| @@ -68,15 +68,11 @@ ENTRY(cpu_arm1022_proc_init) | |||
| 68 | * cpu_arm1022_proc_fin() | 68 | * cpu_arm1022_proc_fin() |
| 69 | */ | 69 | */ |
| 70 | ENTRY(cpu_arm1022_proc_fin) | 70 | ENTRY(cpu_arm1022_proc_fin) |
| 71 | stmfd sp!, {lr} | ||
| 72 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 73 | msr cpsr_c, ip | ||
| 74 | bl arm1022_flush_kern_cache_all | ||
| 75 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 71 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 76 | bic r0, r0, #0x1000 @ ...i............ | 72 | bic r0, r0, #0x1000 @ ...i............ |
| 77 | bic r0, r0, #0x000e @ ............wca. | 73 | bic r0, r0, #0x000e @ ............wca. |
| 78 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 74 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 79 | ldmfd sp!, {pc} | 75 | mov pc, lr |
| 80 | 76 | ||
| 81 | /* | 77 | /* |
| 82 | * cpu_arm1022_reset(loc) | 78 | * cpu_arm1022_reset(loc) |
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 636672a29c6..5697c34b95b 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
| @@ -68,15 +68,11 @@ ENTRY(cpu_arm1026_proc_init) | |||
| 68 | * cpu_arm1026_proc_fin() | 68 | * cpu_arm1026_proc_fin() |
| 69 | */ | 69 | */ |
| 70 | ENTRY(cpu_arm1026_proc_fin) | 70 | ENTRY(cpu_arm1026_proc_fin) |
| 71 | stmfd sp!, {lr} | ||
| 72 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 73 | msr cpsr_c, ip | ||
| 74 | bl arm1026_flush_kern_cache_all | ||
| 75 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 71 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 76 | bic r0, r0, #0x1000 @ ...i............ | 72 | bic r0, r0, #0x1000 @ ...i............ |
| 77 | bic r0, r0, #0x000e @ ............wca. | 73 | bic r0, r0, #0x000e @ ............wca. |
| 78 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 74 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 79 | ldmfd sp!, {pc} | 75 | mov pc, lr |
| 80 | 76 | ||
| 81 | /* | 77 | /* |
| 82 | * cpu_arm1026_reset(loc) | 78 | * cpu_arm1026_reset(loc) |
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 795dc615f43..64e0b327c7c 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
| @@ -184,8 +184,6 @@ ENTRY(cpu_arm7_proc_init) | |||
| 184 | 184 | ||
| 185 | ENTRY(cpu_arm6_proc_fin) | 185 | ENTRY(cpu_arm6_proc_fin) |
| 186 | ENTRY(cpu_arm7_proc_fin) | 186 | ENTRY(cpu_arm7_proc_fin) |
| 187 | mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 188 | msr cpsr_c, r0 | ||
| 189 | mov r0, #0x31 @ ....S..DP...M | 187 | mov r0, #0x31 @ ....S..DP...M |
| 190 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 188 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 191 | mov pc, lr | 189 | mov pc, lr |
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 0b62de24466..9d96824134f 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
| @@ -54,15 +54,11 @@ ENTRY(cpu_arm720_proc_init) | |||
| 54 | mov pc, lr | 54 | mov pc, lr |
| 55 | 55 | ||
| 56 | ENTRY(cpu_arm720_proc_fin) | 56 | ENTRY(cpu_arm720_proc_fin) |
| 57 | stmfd sp!, {lr} | ||
| 58 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 59 | msr cpsr_c, ip | ||
| 60 | mrc p15, 0, r0, c1, c0, 0 | 57 | mrc p15, 0, r0, c1, c0, 0 |
| 61 | bic r0, r0, #0x1000 @ ...i............ | 58 | bic r0, r0, #0x1000 @ ...i............ |
| 62 | bic r0, r0, #0x000e @ ............wca. | 59 | bic r0, r0, #0x000e @ ............wca. |
| 63 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 60 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 64 | mcr p15, 0, r1, c7, c7, 0 @ invalidate cache | 61 | mov pc, lr |
| 65 | ldmfd sp!, {pc} | ||
| 66 | 62 | ||
| 67 | /* | 63 | /* |
| 68 | * Function: arm720_proc_do_idle(void) | 64 | * Function: arm720_proc_do_idle(void) |
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 01860cdeb2e..6c1a9ab059a 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
| @@ -36,15 +36,11 @@ ENTRY(cpu_arm740_switch_mm) | |||
| 36 | * cpu_arm740_proc_fin() | 36 | * cpu_arm740_proc_fin() |
| 37 | */ | 37 | */ |
| 38 | ENTRY(cpu_arm740_proc_fin) | 38 | ENTRY(cpu_arm740_proc_fin) |
| 39 | stmfd sp!, {lr} | ||
| 40 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 41 | msr cpsr_c, ip | ||
| 42 | mrc p15, 0, r0, c1, c0, 0 | 39 | mrc p15, 0, r0, c1, c0, 0 |
| 43 | bic r0, r0, #0x3f000000 @ bank/f/lock/s | 40 | bic r0, r0, #0x3f000000 @ bank/f/lock/s |
| 44 | bic r0, r0, #0x0000000c @ w-buffer/cache | 41 | bic r0, r0, #0x0000000c @ w-buffer/cache |
| 45 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 42 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 46 | mcr p15, 0, r0, c7, c0, 0 @ invalidate cache | 43 | mov pc, lr |
| 47 | ldmfd sp!, {pc} | ||
| 48 | 44 | ||
| 49 | /* | 45 | /* |
| 50 | * cpu_arm740_reset(loc) | 46 | * cpu_arm740_reset(loc) |
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 1201b986382..6a850dbba22 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
| @@ -36,8 +36,6 @@ ENTRY(cpu_arm7tdmi_switch_mm) | |||
| 36 | * cpu_arm7tdmi_proc_fin() | 36 | * cpu_arm7tdmi_proc_fin() |
| 37 | */ | 37 | */ |
| 38 | ENTRY(cpu_arm7tdmi_proc_fin) | 38 | ENTRY(cpu_arm7tdmi_proc_fin) |
| 39 | mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 40 | msr cpsr_c, r0 | ||
| 41 | mov pc, lr | 39 | mov pc, lr |
| 42 | 40 | ||
| 43 | /* | 41 | /* |
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 8be81992645..86f80aa5621 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
| @@ -69,19 +69,11 @@ ENTRY(cpu_arm920_proc_init) | |||
| 69 | * cpu_arm920_proc_fin() | 69 | * cpu_arm920_proc_fin() |
| 70 | */ | 70 | */ |
| 71 | ENTRY(cpu_arm920_proc_fin) | 71 | ENTRY(cpu_arm920_proc_fin) |
| 72 | stmfd sp!, {lr} | ||
| 73 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 74 | msr cpsr_c, ip | ||
| 75 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 76 | bl arm920_flush_kern_cache_all | ||
| 77 | #else | ||
| 78 | bl v4wt_flush_kern_cache_all | ||
| 79 | #endif | ||
| 80 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 72 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 81 | bic r0, r0, #0x1000 @ ...i............ | 73 | bic r0, r0, #0x1000 @ ...i............ |
| 82 | bic r0, r0, #0x000e @ ............wca. | 74 | bic r0, r0, #0x000e @ ............wca. |
| 83 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 75 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 84 | ldmfd sp!, {pc} | 76 | mov pc, lr |
| 85 | 77 | ||
| 86 | /* | 78 | /* |
| 87 | * cpu_arm920_reset(loc) | 79 | * cpu_arm920_reset(loc) |
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index c0ff8e4b107..f76ce9b6288 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
| @@ -71,19 +71,11 @@ ENTRY(cpu_arm922_proc_init) | |||
| 71 | * cpu_arm922_proc_fin() | 71 | * cpu_arm922_proc_fin() |
| 72 | */ | 72 | */ |
| 73 | ENTRY(cpu_arm922_proc_fin) | 73 | ENTRY(cpu_arm922_proc_fin) |
| 74 | stmfd sp!, {lr} | ||
| 75 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 76 | msr cpsr_c, ip | ||
| 77 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | ||
| 78 | bl arm922_flush_kern_cache_all | ||
| 79 | #else | ||
| 80 | bl v4wt_flush_kern_cache_all | ||
| 81 | #endif | ||
| 82 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 74 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 83 | bic r0, r0, #0x1000 @ ...i............ | 75 | bic r0, r0, #0x1000 @ ...i............ |
| 84 | bic r0, r0, #0x000e @ ............wca. | 76 | bic r0, r0, #0x000e @ ............wca. |
| 85 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 77 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 86 | ldmfd sp!, {pc} | 78 | mov pc, lr |
| 87 | 79 | ||
| 88 | /* | 80 | /* |
| 89 | * cpu_arm922_reset(loc) | 81 | * cpu_arm922_reset(loc) |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 3c6cffe400f..657bd3f7c15 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
| @@ -92,15 +92,11 @@ ENTRY(cpu_arm925_proc_init) | |||
| 92 | * cpu_arm925_proc_fin() | 92 | * cpu_arm925_proc_fin() |
| 93 | */ | 93 | */ |
| 94 | ENTRY(cpu_arm925_proc_fin) | 94 | ENTRY(cpu_arm925_proc_fin) |
| 95 | stmfd sp!, {lr} | ||
| 96 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 97 | msr cpsr_c, ip | ||
| 98 | bl arm925_flush_kern_cache_all | ||
| 99 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 95 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 100 | bic r0, r0, #0x1000 @ ...i............ | 96 | bic r0, r0, #0x1000 @ ...i............ |
| 101 | bic r0, r0, #0x000e @ ............wca. | 97 | bic r0, r0, #0x000e @ ............wca. |
| 102 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 98 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 103 | ldmfd sp!, {pc} | 99 | mov pc, lr |
| 104 | 100 | ||
| 105 | /* | 101 | /* |
| 106 | * cpu_arm925_reset(loc) | 102 | * cpu_arm925_reset(loc) |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 75b707c9cce..73f1f3c6891 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
| @@ -61,15 +61,11 @@ ENTRY(cpu_arm926_proc_init) | |||
| 61 | * cpu_arm926_proc_fin() | 61 | * cpu_arm926_proc_fin() |
| 62 | */ | 62 | */ |
| 63 | ENTRY(cpu_arm926_proc_fin) | 63 | ENTRY(cpu_arm926_proc_fin) |
| 64 | stmfd sp!, {lr} | ||
| 65 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 66 | msr cpsr_c, ip | ||
| 67 | bl arm926_flush_kern_cache_all | ||
| 68 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 64 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 69 | bic r0, r0, #0x1000 @ ...i............ | 65 | bic r0, r0, #0x1000 @ ...i............ |
| 70 | bic r0, r0, #0x000e @ ............wca. | 66 | bic r0, r0, #0x000e @ ............wca. |
| 71 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 67 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 72 | ldmfd sp!, {pc} | 68 | mov pc, lr |
| 73 | 69 | ||
| 74 | /* | 70 | /* |
| 75 | * cpu_arm926_reset(loc) | 71 | * cpu_arm926_reset(loc) |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 1af1657819e..fffb061a45a 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
| @@ -37,15 +37,11 @@ ENTRY(cpu_arm940_switch_mm) | |||
| 37 | * cpu_arm940_proc_fin() | 37 | * cpu_arm940_proc_fin() |
| 38 | */ | 38 | */ |
| 39 | ENTRY(cpu_arm940_proc_fin) | 39 | ENTRY(cpu_arm940_proc_fin) |
| 40 | stmfd sp!, {lr} | ||
| 41 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 42 | msr cpsr_c, ip | ||
| 43 | bl arm940_flush_kern_cache_all | ||
| 44 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 40 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 45 | bic r0, r0, #0x00001000 @ i-cache | 41 | bic r0, r0, #0x00001000 @ i-cache |
| 46 | bic r0, r0, #0x00000004 @ d-cache | 42 | bic r0, r0, #0x00000004 @ d-cache |
| 47 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 43 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 48 | ldmfd sp!, {pc} | 44 | mov pc, lr |
| 49 | 45 | ||
| 50 | /* | 46 | /* |
| 51 | * cpu_arm940_reset(loc) | 47 | * cpu_arm940_reset(loc) |
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 1664b6aaff7..249a6053760 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
| @@ -44,15 +44,11 @@ ENTRY(cpu_arm946_switch_mm) | |||
| 44 | * cpu_arm946_proc_fin() | 44 | * cpu_arm946_proc_fin() |
| 45 | */ | 45 | */ |
| 46 | ENTRY(cpu_arm946_proc_fin) | 46 | ENTRY(cpu_arm946_proc_fin) |
| 47 | stmfd sp!, {lr} | ||
| 48 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 49 | msr cpsr_c, ip | ||
| 50 | bl arm946_flush_kern_cache_all | ||
| 51 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 47 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 52 | bic r0, r0, #0x00001000 @ i-cache | 48 | bic r0, r0, #0x00001000 @ i-cache |
| 53 | bic r0, r0, #0x00000004 @ d-cache | 49 | bic r0, r0, #0x00000004 @ d-cache |
| 54 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 50 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 55 | ldmfd sp!, {pc} | 51 | mov pc, lr |
| 56 | 52 | ||
| 57 | /* | 53 | /* |
| 58 | * cpu_arm946_reset(loc) | 54 | * cpu_arm946_reset(loc) |
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 28545c29dbc..db475667fac 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
| @@ -36,8 +36,6 @@ ENTRY(cpu_arm9tdmi_switch_mm) | |||
| 36 | * cpu_arm9tdmi_proc_fin() | 36 | * cpu_arm9tdmi_proc_fin() |
| 37 | */ | 37 | */ |
| 38 | ENTRY(cpu_arm9tdmi_proc_fin) | 38 | ENTRY(cpu_arm9tdmi_proc_fin) |
| 39 | mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 40 | msr cpsr_c, r0 | ||
| 41 | mov pc, lr | 39 | mov pc, lr |
| 42 | 40 | ||
| 43 | /* | 41 | /* |
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 08f5ac237ad..7803fdf7002 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
| @@ -39,17 +39,13 @@ ENTRY(cpu_fa526_proc_init) | |||
| 39 | * cpu_fa526_proc_fin() | 39 | * cpu_fa526_proc_fin() |
| 40 | */ | 40 | */ |
| 41 | ENTRY(cpu_fa526_proc_fin) | 41 | ENTRY(cpu_fa526_proc_fin) |
| 42 | stmfd sp!, {lr} | ||
| 43 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 44 | msr cpsr_c, ip | ||
| 45 | bl fa_flush_kern_cache_all | ||
| 46 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 42 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 47 | bic r0, r0, #0x1000 @ ...i............ | 43 | bic r0, r0, #0x1000 @ ...i............ |
| 48 | bic r0, r0, #0x000e @ ............wca. | 44 | bic r0, r0, #0x000e @ ............wca. |
| 49 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 45 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 50 | nop | 46 | nop |
| 51 | nop | 47 | nop |
| 52 | ldmfd sp!, {pc} | 48 | mov pc, lr |
| 53 | 49 | ||
| 54 | /* | 50 | /* |
| 55 | * cpu_fa526_reset(loc) | 51 | * cpu_fa526_reset(loc) |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 53e63234384..b304d0104a4 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
| @@ -75,11 +75,6 @@ ENTRY(cpu_feroceon_proc_init) | |||
| 75 | * cpu_feroceon_proc_fin() | 75 | * cpu_feroceon_proc_fin() |
| 76 | */ | 76 | */ |
| 77 | ENTRY(cpu_feroceon_proc_fin) | 77 | ENTRY(cpu_feroceon_proc_fin) |
| 78 | stmfd sp!, {lr} | ||
| 79 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 80 | msr cpsr_c, ip | ||
| 81 | bl feroceon_flush_kern_cache_all | ||
| 82 | |||
| 83 | #if defined(CONFIG_CACHE_FEROCEON_L2) && \ | 78 | #if defined(CONFIG_CACHE_FEROCEON_L2) && \ |
| 84 | !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) | 79 | !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) |
| 85 | mov r0, #0 | 80 | mov r0, #0 |
| @@ -91,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin) | |||
| 91 | bic r0, r0, #0x1000 @ ...i............ | 86 | bic r0, r0, #0x1000 @ ...i............ |
| 92 | bic r0, r0, #0x000e @ ............wca. | 87 | bic r0, r0, #0x000e @ ............wca. |
| 93 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 88 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 94 | ldmfd sp!, {pc} | 89 | mov pc, lr |
| 95 | 90 | ||
| 96 | /* | 91 | /* |
| 97 | * cpu_feroceon_reset(loc) | 92 | * cpu_feroceon_reset(loc) |
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index caa31154e7d..5f6892fcc16 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
| @@ -51,15 +51,11 @@ ENTRY(cpu_mohawk_proc_init) | |||
| 51 | * cpu_mohawk_proc_fin() | 51 | * cpu_mohawk_proc_fin() |
| 52 | */ | 52 | */ |
| 53 | ENTRY(cpu_mohawk_proc_fin) | 53 | ENTRY(cpu_mohawk_proc_fin) |
| 54 | stmfd sp!, {lr} | ||
| 55 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 56 | msr cpsr_c, ip | ||
| 57 | bl mohawk_flush_kern_cache_all | ||
| 58 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 54 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 59 | bic r0, r0, #0x1800 @ ...iz........... | 55 | bic r0, r0, #0x1800 @ ...iz........... |
| 60 | bic r0, r0, #0x0006 @ .............ca. | 56 | bic r0, r0, #0x0006 @ .............ca. |
| 61 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 57 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 62 | ldmfd sp!, {pc} | 58 | mov pc, lr |
| 63 | 59 | ||
| 64 | /* | 60 | /* |
| 65 | * cpu_mohawk_reset(loc) | 61 | * cpu_mohawk_reset(loc) |
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 7b706b38990..a201eb04b5e 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
| @@ -44,17 +44,13 @@ ENTRY(cpu_sa110_proc_init) | |||
| 44 | * cpu_sa110_proc_fin() | 44 | * cpu_sa110_proc_fin() |
| 45 | */ | 45 | */ |
| 46 | ENTRY(cpu_sa110_proc_fin) | 46 | ENTRY(cpu_sa110_proc_fin) |
| 47 | stmfd sp!, {lr} | 47 | mov r0, #0 |
| 48 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 49 | msr cpsr_c, ip | ||
| 50 | bl v4wb_flush_kern_cache_all @ clean caches | ||
| 51 | 1: mov r0, #0 | ||
| 52 | mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching | 48 | mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching |
| 53 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 49 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 54 | bic r0, r0, #0x1000 @ ...i............ | 50 | bic r0, r0, #0x1000 @ ...i............ |
| 55 | bic r0, r0, #0x000e @ ............wca. | 51 | bic r0, r0, #0x000e @ ............wca. |
| 56 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 52 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 57 | ldmfd sp!, {pc} | 53 | mov pc, lr |
| 58 | 54 | ||
| 59 | /* | 55 | /* |
| 60 | * cpu_sa110_reset(loc) | 56 | * cpu_sa110_reset(loc) |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 5c47760c206..7ddc4805bf9 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
| @@ -55,16 +55,12 @@ ENTRY(cpu_sa1100_proc_init) | |||
| 55 | * - Clean and turn off caches. | 55 | * - Clean and turn off caches. |
| 56 | */ | 56 | */ |
| 57 | ENTRY(cpu_sa1100_proc_fin) | 57 | ENTRY(cpu_sa1100_proc_fin) |
| 58 | stmfd sp!, {lr} | ||
| 59 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | ||
| 60 | msr cpsr_c, ip | ||
| 61 | bl v4wb_flush_kern_cache_all | ||
| 62 | mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching | 58 | mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching |
| 63 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 59 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 64 | bic r0, r0, #0x1000 @ ...i............ | 60 | bic r0, r0, #0x1000 @ ...i............ |
| 65 | bic r0, r0, #0x000e @ ............wca. | 61 | bic r0, r0, #0x000e @ ............wca. |
| 66 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 62 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 67 | ldmfd sp!, {pc} | 63 | mov pc, lr |
| 68 | 64 | ||
| 69 | /* | 65 | /* |
| 70 | * cpu_sa1100_reset(loc) | 66 | * cpu_sa1100_reset(loc) |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 7a5337ed7d6..22aac851519 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
| @@ -42,14 +42,11 @@ ENTRY(cpu_v6_proc_init) | |||
| 42 | mov pc, lr | 42 | mov pc, lr |
| 43 | 43 | ||
| 44 | ENTRY(cpu_v6_proc_fin) | 44 | ENTRY(cpu_v6_proc_fin) |
| 45 | stmfd sp!, {lr} | ||
| 46 | cpsid if @ disable interrupts | ||
| 47 | bl v6_flush_kern_cache_all | ||
| 48 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 45 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 49 | bic r0, r0, #0x1000 @ ...i............ | 46 | bic r0, r0, #0x1000 @ ...i............ |
| 50 | bic r0, r0, #0x0006 @ .............ca. | 47 | bic r0, r0, #0x0006 @ .............ca. |
| 51 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 48 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 52 | ldmfd sp!, {pc} | 49 | mov pc, lr |
| 53 | 50 | ||
| 54 | /* | 51 | /* |
| 55 | * cpu_v6_reset(loc) | 52 | * cpu_v6_reset(loc) |
| @@ -239,7 +236,8 @@ __v6_proc_info: | |||
| 239 | b __v6_setup | 236 | b __v6_setup |
| 240 | .long cpu_arch_name | 237 | .long cpu_arch_name |
| 241 | .long cpu_elf_name | 238 | .long cpu_elf_name |
| 242 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA | 239 | /* See also feat_v6_fixup() for HWCAP_TLS */ |
| 240 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS | ||
| 243 | .long cpu_v6_name | 241 | .long cpu_v6_name |
| 244 | .long v6_processor_functions | 242 | .long v6_processor_functions |
| 245 | .long v6wbi_tlb_fns | 243 | .long v6wbi_tlb_fns |
| @@ -262,7 +260,7 @@ __pj4_v6_proc_info: | |||
| 262 | b __v6_setup | 260 | b __v6_setup |
| 263 | .long cpu_arch_name | 261 | .long cpu_arch_name |
| 264 | .long cpu_elf_name | 262 | .long cpu_elf_name |
| 265 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 263 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS |
| 266 | .long cpu_pj4_name | 264 | .long cpu_pj4_name |
| 267 | .long v6_processor_functions | 265 | .long v6_processor_functions |
| 268 | .long v6wbi_tlb_fns | 266 | .long v6wbi_tlb_fns |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 7aaf88a3b7a..6a8506d99ee 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -45,14 +45,11 @@ ENTRY(cpu_v7_proc_init) | |||
| 45 | ENDPROC(cpu_v7_proc_init) | 45 | ENDPROC(cpu_v7_proc_init) |
| 46 | 46 | ||
| 47 | ENTRY(cpu_v7_proc_fin) | 47 | ENTRY(cpu_v7_proc_fin) |
| 48 | stmfd sp!, {lr} | ||
| 49 | cpsid if @ disable interrupts | ||
| 50 | bl v7_flush_kern_cache_all | ||
| 51 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 48 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 52 | bic r0, r0, #0x1000 @ ...i............ | 49 | bic r0, r0, #0x1000 @ ...i............ |
| 53 | bic r0, r0, #0x0006 @ .............ca. | 50 | bic r0, r0, #0x0006 @ .............ca. |
| 54 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 51 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 55 | ldmfd sp!, {pc} | 52 | mov pc, lr |
| 56 | ENDPROC(cpu_v7_proc_fin) | 53 | ENDPROC(cpu_v7_proc_fin) |
| 57 | 54 | ||
| 58 | /* | 55 | /* |
| @@ -344,7 +341,7 @@ __v7_proc_info: | |||
| 344 | b __v7_setup | 341 | b __v7_setup |
| 345 | .long cpu_arch_name | 342 | .long cpu_arch_name |
| 346 | .long cpu_elf_name | 343 | .long cpu_elf_name |
| 347 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 344 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS |
| 348 | .long cpu_v7_name | 345 | .long cpu_v7_name |
| 349 | .long v7_processor_functions | 346 | .long v7_processor_functions |
| 350 | .long v7wbi_tlb_fns | 347 | .long v7wbi_tlb_fns |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index e5797f1c1db..361a51e4903 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
| @@ -90,15 +90,11 @@ ENTRY(cpu_xsc3_proc_init) | |||
| 90 | * cpu_xsc3_proc_fin() | 90 | * cpu_xsc3_proc_fin() |
| 91 | */ | 91 | */ |
| 92 | ENTRY(cpu_xsc3_proc_fin) | 92 | ENTRY(cpu_xsc3_proc_fin) |
| 93 | str lr, [sp, #-4]! | ||
| 94 | mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE | ||
| 95 | msr cpsr_c, r0 | ||
| 96 | bl xsc3_flush_kern_cache_all @ clean caches | ||
| 97 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 93 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 98 | bic r0, r0, #0x1800 @ ...IZ........... | 94 | bic r0, r0, #0x1800 @ ...IZ........... |
| 99 | bic r0, r0, #0x0006 @ .............CA. | 95 | bic r0, r0, #0x0006 @ .............CA. |
| 100 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 96 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 101 | ldr pc, [sp], #4 | 97 | mov pc, lr |
| 102 | 98 | ||
| 103 | /* | 99 | /* |
| 104 | * cpu_xsc3_reset(loc) | 100 | * cpu_xsc3_reset(loc) |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 63037e2162f..14075979bcb 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
| @@ -124,15 +124,11 @@ ENTRY(cpu_xscale_proc_init) | |||
| 124 | * cpu_xscale_proc_fin() | 124 | * cpu_xscale_proc_fin() |
| 125 | */ | 125 | */ |
| 126 | ENTRY(cpu_xscale_proc_fin) | 126 | ENTRY(cpu_xscale_proc_fin) |
| 127 | str lr, [sp, #-4]! | ||
| 128 | mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE | ||
| 129 | msr cpsr_c, r0 | ||
| 130 | bl xscale_flush_kern_cache_all @ clean caches | ||
| 131 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | 127 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register |
| 132 | bic r0, r0, #0x1800 @ ...IZ........... | 128 | bic r0, r0, #0x1800 @ ...IZ........... |
| 133 | bic r0, r0, #0x0006 @ .............CA. | 129 | bic r0, r0, #0x0006 @ .............CA. |
| 134 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | 130 | mcr p15, 0, r0, c1, c0, 0 @ disable caches |
| 135 | ldr pc, [sp], #4 | 131 | mov pc, lr |
| 136 | 132 | ||
| 137 | /* | 133 | /* |
| 138 | * cpu_xscale_reset(loc) | 134 | * cpu_xscale_reset(loc) |
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c index 19e09bdb1b8..935993e1b1e 100644 --- a/arch/arm/mm/vmregion.c +++ b/arch/arm/mm/vmregion.c | |||
| @@ -35,7 +35,8 @@ | |||
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | struct arm_vmregion * | 37 | struct arm_vmregion * |
| 38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) | 38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, |
| 39 | size_t size, gfp_t gfp) | ||
| 39 | { | 40 | { |
| 40 | unsigned long addr = head->vm_start, end = head->vm_end - size; | 41 | unsigned long addr = head->vm_start, end = head->vm_end - size; |
| 41 | unsigned long flags; | 42 | unsigned long flags; |
| @@ -58,7 +59,7 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) | |||
| 58 | goto nospc; | 59 | goto nospc; |
| 59 | if ((addr + size) <= c->vm_start) | 60 | if ((addr + size) <= c->vm_start) |
| 60 | goto found; | 61 | goto found; |
| 61 | addr = c->vm_end; | 62 | addr = ALIGN(c->vm_end, align); |
| 62 | if (addr > end) | 63 | if (addr > end) |
| 63 | goto nospc; | 64 | goto nospc; |
| 64 | } | 65 | } |
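The allocator still walks the region list first-fit, but each candidate hole now starts at the previous region's end rounded up to the requested power-of-two alignment instead of at vm_end itself. A small stand-alone illustration of that rounding (the addresses are made up; ALIGN_UP mirrors the kernel's ALIGN() for power-of-two alignments):

    #include <stdio.h>

    /* Same rounding as the kernel's ALIGN() for power-of-two alignments. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long vm_end = 0xffc04a00UL;    /* end of an existing region (made up) */

            printf("next candidate, 4 KiB aligned: %#lx\n", ALIGN_UP(vm_end, 0x1000UL));
            printf("next candidate, 1 MiB aligned: %#lx\n", ALIGN_UP(vm_end, 0x100000UL));
            return 0;
    }

Callers such as the DMA coherent remapping code can now pass the alignment they need as the new second argument, matching the prototype change in vmregion.h below.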
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h index 6b2cdbdf3a8..15e9f044db9 100644 --- a/arch/arm/mm/vmregion.h +++ b/arch/arm/mm/vmregion.h | |||
| @@ -21,7 +21,7 @@ struct arm_vmregion { | |||
| 21 | int vm_active; | 21 | int vm_active; |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t); | 24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); |
| 25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); | 25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); |
| 26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); | 26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); |
| 27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); | 27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); |
