Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/fault_32.c      17
-rw-r--r--  arch/sparc/mm/generic_32.c     2
-rw-r--r--  arch/sparc/mm/generic_64.c     2
-rw-r--r--  arch/sparc/mm/highmem.c       48
-rw-r--r--  arch/sparc/mm/hugetlbpage.c    2
-rw-r--r--  arch/sparc/mm/init_32.c        8
-rw-r--r--  arch/sparc/mm/init_64.c       68
-rw-r--r--  arch/sparc/mm/leon_mm.c        2
-rw-r--r--  arch/sparc/mm/srmmu.c         16
-rw-r--r--  arch/sparc/mm/sun4c.c         22
-rw-r--r--  arch/sparc/mm/tlb.c           43
-rw-r--r--  arch/sparc/mm/tsb.c           21
12 files changed, 132 insertions, 119 deletions
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index bd8601601afa..7543ddbdadb2 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -135,7 +135,7 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 
 	default:
 		break;
-	};
+	}
 
 	memset(&regs, 0, sizeof (regs));
 	regs.pc = pc;
@@ -240,11 +240,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
+	code = SEGV_MAPERR;
 	if (!ARCH_SUN4C && address >= TASK_SIZE)
 		goto vmalloc_fault;
 
-	code = SEGV_MAPERR;
-
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -539,6 +538,12 @@ do_sigbus:
 	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
 }
 
+static void check_stack_aligned(unsigned long sp)
+{
+	if (sp & 0x7UL)
+		force_sig(SIGILL, current);
+}
+
 void window_overflow_fault(void)
 {
 	unsigned long sp;
@@ -547,6 +552,8 @@ void window_overflow_fault(void)
 	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 1);
 	force_user_fault(sp, 1);
+
+	check_stack_aligned(sp);
 }
 
 void window_underflow_fault(unsigned long sp)
@@ -554,6 +561,8 @@ void window_underflow_fault(unsigned long sp)
 	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 0);
 	force_user_fault(sp, 0);
+
+	check_stack_aligned(sp);
 }
 
 void window_ret_fault(struct pt_regs *regs)
@@ -564,4 +573,6 @@ void window_ret_fault(struct pt_regs *regs)
 	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
 		force_user_fault(sp + 0x38, 0);
 	force_user_fault(sp, 0);
+
+	check_stack_aligned(sp);
 }
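The new check_stack_aligned() helper gives all three window fault handlers a common final check: the SPARC ABI keeps the user stack pointer doubleword aligned, so a %sp with any of its low three bits set now earns the task a SIGILL instead of being silently accepted. A minimal user-space sketch of the same predicate (names and addresses here are illustrative, not kernel API):

#include <stdio.h>

/* Stand-in for the kernel's check_stack_aligned(): the low three
 * bits of a valid SPARC stack pointer must be clear. */
static int sp_is_aligned(unsigned long sp)
{
	return (sp & 0x7UL) == 0;
}

int main(void)
{
	printf("%d\n", sp_is_aligned(0xffffa000UL));	/* 1: aligned frame */
	printf("%d\n", sp_is_aligned(0xffffa004UL));	/* 0: kernel sends SIGILL */
	return 0;
}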
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
index 5edcac184eaf..e6067b75f11c 100644
--- a/arch/sparc/mm/generic_32.c
+++ b/arch/sparc/mm/generic_32.c
@@ -50,7 +50,7 @@ static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
index 04f2bf4cd571..3cb00dfd4bd6 100644
--- a/arch/sparc/mm/generic_64.c
+++ b/arch/sparc/mm/generic_64.c
@@ -92,7 +92,7 @@ static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned
 		end = PGDIR_SIZE;
 	offset -= address;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index e139e9cbf5f7..4730eac0747b 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -29,17 +29,17 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	unsigned long idx;
 	unsigned long vaddr;
+	long idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
@@ -63,44 +63,52 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+	type = kmap_atomic_idx();
 
-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned long idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+		/* XXX Fix - Anton */
 #if 0
-	__flush_cache_one(vaddr);
+		__flush_cache_one(vaddr);
 #else
-	flush_cache_all();
+		flush_cache_all();
 #endif
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-/* XXX Fix - Anton */
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		/* XXX Fix - Anton */
 #if 0
-	__flush_tlb_one(vaddr);
+		__flush_tlb_one(vaddr);
 #else
-	flush_tlb_all();
+		flush_tlb_all();
 #endif
+	}
 #endif
 
+	kmap_atomic_idx_pop();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
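The highmem.c hunks are sparc's share of the tree-wide switch from caller-chosen km_type slots to a small per-cpu stack of fixmap indices: kmap_atomic_idx_push() hands out the next free slot, kmap_atomic_idx() peeks at the current one, and kmap_atomic_idx_pop() releases it, so nested atomic mappings pair up strictly LIFO. A single-threaded mock of that bookkeeping (stand-in names; the real helpers are per-cpu):

#include <assert.h>
#include <stdio.h>

#define KM_TYPE_NR 16	/* slots available per CPU */

static int kmap_idx;	/* mock of the per-cpu depth counter */

static int kmap_atomic_idx_push(void)
{
	assert(kmap_idx < KM_TYPE_NR);	/* nesting too deep otherwise */
	return kmap_idx++;
}

static int kmap_atomic_idx(void)
{
	return kmap_idx - 1;	/* slot of the most recent mapping */
}

static void kmap_atomic_idx_pop(void)
{
	assert(kmap_idx > 0);
	kmap_idx--;
}

int main(void)
{
	int a = kmap_atomic_idx_push();	/* outer kmap_atomic() gets slot 0 */
	int b = kmap_atomic_idx_push();	/* nested call gets slot 1 */

	printf("slots %d and %d, top is %d\n", a, b, kmap_atomic_idx());
	kmap_atomic_idx_pop();		/* unmaps must come in LIFO order */
	kmap_atomic_idx_pop();
	return 0;
}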
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 5fdddf134caa..f4e97646ce23 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -214,7 +214,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_map(mm, pmd, addr);
+			pte = pte_alloc_map(mm, NULL, pmd, addr);
 	}
 	return pte;
 }
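generic_32.c, generic_64.c and hugetlbpage.c all pick up the same interface change: pte_alloc_map() grew a struct vm_area_struct * parameter between mm and pmd, and callers with no VMA at hand (such as the io_remap paths) pass NULL. A compilable user-space mock of the updated calling convention, with stub types standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Stub types; only the shape of the call matters here. */
struct mm_struct { int unused; };
struct vm_area_struct { int unused; };
typedef struct { unsigned long val; } pmd_t;
typedef struct { unsigned long val; } pte_t;

static pte_t *pte_alloc_map(struct mm_struct *mm, struct vm_area_struct *vma,
			    pmd_t *pmd, unsigned long address)
{
	static pte_t pte;	/* pretend the allocation always succeeds */
	(void)mm; (void)vma; (void)pmd; (void)address;
	return &pte;
}

int main(void)
{
	struct mm_struct mm;
	pmd_t pmd;
	pte_t *pte = pte_alloc_map(&mm, NULL, &pmd, 0x1000UL);

	printf("pte %s\n", pte ? "mapped" : "-ENOMEM path");
	return !pte;
}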
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 6d0e02c4fe09..7b00de61c5f1 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -37,8 +37,6 @@
 #include <asm/prom.h>
 #include <asm/leon.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 unsigned long *sparc_valid_addr_bitmap;
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 
@@ -75,10 +73,10 @@ void __init kmap_init(void)
 	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
 }
 
-void show_mem(void)
+void show_mem(unsigned int filter)
 {
 	printk("Mem-info:\n");
-	show_free_areas();
+	show_free_areas(filter);
 	printk("Free swap: %6ldkB\n",
 	       nr_swap_pages << (PAGE_SHIFT-10));
 	printk("%ld pages of RAM\n", totalram_pages);
@@ -342,7 +340,7 @@ void __init paging_init(void)
 		prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
 		prom_printf("paging_init: Halting...\n");
 		prom_halt();
-	};
+	}
 
 	/* Initialize the protection map with non-constant, MMU dependent values. */
 	protection_map[0] = PAGE_NONE;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f0434513df15..3fd8e18bed80 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -88,7 +88,7 @@ static void __init read_obp_memory(const char *property,
 				     struct linux_prom64_registers *regs,
 				     int *num_ents)
 {
-	int node = prom_finddevice("/memory");
+	phandle node = prom_finddevice("/memory");
 	int prop_size = prom_getproplen(node, property);
 	int ents, ret, i;
 
@@ -785,8 +785,7 @@ static int find_node(unsigned long addr)
 	return -1;
 }
 
-static unsigned long long nid_range(unsigned long long start,
-				    unsigned long long end, int *nid)
+u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -804,8 +803,7 @@ static unsigned long long nid_range(unsigned long long start,
 	return start;
 }
 #else
-static unsigned long long nid_range(unsigned long long start,
-				    unsigned long long end, int *nid)
+u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = 0;
 	return end;
@@ -822,8 +820,7 @@ static void __init allocate_node_data(int nid)
 	struct pglist_data *p;
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	paddr = memblock_alloc_nid(sizeof(struct pglist_data),
-				   SMP_CACHE_BYTES, nid, nid_range);
+	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
 	if (!paddr) {
 		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
 		prom_halt();
@@ -843,8 +840,7 @@ static void __init allocate_node_data(int nid)
 	if (p->node_spanned_pages) {
 		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-		paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
-					   nid_range);
+		paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
 		if (!paddr) {
 			prom_printf("Cannot allocate bootmap for nid[%d]\n",
 				    nid);
@@ -866,7 +862,7 @@ static void init_node_masks_nonnuma(void)
 	for (i = 0; i < NR_CPUS; i++)
 		numa_cpu_lookup_table[i] = 0;
 
-	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
+	cpumask_setall(&numa_cpumask_lookup_table[0]);
 }
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -972,19 +968,19 @@ int of_node_to_nid(struct device_node *dp)
 
 static void __init add_node_ranges(void)
 {
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		unsigned long size = memblock_size_bytes(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		unsigned long size = reg->size;
 		unsigned long start, end;
 
-		start = memblock.memory.region[i].base;
+		start = reg->base;
 		end = start + size;
 		while (start < end) {
 			unsigned long this_end;
 			int nid;
 
-			this_end = nid_range(start, end, &nid);
+			this_end = memblock_nid_range(start, end, &nid);
 
 			numadbg("Adding active range nid[%d] "
 				"start[%lx] end[%lx]\n",
@@ -1084,7 +1080,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
 {
 	u64 arc;
 
-	cpus_clear(*mask);
+	cpumask_clear(mask);
 
 	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
 		u64 target = mdesc_arc_target(md, arc);
@@ -1095,7 +1091,7 @@ static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
 			continue;
 		id = mdesc_get_property(md, target, "id", NULL);
 		if (*id < nr_cpu_ids)
-			cpu_set(*id, *mask);
+			cpumask_set_cpu(*id, mask);
 	}
 }
 
@@ -1157,13 +1153,13 @@ static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
 
 	numa_parse_mdesc_group_cpus(md, grp, &mask);
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, &mask)
 		numa_cpu_lookup_table[cpu] = index;
-	numa_cpumask_lookup_table[index] = mask;
+	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
 
 	if (numa_debug) {
 		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
-		for_each_cpu_mask(cpu, mask)
+		for_each_cpu(cpu, &mask)
 			printk("%d ", cpu);
 		printk("]\n");
 	}
@@ -1222,7 +1218,7 @@ static int __init numa_parse_jbus(void)
 	index = 0;
 	for_each_present_cpu(cpu) {
 		numa_cpu_lookup_table[cpu] = index;
-		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
+		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
 		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
 		node_masks[index].val = cpu << 36UL;
 
@@ -1281,7 +1277,7 @@ static void __init bootmem_init_nonnuma(void)
 {
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
-	unsigned int i;
+	struct memblock_region *reg;
 
 	numadbg("bootmem_init_nonnuma()\n");
 
@@ -1292,15 +1288,14 @@ static void __init bootmem_init_nonnuma(void)
 
 	init_node_masks_nonnuma();
 
-	for (i = 0; i < memblock.memory.cnt; i++) {
-		unsigned long size = memblock_size_bytes(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
 
-		if (!size)
+		if (!reg->size)
 			continue;
 
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -1318,7 +1313,7 @@ static void __init reserve_range_in_node(int nid, unsigned long start,
 		unsigned long this_end;
 		int n;
 
-		this_end = nid_range(start, end, &n);
+		this_end = memblock_nid_range(start, end, &n);
 		if (n == nid) {
 			numadbg(" MATCH reserving range [%lx:%lx]\n",
 				start, this_end);
@@ -1334,17 +1329,12 @@
 
 static void __init trim_reserved_in_node(int nid)
 {
-	int i;
+	struct memblock_region *reg;
 
 	numadbg(" trim_reserved_in_node(%d)\n", nid);
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long start = memblock.reserved.region[i].base;
-		unsigned long size = memblock_size_bytes(&memblock.reserved, i);
-		unsigned long end = start + size;
-
-		reserve_range_in_node(nid, start, end);
-	}
+	for_each_memblock(reserved, reg)
+		reserve_range_in_node(nid, reg->base, reg->base + reg->size);
 }
 
 static void __init bootmem_init_one_node(int nid)
@@ -1635,7 +1625,7 @@ static void __init sun4v_ktsb_init(void)
 		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
 		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
 		break;
-	};
+	}
 
 	ktsb_descr[0].assoc = 1;
 	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
@@ -2276,7 +2266,7 @@ unsigned long pte_sz_bits(unsigned long sz)
 			return _PAGE_SZ512K_4V;
 		case 4 * 1024 * 1024:
 			return _PAGE_SZ4MB_4V;
-		};
+		}
 	} else {
 		switch (sz) {
 		case 8 * 1024:
@@ -2288,7 +2278,7 @@ unsigned long pte_sz_bits(unsigned long sz)
 			return _PAGE_SZ512K_4U;
 		case 4 * 1024 * 1024:
 			return _PAGE_SZ4MB_4U;
-		};
+		}
 	}
 }
 
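Most of the init_64.c churn is the move away from poking at memblock internals: open-coded loops over memblock.memory.cnt and region[i] become for_each_memblock() with the region accessor helpers, memblock_alloc_nid() plus a private nid_range() callback becomes memblock_alloc_try_nid() with the now-exported memblock_nid_range(), and the cpumask code switches to the modern cpumask_* API. A user-space sketch of the iterator pattern over a mock region table (for_each_region is a stand-in for the real for_each_memblock macro):

#include <stdio.h>

struct memblock_region {	/* mock of the kernel structure */
	unsigned long base;
	unsigned long size;
};

static struct memblock_region memory[] = {
	{ 0x00000000UL, 0x10000000UL },
	{ 0x20000000UL, 0x08000000UL },
};

/* Walk a region array by pointer instead of indexing by count. */
#define for_each_region(reg, arr, n) \
	for ((reg) = (arr); (reg) < (arr) + (n); (reg)++)

int main(void)
{
	struct memblock_region *reg;

	for_each_region(reg, memory, 2)
		printf("memory range [%lx:%lx]\n",
		       reg->base, reg->base + reg->size);
	return 0;
}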
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index c0e01297e64e..e485a6804998 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
  * Leon2 and Leon3 differ in their way of telling cache information
  *
  */
-int leon_flush_needed(void)
+int __init leon_flush_needed(void)
 {
 	int flush_needed = -1;
 	unsigned int ssize, sets;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b0b43aa5e45a..cbef74e793b8 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -650,7 +650,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
  * mappings on the kernel stack without any special code as we did
  * need on the sun4c.
  */
-static struct thread_info *srmmu_alloc_thread_info(void)
+static struct thread_info *srmmu_alloc_thread_info_node(int node)
 {
 	struct thread_info *ret;
 
@@ -1262,7 +1262,8 @@ extern unsigned long bootmem_init(unsigned long *pages_avail);
 
 void __init srmmu_paging_init(void)
 {
-	int i, cpunode;
+	int i;
+	phandle cpunode;
 	char node_str[128];
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -1398,7 +1399,8 @@ static void __init srmmu_is_bad(void)
 
 static void __init init_vac_layout(void)
 {
-	int nd, cache_lines;
+	phandle nd;
+	int cache_lines;
 	char node_str[128];
 #ifdef CONFIG_SMP
 	int cpu = 0;
@@ -1663,7 +1665,7 @@ static void __init init_swift(void)
 	default:
 		srmmu_modtype = Swift_ok;
 		break;
-	};
+	}
 
 	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
@@ -2067,7 +2069,7 @@ static void __init get_srmmu_type(void)
 			/* Some other Cypress revision, assume a 605. */
 			init_cypress_605(mod_rev);
 			break;
-		};
+		}
 		return;
 	}
 
@@ -2082,7 +2084,7 @@ static void __init get_srmmu_type(void)
 
 	/* Next check for Fujitsu Swift. */
 	if(psr_typ == 0 && psr_vers == 4) {
-		int cpunode;
+		phandle cpunode;
 		char node_str[128];
 
 		/* Look if it is not a TurboSparc emulating Swift... */
@@ -2269,7 +2271,7 @@ void __init ld_mmu_srmmu(void)
 
 	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
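srmmu.c, sun4c.c and init_64.c also share the phandle cleanup: OpenPROM node handles stop being declared as plain int and use the dedicated phandle typedef, so prototypes such as prom_finddevice() and prom_getchild() type-check. A small mock of the shape of that change (the typedef and stub value are illustrative; the real declarations live in the sparc asm/oplib headers):

#include <stdio.h>

typedef unsigned int phandle;	/* assumption: mirrors the sparc typedef */

static phandle prom_finddevice(const char *path)
{
	(void)path;
	return 0xf0042u;	/* pretend handle returned by the PROM */
}

int main(void)
{
	phandle node = prom_finddevice("/memory");

	printf("node handle %#x\n", node);
	return 0;
}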
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 4289f90f8697..1cf4f198709a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -318,7 +318,7 @@ void __init sun4c_probe_vac(void)
 		prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
 			    sun4c_vacinfo.linesize);
 		prom_halt();
-	};
+	}
 
 	sun4c_flush_all();
 	sun4c_enable_vac();
@@ -364,7 +364,7 @@ static void __init patch_kernel_fault_handler(void)
 		prom_printf("Unhandled number of segmaps: %d\n",
 			    num_segmaps);
 		prom_halt();
-	};
+	}
 	switch (num_contexts) {
 	case 8:
 		/* Default, nothing to do. */
@@ -377,7 +377,7 @@ static void __init patch_kernel_fault_handler(void)
 		prom_printf("Unhandled number of contexts: %d\n",
 			    num_contexts);
 		prom_halt();
-	};
+	}
 
 	if (sun4c_vacinfo.do_hwflushes != 0) {
 		PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
@@ -394,7 +394,7 @@ static void __init patch_kernel_fault_handler(void)
 			prom_printf("Impossible VAC linesize %d, halting...\n",
 				    sun4c_vacinfo.linesize);
 			prom_halt();
-		};
+		}
 	}
 }
 
@@ -420,7 +420,7 @@ volatile unsigned long __iomem *sun4c_memerr_reg = NULL;
 
 void __init sun4c_probe_memerr_reg(void)
 {
-	int node;
+	phandle node;
 	struct linux_prom_registers regs[1];
 
 	node = prom_getchild(prom_root_node);
@@ -435,16 +435,14 @@ void __init sun4c_probe_memerr_reg(void)
 
 static inline void sun4c_init_ss2_cache_bug(void)
 {
-	extern unsigned long start;
-
 	if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
 	    (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
 	    (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
 		/* Whee.. */
 		printk("SS2 cache bug detected, uncaching trap table page\n");
-		sun4c_flush_page((unsigned int) &start);
-		sun4c_put_pte(((unsigned long) &start),
-		    (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
+		sun4c_flush_page((unsigned int) &_start);
+		sun4c_put_pte(((unsigned long) &_start),
+		    (sun4c_get_pte((unsigned long) &_start) | _SUN4C_PAGE_NOCACHE));
 	}
 }
 
@@ -924,7 +922,7 @@ static inline void garbage_collect(int entry)
 	free_locked_segment(BUCKET_ADDR(entry));
 }
 
-static struct thread_info *sun4c_alloc_thread_info(void)
+static struct thread_info *sun4c_alloc_thread_info_node(int node)
 {
 	unsigned long addr, pages;
 	int entry;
@@ -2157,7 +2155,7 @@ void __init ld_mmu_sun4c(void)
 	BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
 
-	BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(alloc_thread_info_node, sun4c_alloc_thread_info_node, BTFIXUPCALL_NORM);
 	BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
 
 	BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index d8f21e24a82f..b1f279cd00bf 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -19,33 +19,34 @@
 
 /* Heavily inspired by the ppc64 code. */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
-void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +78,25 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	if (fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
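tlb.c stops borrowing the generic mmu_gather and keeps its own per-cpu struct tlb_batch for deferred flushes; tlb_batch_add() now takes fullmm as an explicit argument rather than reading it from the gather, and every path brackets the per-cpu access with get_cpu_var()/put_cpu_var() so the pointer is never held across preemption. A single-threaded mock of the batching logic, without the preemption guards or real flush primitives:

#include <stdio.h>

#define TLB_BATCH_NR 8	/* mock capacity; the kernel's differs */

struct tlb_batch {
	unsigned long tlb_nr;
	unsigned long vaddrs[TLB_BATCH_NR];
};

static struct tlb_batch batch;	/* stands in for the per-cpu variable */

static void flush_tlb_pending(void)
{
	unsigned long i;

	for (i = 0; i < batch.tlb_nr; i++)
		printf("flush vaddr %lx\n", batch.vaddrs[i]);
	batch.tlb_nr = 0;
}

static void tlb_batch_add(unsigned long vaddr, int fullmm)
{
	if (fullmm)	/* whole-mm teardown is flushed elsewhere */
		return;

	batch.vaddrs[batch.tlb_nr++] = vaddr;
	if (batch.tlb_nr >= TLB_BATCH_NR)
		flush_tlb_pending();	/* drain once the batch fills up */
}

int main(void)
{
	unsigned long va;

	for (va = 0x1000UL; va <= 0x9000UL; va += 0x1000UL)
		tlb_batch_add(va, 0);
	flush_tlb_pending();	/* drain what the loop left behind */
	return 0;
}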
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 101d7c82870b..a5f51b22fcbe 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;
 
-	for (i = 0; i < mp->tlb_nr; i++) {
-		unsigned long v = mp->vaddrs[i];
+	for (i = 0; i < tb->tlb_nr; i++) {
+		unsigned long v = tb->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, uns
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
 {
-	struct mm_struct *mm = mp->mm;
+	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(base);
-	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -179,7 +180,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
 		       current->comm, current->pid, tsb_bytes);
 		do_exit(SIGSEGV);
-	};
+	}
 	tte |= pte_sz_bits(page_sz);
 
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
@@ -214,7 +215,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 #endif
 	default:
 		BUG();
-	};
+	}
 	hp->assoc = 1;
 	hp->num_ttes = tsb_bytes / 16;
 	hp->ctx_idx = 0;
@@ -229,7 +230,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
 #endif
 	default:
 		BUG();
-	};
+	}
 	hp->tsb_base = tsb_paddr;
 	hp->resv = 0;
 	}
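One last pattern worth calling out: the many "-	};" / "+	}" pairs throughout these files are a single mechanical cleanup. A semicolon after a switch block's closing brace is an empty statement; it is legal C but pointless, and sparse and newer compilers warn about it. A compilable before/after sketch (the function is a made-up stand-in, not kernel code):

/* The switch statement needs no trailing ';' after its brace. */
static int is_valid_page_size(unsigned long sz)
{
	switch (sz) {
	case 8 * 1024:
	case 64 * 1024:
	case 512 * 1024:
	case 4 * 1024 * 1024:
		return 1;
	default:
		return 0;
	}	/* previously "};" -- the extra ';' was an empty statement */
}

int main(void)
{
	return !is_valid_page_size(8 * 1024);
}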