path: root/arch/tile/kernel/setup.c
author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/kernel/setup.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/kernel/setup.c')
-rw-r--r--	arch/tile/kernel/setup.c	70
1 file changed, 43 insertions(+), 27 deletions(-)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index e7d54c73d5c1..6cdc9ba55fe0 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -30,8 +30,6 @@
 #include <linux/timex.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
-#include <asm/sections.h>
-#include <asm/cacheflush.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -61,6 +59,8 @@ unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
 unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
 unsigned long __initdata node_free_pfn[MAX_NUMNODES];
 
+static unsigned long __initdata node_percpu[MAX_NUMNODES];
+
 #ifdef CONFIG_HIGHMEM
 /* Page frame index of end of lowmem on each controller. */
 unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
@@ -187,11 +187,11 @@ early_param("vmalloc", parse_vmalloc);
 
 #ifdef CONFIG_HIGHMEM
 /*
- * Determine for each controller where its lowmem is mapped and how
- * much of it is mapped there.  On controller zero, the first few
- * megabytes are mapped at 0xfd000000 as code, so in principle we
- * could start our data mappings higher up, but for now we don't
- * bother, to avoid additional confusion.
+ * Determine for each controller where its lowmem is mapped and how much of
+ * it is mapped there.  On controller zero, the first few megabytes are
+ * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * start our data mappings higher up, but for now we don't bother, to avoid
+ * additional confusion.
  *
  * One question is whether, on systems with more than 768 Mb and
  * controllers of different sizes, to map in a proportionate amount of
@@ -311,7 +311,7 @@ static void __init setup_memory(void)
 #endif
 
 	/* We are using a char to hold the cpu_2_node[] mapping */
-	BUG_ON(MAX_NUMNODES > 127);
+	BUILD_BUG_ON(MAX_NUMNODES > 127);
 
 	/* Discover the ranges of memory available to us */
 	for (i = 0; ; ++i) {
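The switch from BUG_ON() to BUILD_BUG_ON() in this hunk moves the check from boot time to build time: MAX_NUMNODES is a compile-time constant, so a violation can be rejected before the kernel ever runs. A minimal standalone sketch of the idea, close to (but not exactly) the kernel's definition of the era:

/* A false condition yields char[1], which is legal; a true condition
 * yields char[-1], which the compiler must reject.  Illustrative only;
 * the real BUILD_BUG_ON lives in <linux/kernel.h>. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	MY_BUILD_BUG_ON(sizeof(char) != 1);	/* false: compiles fine */
	/* A true condition here would abort the build, not the boot. */
	return 0;
}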
@@ -556,7 +556,6 @@ static void __init setup_bootmem_allocator(void)
 	reserve_bootmem(crashk_res.start,
 			crashk_res.end - crashk_res.start + 1, 0);
 #endif
-
 }
 
 void *__init alloc_remap(int nid, unsigned long size)
@@ -570,11 +569,13 @@ void *__init alloc_remap(int nid, unsigned long size)
 
 static int __init percpu_size(void)
 {
-	int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-#ifdef CONFIG_MODULES
-	if (size < PERCPU_ENOUGH_ROOM)
-		size = PERCPU_ENOUGH_ROOM;
-#endif
+	int size = __per_cpu_end - __per_cpu_start;
+	size += PERCPU_MODULE_RESERVE;
+	size += PERCPU_DYNAMIC_EARLY_SIZE;
+	if (size < PCPU_MIN_UNIT_SIZE)
+		size = PCPU_MIN_UNIT_SIZE;
+	size = roundup(size, PAGE_SIZE);
+
 	/* In several places we assume the per-cpu data fits on a huge page. */
 	BUG_ON(kdata_huge && size > HPAGE_SIZE);
 	return size;
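The new percpu_size() sizes each per-cpu unit explicitly: the static per-cpu section, plus reserved space for modules, plus the early dynamic region, clamped to the allocator's minimum unit and rounded up to a page. The same arithmetic as a standalone sketch; all constants below are hypothetical stand-ins for the real <linux/percpu.h> values:

#include <stdio.h>

#define PAGE_SIZE_X			(64UL << 10)	/* tile uses 64KB pages */
#define PERCPU_MODULE_RESERVE_X		(8UL << 10)	/* illustrative only */
#define PERCPU_DYNAMIC_EARLY_SIZE_X	(12UL << 10)	/* illustrative only */
#define PCPU_MIN_UNIT_SIZE_X		(32UL << 10)	/* illustrative only */
#define ROUNDUP(x, y)			((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* Stand-in for __per_cpu_end - __per_cpu_start. */
	unsigned long size = 40UL << 10;

	size += PERCPU_MODULE_RESERVE_X;
	size += PERCPU_DYNAMIC_EARLY_SIZE_X;
	if (size < PCPU_MIN_UNIT_SIZE_X)
		size = PCPU_MIN_UNIT_SIZE_X;
	size = ROUNDUP(size, PAGE_SIZE_X);
	printf("per-cpu unit: %lu KB\n", size >> 10);	/* 64 KB here */
	return 0;
}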
@@ -591,7 +592,6 @@ static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
 static void __init zone_sizes_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
-	unsigned long node_percpu[MAX_NUMNODES] = { 0 };
 	int size = percpu_size();
 	int num_cpus = smp_height * smp_width;
 	int i;
@@ -676,7 +676,7 @@ static void __init zone_sizes_init(void)
 		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
 
 		free_area_init_node(i, zones_size, start, NULL);
-		printk(KERN_DEBUG "  DMA zone: %ld per-cpu pages\n",
+		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
 		       PFN_UP(node_percpu[i]));
 
 		/* Track the type of memory on each node */
@@ -842,7 +842,7 @@ static int __init topology_init(void)
 	for_each_online_node(i)
 		register_one_node(i);
 
-	for_each_present_cpu(i)
+	for (i = 0; i < smp_height * smp_width; ++i)
 		register_cpu(&cpu_devices[i], i);
 
 	return 0;
@@ -870,11 +870,14 @@ void __cpuinit setup_cpu(int boot)
 
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-	raw_local_irq_unmask(INT_DMATLB_MISS);
-	raw_local_irq_unmask(INT_DMATLB_ACCESS);
+	arch_local_irq_unmask(INT_DMATLB_MISS);
+	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-	raw_local_irq_unmask(INT_SNITLB_MISS);
+	arch_local_irq_unmask(INT_SNITLB_MISS);
+#endif
+#ifdef __tilegx__
+	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
 	/*
@@ -893,11 +896,12 @@ void __cpuinit setup_cpu(int boot)
 #endif
 
 	/*
-	 * Set the MPL for interrupt control 0 to user level.
-	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
-	 * as well as the PL 0 interrupt mask.
+	 * Set the MPL for interrupt control 0 & 1 to the corresponding
+	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
+	 * SPRs, as well as the interrupt mask.
 	 */
 	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
+	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);
 
 	/* Initialize IRQ support for this cpu. */
 	setup_irq_regs();
@@ -908,6 +912,8 @@ void __cpuinit setup_cpu(int boot)
 #endif
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+
 static int __initdata set_initramfs_file;
 static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
 
@@ -965,6 +971,10 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
 	free_bootmem(__pa(begin), end - begin);
 }
 
+#else
+static inline void load_hv_initrd(void) {}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
 static void __init validate_hv(void)
 {
 	/*
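The CONFIG_BLK_DEV_INITRD guard added across the last two hunks follows a standard kernel pattern: when the option is disabled, an empty inline stub stands in for the real function, so call sites need no #ifdef of their own. A minimal sketch of the pattern; the caller shown is hypothetical:

#ifdef CONFIG_BLK_DEV_INITRD
static void __init load_hv_initrd(void)
{
	/* ... request the initramfs image and hand it to the kernel ... */
}
#else
static inline void load_hv_initrd(void) {}	/* compiles away entirely */
#endif

void __init example_setup(void)			/* hypothetical call site */
{
	load_hv_initrd();			/* no #ifdef needed here */
}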
@@ -1033,7 +1043,7 @@ static void __init validate_va(void)
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
 	 */
-	int i, fc_fd_ok = 0;
+	int i, user_kernel_ok = 0;
 	unsigned long max_va = 0;
 	unsigned long list_va =
 		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);
@@ -1044,13 +1054,13 @@ static void __init validate_va(void)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
 		    range.start + range.size >= MEM_HV_INTRPT)
-			fc_fd_ok = 1;
+			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
 		BUG_ON(range.start + range.size > list_va);
 	}
-	if (!fc_fd_ok)
-		early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
+	if (!user_kernel_ok)
+		early_panic("Hypervisor not configured for user/kernel VAs\n");
 	if (max_va == 0)
 		early_panic("Hypervisor not configured for low VAs\n");
 	if (max_va < KERNEL_HIGH_VADDR)
@@ -1310,6 +1320,8 @@ static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 
 	BUG_ON(size % PAGE_SIZE != 0);
 	pfn_offset[nid] += size / PAGE_SIZE;
+	BUG_ON(node_percpu[nid] < size);
+	node_percpu[nid] -= size;
 	if (percpu_pfn[cpu] == 0)
 		percpu_pfn[cpu] = pfn;
 	return pfn_to_kaddr(pfn);
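The two added lines treat node_percpu[nid] as a shrinking budget: every per-cpu allocation must fit within what zone_sizes_init() set aside for that node, and the BUG_ON makes any accounting mismatch fail at the moment it happens rather than silently overlapping other memory. The pattern in isolation, with illustrative names:

#include <assert.h>

static unsigned long budget_remaining = 1UL << 20;	/* illustrative: 1 MB */

static void charge_budget(unsigned long size)
{
	/* Catch over-allocation immediately at the point of the bug. */
	assert(budget_remaining >= size);
	budget_remaining -= size;
}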
@@ -1334,6 +1346,10 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
 	pte_t *pte;
 
 	BUG_ON(pgd_addr_invalid(addr));
+	if (addr < VMALLOC_START || addr >= VMALLOC_END)
+		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
+		      " try increasing CONFIG_VMALLOC_RESERVE\n",
+		      addr, VMALLOC_START, VMALLOC_END);
 
 	pgd = swapper_pg_dir + pgd_index(addr);
 	pud = pud_offset(pgd, addr);
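The range check added here turns a subtle failure (populating a page-table entry outside the vmalloc window) into an immediate, self-describing panic that also names the remedy, CONFIG_VMALLOC_RESERVE. The same validate-early-and-fail-loudly idea as a standalone sketch; the bounds below are placeholders, not the real architecture-defined VMALLOC_START/VMALLOC_END:

#include <stdio.h>
#include <stdlib.h>

#define VMALLOC_START_X 0xfd000000UL	/* placeholder bound */
#define VMALLOC_END_X   0xfe000000UL	/* placeholder bound */

static void populate_pte_checked(unsigned long addr)
{
	if (addr < VMALLOC_START_X || addr >= VMALLOC_END_X) {
		/* Report the offending address and the valid window,
		 * then stop before corrupting an unrelated mapping. */
		fprintf(stderr, "pcpu addr %#lx outside %#lx..%#lx\n",
			addr, VMALLOC_START_X, VMALLOC_END_X);
		abort();
	}
	/* ... walk pgd/pud/pmd and install the pte here ... */
}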