author     Linus Torvalds <torvalds@linux-foundation.org>   2013-09-06 14:14:33 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-09-06 14:14:33 -0400
commit     4de9ad9bc08b4953fc03336ad38908496e2f8826 (patch)
tree       bd44add223061a58317034a0d6c9686d95d12fba /arch/tile/kernel/setup.c
parent     576c25eb5954035b64112188d9a2683144600f3d (diff)
parent     06da6629e68ddc8ffe2933d33b3681f09104b3f1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull Tile arch updates from Chris Metcalf:
 "These changes bring in a bunch of new functionality that has been
  maintained internally at Tilera over the last year, plus other stray
  bits of work that I've taken into the tile tree from other folks.

  The changes include some PCI root complex work, interrupt-driven
  console support, support for performing fast-path unaligned data
  fixups by kernel-based JIT code generation, CONFIG_PREEMPT support,
  vDSO support for gettimeofday(), a serial driver for the tilegx
  on-chip UART, KGDB support, more optimized string routines, support
  for ftrace and kprobes, improved ASLR, and many bug fixes.

  We also remove support for the old TILE64 chip, which is no longer
  buildable"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile: (85 commits)
  tile: refresh tile defconfig files
  tile: rework <asm/cmpxchg.h>
  tile PCI RC: make default consistent DMA mask 32-bit
  tile: add null check for kzalloc in tile/kernel/setup.c
  tile: make __write_once a synonym for __read_mostly
  tile: remove support for TILE64
  tile: use asm-generic/bitops/builtin-*.h
  tile: eliminate no-op "noatomichash" boot argument
  tile: use standard tile_bundle_bits type in traps.c
  tile: simplify code referencing hypervisor API addresses
  tile: change <asm/system.h> to <asm/switch_to.h> in comments
  tile: mark pcibios_init() as __init
  tile: check for correct compiler earlier in asm-offsets.c
  tile: use standard 'generic-y' model for <asm/hw_irq.h>
  tile: use asm-generic version of <asm/local64.h>
  tile PCI RC: add comment about "PCI hole" problem
  tile: remove DEBUG_EXTRA_FLAGS kernel config option
  tile: add virt_to_kpte() API and clean up and document behavior
  tile: support FRAME_POINTER
  tile: support reporting Tilera hypervisor statistics
  ...
Diffstat (limited to 'arch/tile/kernel/setup.c')
-rw-r--r--   arch/tile/kernel/setup.c   162
1 file changed, 134 insertions(+), 28 deletions(-)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index eceb8344280f..4c34caea9dd3 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -154,6 +154,65 @@ static int __init setup_maxnodemem(char *str)
 }
 early_param("maxnodemem", setup_maxnodemem);
 
+struct memmap_entry {
+	u64 addr;	/* start of memory segment */
+	u64 size;	/* size of memory segment */
+};
+static struct memmap_entry memmap_map[64];
+static int memmap_nr;
+
+static void add_memmap_region(u64 addr, u64 size)
+{
+	if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
+		pr_err("Ooops! Too many entries in the memory map!\n");
+		return;
+	}
+	memmap_map[memmap_nr].addr = addr;
+	memmap_map[memmap_nr].size = size;
+	memmap_nr++;
+}
+
+static int __init setup_memmap(char *p)
+{
+	char *oldp;
+	u64 start_at, mem_size;
+
+	if (!p)
+		return -EINVAL;
+
+	if (!strncmp(p, "exactmap", 8)) {
+		pr_err("\"memmap=exactmap\" not valid on tile\n");
+		return 0;
+	}
+
+	oldp = p;
+	mem_size = memparse(p, &p);
+	if (p == oldp)
+		return -EINVAL;
+
+	if (*p == '@') {
+		pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
+	} else if (*p == '#') {
+		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
+	} else if (*p == '$') {
+		start_at = memparse(p+1, &p);
+		add_memmap_region(start_at, mem_size);
+	} else {
+		if (mem_size == 0)
+			return -EINVAL;
+		maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
+			(HPAGE_SHIFT - PAGE_SHIFT);
+	}
+	return *p == '\0' ? 0 : -EINVAL;
+}
+early_param("memmap", setup_memmap);
+
+static int __init setup_mem(char *str)
+{
+	return setup_maxmem(str);
+}
+early_param("mem", setup_mem);  /* compatibility with x86 */
+
 static int __init setup_isolnodes(char *str)
 {
 	char buf[MAX_NUMNODES * 5];
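
Note on the new boot arguments above: "memmap=nn$ss" excludes the given
physical range from use, a bare "memmap=nn" caps usable memory via
maxmem_pfn, and "mem=nn" is an alias for "maxmem=nn"; the x86-only "@"
(force RAM) and "#" (force ACPI data) forms are rejected. Sizes go
through memparse(), so K/M/G suffixes work. An illustrative command
line (values made up for this example):

    memmap=512M$0x40000000    exclude 512 MB starting at PA 0x40000000
    memmap=2G                 use at most 2 GB of memory
    mem=2G                    same cap, x86-compatible spelling
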
@@ -209,7 +268,7 @@ early_param("vmalloc", parse_vmalloc);
 /*
  * Determine for each controller where its lowmem is mapped and how much of
  * it is mapped there. On controller zero, the first few megabytes are
- * already mapped in as code at MEM_SV_INTRPT, so in principle we could
+ * already mapped in as code at MEM_SV_START, so in principle we could
  * start our data mappings higher up, but for now we don't bother, to avoid
  * additional confusion.
  *
@@ -614,11 +673,12 @@ static void __init setup_bootmem_allocator_node(int i)
 	/*
 	 * Throw away any memory aliased by the PCI region.
 	 */
-	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
-		reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
-				PFN_PHYS(pci_reserve_end_pfn -
-					 pci_reserve_start_pfn),
-				BOOTMEM_EXCLUSIVE);
+	if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
+		start = max(pci_reserve_start_pfn, start);
+		end = min(pci_reserve_end_pfn, end);
+		reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
+				BOOTMEM_EXCLUSIVE);
+	}
 #endif
 }
 
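
The rewritten reservation above clamps to the intersection of the
node's range and the PCI hole instead of always reserving the full
hole, which could previously pass a range extending beyond this node's
memory. A worked example with invented PFNs:

    /* node range [0x10000, 0x20000), PCI hole [0x18000, 0x30000):
     *   start = max(0x18000, 0x10000) = 0x18000
     *   end   = min(0x30000, 0x20000) = 0x20000
     * so reserve_bootmem() now covers only [0x18000, 0x20000). */
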
@@ -628,6 +688,31 @@ static void __init setup_bootmem_allocator(void)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		setup_bootmem_allocator_node(i);
 
+	/* Reserve any memory excluded by "memmap" arguments. */
+	for (i = 0; i < memmap_nr; ++i) {
+		struct memmap_entry *m = &memmap_map[i];
+		reserve_bootmem(m->addr, m->size, 0);
+	}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (initrd_start) {
+		/* Make sure the initrd memory region is not modified. */
+		if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
+				    BOOTMEM_EXCLUSIVE)) {
+			pr_crit("The initrd memory region has been polluted. Disabling it.\n");
+			initrd_start = 0;
+			initrd_end = 0;
+		} else {
+			/*
+			 * Translate initrd_start & initrd_end from PA to VA for
+			 * future access.
+			 */
+			initrd_start += PAGE_OFFSET;
+			initrd_end += PAGE_OFFSET;
+		}
+	}
+#endif
+
 #ifdef CONFIG_KEXEC
 	if (crashk_res.start != crashk_res.end)
 		reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
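
Note the address-space switch at the end of the initrd branch: the
"initrd=size@pa" argument (added further down in this patch) records
physical addresses, and reserve_bootmem() takes PAs too, but later
initrd consumers expect kernel virtual addresses. Assuming the image
sits in the direct-mapped lowmem region, the translation is a constant
offset:

    /* sketch of the identity relied on, for a direct-mapped PA */
    unsigned long va = pa + PAGE_OFFSET;   /* i.e. (unsigned long)__va(pa) */
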
@@ -961,9 +1046,6 @@ void setup_cpu(int boot)
 	arch_local_irq_unmask(INT_DMATLB_MISS);
 	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
-#if CHIP_HAS_SN_PROC()
-	arch_local_irq_unmask(INT_SNITLB_MISS);
-#endif
 #ifdef __tilegx__
 	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
@@ -978,10 +1060,6 @@ void setup_cpu(int boot)
 	/* Static network is not restricted. */
 	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
 #endif
-#if CHIP_HAS_SN_PROC()
-	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
-	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
-#endif
 
 	/*
 	 * Set the MPL for interrupt control 0 & 1 to the corresponding
@@ -1029,6 +1107,10 @@ static void __init load_hv_initrd(void)
 	int fd, rc;
 	void *initrd;
 
+	/* If initrd has already been set, skip initramfs file in hvfs. */
+	if (initrd_start)
+		return;
+
 	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
 	if (fd == HV_ENOENT) {
 		if (set_initramfs_file) {
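
With this early return, the two initrd sources compose cleanly; the
resulting precedence (sketch):

    /* 1. "initrd=size@pa" boot argument, if given (initrd_start != 0)
     * 2. otherwise, the initramfs file from the hypervisor fs */
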
@@ -1067,6 +1149,25 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
 	free_bootmem(__pa(begin), end - begin);
 }
 
+static int __init setup_initrd(char *str)
+{
+	char *endp;
+	unsigned long initrd_size;
+
+	initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
+	if (initrd_size == 0 || *endp != '@')
+		return -EINVAL;
+
+	initrd_start = simple_strtoul(endp+1, &endp, 0);
+	if (initrd_start == 0)
+		return -EINVAL;
+
+	initrd_end = initrd_start + initrd_size;
+
+	return 0;
+}
+early_param("initrd", setup_initrd);
+
 #else
 static inline void load_hv_initrd(void) {}
 #endif /* CONFIG_BLK_DEV_INITRD */
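
Unlike setup_memmap() above, setup_initrd() parses with
simple_strtoul(..., 0) rather than memparse(), so the size takes no
K/M/G suffix; both numbers may be decimal or 0x-prefixed hex, and both
the size and the start PA must be non-zero. An illustrative (made-up)
setting:

    initrd=0x400000@0x1f000000    4 MB initrd image at PA 0x1f000000
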
@@ -1134,7 +1235,7 @@ static void __init validate_va(void)
 #ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
 	/*
 	 * Similarly, make sure we're only using allowed VAs.
-	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
+	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
 	 * and 0 .. KERNEL_HIGH_VADDR.
 	 * In addition, make sure we CAN'T use the end of memory, since
 	 * we use the last chunk of each pgd for the pgd_list.
@@ -1149,7 +1250,7 @@ static void __init validate_va(void)
 		if (range.size == 0)
 			break;
 		if (range.start <= MEM_USER_INTRPT &&
-		    range.start + range.size >= MEM_HV_INTRPT)
+		    range.start + range.size >= MEM_HV_START)
 			user_kernel_ok = 1;
 		if (range.start == 0)
 			max_va = range.size;
@@ -1183,7 +1284,6 @@ static void __init validate_va(void)
 struct cpumask __write_once cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 /*
  * hash_for_home_map lists all the tiles that hash-for-home data
  * will be cached on. Note that this may includes tiles that are not
@@ -1193,7 +1293,6 @@ EXPORT_SYMBOL(cpu_lotar_map);
  */
 struct cpumask hash_for_home_map;
 EXPORT_SYMBOL(hash_for_home_map);
-#endif
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
@@ -1286,7 +1385,6 @@ static void __init setup_cpu_maps(void)
 		cpu_lotar_map = *cpu_possible_mask;
 	}
 
-#if CHIP_HAS_CBOX_HOME_MAP()
 	/* Retrieve set of CPUs used for hash-for-home caching */
 	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
 			      (HV_VirtAddr) hash_for_home_map.bits,
@@ -1294,9 +1392,6 @@ static void __init setup_cpu_maps(void)
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
 	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
-#else
-	cpu_cacheable_map = *cpu_possible_mask;
-#endif
 }
 
 
@@ -1492,7 +1587,7 @@ void __init setup_per_cpu_areas(void)
 
 		/* Update the vmalloc mapping and page home. */
 		unsigned long addr = (unsigned long)ptr + i;
-		pte_t *ptep = virt_to_pte(NULL, addr);
+		pte_t *ptep = virt_to_kpte(addr);
 		pte_t pte = *ptep;
 		BUG_ON(pfn != pte_pfn(pte));
 		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1501,12 +1596,12 @@ void __init setup_per_cpu_areas(void)
 
 		/* Update the lowmem mapping for consistency. */
 		lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
-		ptep = virt_to_pte(NULL, lowmem_va);
+		ptep = virt_to_kpte(lowmem_va);
 		if (pte_huge(*ptep)) {
 			printk(KERN_DEBUG "early shatter of huge page"
 			       " at %#lx\n", lowmem_va);
 			shatter_pmd((pmd_t *)ptep);
-			ptep = virt_to_pte(NULL, lowmem_va);
+			ptep = virt_to_kpte(lowmem_va);
 			BUG_ON(pte_huge(*ptep));
 		}
 		BUG_ON(pfn != pte_pfn(*ptep));
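
These call sites move to the virt_to_kpte() helper from the shortlog
entry "tile: add virt_to_kpte() API and clean up and document
behavior". Its definition is not part of this hunk; a plausible
minimal shape, assuming it simply binds the kernel page tables that
the old NULL mm argument selected:

    /* sketch only; the real helper also documents and checks that
     * the address is a kernel VA */
    static inline pte_t *virt_to_kpte(unsigned long kaddr)
    {
    	return virt_to_pte(NULL, kaddr);
    }
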
@@ -1548,6 +1643,8 @@ insert_non_bus_resource(void)
 {
 	struct resource *res =
 		kzalloc(sizeof(struct resource), GFP_ATOMIC);
+	if (!res)
+		return NULL;
 	res->name = "Non-Bus Physical Address Space";
 	res->start = (1ULL << 32);
 	res->end = -1LL;
@@ -1561,11 +1658,13 @@ insert_non_bus_resource(void)
 #endif
 
 static struct resource* __init
-insert_ram_resource(u64 start_pfn, u64 end_pfn)
+insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
 {
 	struct resource *res =
 		kzalloc(sizeof(struct resource), GFP_ATOMIC);
-	res->name = "System RAM";
+	if (!res)
+		return NULL;
+	res->name = reserved ? "Reserved" : "System RAM";
 	res->start = start_pfn << PAGE_SHIFT;
 	res->end = (end_pfn << PAGE_SHIFT) - 1;
 	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
@@ -1585,7 +1684,7 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn)
 static int __init request_standard_resources(void)
 {
 	int i;
-	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
+	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
 
 #if defined(CONFIG_PCI) && !defined(__tilegx__)
 	insert_non_bus_resource();
@@ -1600,11 +1699,11 @@ static int __init request_standard_resources(void)
 		    end_pfn > pci_reserve_start_pfn) {
 			if (end_pfn > pci_reserve_end_pfn)
 				insert_ram_resource(pci_reserve_end_pfn,
-						    end_pfn);
+						    end_pfn, 0);
 			end_pfn = pci_reserve_start_pfn;
 		}
 #endif
-		insert_ram_resource(start_pfn, end_pfn);
+		insert_ram_resource(start_pfn, end_pfn, 0);
 	}
 
 	code_resource.start = __pa(_text - CODE_DELTA);
@@ -1615,6 +1714,13 @@ static int __init request_standard_resources(void)
 	insert_resource(&iomem_resource, &code_resource);
 	insert_resource(&iomem_resource, &data_resource);
 
+	/* Mark any "memmap" regions busy for the resource manager. */
+	for (i = 0; i < memmap_nr; ++i) {
+		struct memmap_entry *m = &memmap_map[i];
+		insert_ram_resource(PFN_DOWN(m->addr),
+				    PFN_UP(m->addr + m->size - 1), 1);
+	}
+
 #ifdef CONFIG_KEXEC
 	insert_resource(&iomem_resource, &crashk_res);
 #endif
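
The excluded "memmap" regions are republished here with the new
reserved flag, so they appear as "Reserved" rather than "System RAM"
in the resource tree. PFN_DOWN()/PFN_UP() are the standard page-frame
rounding macros; for reference:

    /* include/linux/pfn.h */
    #define PFN_UP(x)    (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)   /* round up */
    #define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)                     /* round down */
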