Diffstat (limited to 'arch/tile/kernel/setup.c')
 arch/tile/kernel/setup.c | 36 +++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 5f85d8b34dbb..bff23f476110 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -103,13 +103,11 @@ unsigned long __initdata pci_reserve_end_pfn = -1U;
 
 static int __init setup_maxmem(char *str)
 {
-	long maxmem_mb;
-	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
-	    maxmem_mb == 0)
+	unsigned long long maxmem;
+	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
 		return -EINVAL;
 
-	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
-		(HPAGE_SHIFT - PAGE_SHIFT);
+	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used to no more than %dMB\n",
 		maxmem_pfn >> (20 - PAGE_SHIFT));
 	return 0;
@@ -119,14 +117,15 @@ early_param("maxmem", setup_maxmem);
 static int __init setup_maxnodemem(char *str)
 {
 	char *endp;
-	long maxnodemem_mb, node;
+	unsigned long long maxnodemem;
+	long node;
 
 	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
-	if (node >= MAX_NUMNODES || *endp != ':' ||
-	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
+	if (node >= MAX_NUMNODES || *endp != ':')
 		return -EINVAL;
 
-	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
+	maxnodemem = memparse(endp+1, NULL);
+	maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
 		(HPAGE_SHIFT - PAGE_SHIFT);
 	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
 		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
@@ -913,6 +912,13 @@ void __cpuinit setup_cpu(int boot)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
+/*
+ * Note that the kernel can potentially support other compression
+ * techniques than gz, though we don't do so by default. If we ever
+ * decide to do so we can either look for other filename extensions,
+ * or just allow a file with this name to be compressed with an
+ * arbitrary compressor (somewhat counterintuitively).
+ */
 static int __initdata set_initramfs_file;
 static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
 
@@ -928,9 +934,9 @@ static int __init setup_initramfs_file(char *str)
 early_param("initramfs_file", setup_initramfs_file);
 
 /*
- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+ * We look for an "initramfs.cpio.gz" file in the hvfs.
  * If there is one, we allocate some memory for it and it will be
- * unpacked to the initramfs after any built-in initramfs_data.
+ * unpacked to the initramfs.
  */
 static void __init load_hv_initrd(void)
 {
@@ -1100,7 +1106,7 @@ EXPORT_SYMBOL(hash_for_home_map);
 
 /*
  * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
- * flush on our behalf. It is set to cpu_possible_map OR'ed with
+ * flush on our behalf. It is set to cpu_possible_mask OR'ed with
  * hash_for_home_map, and it is what should be passed to
  * hv_flush_remote() to flush all caches. Note that if there are
  * dedicated hypervisor driver tiles that have authorized use of their
@@ -1186,7 +1192,7 @@ static void __init setup_cpu_maps(void)
 			      sizeof(cpu_lotar_map));
 	if (rc < 0) {
 		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
-		cpu_lotar_map = cpu_possible_map;
+		cpu_lotar_map = *cpu_possible_mask;
 	}
 
 #if CHIP_HAS_CBOX_HOME_MAP()
@@ -1196,9 +1202,9 @@ static void __init setup_cpu_maps(void)
 			      sizeof(hash_for_home_map));
 	if (rc < 0)
 		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
-	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
 #else
-	cpu_cacheable_map = cpu_possible_map;
+	cpu_cacheable_map = *cpu_possible_mask;
 #endif
 }
 
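
A note on the parsing change in the first two hunks: memparse() accepts a size string with an optional K/M/G suffix and returns a byte count, whereas the old strict_strtol() path expected a plain number of megabytes. The updated code then rounds that byte count down to a whole number of huge pages and expresses the limit in base-page frames. The sketch below is an illustrative userspace model of that arithmetic only, not kernel code: parse_size() is a hypothetical stand-in for memparse(), and the page and huge-page shifts are assumed values chosen for the example.

/*
 * Illustrative userspace sketch only -- not kernel code.  parse_size()
 * is a hypothetical stand-in for the kernel's memparse(); the page and
 * huge-page shifts below are assumed values for the example.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12	/* assumed 4 KB base pages */
#define HPAGE_SHIFT	24	/* assumed 16 MB huge pages */

/* Parse "4096", "512K", "512M", or "1G" into a byte count. */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;	break;
	default: break;
	}
	return v;
}

int main(int argc, char **argv)
{
	unsigned long long bytes = parse_size(argc > 1 ? argv[1] : "512M");

	/*
	 * Round down to whole huge pages, then express as base-page frames,
	 * mirroring: maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
	 */
	unsigned long pfn_limit = (bytes >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);

	printf("%llu bytes -> %lu page frames (%lu MB)\n",
	       bytes, pfn_limit, pfn_limit >> (20 - PAGE_SHIFT));
	return 0;
}

The remaining hunks are a separate cleanup: they move from the old cpu_possible_map cpumask variable to the const cpu_possible_mask pointer form of the cpumask API, which is why the by-value copies gain a dereference (*cpu_possible_mask) while cpumask_or() takes the pointer directly.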