author	Chris Metcalf <cmetcalf@tilera.com>	2010-06-25 17:04:17 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-07-06 13:41:51 -0400
commit	0707ad30d10110aebc01a5a64fb63f4b32d20b73 (patch)
tree	64d8ba73e605ac26e56808d1d77701b3f83cf8b2 /arch/tile/mm/init.c
parent	c78095bd8c77fca2619769ff8efb639fd100e373 (diff)
arch/tile: Miscellaneous cleanup changes.
This commit is primarily changes caused by reviewing "sparse" and
"checkpatch" output on our sources, so it is somewhat noisy, since
things like "printk() -> pr_err()" (or whatever) throughout the
codebase tend to get tedious to read.  Rather than trying to tease
apart precisely which things changed due to which type of code review,
this commit includes various cleanups in the code:

- sparse: Add declarations in headers for globals.
- sparse: Fix __user annotations.
- sparse: Use gfp_t consistently instead of int.
- sparse: Remove functions not actually used.
- checkpatch: Clean up printk() warnings by using pr_info(), etc.;
  also avoid partial-line printks except in bootup code.
- checkpatch: Use exposed structs rather than typedefs.
- checkpatch: Change some C99 comments to C89 comments.

In addition, a couple of minor other changes are rolled into this
commit:

- Add support for a "raise" instruction to cause SIGFPE, etc., to be
  raised.
- Remove some compat code that is unnecessary when we fully eliminate
  some of the deprecated syscalls from the generic syscall ABI.
- Update the tile_defconfig to reflect current config contents.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
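To make the printk() cleanup concrete, here is a small before/after
sketch of the pattern (illustrative only, distilled from the hunks
below; pr_info() and pr_err() are the standard kernel wrappers for
printk(KERN_INFO ...) and printk(KERN_ERR ...)):

	/* Before: bare printk(), or printk() with an explicit KERN_* prefix. */
	printk("ktext: disabling local caching of kernel text\n");
	printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");

	/* After: the level-specific helpers make the log level explicit
	 * and keep the call shorter. */
	pr_info("ktext: disabling local caching of kernel text\n");
	pr_err("fixmap and kmap areas overlap - this will crash\n");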
Diffstat (limited to 'arch/tile/mm/init.c')
-rw-r--r--	arch/tile/mm/init.c	99
1 file changed, 51 insertions(+), 48 deletions(-)
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 125ac53b60fc..d89c9eacd162 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -67,7 +67,9 @@
 
 #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))
 
+#ifndef __tilegx__
 unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
+#endif
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -282,9 +284,9 @@ static pgprot_t __init init_pgprot(ulong address)
 	/*
 	 * Everything else that isn't data or bss is heap, so mark it
 	 * with the initial heap home (hash-for-home, or this cpu). This
-	 * includes any addresses after the loaded image; any address before
-	 * _einittext (since we already captured the case of text before
-	 * _sinittext); and any init-data pages.
+	 * includes any addresses after the loaded image and any address before
+	 * _einitdata, since we already captured the case of text before
+	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
 	 *
 	 * All the LOWMEM pages that we mark this way will get their
 	 * struct page homecache properly marked later, in set_page_homes().
@@ -292,9 +294,7 @@ static pgprot_t __init init_pgprot(ulong address)
 	 * homes, but with a zero free_time we don't have to actually
 	 * do a flush action the first time we use them, either.
 	 */
-	if (address >= (ulong) _end || address < (ulong) _sdata ||
-	    (address >= (ulong) _sinitdata &&
-	     address < (ulong) _einitdata))
+	if (address >= (ulong) _end || address < (ulong) _einitdata)
 		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
 #if CHIP_HAS_CBOX_HOME_MAP()
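The simplified heap test in the hunk above folds the old three-part
range check into two comparisons. A hedged user-space sketch of why
this is safe, using made-up addresses and assuming the layout the new
comment describes (init data ends where the regular data section
begins, so _einitdata == _sdata); under that assumption the two
predicates agree for every address:

	#include <assert.h>

	/* Made-up layout constants for illustration only. */
	enum {
		_sinitdata = 0x1000, _einitdata = 0x3000,
		_sdata     = 0x3000, _end       = 0x9000,
	};

	/* Old test: before the image, after it, or inside init data. */
	static int old_is_heap(unsigned long a)
	{
		return a >= _end || a < _sdata ||
		       (a >= _sinitdata && a < _einitdata);
	}

	/* New test: the "< _sdata" and init-data clauses collapse into one. */
	static int new_is_heap(unsigned long a)
	{
		return a >= _end || a < _einitdata;
	}

	int main(void)
	{
		unsigned long a;
		for (a = 0; a < 0xa000; a += 0x100)
			assert(old_is_heap(a) == new_is_heap(a));
		return 0;
	}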
@@ -304,35 +304,38 @@ static pgprot_t __init init_pgprot(ulong address)
 #endif
 
 	/*
+	 * Make the w1data homed like heap to start with, to avoid
+	 * making it part of the page-striped data area when we're just
+	 * going to convert it to read-only soon anyway.
+	 */
+	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
+		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
+
+	/*
 	 * Otherwise we just hand out consecutive cpus.  To avoid
 	 * requiring this function to hold state, we just walk forward from
 	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
 	 * the requested address, while walking cpu home around kdata_mask.
 	 * This is typically no more than a dozen or so iterations.
 	 */
-	BUG_ON(_einitdata != __bss_start);
-	for (page = (ulong)_sdata, cpu = NR_CPUS; ; ) {
-		cpu = cpumask_next(cpu, &kdata_mask);
-		if (cpu == NR_CPUS)
-			cpu = cpumask_first(&kdata_mask);
-		if (page >= address)
-			break;
-		page += PAGE_SIZE;
-		if (page == (ulong)__start_rodata)
-			page = (ulong)__end_rodata;
-		if (page == (ulong)&init_thread_union)
-			page += THREAD_SIZE;
-		if (page == (ulong)_sinitdata)
-			page = (ulong)_einitdata;
+	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
+	BUG_ON(address < page || address >= (ulong)_end);
+	cpu = cpumask_first(&kdata_mask);
+	for (; page < address; page += PAGE_SIZE) {
+		if (page >= (ulong)&init_thread_union &&
+		    page < (ulong)&init_thread_union + THREAD_SIZE)
+			continue;
 		if (page == (ulong)empty_zero_page)
-			page += PAGE_SIZE;
+			continue;
 #ifndef __tilegx__
 #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
 		if (page == (ulong)atomic_locks)
-			page += PAGE_SIZE;
+			continue;
 #endif
 #endif
-
+		cpu = cpumask_next(cpu, &kdata_mask);
+		if (cpu == NR_CPUS)
+			cpu = cpumask_first(&kdata_mask);
 	}
 	return construct_pgprot(PAGE_KERNEL, cpu);
 }
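The rewritten loop above makes the page-to-cpu striping stateless:
every call walks from the same rounded starting page up to the
requested address, skipping specially homed pages, and advances
round-robin through kdata_mask. A hedged user-space sketch of the same
idea, with a plain array standing in for the cpumask and one made-up
skip page:

	#include <stdio.h>

	#define PAGE_SIZE 0x1000UL

	/* Stand-in for kdata_mask: the cpus allowed to home kernel data. */
	static const int kdata_cpus[] = { 1, 2, 5 };
	#define NCPUS (sizeof(kdata_cpus) / sizeof(kdata_cpus[0]))

	static int home_for(unsigned long start, unsigned long address,
			    unsigned long skip_page)
	{
		unsigned int i = 0;	/* cpumask_first() equivalent */
		unsigned long page;

		for (page = start; page < address; page += PAGE_SIZE) {
			if (page == skip_page)	/* e.g. empty_zero_page */
				continue;
			i = (i + 1) % NCPUS;	/* cpumask_next() with wrap */
		}
		return kdata_cpus[i];
	}

	int main(void)
	{
		unsigned long start = 0x100000, skip = 0x103000, a;

		for (a = start; a < start + 8 * PAGE_SIZE; a += PAGE_SIZE)
			printf("page %#lx -> cpu %d\n", a,
			       home_for(start, a, skip));
		return 0;
	}

Because the walk restarts from scratch each time, two calls for the
same address always pick the same home cpu, which is what lets
init_pgprot() avoid holding any saved state.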
@@ -362,7 +365,7 @@ static int __init setup_ktext(char *str)
 	/* If you have a leading "nocache", turn off ktext caching */
 	if (strncmp(str, "nocache", 7) == 0) {
 		ktext_nocache = 1;
-		printk("ktext: disabling local caching of kernel text\n");
+		pr_info("ktext: disabling local caching of kernel text\n");
 		str += 7;
 		if (*str == ',')
 			++str;
@@ -374,20 +377,20 @@ static int __init setup_ktext(char *str)
 
 	/* Default setting on Tile64: use a huge page */
 	if (strcmp(str, "huge") == 0)
-		printk("ktext: using one huge locally cached page\n");
+		pr_info("ktext: using one huge locally cached page\n");
 
 	/* Pay TLB cost but get no cache benefit: cache small pages locally */
 	else if (strcmp(str, "local") == 0) {
 		ktext_small = 1;
 		ktext_local = 1;
-		printk("ktext: using small pages with local caching\n");
+		pr_info("ktext: using small pages with local caching\n");
 	}
 
 	/* Neighborhood cache ktext pages on all cpus. */
 	else if (strcmp(str, "all") == 0) {
 		ktext_small = 1;
 		ktext_all = 1;
-		printk("ktext: using maximal caching neighborhood\n");
+		pr_info("ktext: using maximal caching neighborhood\n");
 	}
 
 
@@ -397,10 +400,10 @@ static int __init setup_ktext(char *str)
 		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
 		if (cpumask_weight(&ktext_mask) > 1) {
 			ktext_small = 1;
-			printk("ktext: using caching neighborhood %s "
+			pr_info("ktext: using caching neighborhood %s "
 			       "with small pages\n", buf);
 		} else {
-			printk("ktext: caching on cpu %s with one huge page\n",
+			pr_info("ktext: caching on cpu %s with one huge page\n",
 			       buf);
 		}
 	}
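For context, setup_ktext() parses a kernel boot argument; here is a
hedged sketch of how such a handler is registered and what it accepts
(the actual __setup() line falls outside the hunks shown, and the
example values are inferred from the strings the code matches, not
from the commit text):

	/*
	 * Registration sketch: __setup() arranges for setup_ktext() to be
	 * called with everything after "ktext=" on the command line.
	 */
	__setup("ktext=", setup_ktext);

	/*
	 * Example boot-argument values (assumed from the code above):
	 *
	 *   ktext=huge           one huge locally cached page (the default)
	 *   ktext=local          small pages with local caching
	 *   ktext=all            small pages cached on all cpus
	 *   ktext=nocache,local  leading "nocache" disables local caching
	 *   ktext=1-3            cpulist: cache on the given cpus
	 */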
@@ -470,19 +473,19 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 
 #if CHIP_HAS_CBOX_HOME_MAP()
 	if (ktext_arg_seen && ktext_hash) {
-		printk("warning: \"ktext\" boot argument ignored"
+		pr_warning("warning: \"ktext\" boot argument ignored"
 		       " if \"kcache_hash\" sets up text hash-for-home\n");
 		ktext_small = 0;
 	}
 
 	if (kdata_arg_seen && kdata_hash) {
-		printk("warning: \"kdata\" boot argument ignored"
+		pr_warning("warning: \"kdata\" boot argument ignored"
 		       " if \"kcache_hash\" sets up data hash-for-home\n");
 	}
 
 	if (kdata_huge && !hash_default) {
-		printk("warning: disabling \"kdata=huge\"; requires"
+		pr_warning("warning: disabling \"kdata=huge\"; requires"
 		       " kcache_hash=all or =allbutstack\n");
 		kdata_huge = 0;
 	}
 #endif
@@ -556,11 +559,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	if (!cpumask_empty(&bad)) {
 		char buf[NR_CPUS * 5];
 		cpulist_scnprintf(buf, sizeof(buf), &bad);
-		printk("ktext: not using unavailable cpus %s\n", buf);
+		pr_info("ktext: not using unavailable cpus %s\n", buf);
 	}
 	if (cpumask_empty(&ktext_mask)) {
-		printk("ktext: no valid cpus; caching on %d.\n",
+		pr_warning("ktext: no valid cpus; caching on %d.\n",
 		       smp_processor_id());
 		cpumask_copy(&ktext_mask,
 			     cpumask_of(smp_processor_id()));
 	}
@@ -737,17 +740,18 @@ static void __init set_non_bootmem_pages_init(void)
 	for_each_zone(z) {
 		unsigned long start, end;
 		int nid = z->zone_pgdat->node_id;
+		int idx = zone_idx(z);
 
 		start = z->zone_start_pfn;
 		if (start == 0)
 			continue;  /* bootmem */
 		end = start + z->spanned_pages;
-		if (zone_idx(z) == ZONE_NORMAL) {
+		if (idx == ZONE_NORMAL) {
 			BUG_ON(start != node_start_pfn[nid]);
 			start = node_free_pfn[nid];
 		}
 #ifdef CONFIG_HIGHMEM
-		if (zone_idx(z) == ZONE_HIGHMEM)
+		if (idx == ZONE_HIGHMEM)
 			totalhigh_pages += z->spanned_pages;
 #endif
 		if (kdata_huge) {
@@ -841,9 +845,9 @@ void __init mem_init(void)
 #ifdef CONFIG_HIGHMEM
 	/* check that fixmap and pkmap do not overlap */
 	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
-		printk(KERN_ERR "fixmap and kmap areas overlap"
+		pr_err("fixmap and kmap areas overlap"
 		       " - this will crash\n");
-		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
+		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
 		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
 		       FIXADDR_START);
 		BUG();
@@ -863,7 +867,7 @@ void __init mem_init(void)
 	initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
 	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
+	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
@@ -968,7 +972,6 @@ static void mark_w1data_ro(void)
 	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
 	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
 		unsigned long pfn = kaddr_to_pfn((void *)addr);
-		struct page *page = pfn_to_page(pfn);
 		pte_t *ptep = virt_to_pte(NULL, addr);
 		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
 		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
@@ -986,7 +989,7 @@ static long __write_once initfree = 1;
 static int __init set_initfree(char *str)
 {
 	strict_strtol(str, 0, &initfree);
-	printk("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
 	return 1;
 }
 __setup("initfree=", set_initfree);
@@ -996,8 +999,8 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	unsigned long addr = (unsigned long) begin;
 
 	if (kdata_huge && !initfree) {
-		printk("Warning: ignoring initfree=0:"
+		pr_warning("Warning: ignoring initfree=0:"
 		       " incompatible with kdata=huge\n");
 		initfree = 1;
 	}
 	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
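One detail worth calling out in the hunk above: end = (end + PAGE_SIZE
- 1) & PAGE_MASK rounds end up to the next page boundary, since
PAGE_MASK is ~(PAGE_SIZE - 1). A tiny self-contained check of the
arithmetic (a sketch, using the usual 4 KB page size):

	#include <assert.h>

	#define PAGE_SIZE 0x1000UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		/* 0x2001 rounds up to the next 4 KB boundary... */
		assert(((0x2001UL + PAGE_SIZE - 1) & PAGE_MASK) == 0x3000UL);
		/* ...while an already-aligned value is unchanged. */
		assert(((0x3000UL + PAGE_SIZE - 1) & PAGE_MASK) == 0x3000UL);
		return 0;
	}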
@@ -1033,7 +1036,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
 }
 
 void free_initmem(void)