author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-21 22:05:45 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-21 22:05:45 -0500
commit     df32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree       7a61cf658b2949bd426285eb9902be7758ced1ba /arch
parent     fbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent     78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton:
- a couple of misc things
- inotify/fsnotify work from Jan
- ocfs2 updates (partial)
- about half of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
mm/migrate: remove unused function, fail_migrate_page()
mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages
mm/migrate: correct failure handling if !hugepage_migration_support()
mm/migrate: add comment about permanent failure path
mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure
mm: compaction: reset scanner positions immediately when they meet
mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
mm: compaction: detect when scanners meet in isolate_freepages
mm: compaction: reset cached scanner pfn's before reading them
mm: compaction: encapsulate defer reset logic
mm: compaction: trace compaction begin and end
memcg, oom: lock mem_cgroup_print_oom_info
sched: add tracepoints related to NUMA task migration
mm: numa: do not automatically migrate KSM pages
mm: numa: trace tasks that fail migration due to rate limiting
mm: numa: limit scope of lock for NUMA migrate rate limiting
mm: numa: make NUMA-migrate related functions static
lib/show_mem.c: show num_poisoned_pages when oom
mm/hwpoison: add '#' to hwpoison_inject
mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter
...
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/dma.h          4
-rw-r--r--  arch/arm/kernel/devtree.c           2
-rw-r--r--  arch/arm/kernel/setup.c             2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c    8
-rw-r--r--  arch/arm/mm/init.c                  5
-rw-r--r--  arch/ia64/mm/contig.c              68
-rw-r--r--  arch/ia64/mm/discontig.c           63
-rw-r--r--  arch/ia64/mm/init.c                48
-rw-r--r--  arch/metag/mm/init.c                3
-rw-r--r--  arch/metag/mm/numa.c                3
-rw-r--r--  arch/microblaze/mm/init.c           3
-rw-r--r--  arch/parisc/mm/init.c              59
-rw-r--r--  arch/powerpc/mm/mem.c               2
-rw-r--r--  arch/powerpc/mm/numa.c              8
-rw-r--r--  arch/score/Kconfig                  1
-rw-r--r--  arch/sh/kernel/kgdb.c               1
-rw-r--r--  arch/sh/kernel/setup.c              4
-rw-r--r--  arch/sparc/mm/init_64.c             5
-rw-r--r--  arch/unicore32/mm/init.c            3
-rw-r--r--  arch/x86/include/asm/page_types.h   4
-rw-r--r--  arch/x86/kernel/check.c             2
-rw-r--r--  arch/x86/kernel/e820.c              2
-rw-r--r--  arch/x86/kernel/setup.c             2
-rw-r--r--  arch/x86/mm/init_32.c               2
-rw-r--r--  arch/x86/mm/init_64.c               2
-rw-r--r--  arch/x86/mm/memtest.c               2
-rw-r--r--  arch/x86/mm/numa.c                 52
-rw-r--r--  arch/x86/mm/srat.c                  5
28 files changed, 153 insertions, 212 deletions
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 58b8c6a0ab1f..99084431d6ae 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -8,8 +8,8 @@
 #define MAX_DMA_ADDRESS 0xffffffffUL
 #else
 #define MAX_DMA_ADDRESS ({ \
-        extern unsigned long arm_dma_zone_size; \
-        arm_dma_zone_size ? \
+        extern phys_addr_t arm_dma_zone_size; \
+        arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
         (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
 
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 34d5fd585bbb..f751714d52c1 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -33,7 +33,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 
 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 {
-        return alloc_bootmem_align(size, align);
+        return memblock_virt_alloc(size, align);
 }
 
 void __init arm_dt_memblock_reserve(void)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 987a7f5bce5f..8ce1cbd08dba 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -717,7 +717,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
         kernel_data.end = virt_to_phys(_end - 1);
 
         for_each_memblock(memory, region) {
-                res = alloc_bootmem_low(sizeof(*res));
+                res = memblock_virt_alloc(sizeof(*res), 0);
                 res->name = "System RAM";
                 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 8a1b5e0bad40..f7a6fd35b1e4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2791,9 +2791,7 @@ static int __init _alloc_links(struct omap_hwmod_link **ml,
         sz = sizeof(struct omap_hwmod_link) * LINKS_PER_OCP_IF;
 
         *sl = NULL;
-        *ml = alloc_bootmem(sz);
-
-        memset(*ml, 0, sz);
+        *ml = memblock_virt_alloc(sz, 0);
 
         *sl = (void *)(*ml) + sizeof(struct omap_hwmod_link);
 
@@ -2912,9 +2910,7 @@ static int __init _alloc_linkspace(struct omap_hwmod_ocp_if **ois)
         pr_debug("omap_hwmod: %s: allocating %d byte linkspace (%d links)\n",
                  __func__, sz, max_ls);
 
-        linkspace = alloc_bootmem(sz);
-
-        memset(linkspace, 0, sz);
+        linkspace = memblock_virt_alloc(sz, 0);
 
         return 0;
 }
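The two hunks above follow the same conversion pattern used throughout this series: memblock_virt_alloc() returns zeroed memory and panics on failure, so the explicit memset() that used to follow alloc_bootmem() can simply be dropped. A minimal sketch of the pattern, with my_table standing in for whatever early-boot array a caller allocates (illustrative only, not code from this merge):

    #include <linux/init.h>
    #include <linux/bootmem.h>

    /* Hypothetical early-boot allocation converted from alloc_bootmem().
     * memblock_virt_alloc(size, align) zeroes the memory it returns;
     * align == 0 falls back to the default SMP_CACHE_BYTES alignment.
     */
    static unsigned long *my_table __initdata;

    static int __init my_table_alloc(unsigned int nr_entries)
    {
            size_t sz = nr_entries * sizeof(*my_table);

            /* Before: my_table = alloc_bootmem(sz); memset(my_table, 0, sz); */
            my_table = memblock_virt_alloc(sz, 0);
            return 0;
    }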
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..11eb8add7820 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -92,9 +92,6 @@ void show_mem(unsigned int filter)
         printk("Mem-info:\n");
         show_free_areas(filter);
 
-        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-                return;
-
         for_each_bank (i, mi) {
                 struct membank *bank = &mi->bank[i];
                 unsigned int pfn1, pfn2;
@@ -461,7 +458,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
          * free the section of the memmap array.
          */
         if (pg < pgend)
-                free_bootmem(pg, pgend - pg);
+                memblock_free_early(pg, pgend - pg);
 }
 
 /*
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index da5237d636d6..52715a71aede 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -31,74 +31,6 @@
 static unsigned long max_gap;
 #endif
 
-/**
- * show_mem - give short summary of memory stats
- *
- * Shows a simple page count of reserved and used pages in the system.
- * For discontig machines, it does this on a per-pgdat basis.
- */
-void show_mem(unsigned int filter)
-{
-        int i, total_reserved = 0;
-        int total_shared = 0, total_cached = 0;
-        unsigned long total_present = 0;
-        pg_data_t *pgdat;
-
-        printk(KERN_INFO "Mem-info:\n");
-        show_free_areas(filter);
-        printk(KERN_INFO "Node memory in pages:\n");
-        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-                return;
-        for_each_online_pgdat(pgdat) {
-                unsigned long present;
-                unsigned long flags;
-                int shared = 0, cached = 0, reserved = 0;
-                int nid = pgdat->node_id;
-
-                if (skip_free_areas_node(filter, nid))
-                        continue;
-                pgdat_resize_lock(pgdat, &flags);
-                present = pgdat->node_present_pages;
-                for(i = 0; i < pgdat->node_spanned_pages; i++) {
-                        struct page *page;
-                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-                                touch_nmi_watchdog();
-                        if (pfn_valid(pgdat->node_start_pfn + i))
-                                page = pfn_to_page(pgdat->node_start_pfn + i);
-                        else {
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-                                if (max_gap < LARGE_GAP)
-                                        continue;
-#endif
-                                i = vmemmap_find_next_valid_pfn(nid, i) - 1;
-                                continue;
-                        }
-                        if (PageReserved(page))
-                                reserved++;
-                        else if (PageSwapCache(page))
-                                cached++;
-                        else if (page_count(page))
-                                shared += page_count(page)-1;
-                }
-                pgdat_resize_unlock(pgdat, &flags);
-                total_present += present;
-                total_reserved += reserved;
-                total_cached += cached;
-                total_shared += shared;
-                printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
-                       "shrd: %10d, swpd: %10d\n", nid,
-                       present, reserved, shared, cached);
-        }
-        printk(KERN_INFO "%ld pages of RAM\n", total_present);
-        printk(KERN_INFO "%d reserved pages\n", total_reserved);
-        printk(KERN_INFO "%d pages shared\n", total_shared);
-        printk(KERN_INFO "%d pages swap cached\n", total_cached);
-        printk(KERN_INFO "Total of %ld pages in page table cache\n",
-               quicklist_total_size());
-        printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
-}
-
-
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 2de08f4d9930..878626805369 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -608,69 +608,6 @@ void *per_cpu_init(void)
 #endif /* CONFIG_SMP */
 
 /**
- * show_mem - give short summary of memory stats
- *
- * Shows a simple page count of reserved and used pages in the system.
- * For discontig machines, it does this on a per-pgdat basis.
- */
-void show_mem(unsigned int filter)
-{
-        int i, total_reserved = 0;
-        int total_shared = 0, total_cached = 0;
-        unsigned long total_present = 0;
-        pg_data_t *pgdat;
-
-        printk(KERN_INFO "Mem-info:\n");
-        show_free_areas(filter);
-        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-                return;
-        printk(KERN_INFO "Node memory in pages:\n");
-        for_each_online_pgdat(pgdat) {
-                unsigned long present;
-                unsigned long flags;
-                int shared = 0, cached = 0, reserved = 0;
-                int nid = pgdat->node_id;
-
-                if (skip_free_areas_node(filter, nid))
-                        continue;
-                pgdat_resize_lock(pgdat, &flags);
-                present = pgdat->node_present_pages;
-                for(i = 0; i < pgdat->node_spanned_pages; i++) {
-                        struct page *page;
-                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-                                touch_nmi_watchdog();
-                        if (pfn_valid(pgdat->node_start_pfn + i))
-                                page = pfn_to_page(pgdat->node_start_pfn + i);
-                        else {
-                                i = vmemmap_find_next_valid_pfn(nid, i) - 1;
-                                continue;
-                        }
-                        if (PageReserved(page))
-                                reserved++;
-                        else if (PageSwapCache(page))
-                                cached++;
-                        else if (page_count(page))
-                                shared += page_count(page)-1;
-                }
-                pgdat_resize_unlock(pgdat, &flags);
-                total_present += present;
-                total_reserved += reserved;
-                total_cached += cached;
-                total_shared += shared;
-                printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
-                       "shrd: %10d, swpd: %10d\n", nid,
-                       present, reserved, shared, cached);
-        }
-        printk(KERN_INFO "%ld pages of RAM\n", total_present);
-        printk(KERN_INFO "%d reserved pages\n", total_reserved);
-        printk(KERN_INFO "%d pages shared\n", total_shared);
-        printk(KERN_INFO "%d pages swap cached\n", total_cached);
-        printk(KERN_INFO "Total of %ld pages in page table cache\n",
-               quicklist_total_size());
-        printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
-}
-
-/**
  * call_pernode_memory - use SRAT to call callback functions with node info
  * @start: physical start of range
  * @len: length of range
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 88504abf5704..25c350264a41 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -684,3 +684,51 @@ per_linux32_init(void)
 }
 
 __initcall(per_linux32_init);
+
+/**
+ * show_mem - give short summary of memory stats
+ *
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
+ */
+void show_mem(unsigned int filter)
+{
+        int total_reserved = 0;
+        unsigned long total_present = 0;
+        pg_data_t *pgdat;
+
+        printk(KERN_INFO "Mem-info:\n");
+        show_free_areas(filter);
+        printk(KERN_INFO "Node memory in pages:\n");
+        for_each_online_pgdat(pgdat) {
+                unsigned long present;
+                unsigned long flags;
+                int reserved = 0;
+                int nid = pgdat->node_id;
+                int zoneid;
+
+                if (skip_free_areas_node(filter, nid))
+                        continue;
+                pgdat_resize_lock(pgdat, &flags);
+
+                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+                        struct zone *zone = &pgdat->node_zones[zoneid];
+                        if (!populated_zone(zone))
+                                continue;
+
+                        reserved += zone->present_pages - zone->managed_pages;
+                }
+                present = pgdat->node_present_pages;
+
+                pgdat_resize_unlock(pgdat, &flags);
+                total_present += present;
+                total_reserved += reserved;
+                printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, ",
+                       nid, present, reserved);
+        }
+        printk(KERN_INFO "%ld pages of RAM\n", total_present);
+        printk(KERN_INFO "%d reserved pages\n", total_reserved);
+        printk(KERN_INFO "Total of %ld pages in page table cache\n",
+               quicklist_total_size());
+        printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
+}
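The rewritten ia64 show_mem() above (and the parisc version further down) no longer walks every struct page; it derives the reserved count from per-zone counters instead, treating pages that are present but not managed by the page allocator as reserved. A standalone sketch of that per-node accounting under the 3.14-era struct zone fields; my_node_reserved_pages is an illustrative helper, not part of the patch:

    #include <linux/mmzone.h>

    /* Reserved pages on one node, computed the way the new show_mem() does:
     * present_pages minus managed_pages, summed over populated zones.
     */
    static unsigned long my_node_reserved_pages(pg_data_t *pgdat)
    {
            unsigned long reserved = 0;
            int zoneid;

            for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                    struct zone *zone = &pgdat->node_zones[zoneid];

                    if (!populated_zone(zone))
                            continue;

                    reserved += zone->present_pages - zone->managed_pages;
            }
            return reserved;
    }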
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 3cd6288f65c2..11fa51c89617 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -204,7 +204,8 @@ static void __init do_init_bootmem(void)
                 start_pfn = memblock_region_memory_base_pfn(reg);
                 end_pfn = memblock_region_memory_end_pfn(reg);
                 memblock_set_node(PFN_PHYS(start_pfn),
-                                  PFN_PHYS(end_pfn - start_pfn), 0);
+                                  PFN_PHYS(end_pfn - start_pfn),
+                                  &memblock.memory, 0);
         }
 
         /* All of system RAM sits in node 0 for the non-NUMA case */
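This hunk and the remaining memblock_set_node() hunks below all track one interface change from this series: the function now takes the memblock_type it should tag, so callers name &memblock.memory (or, in the x86 NUMA code, &memblock.reserved) explicitly. A minimal before/after sketch of a call site; my_base, my_size and my_nid are placeholders for a caller's values:

    #include <linux/init.h>
    #include <linux/memblock.h>

    /* Old signature (before this series):
     *     memblock_set_node(base, size, nid);
     * New signature, as used in the hunks above and below:
     *     memblock_set_node(base, size, struct memblock_type *type, nid);
     */
    static void __init my_assign_range_to_node(phys_addr_t my_base,
                                               phys_addr_t my_size, int my_nid)
    {
            /* Before: memblock_set_node(my_base, my_size, my_nid); */
            memblock_set_node(my_base, my_size, &memblock.memory, my_nid);
    }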
diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c
index b172aa45fcf8..67b46c295072 100644
--- a/arch/metag/mm/numa.c
+++ b/arch/metag/mm/numa.c
@@ -42,7 +42,8 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
         memblock_add(start, end - start);
 
         memblock_set_node(PFN_PHYS(start_pfn),
-                          PFN_PHYS(end_pfn - start_pfn), nid);
+                          PFN_PHYS(end_pfn - start_pfn),
+                          &memblock.memory, nid);
 
         /* Node-local pgdat */
         pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data),
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 74c7bcc1e82d..89077d346714 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -192,7 +192,8 @@ void __init setup_memory(void)
                 start_pfn = memblock_region_memory_base_pfn(reg);
                 end_pfn = memblock_region_memory_end_pfn(reg);
                 memblock_set_node(start_pfn << PAGE_SHIFT,
-                                  (end_pfn - start_pfn) << PAGE_SHIFT, 0);
+                                  (end_pfn - start_pfn) << PAGE_SHIFT,
+                                  &memblock.memory, 0);
         }
 
         /* free bootmem is whole main memory */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 96f8168cf4ec..ae085ad0fba0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -645,55 +645,30 @@ EXPORT_SYMBOL(empty_zero_page);
 
 void show_mem(unsigned int filter)
 {
-        int i,free = 0,total = 0,reserved = 0;
-        int shared = 0, cached = 0;
+        int total = 0,reserved = 0;
+        pg_data_t *pgdat;
 
         printk(KERN_INFO "Mem-info:\n");
         show_free_areas(filter);
-        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-                return;
-#ifndef CONFIG_DISCONTIGMEM
-        i = max_mapnr;
-        while (i-- > 0) {
-                total++;
-                if (PageReserved(mem_map+i))
-                        reserved++;
-                else if (PageSwapCache(mem_map+i))
-                        cached++;
-                else if (!page_count(&mem_map[i]))
-                        free++;
-                else
-                        shared += page_count(&mem_map[i]) - 1;
-        }
-#else
-        for (i = 0; i < npmem_ranges; i++) {
-                int j;
 
-                for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
-                        struct page *p;
-                        unsigned long flags;
-
-                        pgdat_resize_lock(NODE_DATA(i), &flags);
-                        p = nid_page_nr(i, j) - node_start_pfn(i);
-
-                        total++;
-                        if (PageReserved(p))
-                                reserved++;
-                        else if (PageSwapCache(p))
-                                cached++;
-                        else if (!page_count(p))
-                                free++;
-                        else
-                                shared += page_count(p) - 1;
-                        pgdat_resize_unlock(NODE_DATA(i), &flags);
-                }
-        }
-#endif
+        for_each_online_pgdat(pgdat) {
+                unsigned long flags;
+                int zoneid;
+
+                pgdat_resize_lock(pgdat, &flags);
+                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+                        struct zone *zone = &pgdat->node_zones[zoneid];
+                        if (!populated_zone(zone))
+                                continue;
+
+                        total += zone->present_pages;
+                        reserved = zone->present_pages - zone->managed_pages;
+                }
+                pgdat_resize_unlock(pgdat, &flags);
+        }
+
 printk(KERN_INFO "%d pages of RAM\n", total);
 printk(KERN_INFO "%d reserved pages\n", reserved);
-printk(KERN_INFO "%d pages shared\n", shared);
-printk(KERN_INFO "%d pages swap cached\n", cached);
-
 
 #ifdef CONFIG_DISCONTIGMEM
         {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 3fa93dc7fe75..8c1dd23652a1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -209,7 +209,7 @@ void __init do_init_bootmem(void)
         /* Place all memblock_regions in the same node and merge contiguous
          * memblock_regions
          */
-        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 
         /* Add all physical memory to the bootmem map, mark each area
          * present.
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 078d3e00a616..5a944f25e94f 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -670,7 +670,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
                         node_set_online(nid);
                         sz = numa_enforce_memory_limit(base, size);
                         if (sz)
-                                memblock_set_node(base, sz, nid);
+                                memblock_set_node(base, sz,
+                                                  &memblock.memory, nid);
                 } while (--ranges);
         }
 }
@@ -760,7 +761,7 @@ new_range:
                         continue;
                 }
 
-                memblock_set_node(start, size, nid);
+                memblock_set_node(start, size, &memblock.memory, nid);
 
                 if (--ranges)
                         goto new_range;
@@ -797,7 +798,8 @@ static void __init setup_nonnuma(void)
 
                 fake_numa_create_new_node(end_pfn, &nid);
                 memblock_set_node(PFN_PHYS(start_pfn),
-                                  PFN_PHYS(end_pfn - start_pfn), nid);
+                                  PFN_PHYS(end_pfn - start_pfn),
+                                  &memblock.memory, nid);
                 node_set_online(nid);
         }
 }
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 305f7ee1f382..c75d06aa27c3 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -2,7 +2,6 @@ menu "Machine selection"
 
 config SCORE
         def_bool y
-        select HAVE_GENERIC_HARDIRQS
         select GENERIC_IRQ_SHOW
         select GENERIC_IOMAP
         select GENERIC_ATOMIC64
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 38b313909ac9..adad46e41a1d 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -13,6 +13,7 @@
 #include <linux/kdebug.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/sched.h>
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 1cf90e947dbf..de19cfa768f2 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -230,8 +230,8 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
         pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
                          PAGE_KERNEL);
 
-        memblock_set_node(PFN_PHYS(start_pfn),
-                          PFN_PHYS(end_pfn - start_pfn), nid);
+        memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
+                          &memblock.memory, nid);
 }
 
 void __init __weak plat_early_device_setup(void)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5322e530d09c..eafbc65c9c47 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1021,7 +1021,8 @@ static void __init add_node_ranges(void)
                                 "start[%lx] end[%lx]\n",
                                 nid, start, this_end);
 
-                        memblock_set_node(start, this_end - start, nid);
+                        memblock_set_node(start, this_end - start,
+                                          &memblock.memory, nid);
                         start = this_end;
                 }
         }
@@ -1325,7 +1326,7 @@ static void __init bootmem_init_nonnuma(void)
                (top_of_ram - total_ram) >> 20);
 
         init_node_masks_nonnuma();
-        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
         allocate_node_data(0);
         node_set_online(0);
 }
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index ae6bc036db92..be2bde9b07cf 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -66,9 +66,6 @@ void show_mem(unsigned int filter)
         printk(KERN_DEFAULT "Mem-info:\n");
         show_free_areas(filter);
 
-        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-                return;
-
         for_each_bank(i, mi) {
                 struct membank *bank = &mi->bank[i];
                 unsigned int pfn1, pfn2;
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index f97fbe3abb67..2f59cce3b38a 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,9 +51,9 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
-static inline phys_addr_t get_max_mapped(void)
+static inline phys_addr_t get_max_low_mapped(void)
 {
-        return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+        return (phys_addr_t)max_low_pfn_mapped << PAGE_SHIFT;
 }
 
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index e2dbcb7dabdd..83a7995625a6 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -91,7 +91,7 @@ void __init setup_bios_corruption_check(void)
 
         corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
 
-        for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+        for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) {
                 start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
                                 PAGE_SIZE, corruption_check_size);
                 end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
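Here and in the e820 and memtest hunks that follow, the node argument of for_each_free_mem_range() changes from MAX_NUMNODES to NUMA_NO_NODE, the value this series standardizes on for "iterate regardless of node". A small illustrative use of the iterator after the change; my_count_free_memory is a made-up helper, not part of the patch:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/numa.h>

    /* Sum all free memblock ranges without filtering by node:
     * NUMA_NO_NODE replaces MAX_NUMNODES as the "any node" argument.
     */
    static phys_addr_t __init my_count_free_memory(void)
    {
            phys_addr_t start, end, total = 0;
            u64 i;

            for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                    total += end - start;

            return total;
    }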
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 174da5fc5a7b..988c00a1f60d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1120,7 +1120,7 @@ void __init memblock_find_dma_reserve(void)
                 nr_pages += end_pfn - start_pfn;
         }
 
-        for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+        for_each_free_mem_range(u, NUMA_NO_NODE, &start, &end, NULL) {
                 start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
                 end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
                 if (start_pfn < end_pfn)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 06853e670354..c9675594d7ca 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p)
 
         setup_real_mode();
 
-        memblock_set_current_limit(get_max_mapped());
+        memblock_set_current_limit(get_max_low_mapped());
         dma_contiguous_reserve(0);
 
         /*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5bdc5430597c..e39504878aec 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -665,7 +665,7 @@ void __init initmem_init(void)
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
-        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
         sparse_memory_present_with_active_regions(0);
 
 #ifdef CONFIG_FLATMEM
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 104d56a9245f..f35c66c5959a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -643,7 +643,7 @@ kernel_physical_mapping_init(unsigned long start,
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
-        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 }
 #endif
 
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 8dabbed409ee..1e9da795767a 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -74,7 +74,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end)
         u64 i;
         phys_addr_t this_start, this_end;
 
-        for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+        for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
                 this_start = clamp_t(phys_addr_t, this_start, start, end);
                 this_end = clamp_t(phys_addr_t, this_end, start, end);
                 if (this_start < this_end) {
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c85da7bb6b60..81b2750f3666 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -491,7 +491,16 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
         for (i = 0; i < mi->nr_blks; i++) {
                 struct numa_memblk *mb = &mi->blk[i];
-                memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+                memblock_set_node(mb->start, mb->end - mb->start,
+                                  &memblock.memory, mb->nid);
+
+                /*
+                 * At this time, all memory regions reserved by memblock are
+                 * used by the kernel. Set the nid in memblock.reserved will
+                 * mark out all the nodes the kernel resides in.
+                 */
+                memblock_set_node(mb->start, mb->end - mb->start,
+                                  &memblock.reserved, mb->nid);
         }
 
         /*
@@ -553,6 +562,30 @@ static void __init numa_init_array(void)
         }
 }
 
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+        int i, nid;
+        nodemask_t numa_kernel_nodes;
+        unsigned long start, end;
+        struct memblock_type *type = &memblock.reserved;
+
+        /* Mark all kernel nodes. */
+        for (i = 0; i < type->cnt; i++)
+                node_set(type->regions[i].nid, numa_kernel_nodes);
+
+        /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+        for (i = 0; i < numa_meminfo.nr_blks; i++) {
+                nid = numa_meminfo.blk[i].nid;
+                if (!node_isset(nid, numa_kernel_nodes))
+                        continue;
+
+                start = numa_meminfo.blk[i].start;
+                end = numa_meminfo.blk[i].end;
+
+                memblock_clear_hotplug(start, end - start);
+        }
+}
+
 static int __init numa_init(int (*init_func)(void))
 {
         int i;
@@ -565,7 +598,12 @@ static int __init numa_init(int (*init_func)(void))
         nodes_clear(node_possible_map);
         nodes_clear(node_online_map);
         memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-        WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
+        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
+                                  MAX_NUMNODES));
+        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
+                                  MAX_NUMNODES));
+        /* In case that parsing SRAT failed. */
+        WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
         numa_reset_distance();
 
         ret = init_func();
@@ -601,6 +639,16 @@ static int __init numa_init(int (*init_func)(void))
                         numa_clear_node(i);
         }
         numa_init_array();
+
+        /*
+         * At very early time, the kernel have to use some memory such as
+         * loading the kernel image. We cannot prevent this anyway. So any
+         * node the kernel resides in should be un-hotpluggable.
+         *
+         * And when we come here, numa_init() won't fail.
+         */
+        numa_clear_kernel_node_hotplug();
+
         return 0;
 }
 
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 266ca912f62e..1a25187e151e 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -181,6 +181,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                 (unsigned long long) start, (unsigned long long) end - 1,
                 hotpluggable ? " hotplug" : "");
 
+        /* Mark hotplug range in memblock. */
+        if (hotpluggable && memblock_mark_hotplug(start, ma->length))
+                pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
+                        (unsigned long long)start, (unsigned long long)end - 1);
+
         return 0;
 out_err_bad_srat:
         bad_srat();
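Taken together with the numa.c hunks above, these changes wire up a three-step flow: SRAT parsing marks hotpluggable ranges in memblock, numa_register_memblks() records node IDs in memblock.reserved, and numa_clear_kernel_node_hotplug() then strips the hotplug flag from any node the kernel already occupies, since kernel memory cannot be offlined. The sketch below compresses that flow into one illustrative function for a single range; in the real code the steps live in the separate functions shown above, and my_base/my_size/my_nid are placeholders:

    #include <linux/init.h>
    #include <linux/memblock.h>
    #include <linux/printk.h>

    /* Illustrative only: the three steps this series ties together,
     * applied to one SRAT-reported range.
     */
    static void __init my_hotplug_flow(phys_addr_t my_base,
                                       phys_addr_t my_size, int my_nid)
    {
            /* 1. SRAT parsing: remember that the range is hotpluggable. */
            if (memblock_mark_hotplug(my_base, my_size))
                    pr_warn("could not mark range as hotplug\n");

            /* 2. numa_register_memblks(): record the node in memblock.reserved
             *    so kernel-occupied nodes can be identified later.
             */
            memblock_set_node(my_base, my_size, &memblock.reserved, my_nid);

            /* 3. numa_clear_kernel_node_hotplug(): nodes holding kernel memory
             *    must not be treated as hotpluggable, so clear the flag again.
             */
            memblock_clear_hotplug(my_base, my_size);
    }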