author     Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 22:25:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-07-31 22:25:39 -0400
commit     ac694dbdbc403c00e2c14d10bc7b8412cc378259 (patch)
tree       e37328cfbeaf43716dd5914cad9179e57e84df76 /mm/sparse.c
parent     a40a1d3d0a2fd613fdec6d89d3c053268ced76ed (diff)
parent     437ea90cc3afdca5229b41c6b1d38c4842756cb9 (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge Andrew's second set of patches:
- MM
- a few random fixes
- a couple of RTC leftovers
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (120 commits)
rtc/rtc-88pm80x: remove unneeded devm_kfree
rtc/rtc-88pm80x: assign ret only when rtc_register_driver fails
mm: hugetlbfs: close race during teardown of hugetlbfs shared page tables
tmpfs: distribute interleave better across nodes
mm: remove redundant initialization
mm: warn if pg_data_t isn't initialized with zero
mips: zero out pg_data_t when it's allocated
memcg: fix memory accounting scalability in shrink_page_list
mm/sparse: remove index_init_lock
mm/sparse: more checks on mem_section number
mm/sparse: optimize sparse_index_alloc
memcg: add mem_cgroup_from_css() helper
memcg: further prevent OOM with too many dirty pages
memcg: prevent OOM with too many dirty pages
mm: mmu_notifier: fix freed page still mapped in secondary MMU
mm: memcg: only check anon swapin page charges for swap cache
mm: memcg: only check swap cache pages for repeated charging
mm: memcg: split swapin charge function into private and public part
mm: memcg: remove needless !mm fixup to init_mm when charging
mm: memcg: remove unneeded shmem charge type
...
Diffstat (limited to 'mm/sparse.c')
 mm/sparse.c | 29 ++++++++++-------------------
 1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index c7bb952400c8..fac95f2888f2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,21 +65,18 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 
 	if (slab_is_available()) {
 		if (node_state(nid, N_HIGH_MEMORY))
-			section = kmalloc_node(array_size, GFP_KERNEL, nid);
+			section = kzalloc_node(array_size, GFP_KERNEL, nid);
 		else
-			section = kmalloc(array_size, GFP_KERNEL);
-	} else
+			section = kzalloc(array_size, GFP_KERNEL);
+	} else {
 		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
-
-	if (section)
-		memset(section, 0, array_size);
+	}
 
 	return section;
 }
 
 static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
-	static DEFINE_SPINLOCK(index_init_lock);
 	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
 	struct mem_section *section;
 	int ret = 0;
@@ -90,20 +87,9 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 	section = sparse_index_alloc(nid);
 	if (!section)
 		return -ENOMEM;
-	/*
-	 * This lock keeps two different sections from
-	 * reallocating for the same index
-	 */
-	spin_lock(&index_init_lock);
-
-	if (mem_section[root]) {
-		ret = -EEXIST;
-		goto out;
-	}
 
 	mem_section[root] = section;
-out:
-	spin_unlock(&index_init_lock);
+
 	return ret;
 }
 #else /* !SPARSEMEM_EXTREME */
@@ -132,6 +118,8 @@ int __section_nr(struct mem_section* ms)
 			break;
 	}
 
+	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
+
 	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
 }
 
@@ -493,6 +481,9 @@ void __init sparse_init(void)
 	struct page **map_map;
 #endif
 
+	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+	set_pageblock_order();
+
 	/*
 	 * map is using big page (aka 2M in x86 64 bit)
 	 * usemap is less one page (aka 24 bytes)
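
A note on the first hunk: kzalloc()/kzalloc_node() allocate and zero in a single call (in the kernel they are kmalloc variants that pass __GFP_ZERO), which lets the patch drop the trailing "if (section) memset(...)" step; the bootmem path already hands back zeroed memory, so only the slab path needed explicit zeroing anyway. Below is a minimal userspace sketch of the same before/after pattern, with calloc standing in for kzalloc; the struct and function names here are illustrative stand-ins, not the kernel's.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Simplified stand-in for the kernel's struct mem_section. */
	struct mem_section {
		unsigned long section_mem_map;
	};

	/* Before the patch: allocate, then zero by hand. The memset has
	 * to cover every allocation path, which is easy to get wrong. */
	static struct mem_section *index_alloc_old(size_t array_size)
	{
		struct mem_section *section = malloc(array_size);

		if (section)
			memset(section, 0, array_size);
		return section;
	}

	/* After the patch: one call that both allocates and zeroes, the
	 * way kzalloc()/kzalloc_node() do in sparse_index_alloc(). */
	static struct mem_section *index_alloc_new(size_t array_size)
	{
		return calloc(1, array_size);
	}

	int main(void)
	{
		size_t array_size = 4 * sizeof(struct mem_section);
		struct mem_section *a = index_alloc_old(array_size);
		struct mem_section *b = index_alloc_new(array_size);

		if (a && b)
			printf("old: %lu, new: %lu\n",
			       a[0].section_mem_map, b[0].section_mem_map);
		free(a);
		free(b);
		return 0;
	}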
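
The second hunk drops index_init_lock together with the -EEXIST re-check. The sketch below (userspace C, a pthread mutex standing in for the kernel spinlock; all names are illustrative) shows the check-then-set pattern being removed. Dropping it is only safe when callers are already serialized, which is the premise of the patch: early boot is single-threaded, and memory hotplug is serialized by its own locking.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define EEXIST 17
	#define NR_ROOTS 16

	static void *mem_root[NR_ROOTS];
	static pthread_mutex_t index_init_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Pattern before the patch: take the lock, re-check the slot,
	 * and report -EEXIST if a concurrent initializer won the race. */
	static int root_init_locked(int root, void *section)
	{
		int ret = 0;

		pthread_mutex_lock(&index_init_lock);
		if (mem_root[root])
			ret = -EEXIST;
		else
			mem_root[root] = section;
		pthread_mutex_unlock(&index_init_lock);
		return ret;
	}

	/* Pattern after the patch: a plain store. Correct only under
	 * the assumption that callers never run concurrently. */
	static int root_init_serialized(int root, void *section)
	{
		mem_root[root] = section;
		return 0;
	}

	int main(void)
	{
		void *s = malloc(64);

		printf("locked: %d\n", root_init_locked(0, s));
		printf("locked again: %d\n", root_init_locked(0, s));
		printf("serialized: %d\n", root_init_serialized(1, s));
		free(s);
		return 0;
	}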