 include/linux/bootmem.h |  5 ++---
 kernel/pid.c            | 15 ++++-----------
 mm/page_alloc.c         | 13 ++++++++++---
 3 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index bc3ab707369..dd97fb8408a 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -132,9 +132,6 @@ static inline void *alloc_remap(int nid, unsigned long size)
 }
 #endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
 
-extern unsigned long __meminitdata nr_kernel_pages;
-extern unsigned long __meminitdata nr_all_pages;
-
 extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
@@ -145,6 +142,8 @@ extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long limit);
 
 #define HASH_EARLY	0x00000001	/* Allocating during early boot? */
+#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
+					 * shift passed via *_hash_shift */
 
 /* Only NUMA needs hash distribution. 64bit NUMA architectures have
  * sufficient vmalloc space.
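
The new HASH_SMALL flag tells alloc_large_system_hash() that a table smaller
than one page is acceptable, and that the value already stored in *_hash_shift
on entry is the minimum shift to honour. A quick userspace sketch of the
arithmetic, assuming 8-byte struct hlist_head buckets and 4 KiB pages (both
typical but configuration-dependent, and not spelled out in the patch):

/* Userspace illustration only; bucket and page sizes are assumptions. */
#include <stdio.h>

int main(void)
{
        unsigned long bucketsize = 8;    /* sizeof(struct hlist_head), 64-bit */
        unsigned long page_size  = 4096;
        unsigned int  min_shift  = 4;    /* initial pidhash_shift in the patch */

        /* old floor: never less than one page worth of buckets */
        printf("page floor : %lu entries (%lu bytes)\n",
               page_size / bucketsize, page_size);

        /* HASH_SMALL floor: the caller-supplied minimum shift */
        printf("small floor: %lu entries (%lu bytes)\n",
               1UL << min_shift, (1UL << min_shift) * bucketsize);
        return 0;
}

Under those assumptions the old floor is 512 buckets (one 4 KiB page), while
HASH_SMALL permits the 16-bucket (128-byte) table that is enough on small
systems.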
diff --git a/kernel/pid.c b/kernel/pid.c
index 31310b5d3f5..d3f722d20f9 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -40,7 +40,7 @@
 #define pid_hashfn(nr, ns)	\
 	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
 static struct hlist_head *pid_hash;
-static int pidhash_shift;
+static unsigned int pidhash_shift = 4;
 struct pid init_struct_pid = INIT_STRUCT_PID;
 
 int pid_max = PID_MAX_DEFAULT;
@@ -499,19 +499,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
 void __init pidhash_init(void)
 {
 	int i, pidhash_size;
-	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
 
-	pidhash_shift = max(4, fls(megabytes * 4));
-	pidhash_shift = min(12, pidhash_shift);
+	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
+					   HASH_EARLY | HASH_SMALL,
+					   &pidhash_shift, NULL, 4096);
 	pidhash_size = 1 << pidhash_shift;
 
-	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
-		pidhash_size, pidhash_shift,
-		pidhash_size * sizeof(struct hlist_head));
-
-	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
-	if (!pid_hash)
-		panic("Could not alloc pidhash!\n");
 	for (i = 0; i < pidhash_size; i++)
 		INIT_HLIST_HEAD(&pid_hash[i]);
 }
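
The replacement call hands the sizing policy to alloc_large_system_hash():
"PID" is the table name used in the boot-time printout, sizeof(*pid_hash) is
the bucket size, numentries = 0 asks the allocator to scale the table by
available memory, scale = 18 requests roughly one bucket per 256 KiB of low
memory, &pidhash_shift both supplies the minimum shift (4) and receives log2
of the final table size, and limit = 4096 caps the table at 1 << 12 entries.
This roughly reproduces the old clamp of pidhash_shift to the range [4, 12].
A rough userspace model of that sizing, ignoring the allocator's internal
megabyte rounding and memory-fraction cap, with made-up memory sizes:

/* Approximate model only; the real sizing lives in alloc_large_system_hash(). */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;
        while (p < n)
                p <<= 1;
        return p;
}

static unsigned int pid_hash_shift(unsigned long long mem_bytes)
{
        unsigned int min_shift = 4, max_shift = 12;   /* limit 4096 == 1 << 12 */
        unsigned long entries = (unsigned long)(mem_bytes >> 18);  /* scale = 18 */

        if (entries < (1UL << min_shift))             /* HASH_SMALL floor */
                entries = 1UL << min_shift;
        entries = roundup_pow_of_two(entries);
        if (entries > (1UL << max_shift))             /* limit argument */
                entries = 1UL << max_shift;

        unsigned int shift = 0;
        while ((1UL << shift) < entries)
                shift++;
        return shift;
}

int main(void)
{
        unsigned long long sizes[] = { 16ULL << 20, 128ULL << 20, 2ULL << 30 };
        for (int i = 0; i < 3; i++)
                printf("%llu MiB -> pidhash_shift %u\n",
                       sizes[i] >> 20, pid_hash_shift(sizes[i]));
        return 0;
}

With these example sizes the model yields shifts of 6, 9 and 12: small
machines get a small table and large machines stop at the 4096-entry cap.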
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 33b1a4762a7..770f011e1c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -124,8 +124,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
 
 int min_free_kbytes = 1024;
 
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -4821,7 +4821,14 @@ void *__init alloc_large_system_hash(const char *tablename,
 		numentries <<= (PAGE_SHIFT - scale);
 
 		/* Make sure we've got at least a 0-order allocation.. */
-		if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+		if (unlikely(flags & HASH_SMALL)) {
+			/* Makes no sense without HASH_EARLY */
+			WARN_ON(!(flags & HASH_EARLY));
+			if (!(numentries >> *_hash_shift)) {
+				numentries = 1UL << *_hash_shift;
+				BUG_ON(!numentries);
+			}
+		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
 			numentries = PAGE_SIZE / bucketsize;
 	}
 	numentries = roundup_pow_of_two(numentries);
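
The new branch only changes the minimum-size fallback inside
alloc_large_system_hash(): without HASH_SMALL, a memory-scaled table smaller
than one page is still bumped up to PAGE_SIZE / bucketsize entries; with
HASH_SMALL, the floor becomes 1 << *_hash_shift, so sub-page tables (such as
a 16-entry PID hash) survive. A standalone sketch of just this branch, with
WARN_ON/BUG_ON replaced by userspace asserts and PAGE_SIZE assumed to be
4 KiB, for illustration only:

/* Userspace sketch of the HASH_SMALL floor logic; not the kernel function. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define HASH_EARLY  0x00000001
#define HASH_SMALL  0x00000002

static unsigned long apply_floor(unsigned long numentries,
                                 unsigned long bucketsize,
                                 int flags, unsigned int *_hash_shift)
{
        if (flags & HASH_SMALL) {
                /* makes no sense without HASH_EARLY */
                assert(flags & HASH_EARLY);
                if (!(numentries >> *_hash_shift)) {
                        numentries = 1UL << *_hash_shift;
                        assert(numentries);
                }
        } else if ((numentries * bucketsize) < PAGE_SIZE) {
                numentries = PAGE_SIZE / bucketsize;
        }
        return numentries;
}

int main(void)
{
        unsigned int shift = 4;

        /* tiny machine: memory scaling suggested only 2 buckets */
        printf("HASH_SMALL floor  : %lu entries\n",
               apply_floor(2, 8, HASH_EARLY | HASH_SMALL, &shift));
        printf("default page floor: %lu entries\n",
               apply_floor(2, 8, 0, &shift));
        return 0;
}

The first call keeps the table at the caller's minimum of 16 entries, while
the second shows the old behaviour of padding it out to 512 entries.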