-rw-r--r--  include/linux/mmzone.h   4
-rw-r--r--  mm/page_alloc.c         12
2 files changed, 9 insertions, 7 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9742e3c16222..652673ea92f1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -197,7 +197,7 @@ struct zone {
 
 	/*
 	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
+	 * wait_table_hash_nr_entries	-- the size of the hash table array
 	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
 	 *
 	 * The purpose of all these is to keep track of the people
@@ -220,7 +220,7 @@ struct zone {
 	 * free_area_init_core() performs the initialization of them.
 	 */
 	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
+	unsigned long		wait_table_hash_nr_entries;
 	unsigned long		wait_table_bits;
 
 	/*
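
Note (illustrative, not part of the patch): the struct comment above ties the renamed field to wait_table_bits via wait_table_size == (1 << wait_table_bits), i.e. the table always holds a power-of-two number of waitqueues so a hash value can be reduced to an index with a mask. A minimal userspace sketch of that invariant follows; the struct and helper names here are made up for illustration and this is not the kernel's actual page_waitqueue() code:

#include <assert.h>

/* stand-in for the two renamed/related fields of struct zone */
struct zone_waitqueue_sketch {
	unsigned long wait_table_hash_nr_entries;	/* always a power of two */
	unsigned long wait_table_bits;			/* log2 of the entry count */
};

static unsigned long waitqueue_index(const struct zone_waitqueue_sketch *z,
				     unsigned long hash)
{
	/* the invariant zone_wait_table_init() is expected to maintain */
	assert(z->wait_table_hash_nr_entries == 1UL << z->wait_table_bits);
	/* power-of-two size lets a mask replace a modulo */
	return hash & (z->wait_table_hash_nr_entries - 1);
}
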
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fd631c2536a5..27320a0542d3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1727,7 +1727,7 @@ void __init build_all_zonelists(void)
  */
 #define PAGES_PER_WAITQUEUE	256
 
-static inline unsigned long wait_table_size(unsigned long pages)
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
 {
 	unsigned long size = 1;
 
@@ -2019,13 +2019,15 @@ void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	 * The per-page waitqueue mechanism uses hashed waitqueues
 	 * per zone.
 	 */
-	zone->wait_table_size = wait_table_size(zone_size_pages);
-	zone->wait_table_bits =	wait_table_bits(zone->wait_table_size);
+	zone->wait_table_hash_nr_entries =
+		 wait_table_hash_nr_entries(zone_size_pages);
+	zone->wait_table_bits =
+		 wait_table_bits(zone->wait_table_hash_nr_entries);
 	zone->wait_table = (wait_queue_head_t *)
-		alloc_bootmem_node(pgdat, zone->wait_table_size
+		alloc_bootmem_node(pgdat, zone->wait_table_hash_nr_entries
 					* sizeof(wait_queue_head_t));
 
-	for(i = 0; i < zone->wait_table_size; ++i)
+	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 		init_waitqueue_head(zone->wait_table + i);
 }
 
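
Note (illustrative, not from this patch): the renamed helper sizes the table at roughly one hashed waitqueue per PAGES_PER_WAITQUEUE pages, rounded up to a power of two so wait_table_bits stays meaningful. A self-contained userspace sketch of that sizing rule, under those assumptions, is below; the real kernel helper also clamps the result to a sane range, which this sketch omits:

#include <stdio.h>

#define PAGES_PER_WAITQUEUE	256

static unsigned long sketch_wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;	/* one waitqueue per 256 pages */
	while (size < pages)		/* round up to a power of two */
		size <<= 1;

	return size;
}

int main(void)
{
	/* e.g. a 1 GiB zone of 4 KiB pages: 262144 pages -> 1024 entries */
	printf("%lu\n", sketch_wait_table_hash_nr_entries(262144));
	return 0;
}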