Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h | 30 +--
1 file changed, 2 insertions(+), 28 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
 	seqlock_t		span_seqlock;
 #endif
 
-	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_hash_nr_entries	-- the size of the hash table array
-	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
-	 */
-	wait_queue_head_t	*wait_table;
-	unsigned long		wait_table_hash_nr_entries;
-	unsigned long		wait_table_bits;
+	int initialized;
 
 	/* Write-intensive fields used from the page allocator */
 	ZONE_PADDING(_pad1_)
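
The comment deleted above documents the technique being retired: rather than one wait queue per page, pages hash into a fixed table of wait queues, an unlocker broadcasts to the whole bucket, and each woken sleeper re-checks its own page. Below is a minimal userspace sketch of that bucket discipline using pthreads; every name in it (sketch_wait_on_page_locked, page_bucket, WAIT_TABLE_BITS, the pointer hash) is illustrative and assumed, not kernel API.

/*
 * Userspace sketch of the hashed-waitqueue scheme the deleted comment
 * describes. Illustrative only; not kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define WAIT_TABLE_BITS		4
#define WAIT_TABLE_SIZE		(1UL << WAIT_TABLE_BITS)

struct page {
	atomic_bool locked;
};

/* One shared queue per bucket instead of one per page: this is the
 * space saving the comment argues for. */
static struct wait_bucket {
	pthread_mutex_t lock;
	pthread_cond_t wait;
} wait_table[WAIT_TABLE_SIZE] = {
	[0 ... WAIT_TABLE_SIZE - 1] = {	/* GNU range initializer */
		PTHREAD_MUTEX_INITIALIZER,
		PTHREAD_COND_INITIALIZER,
	},
};

/* Hash the page's address into a bucket; distinct pages may collide. */
static struct wait_bucket *page_bucket(struct page *page)
{
	return &wait_table[((uintptr_t)page >> 6) & (WAIT_TABLE_SIZE - 1)];
}

/* "Sleep on the same queue when colliding": after every wakeup the
 * sleeper re-checks its own page, because the broadcast may have been
 * for a different page sharing this bucket -- the thundering-herd cost
 * the comment accepts. */
static void sketch_wait_on_page_locked(struct page *page)
{
	struct wait_bucket *b = page_bucket(page);

	pthread_mutex_lock(&b->lock);
	while (atomic_load(&page->locked))
		pthread_cond_wait(&b->wait, &b->lock);
	pthread_mutex_unlock(&b->lock);
}

/* "Wake all in that wait queue when removing": the unlocker cannot
 * know which sleepers wait on this particular page, so it wakes the
 * whole bucket. */
static void sketch_unlock_page(struct page *page)
{
	struct wait_bucket *b = page_bucket(page);

	pthread_mutex_lock(&b->lock);
	atomic_store(&page->locked, false);
	pthread_cond_broadcast(&b->wait);
	pthread_mutex_unlock(&b->lock);
}

Each zone carried its own such table, and the three removed fields were its handle, size, and order. The hunk drops all of them in favour of a bare initialized flag, so struct zone no longer pays for the machinery; whatever replaces the waiting itself lives outside this header.
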
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
 
 static inline bool zone_is_initialized(struct zone *zone)
 {
-	return !!zone->wait_table;
+	return zone->initialized;
 }
 
 static inline bool zone_is_empty(struct zone *zone)
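
The predicate used to piggyback on the pointer: !!zone->wait_table coerced "this zone has a wait table" into "this zone is initialized". With the table gone, the explicit flag carries that meaning, and it has to be raised during zone bring-up somewhere outside this header (this page's diffstat is limited to mmzone.h, so the setter is not part of the diff shown). A hedged sketch of the pairing, with a hypothetical helper name:

/* Hypothetical helper: mark the zone live once bring-up completes,
 * so zone_is_initialized() starts returning true. */
static inline void mark_zone_initialized(struct zone *zone)
{
	zone->initialized = 1;
}
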