diff options
| author | Hugh Dickins <hugh@veritas.com> | 2005-11-22 00:32:14 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-11-22 12:13:42 -0500 |
| commit | 664beed0190fae687ac51295694004902ddeb18e (patch) | |
| tree | 89a7c8d9d541fb678c567834cb758fc88b375d47 /include/linux/mm.h | |
| parent | 1cdca61bf8537043edde8ef784ce1a1351361dac (diff) | |
[PATCH] unpaged: unifdefed PageCompound
It looks like snd_xxx is not the only nopage to be using PageReserved as a way
of holding a high-order page together: which no longer works, but is masked by
our failure to free from VM_RESERVED areas. We cannot fix that bug without
first substituting another way to hold the high-order page together, while
farming out the 0-order pages from within it.
That's just what PageCompound is designed for, but it's been kept under
CONFIG_HUGETLB_PAGE. Remove the #ifdefs: which saves some space (out-of-line
put_page), doesn't slow down what most needs to be fast (already using
hugetlb), and unifies the way we handle high-order pages.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mm.h')
| -rw-r--r-- | include/linux/mm.h | 19 |
1 files changed, 0 insertions, 19 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 0986d19be0b7..9701210c6680 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -311,8 +311,6 @@ struct page { | |||
| 311 | 311 | ||
| 312 | extern void FASTCALL(__page_cache_release(struct page *)); | 312 | extern void FASTCALL(__page_cache_release(struct page *)); |
| 313 | 313 | ||
| 314 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 315 | |||
| 316 | static inline int page_count(struct page *page) | 314 | static inline int page_count(struct page *page) |
| 317 | { | 315 | { |
| 318 | if (PageCompound(page)) | 316 | if (PageCompound(page)) |
| @@ -329,23 +327,6 @@ static inline void get_page(struct page *page) | |||
| 329 | 327 | ||
| 330 | void put_page(struct page *page); | 328 | void put_page(struct page *page); |
| 331 | 329 | ||
| 332 | #else /* CONFIG_HUGETLB_PAGE */ | ||
| 333 | |||
| 334 | #define page_count(p) (atomic_read(&(p)->_count) + 1) | ||
| 335 | |||
| 336 | static inline void get_page(struct page *page) | ||
| 337 | { | ||
| 338 | atomic_inc(&page->_count); | ||
| 339 | } | ||
| 340 | |||
| 341 | static inline void put_page(struct page *page) | ||
| 342 | { | ||
| 343 | if (put_page_testzero(page)) | ||
| 344 | __page_cache_release(page); | ||
| 345 | } | ||
| 346 | |||
| 347 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
| 348 | |||
| 349 | /* | 330 | /* |
| 350 | * Multiple processes may "see" the same page. E.g. for untouched | 331 | * Multiple processes may "see" the same page. E.g. for untouched |
| 351 | * mappings of /dev/null, all processes see the same page full of | 332 | * mappings of /dev/null, all processes see the same page full of |
