diff options
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r-- | include/linux/pagemap.h | 32 |
1 files changed, 8 insertions, 24 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1ebd65c91422..7e1ab155c67c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 			(__force unsigned long)mask;
 }
 
-/*
- * The page cache can be done in larger chunks than
- * one page, because it allows for more efficient
- * throughput (it can then be mapped into user
- * space in smaller chunks for same flexibility).
- *
- * Or rather, it _will_ be done in larger chunks.
- */
-#define PAGE_CACHE_SHIFT	PAGE_SHIFT
-#define PAGE_CACHE_SIZE		PAGE_SIZE
-#define PAGE_CACHE_MASK		PAGE_MASK
-#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
-
-#define page_cache_get(page)		get_page(page)
-#define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, bool cold);
 
 /*
@@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page)
 		return page->index << compound_order(page);
 
 	if (likely(!PageTransTail(page)))
-		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+		return page->index;
 
 	/*
 	 * We don't initialize ->index for tail pages: calculate based on
 	 * head page
 	 */
-	pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff = compound_head(page)->index;
 	pgoff += page - compound_head(page);
 	return pgoff;
 }
@@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page)
  */
 static inline loff_t page_offset(struct page *page)
 {
-	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page->index) << PAGE_SHIFT;
 }
 
 static inline loff_t page_file_offset(struct page *page)
 {
-	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+	return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
 }
 
 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
@@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 		return linear_hugepage_index(vma, address);
 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
 	pgoff += vma->vm_pgoff;
-	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	return pgoff;
 }
 
 extern void __lock_page(struct page *page);
@@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 /*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient.  That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
@@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page,
 
 static inline unsigned long dir_pages(struct inode *inode)
 {
-	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
+	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
+			PAGE_SHIFT;
 }
 
 #endif /* _LINUX_PAGEMAP_H */