author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2016-04-01 08:29:48 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2016-04-04 13:41:08 -0400
commit     ea1754a084760e68886f5b725c8eaada9cc57155 (patch)
tree       2e14936a959a661ee68d4490cb9b82b94bb27ab9
parent     09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (diff)
mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
Mostly direct substitution with occasional adjustment or removing
outdated comments.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
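The substitution is one-for-one: the parent commit (09cbfeaf1a5a) had already
reduced the PAGE_CACHE_{SIZE,SHIFT,MASK} macros to aliases of their PAGE_*
counterparts and page_cache_get()/page_cache_release() to get_page()/put_page(),
so each remaining site rewrites token-for-token. A minimal sketch of the
pattern, with hypothetical variable names not taken from this patch:

    /* Before: legacy page-cache aliases. */
    unsigned long off = pos & ~PAGE_CACHE_MASK;    /* byte offset within the page */
    pgoff_t idx = pos >> PAGE_CACHE_SHIFT;         /* page index in the mapping   */
    memset(kaddr + off, 0, PAGE_CACHE_SIZE - off); /* zero to the end of the page */
    page_cache_release(page);                      /* drop the page reference     */

    /* After: the plain page macros, identical semantics. */
    unsigned long off = pos & ~PAGE_MASK;
    pgoff_t idx = pos >> PAGE_SHIFT;
    memset(kaddr + off, 0, PAGE_SIZE - off);
    put_page(page);

Comments written for a hypothetical PAGE_CACHE_SIZE != PAGE_SIZE future (for
example the FIXMEs around clear_page() in fs/ntfs/compress.c) are dropped
rather than rewritten, since the two can no longer differ.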
-rw-r--r--  Documentation/filesystems/cramfs.txt                           2
-rw-r--r--  Documentation/filesystems/tmpfs.txt                            2
-rw-r--r--  Documentation/filesystems/vfs.txt                              4
-rw-r--r--  arch/parisc/mm/init.c                                          2
-rw-r--r--  block/bio.c                                                    4
-rw-r--r--  drivers/block/drbd/drbd_int.h                                  4
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/types.h              2
-rw-r--r--  drivers/staging/lustre/lnet/selftest/selftest.h                4
-rw-r--r--  drivers/staging/lustre/lustre/include/lu_object.h              2
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre/lustre_idl.h      4
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_net.h             4
-rw-r--r--  drivers/staging/lustre/lustre/include/obd.h                    2
-rw-r--r--  drivers/staging/lustre/lustre/llite/dir.c                      5
-rw-r--r--  drivers/staging/lustre/lustre/llite/rw.c                       2
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_io.c                   2
-rw-r--r--  drivers/staging/lustre/lustre/lmv/lmv_obd.c                    8
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c     1
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cache.c                  2
-rw-r--r--  fs/btrfs/check-integrity.c                                     4
-rw-r--r--  fs/btrfs/extent_io.c                                           8
-rw-r--r--  fs/btrfs/struct-funcs.c                                        4
-rw-r--r--  fs/btrfs/tests/extent-io-tests.c                               2
-rw-r--r--  fs/cifs/cifsglob.h                                             4
-rw-r--r--  fs/cifs/file.c                                                 2
-rw-r--r--  fs/cramfs/README                                              26
-rw-r--r--  fs/cramfs/inode.c                                              2
-rw-r--r--  fs/dax.c                                                       4
-rw-r--r--  fs/ecryptfs/inode.c                                            4
-rw-r--r--  fs/ext2/dir.c                                                  4
-rw-r--r--  fs/ext4/ext4.h                                                 4
-rw-r--r--  fs/ext4/inode.c                                                2
-rw-r--r--  fs/ext4/mballoc.c                                              4
-rw-r--r--  fs/ext4/readpage.c                                             2
-rw-r--r--  fs/hugetlbfs/inode.c                                           2
-rw-r--r--  fs/mpage.c                                                     2
-rw-r--r--  fs/ntfs/aops.c                                                 2
-rw-r--r--  fs/ntfs/aops.h                                                 2
-rw-r--r--  fs/ntfs/compress.c                                            21
-rw-r--r--  fs/ntfs/dir.c                                                 16
-rw-r--r--  fs/ntfs/file.c                                                 2
-rw-r--r--  fs/ntfs/index.c                                                2
-rw-r--r--  fs/ntfs/inode.c                                                4
-rw-r--r--  fs/ntfs/super.c                                               14
-rw-r--r--  fs/ocfs2/aops.c                                                2
-rw-r--r--  fs/ocfs2/refcounttree.c                                        2
-rw-r--r--  fs/reiserfs/journal.c                                          2
-rw-r--r--  fs/squashfs/cache.c                                            4
-rw-r--r--  fs/squashfs/file.c                                             2
-rw-r--r--  fs/ubifs/file.c                                                2
-rw-r--r--  fs/ubifs/super.c                                               2
-rw-r--r--  fs/xfs/xfs_aops.c                                              4
-rw-r--r--  fs/xfs/xfs_super.c                                             4
-rw-r--r--  include/linux/backing-dev-defs.h                               2
-rw-r--r--  include/linux/mm.h                                             2
-rw-r--r--  include/linux/mm_types.h                                       2
-rw-r--r--  include/linux/nfs_page.h                                       4
-rw-r--r--  include/linux/nilfs2_fs.h                                      4
-rw-r--r--  include/linux/pagemap.h                                        3
-rw-r--r--  include/linux/sunrpc/svc.h                                     2
-rw-r--r--  include/linux/swap.h                                           2
-rw-r--r--  mm/gup.c                                                       2
-rw-r--r--  mm/memory.c                                                    1
-rw-r--r--  mm/mincore.c                                                   4
-rw-r--r--  mm/swap.c                                                      2
-rw-r--r--  net/sunrpc/xdr.c                                               2
65 files changed, 120 insertions(+), 135 deletions(-)
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0ab957..4006298f6707 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
 which the timestamp reverts to 1970, i.e. moves backwards in time.
 
 Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
 == 4096. At least the latter of these is a bug, but it hasn't been
 decided what the best fix is. For the moment if you have larger pages
 you can just change the #define in mkcramfs.c, so long as you don't
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e1505f17..d9c11d25bf02 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
            default is half of your physical RAM without swap. If you
            oversize your tmpfs instances the machine will deadlock
            since the OOM handler will not be able to free that memory.
-nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
+nr_blocks: The same as size, but in blocks of PAGE_SIZE.
 nr_inodes: The maximum number of inodes for this instance. The default
            is half of the number of your physical RAM pages, or (on a
            machine with highmem) the number of lowmem RAM pages,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b02a7d598258..4164bd6397a2 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -708,9 +708,9 @@ struct address_space_operations {
   from the address space. This generally corresponds to either a
   truncation, punch hole or a complete invalidation of the address
   space (in the latter case 'offset' will always be 0 and 'length'
-  will be PAGE_CACHE_SIZE). Any private data associated with the page
+  will be PAGE_SIZE). Any private data associated with the page
   should be updated to reflect this truncation. If offset is 0 and
-  length is PAGE_CACHE_SIZE, then the private data should be released,
+  length is PAGE_SIZE, then the private data should be released,
   because the page must be able to be completely discarded. This may
   be done by calling the ->releasepage function, but in this case the
   release MUST succeed.
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b96877..6b3e7c6ee096 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -22,7 +22,7 @@
 #include <linux/swap.h>
 #include <linux/unistd.h>
 #include <linux/nodemask.h>	/* for node_online_map */
-#include <linux/pagemap.h>	/* for release_pages and page_cache_release */
+#include <linux/pagemap.h>	/* for release_pages */
 #include <linux/compat.h>
 
 #include <asm/pgalloc.h>
diff --git a/block/bio.c b/block/bio.c
index 168531517694..807d25e466ec 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
  * the BIO and the offending pages and re-dirty the pages in process context.
  *
  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on. It will run one page_cache_release() against each page and will
- * run one bio_put() against the BIO.
+ * here on. It will run one put_page() against each page and will run one
+ * bio_put() against the BIO.
  */
 
 static void bio_dirty_fn(struct work_struct *work);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c227fd4cad75..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1327,8 +1327,8 @@ struct bm_extent {
 #endif
 #endif
 
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
  * Since we may live in a mixed-platform cluster,
  * we limit us to a platform agnostic constant here for now.
  * A followup commit may allow even bigger BIO sizes,
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 08f193c341c5..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -514,7 +514,7 @@ typedef struct {
 	/**
 	 * Starting offset of the fragment within the page. Note that the
 	 * end of the fragment must not pass the end of the page; i.e.,
-	 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+	 * kiov_len + kiov_offset <= PAGE_SIZE.
 	 */
 	unsigned int kiov_offset;
 } lnet_kiov_t;
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 5321ddec9580..e689ca1846e1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -390,8 +390,8 @@ typedef struct sfw_test_instance {
 	} tsi_u;
 } sfw_test_instance_t;
 
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
 #define SFW_MAX_CONCUR	LST_MAX_CONCUR
 #define SFW_ID_PER_PAGE	(PAGE_SIZE / sizeof(lnet_process_id_packed_t))
 #define SFW_MAX_NDESTS	(LNET_MAX_IOV * SFW_ID_PER_PAGE)
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index b5088b13a305..242bb1ef6245 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1118,7 +1118,7 @@ struct lu_context_key {
 	{						\
 		type *value;				\
 							\
-		CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value));	\
+		CLASSERT(PAGE_SIZE >= sizeof (*value));		\
 							\
 		value = kzalloc(sizeof(*value), GFP_NOFS);	\
 		if (!value)				\
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 1e2ebe5cc998..5aae1d06a5fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -1022,10 +1022,10 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
  * MDS_READPAGE page size
  *
  * This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
  * access the struct lu_dirpage header packed at the beginning of
  * the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
  */
 #define LU_PAGE_SHIFT 12
 #define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index a5e9095fbf36..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -112,8 +112,8 @@
 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
 #  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
 # endif
 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 #  error "PTLRPC_MAX_BRW_SIZE too big"
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f4167db65b5d..4264d97650ec 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -272,7 +272,7 @@ struct client_obd {
 	int cl_grant_shrink_interval; /* seconds */
 
 	/* A chunk is an optimal size used by osc_extent to determine
-	 * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
+	 * the extent size. A chunk is max(PAGE_SIZE, OST block size)
 	 */
 	int cl_chunkbits;
 	int cl_chunk;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index a7c02e07fd75..e4c82883e580 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -134,9 +134,8 @@
  * a header lu_dirpage which describes the start/end hash, and whether this
  * page is empty (contains no dir entry) or hash collide with next page.
  * After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
  *
  */
 
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 4c7250ab54e6..edab6c5b7e50 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -521,7 +521,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
  * striped over, rather than having a constant value for all files here.
  */
 
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
  * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
  * by default, this should be adjusted corresponding with max_read_ahead_mb
  * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 75d4df71cab8..85a835976174 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -512,7 +512,7 @@ static int vvp_io_read_start(const struct lu_env *env,
 		vio->cui_ra_window_set = 1;
 		bead->lrr_start = cl_index(obj, pos);
 		/*
-		 * XXX: explicit PAGE_CACHE_SIZE
+		 * XXX: explicit PAGE_SIZE
 		 */
 		bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
 		ll_ra_read_in(file, bead);
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index ce7e8b70dd44..9abb7c2b9231 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
  *   |s|e|f|p|ent| 0 | ... | 0 |
  *   '----------------- -----'
  *
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
  * larger than LU_PAGE_SIZE, a single host page may contain multiple
  * lu_dirpages. After reading the lu_dirpages from the MDS, the
  * ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
  * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
  *   to the first entry of the next lu_dirpage.
  */
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 {
 	int i;
@@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
 }
 #else
 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif	/* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif	/* PAGE_SIZE > LU_PAGE_SIZE */
 
 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 			struct page **pages, struct ptlrpc_request **request)
@@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
 	struct lmv_obd *lmv = &obd->u.lmv;
 	__u64 offset = op_data->op_offset;
 	int rc;
-	int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+	int ncfspgs; /* pages read in PAGE_SIZE */
 	int nlupgs; /* pages read in LU_PAGE_SIZE */
 	struct lmv_tgt_desc *tgt;
 
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 4a2baaff3dc1..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
 #include "../../include/lustre/lustre_idl.h"
 
 #include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
 
 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
 {
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4e0a357ed43d..5f25bf83dcfc 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
  * used, we should return these grants to OST. There're two cases where grants
  * can be lost:
  * 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
  *    written. In this case OST may use less chunks to serve this partial
  *    write. OSTs don't actually know the page size on the client side. so
  *    clients have to calculate lost grant by the blocksize on the OST.
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 866789ede4cb..516e19d1d202 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -3039,13 +3039,13 @@ int btrfsic_mount(struct btrfs_root *root,
 
 	if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
 		printk(KERN_INFO
-		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+		       "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
 		       root->nodesize, PAGE_SIZE);
 		return -1;
 	}
 	if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
 		printk(KERN_INFO
-		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
+		       "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
 		       root->sectorsize, PAGE_SIZE);
 		return -1;
 	}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 93d696d248d9..d247fc0eea19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 			goto done;
 		}
 		/*
-		 * delalloc_end is already one less than the total
-		 * length, so we don't subtract one from
-		 * PAGE_CACHE_SIZE
+		 * delalloc_end is already one less than the total length, so
+		 * we don't subtract one from PAGE_SIZE
 		 */
 		delalloc_to_write += (delalloc_end - delalloc_start +
-				      PAGE_SIZE) >>
-				      PAGE_SHIFT;
+				      PAGE_SIZE) >> PAGE_SHIFT;
 		delalloc_start = delalloc_end + 1;
 	}
 	if (wbc->nr_to_write < delalloc_to_write) {
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b0721..e05619f241be 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
 									\
 	if (token && token->kaddr && token->offset <= offset &&	\
 	    token->eb == eb &&						\
-	   (token->offset + PAGE_CACHE_SIZE >= offset + size)) {	\
+	   (token->offset + PAGE_SIZE >= offset + size)) {		\
 		kaddr = token->kaddr;					\
 		p = kaddr + part_offset - token->offset;		\
 		res = get_unaligned_le##bits(p + off);			\
@@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \
 									\
 	if (token && token->kaddr && token->offset <= offset &&	\
 	    token->eb == eb &&						\
-	   (token->offset + PAGE_CACHE_SIZE >= offset + size)) {	\
+	   (token->offset + PAGE_SIZE >= offset + size)) {		\
 		kaddr = token->kaddr;					\
 		p = kaddr + part_offset - token->offset;		\
 		put_unaligned_le##bits(val, p + off);			\
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index ac3a06d28531..70948b13bc81 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -239,7 +239,7 @@ static int test_find_delalloc(void)
 	end = 0;
 	/*
 	 * Currently if we fail to find dirty pages in the delalloc range we
-	 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
+	 * will adjust max_bytes down to PAGE_SIZE and then re-search. If
 	 * this changes at any point in the future we will need to fix this
 	 * tests expected behavior.
 	 */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d21da9f05bae..f2cc0b3d1af7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
  *
  * Note that this might make for "interesting" allocation problems during
  * writeback however as we have to allocate an array of pointers for the
- * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096.
  *
  * For reads, there is a similar problem as we need to allocate an array
  * of kvecs to handle the receive, though that should only need to be done
@@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
 
 /*
  * The default wsize is 1M. find_get_pages seems to return a maximum of 256
- * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * pages in a single call. With PAGE_SIZE == 4k, this means we can fill
  * a single wsize request with a single call.
  */
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5ce540dc6996..c03d0744648b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
 	 * find_get_pages_tag seems to return a max of 256 on each
 	 * iteration, so we must call it several times in order to
 	 * fill the array or the wsize is effectively limited to
-	 * 256 * PAGE_CACHE_SIZE.
+	 * 256 * PAGE_SIZE.
 	 */
 	*found_pages = 0;
 	pages = wdata->pages;
diff --git a/fs/cramfs/README b/fs/cramfs/README
index 445d1c2d7646..9d4e7ea311f4 100644
--- a/fs/cramfs/README
+++ b/fs/cramfs/README
@@ -86,26 +86,26 @@ Block Size
 
 (Block size in cramfs refers to the size of input data that is
 compressed at a time. It's intended to be somewhere around
-PAGE_CACHE_SIZE for cramfs_readpage's convenience.)
+PAGE_SIZE for cramfs_readpage's convenience.)
 
 The superblock ought to indicate the block size that the fs was
 written for, since comments in <linux/pagemap.h> indicate that
-PAGE_CACHE_SIZE may grow in future (if I interpret the comment
+PAGE_SIZE may grow in future (if I interpret the comment
 correctly).
 
-Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that
-for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in
+Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that
+for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in
 turn is defined as PAGE_SIZE (which can be as large as 32KB on arm).
 This discrepancy is a bug, though it's not clear which should be
 changed.
 
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from
+One option is to change mkcramfs to take its PAGE_SIZE from
 <asm/page.h>. Personally I don't like this option, but it does
 require the least amount of change: just change `#define
-PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
+PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage
 is that the generated cramfs cannot always be shared between different
 kernels, not even necessarily kernels of the same architecture if
-PAGE_CACHE_SIZE is subject to change between kernel versions
+PAGE_SIZE is subject to change between kernel versions
 (currently possible with arm and ia64).
 
 The remaining options try to make cramfs more sharable.
@@ -126,22 +126,22 @@ size. The options are:
   1. Always 4096 bytes.
 
   2. Writer chooses blocksize; kernel adapts but rejects blocksize >
-     PAGE_CACHE_SIZE.
+     PAGE_SIZE.
 
   3. Writer chooses blocksize; kernel adapts even to blocksize >
-     PAGE_CACHE_SIZE.
+     PAGE_SIZE.
 
 It's easy enough to change the kernel to use a smaller value than
-PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks.
+PAGE_SIZE: just make cramfs_readpage read multiple blocks.
 
-The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE
+The cost of option 1 is that kernels with a larger PAGE_SIZE
 value don't get as good compression as they can.
 
 The cost of option 2 relative to option 1 is that the code uses
 variables instead of #define'd constants. The gain is that people
-with kernels having larger PAGE_CACHE_SIZE can make use of that if
+with kernels having larger PAGE_SIZE can make use of that if
 they don't mind their cramfs being inaccessible to kernels with
-smaller PAGE_CACHE_SIZE values.
+smaller PAGE_SIZE values.
 
 Option 3 is easy to implement if we don't mind being CPU-inefficient:
 e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 2096654dd26d..3a32ddf98095 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
 	 * page cache and dentry tree anyway..
 	 *
 	 * This also acts as a way to guarantee contiguous areas of up to
-	 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
+	 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 	 * worry about end-of-buffer issues even when decompressing a full
 	 * page cache.
 	 */
diff --git a/fs/dax.c b/fs/dax.c
index 1f954166704a..75ba46d82a76 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
  * you are truncating a file, the helper function dax_truncate_page() may be
  * more convenient.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks. Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
  * Similar to block_truncate_page(), this function can be called by a
  * filesystem when it is truncating a DAX file to handle the partial page.
  *
- * We work in terms of PAGE_CACHE_SIZE here for commonality with
+ * We work in terms of PAGE_SIZE here for commonality with
  * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
  * took care of disposing of the unnecessary blocks. Even if the filesystem
  * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 2a988f3a0cdb..224b49e71aa4 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -763,8 +763,8 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
 	} else { /* ia->ia_size < i_size_read(inode) */
 		/* We're chopping off all the pages down to the page
 		 * in which ia->ia_size is located. Fill in the end of
-		 * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
-		 * PAGE_CACHE_SIZE with zeros. */
+		 * that page from (ia->ia_size & ~PAGE_MASK) to
+		 * PAGE_SIZE with zeros. */
 		size_t num_zeros = (PAGE_SIZE
 				    - (ia->ia_size & ~PAGE_MASK));
 
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 1b45694de316..7ff6fcfa685d 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c04743519865..7ccba1aa142d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1961,7 +1961,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len == EXT4_MAX_REC_LEN || len == 0)
 		return blocksize;
 	return (len & 65532) | ((len & 3) << 16);
@@ -1974,7 +1974,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
 {
 	if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
 		BUG();
-#if (PAGE_CACHE_SIZE >= 65536)
+#if (PAGE_SIZE >= 65536)
 	if (len < 65536)
 		return cpu_to_le16(len);
 	if (len == blocksize) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8a43c683eef9..4f7043ba4447 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4894,7 +4894,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
 	offset = inode->i_size & (PAGE_SIZE - 1);
 	/*
 	 * All buffers in the last page remain valid? Then there's nothing to
-	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
+	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
 	 * blocksize case
 	 */
 	if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c12174711ce2..eeeade76012e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
  *
  *
  * one block each for bitmap and buddy information. So for each group we
- * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
+ * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
  * blocksize) blocks. So it can have information regarding groups_per_page
  * which is blocks_per_page/2
  *
@@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
 	 *
 	 * one block each for bitmap and buddy information.
 	 * So for each group we take up 2 blocks. A page can
-	 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
+	 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 	 * So it can have information regarding groups_per_page which
 	 * is blocks_per_page/2
 	 *
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index ea27aa1a778c..f24e7299e1c8 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -23,7 +23,7 @@
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  */
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index afb7c7f05de5..4ea71eba40a5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset,
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
  * data. Its *very* similar to do_generic_mapping_read(), we can't use that
- * since it has PAGE_CACHE_SIZE assumptions.
+ * since it has PAGE_SIZE assumptions.
  */
 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
diff --git a/fs/mpage.c b/fs/mpage.c
index e7083bfbabb3..eedc644b78d7 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -331,7 +331,7 @@ confused:
  *
  * then this code just gives up and calls the buffer_head-based read function.
  * It does handle a page which has holes at the end - that is a common case:
- * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
+ * the end-of-file on blocksize < PAGE_SIZE setups.
  *
  * BH_Boundary explanation:
  *
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index a474e7ef92ea..97768a1379f2 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 			// in the inode.
 			// Again, for each page do:
 			//	__set_page_dirty_buffers();
-			// page_cache_release()
+			// put_page()
 			// We don't need to wait on the writes.
 			// Update iblock.
 		}
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 37cd7e45dcbc..820d6eabf60f 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page)
  * @index:	index into the page cache for @mapping of the page to map
  *
  * Read a page from the page cache of the address space @mapping at position
- * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes.
+ * @index, where @index is in units of PAGE_SIZE, and not in bytes.
  *
  * If the page is not in memory it is loaded from disk first using the readpage
  * method defined in the address space operations of @mapping and the page is
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index b6074a56661b..f2b5e746f49b 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -105,10 +105,6 @@ static void zero_partial_compressed_page(struct page *page,
 
 	ntfs_debug("Zeroing page region outside initialized size.");
 	if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
-		/*
-		 * FIXME: Using clear_page() will become wrong when we get
-		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
-		 */
 		clear_page(kp);
 		return;
 	}
@@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page,
  * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
  * completed during the decompression of the compression block (@cb_start).
  *
- * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
+ * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up
  * unpredicatbly! You have been warned!
  *
  * Note to hackers: This function may not sleep until it has finished accessing
@@ -462,7 +458,7 @@ return_overflow:
  * have been written to so that we would lose data if we were to just overwrite
  * them with the out-of-date uncompressed data.
  *
- * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
+ * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at
  * the end of the file I think. We need to detect this case and zero the out
  * of bounds remainder of the page in question and mark it as handled. At the
  * moment we would just return -EIO on such a page. This bug will only become
@@ -470,7 +466,7 @@ return_overflow:
  * clusters so is probably not going to be seen by anyone. Still this should
  * be fixed. (AIA)
  *
- * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
+ * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in
  * handling sparse and compressed cbs. (AIA)
  *
  * FIXME: At the moment we don't do any zeroing out in the case that
@@ -497,12 +493,12 @@ int ntfs_read_compressed_block(struct page *page)
 	u64 cb_size_mask = cb_size - 1UL;
 	VCN vcn;
 	LCN lcn;
-	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
+	/* The first wanted vcn (minimum alignment is PAGE_SIZE). */
 	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
 			vol->cluster_size_bits;
 	/*
 	 * The first vcn after the last wanted vcn (minimum alignment is again
-	 * PAGE_CACHE_SIZE.
+	 * PAGE_SIZE.
 	 */
 	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
 			& ~cb_size_mask) >> vol->cluster_size_bits;
@@ -753,11 +749,6 @@ lock_retry_remap:
 		for (; cur_page < cb_max_page; cur_page++) {
 			page = pages[cur_page];
 			if (page) {
-				/*
-				 * FIXME: Using clear_page() will become wrong
-				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
-				 * for now there is no problem.
-				 */
 				if (likely(!cur_ofs))
 					clear_page(page_address(page));
 				else
@@ -807,7 +798,7 @@ lock_retry_remap:
 	 * synchronous io for the majority of pages.
 	 * Or if we choose not to do the read-ahead/-behind stuff, we
 	 * could just return block_read_full_page(pages[xpage]) as long
-	 * as PAGE_CACHE_SIZE <= cb_size.
+	 * as PAGE_SIZE <= cb_size.
 	 */
 	if (cb_max_ofs)
 		cb_max_page--;
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 3cdce162592d..a18613579001 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -315,7 +315,7 @@ found_it:
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
@@ -793,11 +793,11 @@ found_it:
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
-			dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
+			dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT);
 	if (IS_ERR(page)) {
 		ntfs_error(sb, "Failed to map directory index page, error %ld.",
 				-PTR_ERR(page));
@@ -809,9 +809,9 @@ descend_into_child_node:
 fast_descend_into_child_node:
 	/* Get to the index allocation block. */
 	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
-			dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
+			dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK));
 	/* Bounds checks. */
-	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
+	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
 				"inode 0x%lx or driver bug.", dir_ni->mft_no);
 		goto unm_err_out;
@@ -844,7 +844,7 @@ fast_descend_into_child_node:
 		goto unm_err_out;
 	}
 	index_end = (u8*)ia + dir_ni->itype.index.block_size;
-	if (index_end > kaddr + PAGE_CACHE_SIZE) {
+	if (index_end > kaddr + PAGE_SIZE) {
 		ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode "
 				"0x%lx crosses page boundary. Impossible! "
 				"Cannot access! This is probably a bug in the "
@@ -968,9 +968,9 @@ found_it2:
 			/* If vcn is in the same page cache page as old_vcn we
 			 * recycle the mapped page. */
 			if (old_vcn << vol->cluster_size_bits >>
-					PAGE_CACHE_SHIFT == vcn <<
+					PAGE_SHIFT == vcn <<
 					vol->cluster_size_bits >>
-					PAGE_CACHE_SHIFT)
+					PAGE_SHIFT)
 				goto fast_descend_into_child_node;
 			unlock_page(page);
 			ntfs_unmap_page(page);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2dae60857544..91117ada8528 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
  * only partially being written to.
  *
  * If @nr_pages is greater than one, we are guaranteed that the cluster size is
- * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
+ * greater than PAGE_SIZE, that all pages in @pages are entirely inside
  * the same cluster and that they are the entirety of that cluster, and that
  * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
  *
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 02a83a46ead2..0d645f357930 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -272,7 +272,7 @@ done:
 descend_into_child_node:
 	/*
 	 * Convert vcn to index into the index allocation attribute in units
-	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
+	 * of PAGE_SIZE and map the page cache page, reading it from
 	 * disk if necessary.
 	 */
 	page = ntfs_map_page(ia_mapping, vcn <<
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 3eda6d4bcc65..f40972d6df90 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -870,7 +870,7 @@ skip_attr_list_load:
 	}
 	if (ni->itype.index.block_size > PAGE_SIZE) {
 		ntfs_error(vi->i_sb, "Index block size (%u) > "
-				"PAGE_CACHE_SIZE (%ld) is not "
+				"PAGE_SIZE (%ld) is not "
 				"supported. Sorry.",
 				ni->itype.index.block_size,
 				PAGE_SIZE);
@@ -1586,7 +1586,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
 		goto unm_err_out;
 	}
 	if (ni->itype.index.block_size > PAGE_SIZE) {
-		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE "
+		ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE "
 				"(%ld) is not supported. Sorry.",
 				ni->itype.index.block_size, PAGE_SIZE);
 		err = -EOPNOTSUPP;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index ab2b0930054e..ecb49870a680 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -823,12 +823,12 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b)
 	ntfs_debug("vol->mft_record_size_bits = %i (0x%x)",
 			vol->mft_record_size_bits, vol->mft_record_size_bits);
 	/*
-	 * We cannot support mft record sizes above the PAGE_CACHE_SIZE since
+	 * We cannot support mft record sizes above the PAGE_SIZE since
 	 * we store $MFT/$DATA, the table of mft records in the page cache.
 	 */
 	if (vol->mft_record_size > PAGE_SIZE) {
 		ntfs_error(vol->sb, "Mft record size (%i) exceeds the "
-				"PAGE_CACHE_SIZE on your system (%lu). "
+				"PAGE_SIZE on your system (%lu). "
 				"This is not supported. Sorry.",
 				vol->mft_record_size, PAGE_SIZE);
 		return false;
@@ -2471,12 +2471,12 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	down_read(&vol->lcnbmp_lock);
 	/*
 	 * Convert the number of bits into bytes rounded up, then convert into
-	 * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one
+	 * multiples of PAGE_SIZE, rounding up so that if we have one
 	 * full and one partial page max_index = 2.
 	 */
 	max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >>
 			PAGE_SHIFT;
-	/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+	/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
 			max_index, PAGE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
@@ -2547,7 +2547,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 	pgoff_t index;
 
 	ntfs_debug("Entering.");
-	/* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */
+	/* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%lx.", max_index, PAGE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
@@ -2639,7 +2639,7 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
 	size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits;
 	/*
 	 * Convert the maximum number of set bits into bytes rounded up, then
-	 * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we
+	 * convert into multiples of PAGE_SIZE, rounding up so that if we
 	 * have one full and one partial page max_index = 2.
 	 */
 	max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits)
@@ -2765,7 +2765,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
 	if (!parse_options(vol, (char*)opt))
 		goto err_out_now;
 
-	/* We support sector sizes up to the PAGE_CACHE_SIZE. */
+	/* We support sector sizes up to the PAGE_SIZE. */
 	if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) {
 		if (!silent)
 			ntfs_error(sb, "Device has unsupported sector size "
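The rounding used by get_nr_free_clusters() and ntfs_statfs() above is the usual two-step round-up: bits to bytes, then bytes to PAGE_SIZE pages. A user-space sketch with an assumed 4 KiB page and assumed cluster counts:

/*
 * Sketch of the max_index rounding: the bitmap has one bit per
 * cluster; round bits up to bytes, then bytes up to whole pages.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long nr_clusters = 32000;	/* assumed volume size */
	unsigned long bytes = (nr_clusters + 7) >> 3;
	unsigned long max_index = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* 32000 bits -> 4000 bytes -> one partial page: max_index = 1. */
	printf("bytes = %lu, max_index = %lu\n", bytes, max_index);

	/* One full and one partial page gives max_index = 2. */
	nr_clusters = 4096UL * 8 + 8;
	bytes = (nr_clusters + 7) >> 3;
	max_index = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
	printf("bytes = %lu, max_index = %lu\n", bytes, max_index);
	return 0;
}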
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index ce5dc4f92935..ad1577348a92 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -684,7 +684,7 @@ next_bh:
 	return ret;
 }
 
-#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
+#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
 #define OCFS2_MAX_CTXT_PAGES	1
 #else
 #define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
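The ocfs2 macro above sizes a write context: one page when a single page covers the largest possible cluster, otherwise enough pages to span one cluster. A standalone sketch of the same conditional; both the PAGE_SIZE and the OCFS2_MAX_CLUSTERSIZE values here are assumptions for illustration:

/*
 * Sketch of the pages-per-cluster selection with assumed sizes.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL		/* assumed */
#define OCFS2_MAX_CLUSTERSIZE	(1024UL * 1024)	/* assumed: 1 MiB */

#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES	1
#else
#define OCFS2_MAX_CTXT_PAGES	(OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
#endif

int main(void)
{
	/* 1 MiB / 4 KiB = 256 pages per write context here. */
	printf("pages per write context: %lu\n",
	       (unsigned long)OCFS2_MAX_CTXT_PAGES);
	return 0;
}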
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 881242c3a8d2..744d5d90c363 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2956,7 +2956,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 		}
 
 		/*
-		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+		 * In case PAGE_SIZE <= CLUSTER_SIZE, This page
 		 * can't be dirtied before we CoW it out.
 		 */
 		if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 8c6487238bb3..2ace90e981f0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -599,7 +599,7 @@ static int journal_list_still_alive(struct super_block *s,
  * This does a check to see if the buffer belongs to one of these
  * lost pages before doing the final put_bh. If page->mapping was
  * null, it tries to free buffers on the page, which should make the
- * final page_cache_release drop the page from the lru.
+ * final put_page drop the page from the lru.
  */
 static void release_buffer_page(struct buffer_head *bh)
 {
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 27e501af0e8b..23813c078cc9 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -30,7 +30,7 @@
  * access the metadata and fragment caches.
  *
  * To avoid out of memory and fragmentation issues with vmalloc the cache
- * uses sequences of kmalloced PAGE_CACHE_SIZE buffers.
+ * uses sequences of kmalloced PAGE_SIZE buffers.
  *
  * It should be noted that the cache is not used for file datablocks, these
  * are decompressed and cached in the page-cache in the normal way. The
@@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache)
 /*
  * Initialise cache allocating the specified number of entries, each of
  * size block_size. To avoid vmalloc fragmentation issues each entry
- * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers.
+ * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
  */
 struct squashfs_cache *squashfs_cache_init(char *name, int entries,
 	int block_size)
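The strategy both comments describe is to back each block_size entry with an array of page-sized buffers rather than one large contiguous allocation. A user-space sketch, with malloc standing in for kmalloc and an assumed 4 KiB page:

/*
 * Sketch: allocate block_size as a sequence of page-sized chunks,
 * unwinding cleanly on failure.
 */
#include <stdlib.h>

#define PAGE_SIZE 4096	/* assumed */

static void **alloc_entry(int block_size, int *nr_pages)
{
	int i, n = (block_size + PAGE_SIZE - 1) / PAGE_SIZE;
	void **data = calloc(n, sizeof(void *));

	if (!data)
		return NULL;
	for (i = 0; i < n; i++) {
		data[i] = malloc(PAGE_SIZE);
		if (!data[i]) {
			while (i--)
				free(data[i]);
			free(data);
			return NULL;
		}
	}
	*nr_pages = n;
	return data;
}

int main(void)
{
	int n;
	void **entry = alloc_entry(128 * 1024, &n);	/* default block size */

	if (entry) {
		while (n--)
			free(entry[n]);
		free(entry);
	}
	return 0;
}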
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 437de9e89221..13d80947bf9e 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -382,7 +382,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
 
 	/*
 	 * Loop copying datablock into pages. As the datablock likely covers
-	 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
+	 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
 	 * grab the pages from the page cache, except for the page that we've
 	 * been called to fill.
 	 */
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 1a9c6640e604..446753d8ac34 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -554,7 +554,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 		 * VFS copied less data to the page that it intended and
 		 * declared in its '->write_begin()' call via the @len
 		 * argument. If the page was not up-to-date, and @len was
-		 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
+		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
 		 * not load it from the media (for optimization reasons). This
 		 * means that part of the page contains garbage. So read the
 		 * page now.
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 20daea9aa657..e98c24ee25a1 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2237,7 +2237,7 @@ static int __init ubifs_init(void)
 	BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
 
 	/*
-	 * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+	 * We require that PAGE_SIZE is greater-than-or-equal-to
 	 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
 	 */
 	if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 5b8ae03ba855..e49b2406d15d 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1563,7 +1563,7 @@ xfs_vm_write_begin(
 	int			status;
 	struct xfs_mount	*mp = XFS_I(mapping->host)->i_mount;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
@@ -1620,7 +1620,7 @@ xfs_vm_write_end(
 {
 	int			ret;
 
-	ASSERT(len <= PAGE_CACHE_SIZE);
+	ASSERT(len <= PAGE_SIZE);
 
 	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 	if (unlikely(ret < len)) {
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 6082f54e0557..187e14b696c2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -556,10 +556,10 @@ xfs_max_file_offset(
 	/* Figure out maximum filesize, on Linux this can depend on
 	 * the filesystem blocksize (on 32 bit platforms).
 	 * __block_write_begin does this in an [unsigned] long...
-	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
+	 *      page->index << (PAGE_SHIFT - bbits)
 	 * So, for page sized blocks (4K on 32 bit platforms),
 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
-	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
 	 * Note1: get_block_t takes a long (implicit cast from above)
 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
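The limit that comment derives can be checked directly: with 4 KiB pages and a 32-bit long, ((u64)PAGE_SIZE << (BITS_PER_LONG-1)) - 1 is 2^43 - 1 bytes, just under 8 TiB. A sketch, with both constants assumed for a 32-bit platform:

/*
 * Verify the 8Tb figure from the xfs_max_file_offset() comment.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL	/* assumed */
#define BITS_PER_LONG	32	/* assumed: 32-bit platform */

int main(void)
{
	uint64_t max = (PAGE_SIZE << (BITS_PER_LONG - 1)) - 1;

	/* 4096 << 31 = 2^43, so this prints 8 TiB less one byte. */
	printf("max file size: %llu bytes (%llu GiB)\n",
	       (unsigned long long)max,
	       (unsigned long long)(max >> 30));
	return 0;
}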
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 1b4d69f68c33..3f103076d0bf 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -135,7 +135,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
 	struct list_head bdi_list;
-	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
+	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned int capabilities; /* Device capabilities */
 	congested_fn *congested_fn; /* Function pointer if device is md/dm */
 	void *congested_data;	/* Pointer to aux data for congested func */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ed6407d1b7b5..ffcff53e3b2b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
  *
  * A page may belong to an inode's memory mapping. In this case, page->mapping
  * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_CACHE_SIZE.
+ * in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 944b2b37313b..c2d75b4fa86c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct vm_area_struct {
 
 	/* Information about our backing store: */
 	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
-					   units, *not* PAGE_CACHE_SIZE */
+					   units */
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
 
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index efada239205e..957049f72290 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -41,8 +41,8 @@ struct nfs_page {
 	struct page		*wb_page;	/* page to read in/write out */
 	struct nfs_open_context	*wb_context;	/* File state context info */
 	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
-	pgoff_t			wb_index;	/* Offset >> PAGE_CACHE_SHIFT */
-	unsigned int		wb_offset,	/* Offset & ~PAGE_CACHE_MASK */
+	pgoff_t			wb_index;	/* Offset >> PAGE_SHIFT */
+	unsigned int		wb_offset,	/* Offset & ~PAGE_MASK */
 					wb_pgbase,	/* Start of page data */
 					wb_bytes;	/* Length of request */
 	struct kref		wb_kref;	/* reference count */
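wb_index and wb_offset above are the standard split of a file offset into a page index and an offset within that page. A sketch with an assumed 4 KiB page:

/*
 * Split a file offset into (page index, offset-in-page).
 */
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset = 10000;	/* assumed file offset */
	unsigned long wb_index = offset >> PAGE_SHIFT;	/* page 2 */
	unsigned long wb_offset = offset & ~PAGE_MASK;	/* byte 1808 */

	printf("index %lu, offset-in-page %lu\n", wb_index, wb_offset);
	return 0;
}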
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 9abb763e4b86..e9fcf90b270d 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == NILFS_MAX_REC_LEN)
 		return 1 << 16;
 #endif
@@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
 
 static inline __le16 nilfs_rec_len_to_disk(unsigned len)
 {
-#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(NILFS_MAX_REC_LEN);
 	else if (len > (1 << 16))
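The two helpers above work around a 16-bit on-disk field that cannot represent 65536: when pages (and hence directory blocks) can be that large, a sentinel stands in for it. A simplified user-space sketch that skips the le16 conversion; the sentinel value here is an assumption, not read from nilfs2_fs.h:

/*
 * Round-trip a record length through a 16-bit field, using a
 * sentinel for the one value (1 << 16) that does not fit.
 */
#include <assert.h>
#include <stdint.h>

#define NILFS_MAX_REC_LEN	((1 << 16) - 1)	/* assumed sentinel */

static uint16_t rec_len_to_disk(unsigned len)
{
	if (len == (1 << 16))
		return NILFS_MAX_REC_LEN;
	assert(len < (1 << 16));
	return (uint16_t)len;
}

static unsigned rec_len_from_disk(uint16_t dlen)
{
	if (dlen == NILFS_MAX_REC_LEN)
		return 1 << 16;
	return dlen;
}

int main(void)
{
	assert(rec_len_from_disk(rec_len_to_disk(1 << 16)) == 1 << 16);
	assert(rec_len_from_disk(rec_len_to_disk(512)) == 512);
	return 0;
}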
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index f396ccb900cc..b3fc0370c14f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -535,8 +535,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
 /*
  * Fault a userspace page into pagetables. Return non-zero on a fault.
  *
- * This assumes that two userspace pages are always sufficient. That's
- * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ * This assumes that two userspace pages are always sufficient.
  */
 static inline int fault_in_pages_writeable(char __user *uaddr, int size)
 {
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb82..7ca44fb5b675 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv)
  *
  * These happen to all be powers of 2, which is not strictly
  * necessary but helps enforce the real limitation, which is
- * that they should be multiples of PAGE_CACHE_SIZE.
+ * that they should be multiples of PAGE_SIZE.
  *
  * For UDP transports, a block plus NFS,RPC, and UDP headers
  * has to fit into the IP datagram limit of 64K. The largest
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3d980ea1c946..2b83359c19ca 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -433,7 +433,7 @@ struct backing_dev_info;
 #define si_swapinfo(val) \
 	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
 /* only sparc can not include linux/pagemap.h in this file
- * so leave page_cache_release and release_pages undeclared... */
+ * so leave put_page and release_pages undeclared... */
 #define free_page_and_swap_cache(page) \
 	put_page(page)
 #define free_pages_and_swap_cache(pages, nr) \
diff --git a/mm/gup.c b/mm/gup.c
index 7f1c4fb77cfa..fb87aea9edc8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1107,7 +1107,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
  * @addr: user address
  *
  * Returns struct page pointer of user page pinned for dump,
- * to be freed afterwards by page_cache_release() or put_page().
+ * to be freed afterwards by put_page().
  *
  * Returns NULL on any kind of failure - a hole must then be inserted into
  * the corefile, to preserve alignment with its headers; and also returns
diff --git a/mm/memory.c b/mm/memory.c
index 07a420488cda..93897f23cc11 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2400,7 +2400,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 
 	vba = vma->vm_pgoff;
 	vea = vba + vma_pages(vma) - 1;
-	/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 	zba = details->first_index;
 	if (zba < vba)
 		zba = vba;
diff --git a/mm/mincore.c b/mm/mincore.c
index 0eed23d50adb..c0b5ba965200 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -211,7 +211,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
  * return values:
  *  zero    - success
  *  -EFAULT - vec points to an illegal address
- *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE
+ *  -EINVAL - addr is not a multiple of PAGE_SIZE
  *  -ENOMEM - Addresses in the range [addr, addr + len] are
  *		invalid for the address space of this process, or
  *		specify one or more pages which are not currently
@@ -233,7 +233,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
 	if (!access_ok(VERIFY_READ, (void __user *) start, len))
 		return -ENOMEM;
 
-	/* This also avoids any overflows on PAGE_CACHE_ALIGN */
+	/* This also avoids any overflows on PAGE_ALIGN */
 	pages = len >> PAGE_SHIFT;
 	pages += (offset_in_page(len)) != 0;
 
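The page count computed above is the number of whole pages in len, plus one more if len ends partway into a page. A sketch with offset_in_page() modeled against an assumed 4 KiB page:

/*
 * Count the pages spanned by a length: full pages plus any tail.
 */
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned long offset_in_page(unsigned long v)
{
	return v & (PAGE_SIZE - 1);
}

int main(void)
{
	unsigned long len = 3 * PAGE_SIZE + 100;	/* assumed length */
	unsigned long pages = len >> PAGE_SHIFT;

	pages += offset_in_page(len) != 0;
	printf("pages = %lu\n", pages);	/* 4: three full + one partial */
	return 0;
}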
diff --git a/mm/swap.c b/mm/swap.c
index ea641e247033..a0bc206b4ac6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -698,7 +698,7 @@ void lru_add_drain_all(void)
 }
 
 /**
- * release_pages - batched page_cache_release()
+ * release_pages - batched put_page()
  * @pages: array of pages to release
  * @nr: number of pages
  * @cold: whether the pages are cache cold
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 1e6aba0501a1..6bdb3865212d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
  * Note: the addresses pgto_base and pgfrom_base are both calculated in
  *       the same way:
  *	if a memory area starts at byte 'base' in page 'pages[i]',
- *	then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ *	then its address is given as (i << PAGE_SHIFT) + base
  * Also note: pgfrom_base must be < pgto_base, but the memory areas
  * they point to may overlap.
  */
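The addressing scheme in that comment linearizes a position in a page array: byte 'base' of pages[i] sits at (i << PAGE_SHIFT) + base, and the inverse split recovers the pair. A sketch with an assumed 4 KiB page:

/*
 * Linearize (page, offset) and split it back.
 */
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long i = 3, base = 123;	/* assumed position */
	unsigned long addr = (i << PAGE_SHIFT) + base;

	printf("addr = %lu -> page %lu, offset %lu\n",
	       addr, addr >> PAGE_SHIFT, addr & (PAGE_SIZE - 1));
	return 0;
}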