author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2016-04-01 08:29:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>          2016-04-04 13:41:08 -0400
commit     09cbfeaf1a5a67bfb3201e0c83c810cecb2efa5a (patch)
tree       6cdf210c9c0f981cd22544feeba701892ec19464 /fs/dlm
parent     c05c2ec96bb8b7310da1055c7b9d786a3ec6dc0c (diff)
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time ago
with the promise that one day it would be possible to implement the page
cache with bigger chunks than PAGE_SIZE. This promise never materialized,
and it is unlikely it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it is a constant source of confusion whether a PAGE_CACHE_*
or a PAGE_* constant should be used in a particular case, especially on the
border between fs and mm. Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE
would cause too much breakage to be doable.

Let's stop pretending that pages in the page cache are special. They are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
 - page_cache_get() -> get_page();
 - page_cache_release() -> put_page();

This patch contains automated changes generated with Coccinelle using the
script below. For some reason, Coccinelle doesn't patch header files; I ran
spatch on them manually. The only adjustment after Coccinelle is a revert of
the changes to the PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that Coccinelle didn't reach; I'll fix
them manually in a separate patch. Comments and documentation will also be
addressed in a separate patch.

    virtual patch

    @@
    expression E;
    @@
    - E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
    + E

    @@
    expression E;
    @@
    - E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
    + E

    @@
    @@
    - PAGE_CACHE_SHIFT
    + PAGE_SHIFT

    @@
    @@
    - PAGE_CACHE_SIZE
    + PAGE_SIZE

    @@
    @@
    - PAGE_CACHE_MASK
    + PAGE_MASK

    @@
    expression E;
    @@
    - PAGE_CACHE_ALIGN(E)
    + PAGE_ALIGN(E)

    @@
    expression E;
    @@
    - page_cache_get(E)
    + get_page(E)

    @@
    expression E;
    @@
    - page_cache_release(E)
    + put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
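[Editorial note, not part of the commit message] The macros being removed were
already thin aliases in include/linux/pagemap.h, which is why every conversion
above is a pure identity. The sketch below is an abridged recollection of those
aliases, not an exact quote of the header:

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

    #define page_cache_get(page)            get_page(page)
    #define page_cache_release(page)        put_page(page)

    /* Since PAGE_CACHE_SHIFT == PAGE_SHIFT, the shift expressions collapse:
     *   index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)  ==  index << 0  ==  index
     */

With the semantic patch above saved as, say, pagecache.cocci (a hypothetical
file name), it could be applied tree-wide roughly as
"spatch --sp-file pagecache.cocci --in-place --dir <tree>", plus the manual
runs on header files mentioned in the message.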
Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/lowcomms.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 00640e70ed7a..1ab012a27d9f 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con)
 		con->rx_page = alloc_page(GFP_ATOMIC);
 		if (con->rx_page == NULL)
 			goto out_resched;
-		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+		cbuf_init(&con->cb, PAGE_SIZE);
 	}

 	/*
@@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con)
 	 * buffer and the start of the currently used section (cb.base)
 	 */
 	if (cbuf_data(&con->cb) >= con->cb.base) {
-		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
 		iov[1].iov_len = con->cb.base;
 		iov[1].iov_base = page_address(con->rx_page);
 		nvec = 2;
@@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con)
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
-					  PAGE_CACHE_SIZE);
+					  PAGE_SIZE);
 	if (ret == -EBADMSG) {
 		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
 			  page_address(con->rx_page), con->cb.base,
@@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
 	spin_lock(&con->writequeue_lock);
 	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
 	if ((&e->list == &con->writequeue) ||
-	    (PAGE_CACHE_SIZE - e->end < len)) {
+	    (PAGE_SIZE - e->end < len)) {
 		e = NULL;
 	} else {
 		offset = e->end;
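[Editorial note] In the hunks above, con->rx_page is a single page used as a
circular receive buffer, so PAGE_SIZE (formerly PAGE_CACHE_SIZE) is simply the
size that offsets wrap around. The stand-alone sketch below illustrates that
wrap-around bookkeeping; the struct and helper names mirror the cbuf helpers in
fs/dlm/lowcomms.c, but the definitions here are an approximation for
illustration, not a copy of the file:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Approximation of the cbuf state used by dlm lowcomms: queued data
     * occupies [base, base + len) modulo the buffer size; mask is size - 1
     * so offsets wrap inside one page-sized buffer. */
    struct cbuf {
    	unsigned int base;
    	unsigned int len;
    	unsigned int mask;
    };

    static void cbuf_init(struct cbuf *cb, unsigned int size)
    {
    	cb->base = 0;
    	cb->len = 0;
    	cb->mask = size - 1;
    }

    /* Offset of the first free byte, wrapped to the buffer size. */
    static unsigned int cbuf_data(const struct cbuf *cb)
    {
    	return (cb->base + cb->len) & cb->mask;
    }

    int main(void)
    {
    	struct cbuf cb;
    	unsigned int data;

    	cbuf_init(&cb, PAGE_SIZE);
    	cb.base = 3000;	/* queued data starts here ...          */
    	cb.len = 500;	/* ... and ends at offset 3500          */

    	data = cbuf_data(&cb);
    	if (data >= cb.base) {
    		/* Free space wraps past the end of the page, so the receive
    		 * path needs two iovecs: PAGE_SIZE - data bytes at the tail,
    		 * then cb.base bytes from the start of the page. */
    		printf("iov[0]: %u bytes, iov[1]: %u bytes\n",
    		       PAGE_SIZE - data, cb.base);
    	}
    	return 0;
    }

With base 3000 and len 500 the free region runs from offset 3500 to the end of
the page and then from 0 to 3000, which is exactly the two-iovec case handled
in the second hunk, and why the buffer size there must be the plain PAGE_SIZE.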