aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2008-02-05 01:28:29 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:13 -0500
commiteebd2aa355692afaf9906f62118620f1a1c19dbb (patch)
tree207eead3a736963c3e50942038c463f2f611ccce /include
parentb98348bdd08dc4ec11828aa98a78edde15c53cfa (diff)
Pagecache zeroing: zero_user_segment, zero_user_segments and zero_user
Simplify page cache zeroing of segments of pages through 3 functions zero_user_segments(page, start1, end1, start2, end2) Zeros two segments of the page. It takes the positions where to start and end the zeroing, which avoids length calculations and makes code clearer. zero_user_segment(page, start, end) Same for a single segment. zero_user(page, start, length) Length variant for the case where we know the length. We remove the zero_user_page macro. Issues: 1. It's a macro. Inline functions are preferable. 2. The KM_USER0 macro is only defined for HIGHMEM. Having to treat this special case everywhere makes the code needlessly complex. The parameter for zeroing is always KM_USER0 except in one single case that we open code. Avoiding KM_USER0 means a lot of code no longer has to deal with the special casing for HIGHMEM. Dealing with kmap is only necessary for HIGHMEM configurations. In those configurations we use KM_USER0 like we do for a series of other functions defined in highmem.h. Since KM_USER0 depends on HIGHMEM, the existing zero_user_page could not be an inline function. The zero_user_* functions introduced here can be inline because that constant is not used when these functions are called. Also extract the flushing of the caches to be outside of the kmap. [akpm@linux-foundation.org: fix nfs and ntfs build] [akpm@linux-foundation.org: fix ntfs build some more] Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Steven French <sfrench@us.ibm.com> Cc: Michael Halcrow <mhalcrow@us.ibm.com> Cc: <linux-ext4@vger.kernel.org> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Trond Myklebust <trond.myklebust@fys.uio.no> Cc: "J. 
Bruce Fields" <bfields@fieldses.org> Cc: Anton Altaparmakov <aia21@cantab.net> Cc: Mark Fasheh <mark.fasheh@oracle.com> Cc: David Chinner <dgc@sgi.com> Cc: Michael Halcrow <mhalcrow@us.ibm.com> Cc: Steven French <sfrench@us.ibm.com> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Trond Myklebust <trond.myklebust@fys.uio.no> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
-rw-r--r--include/linux/highmem.h48
1 files changed, 30 insertions, 18 deletions
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1fcb0033179e..61a5e5eb27f0 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -124,28 +124,40 @@ static inline void clear_highpage(struct page *page)
124 kunmap_atomic(kaddr, KM_USER0); 124 kunmap_atomic(kaddr, KM_USER0);
125} 125}
126 126
127/* 127static inline void zero_user_segments(struct page *page,
128 * Same but also flushes aliased cache contents to RAM. 128 unsigned start1, unsigned end1,
129 * 129 unsigned start2, unsigned end2)
130 * This must be a macro because KM_USER0 and friends aren't defined if 130{
131 * !CONFIG_HIGHMEM 131 void *kaddr = kmap_atomic(page, KM_USER0);
132 */ 132
133#define zero_user_page(page, offset, size, km_type) \ 133 BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
134 do { \ 134
135 void *kaddr; \ 135 if (end1 > start1)
136 \ 136 memset(kaddr + start1, 0, end1 - start1);
137 BUG_ON((offset) + (size) > PAGE_SIZE); \ 137
138 \ 138 if (end2 > start2)
139 kaddr = kmap_atomic(page, km_type); \ 139 memset(kaddr + start2, 0, end2 - start2);
140 memset((char *)kaddr + (offset), 0, (size)); \ 140
141 flush_dcache_page(page); \ 141 kunmap_atomic(kaddr, KM_USER0);
142 kunmap_atomic(kaddr, (km_type)); \ 142 flush_dcache_page(page);
143 } while (0) 143}
144
/* Zero the single segment [start, end) of @page. */
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}
150
/* Zero @size bytes of @page beginning at offset @start. */
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
144 156
145static inline void __deprecated memclear_highpage_flush(struct page *page, 157static inline void __deprecated memclear_highpage_flush(struct page *page,
146 unsigned int offset, unsigned int size) 158 unsigned int offset, unsigned int size)
147{ 159{
148 zero_user_page(page, offset, size, KM_USER0); 160 zero_user(page, offset, size);
149} 161}
150 162
151#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE 163#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE