Diffstat (limited to 'net/sunrpc/xdr.c')
 net/sunrpc/xdr.c | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
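
Note on the change itself: this is a mechanical rename. At this point in the tree the PAGE_CACHE_* names were straight aliases of the corresponding PAGE_* macros, so each substitution below is a 1:1 replacement with no behavioural change. A minimal sketch of the legacy aliases (approximately as they appeared in include/linux/pagemap.h before their removal; whitespace and neighbouring definitions are not reproduced):

	/* Legacy aliases (approximate) -- removed by this series */
	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
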
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4c1b53..6bdb3865212d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
  * Note: the addresses pgto_base and pgfrom_base are both calculated in
  *       the same way:
  *	if a memory area starts at byte 'base' in page 'pages[i]',
- *	then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ *	then its address is given as (i << PAGE_SHIFT) + base
  * Also note: pgfrom_base must be < pgto_base, but the memory areas
  * they point to may overlap.
  */
@@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
 	pgto_base += len;
 	pgfrom_base += len;
 
-	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
-	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
+	pgto = pages + (pgto_base >> PAGE_SHIFT);
+	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);
 
-	pgto_base &= ~PAGE_CACHE_MASK;
-	pgfrom_base &= ~PAGE_CACHE_MASK;
+	pgto_base &= ~PAGE_MASK;
+	pgfrom_base &= ~PAGE_MASK;
 
 	do {
 		/* Are any pointers crossing a page boundary? */
 		if (pgto_base == 0) {
-			pgto_base = PAGE_CACHE_SIZE;
+			pgto_base = PAGE_SIZE;
 			pgto--;
 		}
 		if (pgfrom_base == 0) {
-			pgfrom_base = PAGE_CACHE_SIZE;
+			pgfrom_base = PAGE_SIZE;
 			pgfrom--;
 		}
 
@@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
 	char *vto;
 	size_t copy;
 
-	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
-	pgbase &= ~PAGE_CACHE_MASK;
+	pgto = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
 
 	for (;;) {
-		copy = PAGE_CACHE_SIZE - pgbase;
+		copy = PAGE_SIZE - pgbase;
 		if (copy > len)
 			copy = len;
 
@@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
 			break;
 
 		pgbase += copy;
-		if (pgbase == PAGE_CACHE_SIZE) {
+		if (pgbase == PAGE_SIZE) {
 			flush_dcache_page(*pgto);
 			pgbase = 0;
 			pgto++;
@@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 	char *vfrom;
 	size_t copy;
 
-	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
-	pgbase &= ~PAGE_CACHE_MASK;
+	pgfrom = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
 
 	do {
-		copy = PAGE_CACHE_SIZE - pgbase;
+		copy = PAGE_SIZE - pgbase;
 		if (copy > len)
 			copy = len;
 
@@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 		kunmap_atomic(vfrom);
 
 		pgbase += copy;
-		if (pgbase == PAGE_CACHE_SIZE) {
+		if (pgbase == PAGE_SIZE) {
 			pgbase = 0;
 			pgfrom++;
 		}
@@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
 	if (base < buf->page_len) {
 		subbuf->page_len = min(buf->page_len - base, len);
 		base += buf->page_base;
-		subbuf->page_base = base & ~PAGE_CACHE_MASK;
-		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
+		subbuf->page_base = base & ~PAGE_MASK;
+		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
 		len -= subbuf->page_len;
 		base = 0;
 	} else {
@@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
 		todo -= avail_here;
 
 		base += buf->page_base;
-		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
-		base &= ~PAGE_CACHE_MASK;
-		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
+		ppages = buf->pages + (base >> PAGE_SHIFT);
+		base &= ~PAGE_MASK;
+		avail_page = min_t(unsigned int, PAGE_SIZE - base,
 					avail_here);
 		c = kmap(*ppages) + base;
 
@@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
 			}
 
 			avail_page = min(avail_here,
-					 (unsigned int) PAGE_CACHE_SIZE);
+					 (unsigned int) PAGE_SIZE);
 		}
 		base = buf->page_len;  /* align to start of tail */
 	}
@@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 		if (page_len > len)
 			page_len = len;
 		len -= page_len;
-		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - page_offset;
+		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
+		i = (offset + buf->page_base) >> PAGE_SHIFT;
+		thislen = PAGE_SIZE - page_offset;
 		do {
 			if (thislen > page_len)
 				thislen = page_len;
@@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
 			page_len -= thislen;
 			i++;
 			page_offset = 0;
-			thislen = PAGE_CACHE_SIZE;
+			thislen = PAGE_SIZE;
 		} while (page_len != 0);
 		offset = 0;
 	}
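
To illustrate the addressing convention described in the comment touched by the first hunk: a byte at offset 'base' within pages[i] has the linear address (i << PAGE_SHIFT) + base, and the page index and in-page offset are recovered with a shift and a mask (base & ~PAGE_MASK is the same value as base & (PAGE_SIZE - 1)). A standalone sketch, assuming 4 KiB pages; the DEMO_PAGE_* names are hypothetical stand-ins, since the real PAGE_SHIFT is architecture-dependent:

	#include <stdio.h>

	#define DEMO_PAGE_SHIFT 12			/* assumption: 4 KiB pages */
	#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
	#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long base = (3UL << DEMO_PAGE_SHIFT) + 100;	/* byte 100 of pages[3] */
		unsigned long idx  = base >> DEMO_PAGE_SHIFT;		/* page index: 3 */
		unsigned long off  = base & ~DEMO_PAGE_MASK;		/* in-page offset: 100 */

		printf("addr=%lu page=%lu offset=%lu\n", base, idx, off);	/* 12388 3 100 */
		return 0;
	}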