Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	183
1 file changed, 125 insertions(+), 58 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index fd57442186cb..807a463fd5ed 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
+#include <linux/uaccess.h>
 #include <linux/aio.h>
 #include <linux/capability.h>
 #include <linux/kernel_stat.h>
@@ -38,7 +39,6 @@
  */
 #include <linux/buffer_head.h> /* for generic_osync_inode */
 
-#include <asm/uaccess.h>
 #include <asm/mman.h>
 
 static ssize_t
@@ -171,15 +171,17 @@ static int sync_page(void *word)
 }
 
 /**
- * filemap_fdatawrite_range - start writeback against all of a mapping's
- * dirty pages that lie within the byte offsets <start, end>
+ * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping: address space structure to write
  * @start: offset in bytes where the range starts
  * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
+ * Start writeback against all of a mapping's dirty pages that lie
+ * within the byte offsets <start, end> inclusive.
+ *
  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory * cleansing writeback.  The difference between
+ * opposed to a regular memory cleansing writeback.  The difference between
  * these two operations is that if a dirty page/buffer is encountered, it must
  * be waited upon, and not just skipped over.
  */
@@ -190,8 +192,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	struct writeback_control wbc = {
 		.sync_mode = sync_mode,
 		.nr_to_write = mapping->nrpages * 2,
-		.start = start,
-		.end = end,
+		.range_start = start,
+		.range_end = end,
 	};
 
 	if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +206,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
 	int sync_mode)
 {
-	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 }
 
 int filemap_fdatawrite(struct address_space *mapping)
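These two hunks rename the writeback_control window to range_start/range_end and make __filemap_fdatawrite() cover the whole file via LLONG_MAX rather than the old magic 0. As a minimal sketch, assuming the post-patch names, a caller pushing a whole file with data-integrity semantics might look like this (example_flush_whole_file is a hypothetical helper, not part of this diff):

/* Illustrative only: write back bytes [0, i_size) of an inode.
 * Note that range_end is an inclusive byte offset, hence isize - 1. */
static int example_flush_whole_file(struct inode *inode)
{
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		return 0;
	return __filemap_fdatawrite_range(inode->i_mapping, 0, isize - 1,
					  WB_SYNC_ALL);
}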
@@ -219,7 +221,10 @@ static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
 
-/*
+/**
+ * filemap_flush - mostly a non-blocking flush
+ * @mapping: target address_space
+ *
  * This is a mostly non-blocking flush.  Not suitable for data-integrity
  * purposes - I/O may not be started against all dirty pages.
  */
@@ -229,7 +234,12 @@ int filemap_flush(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_flush);
 
-/*
+/**
+ * wait_on_page_writeback_range - wait for writeback to complete
+ * @mapping: target address_space
+ * @start: beginning page index
+ * @end: ending page index
+ *
  * Wait for writeback to complete against pages indexed by start->end
  * inclusive
  */
@@ -276,7 +286,13 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 	return ret;
 }
 
-/*
+/**
+ * sync_page_range - write and wait on all pages in the passed range
+ * @inode: target inode
+ * @mapping: target address_space
+ * @pos: beginning offset in pages to write
+ * @count: number of bytes to write
+ *
  * Write and wait upon all the pages in the passed range.  This is a "data
  * integrity" operation.  It waits upon in-flight writeout before starting and
  * waiting upon new writeout.  If there was an IO error, return it.
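For context, sync_page_range() is what the O_SYNC write path of this era calls once data has been copied into the page cache. A hedged sketch of that calling pattern (example_osync_write is a hypothetical name):

/* Illustrative only: flush and wait on the bytes just written,
 * replacing the byte count with any I/O error that occurred. */
static ssize_t example_osync_write(struct inode *inode, loff_t pos,
				   ssize_t written)
{
	if (written > 0) {
		int err = sync_page_range(inode, inode->i_mapping,
					  pos, written);
		if (err < 0)
			written = err;
	}
	return written;
}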
@@ -305,7 +321,13 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 }
 EXPORT_SYMBOL(sync_page_range);
 
-/*
+/**
+ * sync_page_range_nolock
+ * @inode: target inode
+ * @mapping: target address_space
+ * @pos: beginning offset in pages to write
+ * @count: number of bytes to write
+ *
  * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
@@ -329,10 +351,11 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range_nolock);
 
 /**
- * filemap_fdatawait - walk the list of under-writeback pages of the given
- *     address space and wait for all of them.
- *
+ * filemap_fdatawait - wait for all under-writeback pages to complete
  * @mapping: address space structure to wait for
+ *
+ * Walk the list of under-writeback pages of the given address space
+ * and wait for all of them.
  */
 int filemap_fdatawait(struct address_space *mapping)
 {
@@ -368,7 +391,12 @@ int filemap_write_and_wait(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_write_and_wait);
 
-/*
+/**
+ * filemap_write_and_wait_range - write out & wait on a file range
+ * @mapping: the address_space for the pages
+ * @lstart: offset in bytes where the range starts
+ * @lend: offset in bytes where the range ends (inclusive)
+ *
  * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
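The inclusive `lend' convention is the easy thing to get wrong here. A minimal sketch of converting a (pos, count) pair into the range this function expects (hypothetical helper):

/* Illustrative only: note the "pos + count - 1" forming an inclusive end. */
static int example_sync_written_range(struct address_space *mapping,
				      loff_t pos, size_t count)
{
	if (count == 0)
		return 0;
	return filemap_write_and_wait_range(mapping, pos, pos + count - 1);
}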
@@ -394,8 +422,14 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 	return err;
 }
 
-/*
- * This function is used to add newly allocated pagecache pages:
+/**
+ * add_to_page_cache - add newly allocated pagecache pages
+ * @page: page to add
+ * @mapping: the page's address_space
+ * @offset: page index
+ * @gfp_mask: page allocation mode
+ *
+ * This function is used to add newly allocated pagecache pages;
  * the page is new, so we can just run SetPageLocked() against it.
  * The other page state flags were set by rmqueue().
 *
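A sketch of the usual allocate-and-insert pattern built on the LRU variant that appears just below; example_add_new_page and the GFP_KERNEL choice are illustrative assumptions:

/* Illustrative only: allocate a fresh page and insert it at @index.
 * On success the page comes back locked, as the comment above says. */
static struct page *example_add_new_page(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);	/* insertion failed */
		page = NULL;
	}
	return page;
}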
@@ -422,7 +456,6 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 	}
 	return error;
 }
-
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -489,8 +522,7 @@ void fastcall wait_on_page_bit(struct page *page, int bit_nr)
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
- * unlock_page() - unlock a locked page
- *
+ * unlock_page - unlock a locked page
  * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
@@ -513,8 +545,9 @@ void fastcall unlock_page(struct page *page)
 }
 EXPORT_SYMBOL(unlock_page);
 
-/*
- * End writeback against a page.
+/**
+ * end_page_writeback - end writeback against a page
+ * @page: the page
  */
 void end_page_writeback(struct page *page)
 {
@@ -527,10 +560,11 @@ void end_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL(end_page_writeback);
 
-/*
- * Get a lock on the page, assuming we need to sleep to get it.
+/**
+ * __lock_page - get a lock on the page, assuming we need to sleep to get it
+ * @page: the page to lock
 *
- * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
+ * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
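Callers normally reach __lock_page() through the lock_page() wrapper, which only sleeps when the bit is already held. A minimal sketch of the lock/use/unlock discipline these helpers implement:

/* Illustrative only: hold PG_locked across work that must not race
 * with truncation, then wake any waiters. */
static void example_with_page_locked(struct page *page)
{
	lock_page(page);	/* may sleep in __lock_page() */
	/* ... operate on the page ... */
	unlock_page(page);	/* wakes sleepers in wait_on_page_locked() */
}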
@@ -544,8 +578,12 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-/*
- * a rather lightweight function, finding and getting a reference to a
+/**
+ * find_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * A rather lightweight function, finding and getting a reference to a
  * hashed page atomically.
  */
 struct page * find_get_page(struct address_space *mapping, unsigned long offset)
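Every successful find_get_page() lookup must be balanced by a page_cache_release(); a short sketch of that reference discipline (hypothetical helper):

/* Illustrative only: test for a cached page and drop the reference. */
static int example_page_is_cached(struct address_space *mapping,
				  unsigned long offset)
{
	struct page *page = find_get_page(mapping, offset);

	if (!page)
		return 0;
	page_cache_release(page);	/* balance the reference we took */
	return 1;
}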
@@ -559,11 +597,14 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
 	read_unlock_irq(&mapping->tree_lock);
 	return page;
 }
-
 EXPORT_SYMBOL(find_get_page);
 
-/*
- * Same as above, but trylock it instead of incrementing the count.
+/**
+ * find_trylock_page - find and lock a page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Same as find_get_page(), but trylock it instead of incrementing the count.
  */
 struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
 {
@@ -576,12 +617,10 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs
 	read_unlock_irq(&mapping->tree_lock);
 	return page;
 }
-
 EXPORT_SYMBOL(find_trylock_page);
 
 /**
  * find_lock_page - locate, pin and lock a pagecache page
- *
  * @mapping: the address_space to search
 * @offset: the page index
 *
@@ -617,12 +656,10 @@ repeat:
 	read_unlock_irq(&mapping->tree_lock);
 	return page;
 }
-
 EXPORT_SYMBOL(find_lock_page);
 
 /**
  * find_or_create_page - locate or add a pagecache page
- *
  * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
@@ -663,7 +700,6 @@ repeat:
 		page_cache_release(cached_page);
 	return page;
 }
-
 EXPORT_SYMBOL(find_or_create_page);
 
 /**
@@ -729,9 +765,16 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 	return i;
 }
 
-/*
+/**
+ * find_get_pages_tag - find and return pages that match @tag
+ * @mapping: the address_space to search
+ * @index: the starting page index
+ * @tag: the tag index
+ * @nr_pages: the maximum number of pages
+ * @pages: where the resulting pages are placed
+ *
  * Like find_get_pages, except we only return pages which are tagged with
- * `tag'.   We update *index to index the next page for the traversal.
+ * @tag.   We update @index to index the next page for the traversal.
  */
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 			int tag, unsigned int nr_pages, struct page **pages)
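find_get_pages_tag() is built for batched gang lookups; since @index advances past the last page returned, callers loop until a batch comes back empty. A hedged sketch walking dirty pages (hypothetical helper, batch size arbitrary):

/* Illustrative only: visit every page tagged dirty in @mapping. */
static void example_walk_dirty_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned nr, i;

	while ((nr = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
					16, pages)) != 0) {
		for (i = 0; i < nr; i++)
			page_cache_release(pages[i]);	/* each was referenced */
	}
}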
@@ -750,7 +793,11 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 	return ret;
 }
 
-/*
+/**
+ * grab_cache_page_nowait - returns locked page at given index in given cache
+ * @mapping: target address_space
+ * @index: the page index
+ *
  * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
@@ -779,19 +826,25 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 	}
 	return page;
 }
-
 EXPORT_SYMBOL(grab_cache_page_nowait);
 
-/*
+/**
+ * do_generic_mapping_read - generic file read routine
+ * @mapping: address_space to be read
+ * @_ra: file's readahead state
+ * @filp: the file to read
+ * @ppos: current file position
+ * @desc: read_descriptor
+ * @actor: read method
+ *
  * This is a generic file read routine, and uses the
- * mapping->a_ops->readpage() function for the actual low-level
- * stuff.
+ * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
- * Note the struct file* is only passed for the use of readpage.  It may be
- * NULL.
+ * Note the struct file* is only passed for the use of readpage.
+ * It may be NULL.
  */
 void do_generic_mapping_read(struct address_space *mapping,
 			     struct file_ra_state *_ra,
@@ -1004,7 +1057,6 @@ out:
 	if (filp)
 		file_accessed(filp);
 }
-
 EXPORT_SYMBOL(do_generic_mapping_read);
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
@@ -1045,7 +1097,13 @@ success:
 	return size;
 }
 
-/*
+/**
+ * __generic_file_aio_read - generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iov: io vector request
+ * @nr_segs: number of segments in the iovec
+ * @ppos: current file position
+ *
  * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
  */
@@ -1124,7 +1182,6 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 out:
 	return retval;
 }
-
 EXPORT_SYMBOL(__generic_file_aio_read);
 
 ssize_t
@@ -1135,7 +1192,6 @@ generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
 	BUG_ON(iocb->ki_pos != pos);
 	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
 }
-
 EXPORT_SYMBOL(generic_file_aio_read);
 
 ssize_t
@@ -1151,7 +1207,6 @@ generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppo
 		ret = wait_on_sync_kiocb(&kiocb);
 	return ret;
 }
-
 EXPORT_SYMBOL(generic_file_read);
 
 int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
@@ -1192,7 +1247,6 @@ ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
 		return desc.written;
 	return desc.error;
 }
-
 EXPORT_SYMBOL(generic_file_sendfile);
 
 static ssize_t
@@ -1228,11 +1282,15 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
 }
 
 #ifdef CONFIG_MMU
-/*
+static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
+/**
+ * page_cache_read - adds requested page to the page cache if not already there
+ * @file: file to read
+ * @offset: page index
+ *
  * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
  */
-static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
 static int fastcall page_cache_read(struct file * file, unsigned long offset)
 {
 	struct address_space *mapping = file->f_mapping;
@@ -1259,7 +1317,12 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
 
 #define MMAP_LOTSAMISS  (100)
 
-/*
+/**
+ * filemap_nopage - read in file data for page fault handling
+ * @area: the applicable vm_area
+ * @address: target address to read in
+ * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
+ *
  * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
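filemap_nopage() is reached through a vma's operations table; the shape of the hookup in generic mmap code of this era is roughly the following (a sketch, assuming the .nopage member of that period):

/* Illustrative only: route faults on a file mapping to filemap_nopage(). */
static struct vm_operations_struct example_file_vm_ops = {
	.nopage = filemap_nopage,
};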
@@ -1462,7 +1525,6 @@ page_not_uptodate:
 	page_cache_release(page);
 	return NULL;
 }
-
 EXPORT_SYMBOL(filemap_nopage);
 
 static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
@@ -1716,7 +1778,13 @@ repeat:
 	return page;
 }
 
-/*
+/**
+ * read_cache_page - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
  * Read into the page cache. If a page already exists,
 * and PageUptodate() is not set, try to fill the page.
  */
@@ -1754,7 +1822,6 @@ retry:
  out:
 	return page;
 }
-
 EXPORT_SYMBOL(read_cache_page);
 
 /*
@@ -1835,7 +1902,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
+		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
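The _nocache variant avoids filling the CPU cache with data the CPU will not read back (it is typically consumed next by DMA). The copy helpers around this site follow a fast-path/slow-path shape along these lines (a sketch of the pattern under that assumption, not this file's exact code):

/* Illustrative only: try the pagefault-free copy under an atomic kmap;
 * if it faults, retry with a sleeping kmap where faults are allowed. */
static size_t example_copy_from_user(struct page *page, unsigned long offset,
				     const char __user *buf, unsigned bytes)
{
	char *kaddr;
	int left;

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left != 0) {
		kaddr = kmap(page);	/* slow path: faults permitted */
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return left;	/* bytes left uncopied; nonzero means -EFAULT upstream */
}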
@@ -1854,7 +1921,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
 /*
  * Performs necessary checks before doing a write
 *
- * Can adjust writing position aor amount of bytes to write.
+ * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */