author		Anton Altaparmakov <anton@tuxera.com>	2015-03-11 10:43:32 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2015-04-11 22:24:33 -0400
commit		a632f5593041305c8adbf4727bc1ccdf0b45178b (patch)
tree		db4eabb62c52a13a0c9594f6095333d3a744d32c /fs/ntfs
parent		171a02032bf1e1bb35442a38d6e25e0dcbb85c63 (diff)
NTFS: Version 2.1.32 - Update file write from aio_write to write_iter.
Signed-off-by: Anton Altaparmakov <anton@tuxera.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
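
For reference, the interface change behind this patch: the iovec-based ->aio_write hook is replaced by the iov_iter-based ->write_iter hook. A minimal sketch of the two struct file_operations members involved (as declared in include/linux/fs.h in kernels of this vintage; shown for illustration, not part of the patch itself):

	/* Old entry point, dropped from ntfs_file_ops below: */
	ssize_t (*aio_write)(struct kiocb *, const struct iovec *,
			unsigned long nr_segs, loff_t pos);

	/* New entry point, wired up by this patch: */
	ssize_t (*write_iter)(struct kiocb *, struct iov_iter *);

With ->write_iter, segment walking moves into the generic iov_iter helpers (iov_iter_count(), iov_iter_advance(), iov_iter_copy_from_user_atomic()), which is why the hand-rolled iovec helpers in fs/ntfs/file.c can be deleted.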
Diffstat (limited to 'fs/ntfs')
-rw-r--r--	fs/ntfs/Makefile	  2
-rw-r--r--	fs/ntfs/file.c		783
2 files changed, 308 insertions, 477 deletions
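
Background on the calling convention: after this change the classic write(2) path reaches NTFS through the VFS bridge new_sync_write(), which wraps the flat user buffer in a single-segment iov_iter and calls ->write_iter. A simplified sketch of that bridge, modelled on fs/read_write.c of the same era (error handling and some bookkeeping elided; for illustration only, not part of this patch):

	static ssize_t new_sync_write(struct file *filp,
			const char __user *buf, size_t len, loff_t *ppos)
	{
		/* One-segment iovec covering the whole user buffer. */
		struct iovec iov = { .iov_base = (void __user *)buf,
				.iov_len = len };
		struct kiocb kiocb;
		struct iov_iter iter;
		ssize_t ret;

		init_sync_kiocb(&kiocb, filp);
		kiocb.ki_pos = *ppos;
		iov_iter_init(&iter, WRITE, &iov, 1, len);

		/* For NTFS this now lands in ntfs_file_write_iter(). */
		ret = filp->f_op->write_iter(&kiocb, &iter);
		if (ret > 0)
			*ppos = kiocb.ki_pos;
		return ret;
	}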
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 36ae529511c4..2ff263e6d363 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -8,7 +8,7 @@ ntfs-y := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
 
 ntfs-$(CONFIG_NTFS_RW) += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o
 
-ccflags-y := -DNTFS_VERSION=\"2.1.31\"
+ccflags-y := -DNTFS_VERSION=\"2.1.32\"
 ccflags-$(CONFIG_NTFS_DEBUG) += -DDEBUG
 ccflags-$(CONFIG_NTFS_RW) += -DNTFS_RW
 
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 1da9b2d184dc..29139ffb4328 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
+ * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -329,62 +329,168 @@ err_out:
 	return err;
 }
 
-/**
- * ntfs_fault_in_pages_readable -
- *
- * Fault a number of userspace pages into pagetables.
- *
- * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
- * with more than two userspace pages as well as handling the single page case
- * elegantly.
- *
- * If you find this difficult to understand, then think of the while loop being
- * the following code, except that we do without the integer variable ret:
- *
- * do {
- * 	ret = __get_user(c, uaddr);
- * 	uaddr += PAGE_SIZE;
- * } while (!ret && uaddr < end);
- *
- * Note, the final __get_user() may well run out-of-bounds of the user buffer,
- * but _not_ out-of-bounds of the page the user buffer belongs to, and since
- * this is only a read and not a write, and since it is still in the same page,
- * it should not matter and this makes the code much simpler.
- */
-static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
-		int bytes)
+static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
+		size_t *count)
 {
-	const char __user *end;
-	volatile char c;
-
-	/* Set @end to the first byte outside the last page we care about. */
-	end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
-
-	while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
-		;
-}
-
-/**
- * ntfs_fault_in_pages_readable_iovec -
- *
- * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
- */
-static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
-		size_t iov_ofs, int bytes)
-{
-	do {
-		const char __user *buf;
-		unsigned len;
+	loff_t pos;
+	s64 end, ll;
+	ssize_t err;
+	unsigned long flags;
+	struct inode *vi = file_inode(file);
+	ntfs_inode *base_ni, *ni = NTFS_I(vi);
+	ntfs_volume *vol = ni->vol;
 
-		buf = iov->iov_base + iov_ofs;
-		len = iov->iov_len - iov_ofs;
-		if (len > bytes)
-			len = bytes;
-		ntfs_fault_in_pages_readable(buf, len);
-		bytes -= len;
-		iov++;
-		iov_ofs = 0;
-	} while (bytes);
+	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+			"0x%llx, count 0x%lx.", vi->i_ino,
+			(unsigned)le32_to_cpu(ni->type),
+			(unsigned long long)*ppos, (unsigned long)*count);
+	/* We can write back this queue in page reclaim. */
+	current->backing_dev_info = inode_to_bdi(vi);
+	err = generic_write_checks(file, ppos, count, S_ISBLK(vi->i_mode));
+	if (unlikely(err))
+		goto out;
+	/*
+	 * All checks have passed. Before we start doing any writing we want
+	 * to abort any totally illegal writes.
+	 */
+	BUG_ON(NInoMstProtected(ni));
+	BUG_ON(ni->type != AT_DATA);
+	/* If file is encrypted, deny access, just like NT4. */
+	if (NInoEncrypted(ni)) {
+		/* Only $DATA attributes can be encrypted. */
+		/*
+		 * Reminder for later: Encrypted files are _always_
+		 * non-resident so that the content can always be encrypted.
+		 */
+		ntfs_debug("Denying write access to encrypted file.");
+		err = -EACCES;
+		goto out;
+	}
+	if (NInoCompressed(ni)) {
+		/* Only unnamed $DATA attribute can be compressed. */
+		BUG_ON(ni->name_len);
+		/*
+		 * Reminder for later: If resident, the data is not actually
+		 * compressed. Only on the switch to non-resident does
+		 * compression kick in. This is in contrast to encrypted files
+		 * (see above).
+		 */
+		ntfs_error(vi->i_sb, "Writing to compressed files is not "
+				"implemented yet. Sorry.");
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+	if (*count == 0)
+		goto out;
+	base_ni = ni;
+	if (NInoAttr(ni))
+		base_ni = ni->ext.base_ntfs_ino;
+	err = file_remove_suid(file);
+	if (unlikely(err))
+		goto out;
+	/*
+	 * Our ->update_time method always succeeds thus file_update_time()
+	 * cannot fail either so there is no need to check the return code.
+	 */
+	file_update_time(file);
+	pos = *ppos;
+	/* The first byte after the last cluster being written to. */
+	end = (pos + *count + vol->cluster_size_mask) &
+			~(u64)vol->cluster_size_mask;
+	/*
+	 * If the write goes beyond the allocated size, extend the allocation
+	 * to cover the whole of the write, rounded up to the nearest cluster.
+	 */
+	read_lock_irqsave(&ni->size_lock, flags);
+	ll = ni->allocated_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+	if (end > ll) {
+		/*
+		 * Extend the allocation without changing the data size.
+		 *
+		 * Note we ensure the allocation is big enough to at least
+		 * write some data but we do not require the allocation to be
+		 * complete, i.e. it may be partial.
+		 */
+		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
+		if (likely(ll >= 0)) {
+			BUG_ON(pos >= ll);
+			/* If the extension was partial truncate the write. */
+			if (end > ll) {
+				ntfs_debug("Truncating write to inode 0x%lx, "
+						"attribute type 0x%x, because "
+						"the allocation was only "
+						"partially extended.",
+						vi->i_ino, (unsigned)
+						le32_to_cpu(ni->type));
+				*count = ll - pos;
+			}
+		} else {
+			err = ll;
+			read_lock_irqsave(&ni->size_lock, flags);
+			ll = ni->allocated_size;
+			read_unlock_irqrestore(&ni->size_lock, flags);
+			/* Perform a partial write if possible or fail. */
+			if (pos < ll) {
+				ntfs_debug("Truncating write to inode 0x%lx "
+						"attribute type 0x%x, because "
+						"extending the allocation "
+						"failed (error %d).",
+						vi->i_ino, (unsigned)
+						le32_to_cpu(ni->type),
+						(int)-err);
+				*count = ll - pos;
+			} else {
+				if (err != -ENOSPC)
+					ntfs_error(vi->i_sb, "Cannot perform "
+							"write to inode "
+							"0x%lx, attribute "
+							"type 0x%x, because "
+							"extending the "
+							"allocation failed "
+							"(error %ld).",
+							vi->i_ino, (unsigned)
+							le32_to_cpu(ni->type),
+							(long)-err);
+				else
+					ntfs_debug("Cannot perform write to "
+							"inode 0x%lx, "
+							"attribute type 0x%x, "
+							"because there is not "
+							"space left.",
+							vi->i_ino, (unsigned)
+							le32_to_cpu(ni->type));
+				goto out;
+			}
+		}
+	}
+	/*
+	 * If the write starts beyond the initialized size, extend it up to the
+	 * beginning of the write and initialize all non-sparse space between
+	 * the old initialized size and the new one. This automatically also
+	 * increments the vfs inode->i_size to keep it above or equal to the
+	 * initialized_size.
+	 */
+	read_lock_irqsave(&ni->size_lock, flags);
+	ll = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+	if (pos > ll) {
+		/*
+		 * Wait for ongoing direct i/o to complete before proceeding.
+		 * New direct i/o cannot start as we hold i_mutex.
+		 */
+		inode_dio_wait(vi);
+		err = ntfs_attr_extend_initialized(ni, pos);
+		if (unlikely(err < 0))
+			ntfs_error(vi->i_sb, "Cannot perform write to inode "
+					"0x%lx, attribute type 0x%x, because "
+					"extending the initialized size "
+					"failed (error %d).", vi->i_ino,
+					(unsigned)le32_to_cpu(ni->type),
+					(int)-err);
+	}
+out:
+	return err;
 }
 
 /**
@@ -421,8 +527,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
 					goto err_out;
 				}
 			}
-			err = add_to_page_cache_lru(*cached_page, mapping, index,
-					GFP_KERNEL);
+			err = add_to_page_cache_lru(*cached_page, mapping,
+					index, GFP_KERNEL);
 			if (unlikely(err)) {
 				if (err == -EEXIST)
 					continue;
@@ -1268,180 +1374,6 @@ rl_not_mapped_enoent:
 	return err;
 }
 
-/*
- * Copy as much as we can into the pages and return the number of bytes which
- * were successfully copied. If a fault is encountered then clear the pages
- * out to (ofs + bytes) and return the number of bytes which were copied.
- */
-static inline size_t ntfs_copy_from_user(struct page **pages,
-		unsigned nr_pages, unsigned ofs, const char __user *buf,
-		size_t bytes)
-{
-	struct page **last_page = pages + nr_pages;
-	char *addr;
-	size_t total = 0;
-	unsigned len;
-	int left;
-
-	do {
-		len = PAGE_CACHE_SIZE - ofs;
-		if (len > bytes)
-			len = bytes;
-		addr = kmap_atomic(*pages);
-		left = __copy_from_user_inatomic(addr + ofs, buf, len);
-		kunmap_atomic(addr);
-		if (unlikely(left)) {
-			/* Do it the slow way. */
-			addr = kmap(*pages);
-			left = __copy_from_user(addr + ofs, buf, len);
-			kunmap(*pages);
-			if (unlikely(left))
-				goto err_out;
-		}
-		total += len;
-		bytes -= len;
-		if (!bytes)
-			break;
-		buf += len;
-		ofs = 0;
-	} while (++pages < last_page);
-out:
-	return total;
-err_out:
-	total += len - left;
-	/* Zero the rest of the target like __copy_from_user(). */
-	while (++pages < last_page) {
-		bytes -= len;
-		if (!bytes)
-			break;
-		len = PAGE_CACHE_SIZE;
-		if (len > bytes)
-			len = bytes;
-		zero_user(*pages, 0, len);
-	}
-	goto out;
-}
-
-static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
-		const struct iovec *iov, size_t iov_ofs, size_t bytes)
-{
-	size_t total = 0;
-
-	while (1) {
-		const char __user *buf = iov->iov_base + iov_ofs;
-		unsigned len;
-		size_t left;
-
-		len = iov->iov_len - iov_ofs;
-		if (len > bytes)
-			len = bytes;
-		left = __copy_from_user_inatomic(vaddr, buf, len);
-		total += len;
-		bytes -= len;
-		vaddr += len;
-		if (unlikely(left)) {
-			total -= left;
-			break;
-		}
-		if (!bytes)
-			break;
-		iov++;
-		iov_ofs = 0;
-	}
-	return total;
-}
-
-static inline void ntfs_set_next_iovec(const struct iovec **iovp,
-		size_t *iov_ofsp, size_t bytes)
-{
-	const struct iovec *iov = *iovp;
-	size_t iov_ofs = *iov_ofsp;
-
-	while (bytes) {
-		unsigned len;
-
-		len = iov->iov_len - iov_ofs;
-		if (len > bytes)
-			len = bytes;
-		bytes -= len;
-		iov_ofs += len;
-		if (iov->iov_len == iov_ofs) {
-			iov++;
-			iov_ofs = 0;
-		}
-	}
-	*iovp = iov;
-	*iov_ofsp = iov_ofs;
-}
-
-/*
- * This has the same side-effects and return value as ntfs_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
- * single-segment behaviour.
- *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
- * atomic and when not atomic. This is ok because it calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error. And on many architectures
- * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
- * makes no difference at all on those architectures.
- */
-static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
-		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
-		size_t *iov_ofs, size_t bytes)
-{
-	struct page **last_page = pages + nr_pages;
-	char *addr;
-	size_t copied, len, total = 0;
-
-	do {
-		len = PAGE_CACHE_SIZE - ofs;
-		if (len > bytes)
-			len = bytes;
-		addr = kmap_atomic(*pages);
-		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-				*iov, *iov_ofs, len);
-		kunmap_atomic(addr);
-		if (unlikely(copied != len)) {
-			/* Do it the slow way. */
-			addr = kmap(*pages);
-			copied = __ntfs_copy_from_user_iovec_inatomic(addr +
-					ofs, *iov, *iov_ofs, len);
-			if (unlikely(copied != len))
-				goto err_out;
-			kunmap(*pages);
-		}
-		total += len;
-		ntfs_set_next_iovec(iov, iov_ofs, len);
-		bytes -= len;
-		if (!bytes)
-			break;
-		ofs = 0;
-	} while (++pages < last_page);
-out:
-	return total;
-err_out:
-	BUG_ON(copied > len);
-	/* Zero the rest of the target like __copy_from_user(). */
-	memset(addr + ofs + copied, 0, len - copied);
-	kunmap(*pages);
-	total += copied;
-	ntfs_set_next_iovec(iov, iov_ofs, copied);
-	while (++pages < last_page) {
-		bytes -= len;
-		if (!bytes)
-			break;
-		len = PAGE_CACHE_SIZE;
-		if (len > bytes)
-			len = bytes;
-		zero_user(*pages, 0, len);
-	}
-	goto out;
-}
-
 static inline void ntfs_flush_dcache_pages(struct page **pages,
 		unsigned nr_pages)
 {
@@ -1762,86 +1694,83 @@ err_out:
 	return err;
 }
 
-static void ntfs_write_failed(struct address_space *mapping, loff_t to)
+/*
+ * Copy as much as we can into the pages and return the number of bytes which
+ * were successfully copied. If a fault is encountered then clear the pages
+ * out to (ofs + bytes) and return the number of bytes which were copied.
+ */
+static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
+		unsigned ofs, struct iov_iter *i, size_t bytes)
 {
-	struct inode *inode = mapping->host;
+	struct page **last_page = pages + nr_pages;
+	size_t total = 0;
+	struct iov_iter data = *i;
+	unsigned len, copied;
 
-	if (to > inode->i_size) {
-		truncate_pagecache(inode, inode->i_size);
-		ntfs_truncate_vfs(inode);
-	}
+	do {
+		len = PAGE_CACHE_SIZE - ofs;
+		if (len > bytes)
+			len = bytes;
+		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+				len);
+		total += copied;
+		bytes -= copied;
+		if (!bytes)
+			break;
+		iov_iter_advance(&data, copied);
+		if (copied < len)
+			goto err;
+		ofs = 0;
+	} while (++pages < last_page);
+out:
+	return total;
+err:
+	/* Zero the rest of the target like __copy_from_user(). */
+	len = PAGE_CACHE_SIZE - copied;
+	do {
+		if (len > bytes)
+			len = bytes;
+		zero_user(*pages, copied, len);
+		bytes -= len;
+		copied = 0;
+		len = PAGE_CACHE_SIZE;
+	} while (++pages < last_page);
+	goto out;
 }
 
 /**
- * ntfs_file_buffered_write -
- *
- * Locking: The vfs is holding ->i_mutex on the inode.
+ * ntfs_perform_write - perform buffered write to a file
+ * @file:	file to write to
+ * @i:		iov_iter with data to write
+ * @pos:	byte offset in file at which to begin writing to
  */
-static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
-		const struct iovec *iov, unsigned long nr_segs,
-		loff_t pos, loff_t *ppos, size_t count)
+static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
+		loff_t pos)
 {
-	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *vi = mapping->host;
 	ntfs_inode *ni = NTFS_I(vi);
 	ntfs_volume *vol = ni->vol;
 	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
 	struct page *cached_page = NULL;
-	char __user *buf = NULL;
-	s64 end, ll;
 	VCN last_vcn;
 	LCN lcn;
-	unsigned long flags;
-	size_t bytes, iov_ofs = 0;	/* Offset in the current iovec. */
-	ssize_t status, written;
+	size_t bytes;
+	ssize_t status, written = 0;
 	unsigned nr_pages;
-	int err;
 
-	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
-			"pos 0x%llx, count 0x%lx.",
-			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
-			(unsigned long long)pos, (unsigned long)count);
-	if (unlikely(!count))
-		return 0;
-	BUG_ON(NInoMstProtected(ni));
-	/*
-	 * If the attribute is not an index root and it is encrypted or
-	 * compressed, we cannot write to it yet. Note we need to check for
-	 * AT_INDEX_ALLOCATION since this is the type of both directory and
-	 * index inodes.
-	 */
-	if (ni->type != AT_INDEX_ALLOCATION) {
-		/* If file is encrypted, deny access, just like NT4. */
-		if (NInoEncrypted(ni)) {
-			/*
-			 * Reminder for later: Encrypted files are _always_
-			 * non-resident so that the content can always be
-			 * encrypted.
-			 */
-			ntfs_debug("Denying write access to encrypted file.");
-			return -EACCES;
-		}
-		if (NInoCompressed(ni)) {
-			/* Only unnamed $DATA attribute can be compressed. */
-			BUG_ON(ni->type != AT_DATA);
-			BUG_ON(ni->name_len);
-			/*
-			 * Reminder for later: If resident, the data is not
-			 * actually compressed. Only on the switch to non-
-			 * resident does compression kick in. This is in
-			 * contrast to encrypted files (see above).
-			 */
-			ntfs_error(vi->i_sb, "Writing to compressed files is "
-					"not implemented yet. Sorry.");
-			return -EOPNOTSUPP;
-		}
-	}
+	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+			"0x%llx, count 0x%lx.", vi->i_ino,
+			(unsigned)le32_to_cpu(ni->type),
+			(unsigned long long)pos,
+			(unsigned long)iov_iter_count(i));
 	/*
 	 * If a previous ntfs_truncate() failed, repeat it and abort if it
 	 * fails again.
 	 */
 	if (unlikely(NInoTruncateFailed(ni))) {
+		int err;
+
 		inode_dio_wait(vi);
 		err = ntfs_truncate(vi);
 		if (err || NInoTruncateFailed(ni)) {
@@ -1855,81 +1784,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 			return err;
 		}
 	}
-	/* The first byte after the write. */
-	end = pos + count;
-	/*
-	 * If the write goes beyond the allocated size, extend the allocation
-	 * to cover the whole of the write, rounded up to the nearest cluster.
-	 */
-	read_lock_irqsave(&ni->size_lock, flags);
-	ll = ni->allocated_size;
-	read_unlock_irqrestore(&ni->size_lock, flags);
-	if (end > ll) {
-		/* Extend the allocation without changing the data size. */
-		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
-		if (likely(ll >= 0)) {
-			BUG_ON(pos >= ll);
-			/* If the extension was partial truncate the write. */
-			if (end > ll) {
-				ntfs_debug("Truncating write to inode 0x%lx, "
-						"attribute type 0x%x, because "
-						"the allocation was only "
-						"partially extended.",
-						vi->i_ino, (unsigned)
-						le32_to_cpu(ni->type));
-				end = ll;
-				count = ll - pos;
-			}
-		} else {
-			err = ll;
-			read_lock_irqsave(&ni->size_lock, flags);
-			ll = ni->allocated_size;
-			read_unlock_irqrestore(&ni->size_lock, flags);
-			/* Perform a partial write if possible or fail. */
-			if (pos < ll) {
-				ntfs_debug("Truncating write to inode 0x%lx, "
-						"attribute type 0x%x, because "
-						"extending the allocation "
-						"failed (error code %i).",
-						vi->i_ino, (unsigned)
-						le32_to_cpu(ni->type), err);
-				end = ll;
-				count = ll - pos;
-			} else {
-				ntfs_error(vol->sb, "Cannot perform write to "
-						"inode 0x%lx, attribute type "
-						"0x%x, because extending the "
-						"allocation failed (error "
-						"code %i).", vi->i_ino,
-						(unsigned)
-						le32_to_cpu(ni->type), err);
-				return err;
-			}
-		}
-	}
-	written = 0;
-	/*
-	 * If the write starts beyond the initialized size, extend it up to the
-	 * beginning of the write and initialize all non-sparse space between
-	 * the old initialized size and the new one. This automatically also
-	 * increments the vfs inode->i_size to keep it above or equal to the
-	 * initialized_size.
-	 */
-	read_lock_irqsave(&ni->size_lock, flags);
-	ll = ni->initialized_size;
-	read_unlock_irqrestore(&ni->size_lock, flags);
-	if (pos > ll) {
-		err = ntfs_attr_extend_initialized(ni, pos);
-		if (err < 0) {
-			ntfs_error(vol->sb, "Cannot perform write to inode "
-					"0x%lx, attribute type 0x%x, because "
-					"extending the initialized size "
-					"failed (error code %i).", vi->i_ino,
-					(unsigned)le32_to_cpu(ni->type), err);
-			status = err;
-			goto err_out;
-		}
-	}
 	/*
 	 * Determine the number of pages per cluster for non-resident
 	 * attributes.
@@ -1937,10 +1791,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 	nr_pages = 1;
 	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
 		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
-	/* Finally, perform the actual write. */
 	last_vcn = -1;
-	if (likely(nr_segs == 1))
-		buf = iov->iov_base;
 	do {
 		VCN vcn;
 		pgoff_t idx, start_idx;
@@ -1965,10 +1816,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 					vol->cluster_size_bits, false);
 			up_read(&ni->runlist.lock);
 			if (unlikely(lcn < LCN_HOLE)) {
-				status = -EIO;
 				if (lcn == LCN_ENOMEM)
 					status = -ENOMEM;
-				else
+				else {
+					status = -EIO;
 					ntfs_error(vol->sb, "Cannot "
 							"perform write to "
 							"inode 0x%lx, "
@@ -1977,6 +1828,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 							"is corrupt.",
 							vi->i_ino, (unsigned)
 							le32_to_cpu(ni->type));
+				}
 				break;
 			}
 			if (lcn == LCN_HOLE) {
@@ -1989,8 +1841,9 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 				}
 			}
 		}
-		if (bytes > count)
-			bytes = count;
+		if (bytes > iov_iter_count(i))
+			bytes = iov_iter_count(i);
+again:
 		/*
 		 * Bring in the user page(s) that we will copy from _first_.
 		 * Otherwise there is a nasty deadlock on copying from the same
@@ -1999,10 +1852,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 		 * pages being swapped out between us bringing them into memory
 		 * and doing the actual copying.
 		 */
-		if (likely(nr_segs == 1))
-			ntfs_fault_in_pages_readable(buf, bytes);
-		else
-			ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
+		if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
+			status = -EFAULT;
+			break;
+		}
 		/* Get and lock @do_pages starting at index @start_idx. */
 		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
 				pages, &cached_page);
@@ -2018,56 +1871,57 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 			status = ntfs_prepare_pages_for_non_resident_write(
 					pages, do_pages, pos, bytes);
 			if (unlikely(status)) {
-				loff_t i_size;
-
 				do {
 					unlock_page(pages[--do_pages]);
 					page_cache_release(pages[do_pages]);
 				} while (do_pages);
-				/*
-				 * The write preparation may have instantiated
-				 * allocated space outside i_size. Trim this
-				 * off again. We can ignore any errors in this
-				 * case as we will just be waisting a bit of
-				 * allocated space, which is not a disaster.
-				 */
-				i_size = i_size_read(vi);
-				if (pos + bytes > i_size) {
-					ntfs_write_failed(mapping, pos + bytes);
-				}
 				break;
 			}
 		}
 		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
-		if (likely(nr_segs == 1)) {
-			copied = ntfs_copy_from_user(pages + u, do_pages - u,
-					ofs, buf, bytes);
-			buf += copied;
-		} else
-			copied = ntfs_copy_from_user_iovec(pages + u,
-					do_pages - u, ofs, &iov, &iov_ofs,
-					bytes);
+		copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
+				i, bytes);
 		ntfs_flush_dcache_pages(pages + u, do_pages - u);
-		status = ntfs_commit_pages_after_write(pages, do_pages, pos,
-				bytes);
-		if (likely(!status)) {
-			written += copied;
-			count -= copied;
-			pos += copied;
-			if (unlikely(copied != bytes))
-				status = -EFAULT;
-		}
+		status = 0;
+		if (likely(copied == bytes)) {
+			status = ntfs_commit_pages_after_write(pages, do_pages,
+					pos, bytes);
+			if (!status)
+				status = bytes;
+		}
 		do {
 			unlock_page(pages[--do_pages]);
 			page_cache_release(pages[do_pages]);
 		} while (do_pages);
-		if (unlikely(status))
+		if (unlikely(status < 0))
 			break;
-		balance_dirty_pages_ratelimited(mapping);
+		copied = status;
 		cond_resched();
-	} while (count);
-err_out:
-	*ppos = pos;
+		if (unlikely(!copied)) {
+			size_t sc;
+
+			/*
+			 * We failed to copy anything. Fall back to single
+			 * segment length write.
+			 *
+			 * This is needed to avoid possible livelock in the
+			 * case that all segments in the iov cannot be copied
+			 * at once without a pagefault.
+			 */
+			sc = iov_iter_single_seg_count(i);
+			if (bytes > sc)
+				bytes = sc;
+			goto again;
+		}
+		iov_iter_advance(i, copied);
+		pos += copied;
+		written += copied;
+		balance_dirty_pages_ratelimited(mapping);
+		if (fatal_signal_pending(current)) {
+			status = -EINTR;
+			break;
+		}
+	} while (iov_iter_count(i));
 	if (cached_page)
 		page_cache_release(cached_page);
 	ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
@@ -2077,59 +1931,56 @@ err_out:
 }
 
 /**
- * ntfs_file_aio_write_nolock -
+ * ntfs_file_write_iter_nolock - write data to a file
+ * @iocb:	IO state structure (file, offset, etc.)
+ * @from:	iov_iter with data to write
+ *
+ * Basically the same as __generic_file_write_iter() except that it ends
+ * up calling ntfs_perform_write() instead of generic_perform_write() and that
+ * O_DIRECT is not implemented.
  */
-static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
-		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
+static ssize_t ntfs_file_write_iter_nolock(struct kiocb *iocb,
+		struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	loff_t pos;
-	size_t count;		/* after file limit checks */
-	ssize_t written, err;
+	loff_t pos = iocb->ki_pos;
+	ssize_t written = 0;
+	ssize_t err;
+	size_t count = iov_iter_count(from);
 
-	count = iov_length(iov, nr_segs);
-	pos = *ppos;
-	/* We can write back this queue in page reclaim. */
-	current->backing_dev_info = inode_to_bdi(inode);
-	written = 0;
-	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-	if (err)
-		goto out;
-	if (!count)
-		goto out;
-	err = file_remove_suid(file);
-	if (err)
-		goto out;
-	err = file_update_time(file);
-	if (err)
-		goto out;
-	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
-			count);
-out:
+	err = ntfs_prepare_file_for_write(file, &pos, &count);
+	if (count && !err) {
+		iov_iter_truncate(from, count);
+		written = ntfs_perform_write(file, from, pos);
+		if (likely(written >= 0))
+			iocb->ki_pos = pos + written;
+	}
 	current->backing_dev_info = NULL;
 	return written ? written : err;
 }
 
 /**
- * ntfs_file_aio_write -
+ * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
+ * @iocb:	IO state structure
+ * @from:	iov_iter with data to write
+ *
+ * Basically the same as generic_file_write_iter() except that it ends up
+ * calling ntfs_file_write_iter_nolock() instead of
+ * __generic_file_write_iter().
  */
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-		unsigned long nr_segs, loff_t pos)
+static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
+	struct inode *vi = file_inode(file);
 	ssize_t ret;
 
-	BUG_ON(iocb->ki_pos != pos);
-
-	mutex_lock(&inode->i_mutex);
-	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
-	mutex_unlock(&inode->i_mutex);
+	mutex_lock(&vi->i_mutex);
+	ret = ntfs_file_write_iter_nolock(iocb, from);
+	mutex_unlock(&vi->i_mutex);
 	if (ret > 0) {
-		int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+		ssize_t err;
+
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0)
 			ret = err;
 	}
@@ -2197,37 +2048,17 @@ static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 #endif /* NTFS_RW */
 
 const struct file_operations ntfs_file_ops = {
-	.llseek		= generic_file_llseek,	 /* Seek inside file. */
-	.read		= new_sync_read,	 /* Read from file. */
-	.read_iter	= generic_file_read_iter, /* Async read from file. */
+	.llseek		= generic_file_llseek,
+	.read		= new_sync_read,
+	.read_iter	= generic_file_read_iter,
 #ifdef NTFS_RW
-	.write		= do_sync_write,	 /* Write to file. */
-	.aio_write	= ntfs_file_aio_write,	 /* Async write to file. */
-	/*.release	= ,*/			 /* Last file is closed. See
-						    fs/ext2/file.c::
-						    ext2_release_file() for
-						    how to use this to discard
-						    preallocated space for
-						    write opened files. */
-	.fsync		= ntfs_file_fsync,	 /* Sync a file to disk. */
-	/*.aio_fsync	= ,*/			 /* Sync all outstanding async
-						    i/o operations on a
-						    kiocb. */
+	.write		= new_sync_write,
+	.write_iter	= ntfs_file_write_iter,
+	.fsync		= ntfs_file_fsync,
 #endif /* NTFS_RW */
-	/*.ioctl	= ,*/			 /* Perform function on the
-						    mounted filesystem. */
-	.mmap		= generic_file_mmap,	 /* Mmap file. */
-	.open		= ntfs_file_open,	 /* Open file. */
-	.splice_read	= generic_file_splice_read /* Zero-copy data send with
-						      the data source being on
-						      the ntfs partition. We do
-						      not need to care about the
-						      data destination. */
-	/*.sendpage	= ,*/			 /* Zero-copy data send with
-						      the data destination being
-						      on the ntfs partition. We
-						      do not need to care about
-						      the data source. */
+	.mmap		= generic_file_mmap,
+	.open		= ntfs_file_open,
+	.splice_read	= generic_file_splice_read,
 };
 
 const struct inode_operations ntfs_file_inode_ops = {