author		Pavel Shilovsky <piastryyy@gmail.com>	2011-01-24 14:16:35 -0500
committer	Steve French <sfrench@us.ibm.com>	2011-01-25 14:30:13 -0500
commit		72432ffcf555decbbae47f1be338e1d2f210aa69 (patch)
tree		4293cb6e5b880071099756b7523f8f36bb7e16cc /fs/cifs/file.c
parent		93c100c0b423266c0ee28497e90fdf27c05e6b8e (diff)
CIFS: Implement cifs_strict_writev (try #4)

If we don't have an exclusive oplock, write the data to the server. Also set
the invalid_mapping flag on the inode if we wrote something to the server.
Add cifs_iovec_write to let the client write iovec buffers through
CIFSSMBWrite2.

Signed-off-by: Pavel Shilovsky <piastryyy@gmail.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--	fs/cifs/file.c	202
1 file changed, 201 insertions(+), 1 deletion(-)
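
Before the diff itself, a brief illustration of the chunking the commit message describes: cifs_iovec_write splits the caller's buffers into chunks of at most wsize bytes, copies each chunk into freshly allocated pages, and hands them to CIFSSMBWrite2. The stand-alone user-space sketch below is not part of the patch; the 4 KiB page size and the sample wsize/len values are assumptions. It reproduces only the rounding done by get_numpages(), so you can see how many pages and server writes a given request turns into. Note that the real loop advances by the number of bytes the server actually accepted, not by the full chunk length.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* Same rounding as get_numpages() in the patch below. */
static size_t numpages(size_t wsize, size_t len, size_t *cur_len)
{
	size_t clen = len < wsize ? len : wsize;
	size_t num_pages = clen / PAGE_SIZE;

	if (clen % PAGE_SIZE)
		num_pages++;
	if (cur_len)
		*cur_len = clen;
	return num_pages;
}

int main(void)
{
	size_t wsize = 57344;	/* sample CIFS wsize (56 KiB) -- assumption */
	size_t len = 200000;	/* total bytes the caller asked to write */
	size_t off = 0, cur_len, nwrites = 0;

	while (len > 0) {
		size_t pages = numpages(wsize, len, &cur_len);

		/* the real code advances by the bytes actually written */
		printf("write %zu: offset %zu, %zu bytes in %zu pages\n",
		       ++nwrites, off, cur_len, pages);
		off += cur_len;
		len -= cur_len;
	}
	return 0;
}

With these sample values the request is sent as three full 14-page writes of 56 KiB plus one final 7-page write for the remainder.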
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d7d65a70678e..0de17c1db608 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -848,7 +848,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 }
 
 /* update the file size (if needed) after a write */
-static void
+void
 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 		unsigned int bytes_written)
 {
@@ -1619,6 +1619,206 @@ int cifs_flush(struct file *file, fl_owner_t id)
 	return rc;
 }
 
+static int
+cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
+{
+	int rc = 0;
+	unsigned long i;
+
+	for (i = 0; i < num_pages; i++) {
+		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+		if (!pages[i]) {
+			/*
+			 * save number of pages we have already allocated and
+			 * return with ENOMEM error
+			 */
+			num_pages = i;
+			rc = -ENOMEM;
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+	return rc;
+}
+
+static inline
+size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
+{
+	size_t num_pages;
+	size_t clen;
+
+	clen = min_t(const size_t, len, wsize);
+	num_pages = clen / PAGE_CACHE_SIZE;
+	if (clen % PAGE_CACHE_SIZE)
+		num_pages++;
+
+	if (cur_len)
+		*cur_len = clen;
+
+	return num_pages;
+}
+
+static ssize_t
+cifs_iovec_write(struct file *file, const struct iovec *iov,
+		 unsigned long nr_segs, loff_t *poffset)
+{
+	size_t total_written = 0, written = 0;
+	unsigned long num_pages, npages;
+	size_t copied, len, cur_len, i;
+	struct kvec *to_send;
+	struct page **pages;
+	struct iov_iter it;
+	struct inode *inode;
+	struct cifsFileInfo *open_file;
+	struct cifsTconInfo *pTcon;
+	struct cifs_sb_info *cifs_sb;
+	int xid, rc;
+
+	len = iov_length(iov, nr_segs);
+	if (!len)
+		return 0;
+
+	rc = generic_write_checks(file, poffset, &len, 0);
+	if (rc)
+		return rc;
+
+	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+
+	pages = kmalloc(sizeof(struct page *)*num_pages, GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
+	if (!to_send) {
+		kfree(pages);
+		return -ENOMEM;
+	}
+
+	rc = cifs_write_allocate_pages(pages, num_pages);
+	if (rc) {
+		kfree(pages);
+		kfree(to_send);
+		return rc;
+	}
+
+	xid = GetXid();
+	open_file = file->private_data;
+	pTcon = tlink_tcon(open_file->tlink);
+	inode = file->f_path.dentry->d_inode;
+
+	iov_iter_init(&it, iov, nr_segs, len, 0);
+	npages = num_pages;
+
+	do {
+		size_t save_len = cur_len;
+		for (i = 0; i < npages; i++) {
+			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
+			copied = iov_iter_copy_from_user(pages[i], &it, 0,
+							 copied);
+			cur_len -= copied;
+			iov_iter_advance(&it, copied);
+			to_send[i+1].iov_base = kmap(pages[i]);
+			to_send[i+1].iov_len = copied;
+		}
+
+		cur_len = save_len - cur_len;
+
+		do {
+			if (open_file->invalidHandle) {
+				rc = cifs_reopen_file(open_file, false);
+				if (rc != 0)
+					break;
+			}
+			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
+					   cur_len, *poffset, &written,
+					   to_send, npages, 0);
+		} while (rc == -EAGAIN);
+
+		for (i = 0; i < npages; i++)
+			kunmap(pages[i]);
+
+		if (written) {
+			len -= written;
+			total_written += written;
+			cifs_update_eof(CIFS_I(inode), *poffset, written);
+			*poffset += written;
+		} else if (rc < 0) {
+			if (!total_written)
+				total_written = rc;
+			break;
+		}
+
+		/* get length and number of kvecs of the next write */
+		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+	} while (len > 0);
+
+	if (total_written > 0) {
+		spin_lock(&inode->i_lock);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
+		spin_unlock(&inode->i_lock);
+	}
+
+	cifs_stats_bytes_written(pTcon, total_written);
+	mark_inode_dirty_sync(inode);
+
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+	kfree(to_send);
+	kfree(pages);
+	FreeXid(xid);
+	return total_written;
+}
+
+static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t pos)
+{
+	ssize_t written;
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	/*
+	 * BB - optimize for the case when signing is disabled: we could drop
+	 * this extra memory-to-memory copy and build the write request
+	 * directly from the iovec buffers.
+	 */
+
+	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
+	if (written > 0) {
+		CIFS_I(inode)->invalid_mapping = true;
+		iocb->ki_pos = pos;
+	}
+
+	return written;
+}
+
+ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+			   unsigned long nr_segs, loff_t pos)
+{
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	if (CIFS_I(inode)->clientCanCacheAll)
+		return generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+	/*
+	 * In strict cache mode we need to write the data to the server exactly
+	 * from pos to pos+len-1 rather than flush all affected pages, because
+	 * flushing may cause an error with mandatory locks on those pages but
+	 * not on the region from pos to pos+len-1.
+	 */
+
+	return cifs_user_writev(iocb, iov, nr_segs, pos);
+}
+
 static ssize_t
 cifs_iovec_read(struct file *file, const struct iovec *iov,
 		 unsigned long nr_segs, loff_t *poffset)