Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c  130
1 file changed, 65 insertions(+), 65 deletions(-)
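For context: every replacement below is mechanical, because the PAGE_CACHE_* macros and the page_cache_get()/page_cache_release() helpers were plain aliases of the page-level primitives. A minimal sketch of the assumed pre-change definitions (roughly as in include/linux/pagemap.h before this series; shown for reference only, not part of the diff):

/* Assumed old aliases -- one-to-one wrappers, so the substitutions in the
 * diff below are behaviour-preserving renames. */
#define PAGE_CACHE_SHIFT        PAGE_SHIFT
#define PAGE_CACHE_SIZE         PAGE_SIZE
#define PAGE_CACHE_MASK         PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)  PAGE_ALIGN(addr)

#define page_cache_get(page)            get_page(page)
#define page_cache_release(page)        put_page(page)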
diff --git a/mm/shmem.c b/mm/shmem.c
index 9428c51ab2d6..719bd6b88d98 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -75,8 +75,8 @@ static struct vfsmount *shm_mnt;
 
 #include "internal.h"
 
-#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
-#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
+#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
+#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
 
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
@@ -176,13 +176,13 @@ static inline int shmem_reacct_size(unsigned long flags,
 static inline int shmem_acct_block(unsigned long flags)
 {
         return (flags & VM_NORESERVE) ?
-                security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
+                security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
         if (flags & VM_NORESERVE)
-                vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
+                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
 static const struct super_operations shmem_ops;
@@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struct page *page,
         VM_BUG_ON_PAGE(!PageLocked(page), page);
         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
-        page_cache_get(page);
+        get_page(page);
         page->mapping = mapping;
         page->index = index;
 
@@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struct page *page,
         } else {
                 page->mapping = NULL;
                 spin_unlock_irq(&mapping->tree_lock);
-                page_cache_release(page);
+                put_page(page);
         }
         return error;
 }
@@ -338,7 +338,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap)
         __dec_zone_page_state(page, NR_FILE_PAGES);
         __dec_zone_page_state(page, NR_SHMEM);
         spin_unlock_irq(&mapping->tree_lock);
-        page_cache_release(page);
+        put_page(page);
         BUG_ON(error);
 }
 
@@ -474,10 +474,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 {
         struct address_space *mapping = inode->i_mapping;
         struct shmem_inode_info *info = SHMEM_I(inode);
-        pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-        pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
-        unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
-        unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
+        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
+        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
+        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
+        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
         struct pagevec pvec;
         pgoff_t indices[PAGEVEC_SIZE];
         long nr_swaps_freed = 0;
@@ -530,7 +530,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                 struct page *page = NULL;
                 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
                 if (page) {
-                        unsigned int top = PAGE_CACHE_SIZE;
+                        unsigned int top = PAGE_SIZE;
                         if (start > end) {
                                 top = partial_end;
                                 partial_end = 0;
@@ -538,7 +538,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                         zero_user_segment(page, partial_start, top);
                         set_page_dirty(page);
                         unlock_page(page);
-                        page_cache_release(page);
+                        put_page(page);
                 }
         }
         if (partial_end) {
@@ -548,7 +548,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                         zero_user_segment(page, 0, partial_end);
                         set_page_dirty(page);
                         unlock_page(page);
-                        page_cache_release(page);
+                        put_page(page);
                 }
         }
         if (start >= end)
@@ -833,7 +833,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
                 mem_cgroup_commit_charge(page, memcg, true, false);
 out:
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
         return error;
 }
 
@@ -1080,7 +1080,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
         if (!newpage)
                 return -ENOMEM;
 
-        page_cache_get(newpage);
+        get_page(newpage);
         copy_highpage(newpage, oldpage);
         flush_dcache_page(newpage);
 
@@ -1120,8 +1120,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
         set_page_private(oldpage, 0);
 
         unlock_page(oldpage);
-        page_cache_release(oldpage);
-        page_cache_release(oldpage);
+        put_page(oldpage);
+        put_page(oldpage);
         return error;
 }
 
@@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
         int once = 0;
         int alloced = 0;
 
-        if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
+        if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                 return -EFBIG;
 repeat:
         swap.val = 0;
@@ -1156,7 +1156,7 @@ repeat:
         }
 
         if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-            ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+            ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
                 error = -EINVAL;
                 goto unlock;
         }
@@ -1169,7 +1169,7 @@ repeat:
                 if (sgp != SGP_READ)
                         goto clear;
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
                 page = NULL;
         }
         if (page || (sgp == SGP_READ && !swap.val)) {
@@ -1327,7 +1327,7 @@ clear:
 
         /* Perhaps the file has been truncated since we checked */
         if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
-            ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+            ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
                 if (alloced) {
                         ClearPageDirty(page);
                         delete_from_page_cache(page);
@@ -1355,7 +1355,7 @@ failed:
 unlock:
         if (page) {
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
         }
         if (error == -ENOSPC && !once++) {
                 info = SHMEM_I(inode);
@@ -1577,7 +1577,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 {
         struct inode *inode = mapping->host;
         struct shmem_inode_info *info = SHMEM_I(inode);
-        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+        pgoff_t index = pos >> PAGE_SHIFT;
 
         /* i_mutex is held by caller */
         if (unlikely(info->seals)) {
@@ -1601,16 +1601,16 @@ shmem_write_end(struct file *file, struct address_space *mapping,
                 i_size_write(inode, pos + copied);
 
         if (!PageUptodate(page)) {
-                if (copied < PAGE_CACHE_SIZE) {
-                        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+                if (copied < PAGE_SIZE) {
+                        unsigned from = pos & (PAGE_SIZE - 1);
                         zero_user_segments(page, 0, from,
-                                        from + copied, PAGE_CACHE_SIZE);
+                                        from + copied, PAGE_SIZE);
                 }
                 SetPageUptodate(page);
         }
         set_page_dirty(page);
         unlock_page(page);
-        page_cache_release(page);
+        put_page(page);
 
         return copied;
 }
@@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
         if (!iter_is_iovec(to))
                 sgp = SGP_DIRTY;
 
-        index = *ppos >> PAGE_CACHE_SHIFT;
-        offset = *ppos & ~PAGE_CACHE_MASK;
+        index = *ppos >> PAGE_SHIFT;
+        offset = *ppos & ~PAGE_MASK;
 
         for (;;) {
                 struct page *page = NULL;
@@ -1644,11 +1644,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                 unsigned long nr, ret;
                 loff_t i_size = i_size_read(inode);
 
-                end_index = i_size >> PAGE_CACHE_SHIFT;
+                end_index = i_size >> PAGE_SHIFT;
                 if (index > end_index)
                         break;
                 if (index == end_index) {
-                        nr = i_size & ~PAGE_CACHE_MASK;
+                        nr = i_size & ~PAGE_MASK;
                         if (nr <= offset)
                                 break;
                 }
@@ -1666,14 +1666,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                  * We must evaluate after, since reads (unlike writes)
                  * are called without i_mutex protection against truncate
                  */
-                nr = PAGE_CACHE_SIZE;
+                nr = PAGE_SIZE;
                 i_size = i_size_read(inode);
-                end_index = i_size >> PAGE_CACHE_SHIFT;
+                end_index = i_size >> PAGE_SHIFT;
                 if (index == end_index) {
-                        nr = i_size & ~PAGE_CACHE_MASK;
+                        nr = i_size & ~PAGE_MASK;
                         if (nr <= offset) {
                                 if (page)
-                                        page_cache_release(page);
+                                        put_page(page);
                                 break;
                         }
                 }
@@ -1694,7 +1694,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                         mark_page_accessed(page);
                 } else {
                         page = ZERO_PAGE(0);
-                        page_cache_get(page);
+                        get_page(page);
                 }
 
                 /*
@@ -1704,10 +1704,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                 ret = copy_page_to_iter(page, offset, nr, to);
                 retval += ret;
                 offset += ret;
-                index += offset >> PAGE_CACHE_SHIFT;
-                offset &= ~PAGE_CACHE_MASK;
+                index += offset >> PAGE_SHIFT;
+                offset &= ~PAGE_MASK;
 
-                page_cache_release(page);
+                put_page(page);
                 if (!iov_iter_count(to))
                         break;
                 if (ret < nr) {
@@ -1717,7 +1717,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                 cond_resched();
         }
 
-        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+        *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
         file_accessed(file);
         return retval ? retval : error;
 }
@@ -1755,9 +1755,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
         if (splice_grow_spd(pipe, &spd))
                 return -ENOMEM;
 
-        index = *ppos >> PAGE_CACHE_SHIFT;
-        loff = *ppos & ~PAGE_CACHE_MASK;
-        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        index = *ppos >> PAGE_SHIFT;
+        loff = *ppos & ~PAGE_MASK;
+        req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT;
         nr_pages = min(req_pages, spd.nr_pages_max);
 
         spd.nr_pages = find_get_pages_contig(mapping, index,
@@ -1774,7 +1774,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                 index++;
         }
 
-        index = *ppos >> PAGE_CACHE_SHIFT;
+        index = *ppos >> PAGE_SHIFT;
         nr_pages = spd.nr_pages;
         spd.nr_pages = 0;
 
@@ -1784,7 +1784,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                 if (!len)
                         break;
 
-                this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
+                this_len = min_t(unsigned long, len, PAGE_SIZE - loff);
                 page = spd.pages[page_nr];
 
                 if (!PageUptodate(page) || page->mapping != mapping) {
@@ -1793,19 +1793,19 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                         if (error)
                                 break;
                         unlock_page(page);
-                        page_cache_release(spd.pages[page_nr]);
+                        put_page(spd.pages[page_nr]);
                         spd.pages[page_nr] = page;
                 }
 
                 isize = i_size_read(inode);
-                end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+                end_index = (isize - 1) >> PAGE_SHIFT;
                 if (unlikely(!isize || index > end_index))
                         break;
 
                 if (end_index == index) {
                         unsigned int plen;
 
-                        plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+                        plen = ((isize - 1) & ~PAGE_MASK) + 1;
                         if (plen <= loff)
                                 break;
 
@@ -1822,7 +1822,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
         }
 
         while (page_nr < nr_pages)
-                page_cache_release(spd.pages[page_nr++]);
+                put_page(spd.pages[page_nr++]);
 
         if (spd.nr_pages)
                 error = splice_to_pipe(pipe, &spd);
@@ -1904,10 +1904,10 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
         else if (offset >= inode->i_size)
                 offset = -ENXIO;
         else {
-                start = offset >> PAGE_CACHE_SHIFT;
-                end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+                start = offset >> PAGE_SHIFT;
+                end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
-                new_offset <<= PAGE_CACHE_SHIFT;
+                new_offset <<= PAGE_SHIFT;
                 if (new_offset > offset) {
                         if (new_offset < inode->i_size)
                                 offset = new_offset;
@@ -2203,8 +2203,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                 goto out;
         }
 
-        start = offset >> PAGE_CACHE_SHIFT;
-        end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        start = offset >> PAGE_SHIFT;
+        end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         /* Try to avoid a swapstorm if len is impossible to satisfy */
         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
                 error = -ENOSPC;
@@ -2237,8 +2237,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                 if (error) {
                         /* Remove the !PageUptodate pages we added */
                         shmem_undo_range(inode,
-                                (loff_t)start << PAGE_CACHE_SHIFT,
-                                (loff_t)index << PAGE_CACHE_SHIFT, true);
+                                (loff_t)start << PAGE_SHIFT,
+                                (loff_t)index << PAGE_SHIFT, true);
                         goto undone;
                 }
 
@@ -2259,7 +2259,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                  */
                 set_page_dirty(page);
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
                 cond_resched();
         }
 
@@ -2280,7 +2280,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
 
         buf->f_type = TMPFS_MAGIC;
-        buf->f_bsize = PAGE_CACHE_SIZE;
+        buf->f_bsize = PAGE_SIZE;
         buf->f_namelen = NAME_MAX;
         if (sbinfo->max_blocks) {
                 buf->f_blocks = sbinfo->max_blocks;
@@ -2523,7 +2523,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
         struct shmem_inode_info *info;
 
         len = strlen(symname) + 1;
-        if (len > PAGE_CACHE_SIZE)
+        if (len > PAGE_SIZE)
                 return -ENAMETOOLONG;
 
         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
@@ -2562,7 +2562,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                 SetPageUptodate(page);
                 set_page_dirty(page);
                 unlock_page(page);
-                page_cache_release(page);
+                put_page(page);
         }
         dir->i_size += BOGO_DIRENT_SIZE;
         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -2835,7 +2835,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                         if (*rest)
                                 goto bad_val;
                         sbinfo->max_blocks =
-                                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
+                                DIV_ROUND_UP(size, PAGE_SIZE);
                 } else if (!strcmp(this_char,"nr_blocks")) {
                         sbinfo->max_blocks = memparse(value, &rest);
                         if (*rest)
@@ -2940,7 +2940,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root)
 
         if (sbinfo->max_blocks != shmem_default_max_blocks())
                 seq_printf(seq, ",size=%luk",
-                        sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
+                        sbinfo->max_blocks << (PAGE_SHIFT - 10));
         if (sbinfo->max_inodes != shmem_default_max_inodes())
                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
@@ -3082,8 +3082,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
         sbinfo->free_inodes = sbinfo->max_inodes;
 
         sb->s_maxbytes = MAX_LFS_FILESIZE;
-        sb->s_blocksize = PAGE_CACHE_SIZE;
-        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+        sb->s_blocksize = PAGE_SIZE;
+        sb->s_blocksize_bits = PAGE_SHIFT;
         sb->s_magic = TMPFS_MAGIC;
         sb->s_op = &shmem_ops;
         sb->s_time_gran = 1;