author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-07-26 18:26:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-26 19:19:19 -0400
commit	4595ef88d136134a9470c955575640f5c96344ed (patch)
tree	2bade796ca5b0dc90138726ab7438b4fa136328e
parent	988ddb710bb5be27f793b7e50455c769118a389f (diff)
shmem: make shmem_inode_info::lock irq-safe
We are going to need to call shmem_charge() under tree_lock to get
accounting right on collapse of small tmpfs pages into a huge one.
The problem is that tree_lock is irq-safe, and lockdep is not happy
that we take an irq-unsafe lock under an irq-safe one [1].
Let's convert the lock to irq-safe.
[1] https://gist.github.com/kiryl/80c0149e03ed35dfaf26628b8e03cdbc
Link: http://lkml.kernel.org/r/1466021202-61880-34-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
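
[Editorial note: the nesting the message describes can be sketched in a few
lines. This is a hypothetical illustration, not kernel source; mapping_lock
stands in for the irq-safe mapping->tree_lock and info_lock for
shmem_inode_info::lock.]

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(mapping_lock);	/* stand-in for irq-safe tree_lock */
	static DEFINE_SPINLOCK(info_lock);	/* stand-in for info->lock */

	static void charge_under_tree_lock_sketch(void)
	{
		unsigned long flags;

		spin_lock_irq(&mapping_lock);	/* interrupts now disabled */

		/*
		 * Before this patch the nested lock would be taken with
		 * plain spin_lock(): an irq-unsafe lock under an irq-safe
		 * one, which lockdep flags as a potential deadlock.  The
		 * irqsave variant is correct whether or not the caller
		 * already disabled interrupts.
		 */
		spin_lock_irqsave(&info_lock, flags);
		/* ... update accounting ... */
		spin_unlock_irqrestore(&info_lock, flags);

		spin_unlock_irq(&mapping_lock);
	}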
-rw-r--r--	ipc/shm.c	4
-rw-r--r--	mm/shmem.c	50
2 files changed, 28 insertions(+), 26 deletions(-)
diff --git a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -766,10 +766,10 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
 	} else {
 #ifdef CONFIG_SHMEM
 		struct shmem_inode_info *info = SHMEM_I(inode);
-		spin_lock(&info->lock);
+		spin_lock_irq(&info->lock);
 		*rss_add += inode->i_mapping->nrpages;
 		*swp_add += info->swapped;
-		spin_unlock(&info->lock);
+		spin_unlock_irq(&info->lock);
 #else
 		*rss_add += inode->i_mapping->nrpages;
 #endif
diff --git a/mm/shmem.c b/mm/shmem.c
index f19b6b44ae46..03eb915c82eb 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -258,14 +258,15 @@ bool shmem_charge(struct inode *inode, long pages)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+	unsigned long flags;
 
 	if (shmem_acct_block(info->flags, pages))
 		return false;
-	spin_lock(&info->lock);
+	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
 	shmem_recalc_inode(inode);
-	spin_unlock(&info->lock);
+	spin_unlock_irqrestore(&info->lock, flags);
 	inode->i_mapping->nrpages += pages;
 
 	if (!sbinfo->max_blocks)
@@ -273,10 +274,10 @@ bool shmem_charge(struct inode *inode, long pages)
 	if (percpu_counter_compare(&sbinfo->used_blocks,
 				sbinfo->max_blocks - pages) > 0) {
 		inode->i_mapping->nrpages -= pages;
-		spin_lock(&info->lock);
+		spin_lock_irqsave(&info->lock, flags);
 		info->alloced -= pages;
 		shmem_recalc_inode(inode);
-		spin_unlock(&info->lock);
+		spin_unlock_irqrestore(&info->lock, flags);
 
 		return false;
 	}
@@ -288,12 +289,13 @@ void shmem_uncharge(struct inode *inode, long pages)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+	unsigned long flags;
 
-	spin_lock(&info->lock);
+	spin_lock_irqsave(&info->lock, flags);
 	info->alloced -= pages;
 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
 	shmem_recalc_inode(inode);
-	spin_unlock(&info->lock);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	if (sbinfo->max_blocks)
 		percpu_counter_sub(&sbinfo->used_blocks, pages);
@@ -818,10 +820,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		index++;
 	}
 
-	spin_lock(&info->lock);
+	spin_lock_irq(&info->lock);
 	info->swapped -= nr_swaps_freed;
 	shmem_recalc_inode(inode);
-	spin_unlock(&info->lock);
+	spin_unlock_irq(&info->lock);
 }
 
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
@@ -838,9 +840,9 @@ static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
 	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
-		spin_lock(&info->lock);
+		spin_lock_irq(&info->lock);
 		shmem_recalc_inode(inode);
-		spin_unlock(&info->lock);
+		spin_unlock_irq(&info->lock);
 	}
 	generic_fillattr(inode, stat);
 	return 0;
@@ -984,9 +986,9 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 		delete_from_swap_cache(*pagep);
 		set_page_dirty(*pagep);
 		if (!error) {
-			spin_lock(&info->lock);
+			spin_lock_irq(&info->lock);
 			info->swapped--;
-			spin_unlock(&info->lock);
+			spin_unlock_irq(&info->lock);
 			swap_free(swap);
 		}
 	}
@@ -1134,10 +1136,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		list_add_tail(&info->swaplist, &shmem_swaplist);
 
 	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
-		spin_lock(&info->lock);
+		spin_lock_irq(&info->lock);
 		shmem_recalc_inode(inode);
 		info->swapped++;
-		spin_unlock(&info->lock);
+		spin_unlock_irq(&info->lock);
 
 		swap_shmem_alloc(swap);
 		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
@@ -1523,10 +1525,10 @@ repeat:
 
 		mem_cgroup_commit_charge(page, memcg, true, false);
 
-		spin_lock(&info->lock);
+		spin_lock_irq(&info->lock);
 		info->swapped--;
 		shmem_recalc_inode(inode);
-		spin_unlock(&info->lock);
+		spin_unlock_irq(&info->lock);
 
 		if (sgp == SGP_WRITE)
 			mark_page_accessed(page);
@@ -1603,11 +1605,11 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 				PageTransHuge(page));
 		lru_cache_add_anon(page);
 
-		spin_lock(&info->lock);
+		spin_lock_irq(&info->lock);
 		info->alloced += 1 << compound_order(page);
 		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
 		shmem_recalc_inode(inode);
-		spin_unlock(&info->lock);
+		spin_unlock_irq(&info->lock);
 		alloced = true;
 
 		/*
@@ -1639,9 +1641,9 @@ clear:
 		if (alloced) {
 			ClearPageDirty(page);
 			delete_from_page_cache(page);
-			spin_lock(&info->lock);
+			spin_lock_irq(&info->lock);
 			shmem_recalc_inode(inode);
-			spin_unlock(&info->lock);
+			spin_unlock_irq(&info->lock);
 		}
 		error = -EINVAL;
 		goto unlock;
@@ -1673,9 +1675,9 @@ unlock:
 	}
 	if (error == -ENOSPC && !once++) {
 		info = SHMEM_I(inode);
-		spin_lock(&info->lock);
+		spin_lock_irq(&info->lock);
 		shmem_recalc_inode(inode);
-		spin_unlock(&info->lock);
+		spin_unlock_irq(&info->lock);
 		goto repeat;
 	}
 	if (error == -EEXIST)	/* from above or from radix_tree_insert */
@@ -1874,7 +1876,7 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	int retval = -ENOMEM;
 
-	spin_lock(&info->lock);
+	spin_lock_irq(&info->lock);
 	if (lock && !(info->flags & VM_LOCKED)) {
 		if (!user_shm_lock(inode->i_size, user))
 			goto out_nomem;
@@ -1889,7 +1891,7 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	retval = 0;
 
 out_nomem:
-	spin_unlock(&info->lock);
+	spin_unlock_irq(&info->lock);
 	return retval;
 }
 
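[Editorial note on the two variants used above: spin_unlock_irq()
unconditionally re-enables interrupts, so the _irq pair is used only at call
sites where interrupts are known to be enabled on entry. shmem_charge() and
shmem_uncharge() are about to gain callers that already hold the irq-safe
tree_lock, so they use the irqsave/irqrestore pair, which restores whatever
interrupt state the caller had. A minimal sketch of the difference,
hypothetical code rather than part of the patch:]

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	/* Correct only if interrupts are enabled when called. */
	static void irq_variant(void)
	{
		spin_lock_irq(&example_lock);
		/* ... critical section ... */
		spin_unlock_irq(&example_lock);	/* unconditionally re-enables irqs */
	}

	/* Correct regardless of the caller's interrupt state. */
	static void irqsave_variant(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);	/* restores saved state */
	}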