path: root/mm/shmem.c
author    Hugh Dickins <hugh@veritas.com>    2008-02-05 01:28:52 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-05 12:44:15 -0500
commit    cb5f7b9a47963d9238398cd0c2676473e3c6896d (patch)
tree      bc2eed3d7c2c3787f50030c7c493c4295eaf673c /mm/shmem.c
parent    a0ee5ec520ede1dc8e2194623bcebfd9fab408f2 (diff)
tmpfs: make shmem_unuse more preemptible
shmem_unuse is at present an unbroken search through every swap vector page of
every tmpfs file which might be swapped, all under shmem_swaplist_lock.  This
dates from long ago, when the caller held mmlist_lock over it all too: long
gone, but there's never been much pressure for preemptible swapoff.

Make it a little more preemptible, replacing shmem_swaplist_lock by
shmem_swaplist_mutex, inserting a cond_resched in the main loop, and a
cond_resched_lock (on info->lock) at one convenient point in the
shmem_unuse_inode loop, where it has no outstanding kmap_atomic.

If we're serious about preemptible swapoff, there's much further to go: e.g.
I'm stupid to let the kmap_atomics of the decreasingly significant HIGHMEM
case dictate preemptibility for other configs.  But as in the earlier patch
to make swapoff scan ptes preemptibly, my hidden agenda is really towards
making memcgroups work, hardly about preemptibility at all.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
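For readers unfamiliar with the primitive the shmem_unuse_inode hunk relies
on: cond_resched_lock() drops the given spinlock if a reschedule is due,
reschedules, then retakes the lock, returning nonzero whenever the lock was
actually dropped.  Any state guarded by the lock may have changed in that
window, which is why the new block below revalidates limit against
info->next_index.  A minimal sketch of that pattern, assuming hypothetical
names (my_lock, nr_items and scan_items are illustrative, not from this
patch):

#include <linux/spinlock.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(my_lock);
static int nr_items;    /* may shrink while my_lock is dropped */

static void scan_items(void)
{
        int i;

        spin_lock(&my_lock);
        for (i = 0; i < nr_items; i++) {
                /* ... examine item i under my_lock ... */

                if (cond_resched_lock(&my_lock)) {
                        /*
                         * Nonzero return: the lock was dropped and retaken,
                         * so the loop bound may be stale; revalidate it,
                         * just as shmem_unuse_inode() rechecks
                         * info->next_index in the hunk below.
                         */
                        if (i >= nr_items)
                                break;
                }
        }
        spin_unlock(&my_lock);
}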
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--  mm/shmem.c  23
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index c919ed578f0a..2e03d6031c24 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -193,7 +193,7 @@ static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
 };
 
 static LIST_HEAD(shmem_swaplist);
-static DEFINE_SPINLOCK(shmem_swaplist_lock);
+static DEFINE_MUTEX(shmem_swaplist_mutex);
 
 static void shmem_free_blocks(struct inode *inode, long pages)
 {
@@ -796,9 +796,9 @@ static void shmem_delete_inode(struct inode *inode)
 		inode->i_size = 0;
 		shmem_truncate(inode);
 		if (!list_empty(&info->swaplist)) {
-			spin_lock(&shmem_swaplist_lock);
+			mutex_lock(&shmem_swaplist_mutex);
 			list_del_init(&info->swaplist);
-			spin_unlock(&shmem_swaplist_lock);
+			mutex_unlock(&shmem_swaplist_mutex);
 		}
 	}
 	BUG_ON(inode->i_blocks);
@@ -851,6 +851,14 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
 		if (unlikely(idx == stage)) {
 			shmem_dir_unmap(dir-1);
+			if (cond_resched_lock(&info->lock)) {
+				/* check it has not been truncated */
+				if (limit > info->next_index) {
+					limit = info->next_index;
+					if (idx >= limit)
+						goto lost2;
+				}
+			}
 			dir = shmem_dir_map(info->i_indirect) +
 				ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
 			while (!*dir) {
@@ -924,7 +932,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	struct shmem_inode_info *info;
 	int found = 0;
 
-	spin_lock(&shmem_swaplist_lock);
+	mutex_lock(&shmem_swaplist_mutex);
 	list_for_each_safe(p, next, &shmem_swaplist) {
 		info = list_entry(p, struct shmem_inode_info, swaplist);
 		if (!info->swapped)
@@ -935,8 +943,9 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 			found = 1;
 			break;
 		}
+		cond_resched();
 	}
-	spin_unlock(&shmem_swaplist_lock);
+	mutex_unlock(&shmem_swaplist_mutex);
 	return found;
 }
 
@@ -996,10 +1005,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 		shmem_swp_unmap(entry);
 		spin_unlock(&info->lock);
 		if (list_empty(&info->swaplist)) {
-			spin_lock(&shmem_swaplist_lock);
+			mutex_lock(&shmem_swaplist_mutex);
 			/* move instead of add in case we're racing */
 			list_move_tail(&info->swaplist, &shmem_swaplist);
-			spin_unlock(&shmem_swaplist_lock);
+			mutex_unlock(&shmem_swaplist_mutex);
 		}
 		swap_duplicate(swap);
 		BUG_ON(page_mapped(page));
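A closing note on the design choice (a reading of the patch, not part of its
text): cond_resched() may sleep, which is forbidden while holding a spinlock
but fine under a mutex, so converting shmem_swaplist_lock into
shmem_swaplist_mutex is what makes the cond_resched() added to the
shmem_unuse() loop legal at all.  A minimal sketch of the resulting shape,
assuming hypothetical names (my_list_mutex, my_entry and walk_list are
illustrative, not from this patch):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>

static DEFINE_MUTEX(my_list_mutex);
static LIST_HEAD(my_list);

struct my_entry {
        struct list_head list;
};

static void walk_list(void)
{
        struct my_entry *e, *next;

        mutex_lock(&my_list_mutex);
        list_for_each_entry_safe(e, next, &my_list, list) {
                /* ... potentially long per-entry work ... */
                cond_resched();  /* legal only because we hold a mutex, not a spinlock */
        }
        mutex_unlock(&my_list_mutex);
}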