author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:12:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:17 -0400
commit		97a894136f29802da19a15541de3c019e1ca147e (patch)
tree		1fd3f92ba92a37d5d8527a1f41458091d0a944dc /include/linux/fs.h
parent		e4c70a6629f9c74c4b0de258a3951890e9047c82 (diff)
mm: Remove i_mmap_lock lockbreak
Hugh says: "The only significant loser, I think, would be page reclaim
(when concurrent with truncation): could spin for a long time waiting
for the i_mmap_mutex it expects would soon be dropped?"

Counter points:
 - cpu contention makes the spin stop (need_resched())
 - zap pages should be freeing pages at a higher rate than reclaim
   ever can

I think the simplification of the truncate code is definitely worth it.

Effectively reverts: 2aa15890f3c ("mm: prevent concurrent
unmap_mapping_range() on the same inode") and takes out the code that
caused its problem.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
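For context, a minimal sketch of the "lockbreak" pattern this patch
removes, assuming hypothetical helpers more_chunks() and
zap_one_chunk() (the real code lived in mm/memory.c and was more
involved): long unmap/truncate scans held i_mmap_lock but periodically
dropped it so contending waiters such as page reclaim could run, using
truncate_count to detect a concurrent truncation and restart.

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static void zap_with_lockbreak(struct address_space *mapping)
{
	unsigned int seq;

restart:
	spin_lock(&mapping->i_mmap_lock);
	/* snapshot the generation count before scanning */
	seq = mapping->truncate_count;

	while (more_chunks(mapping)) {		/* hypothetical helper */
		zap_one_chunk(mapping);		/* hypothetical helper */

		if (need_resched() ||
		    spin_needbreak(&mapping->i_mmap_lock)) {
			/* break the lock so waiters can make progress */
			spin_unlock(&mapping->i_mmap_lock);
			cond_resched();
			spin_lock(&mapping->i_mmap_lock);
			if (mapping->truncate_count != seq) {
				/* lost a race with truncate: rescan */
				spin_unlock(&mapping->i_mmap_lock);
				goto restart;
			}
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}

Removing the lockbreak lets the zap run to completion while holding the
lock; per the counter points above, contention still triggers
need_resched(), and zapping frees pages faster than reclaim could, so
the restart bookkeeping (truncate_count, unmap_mutex) can go.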
Diffstat (limited to 'include/linux/fs.h')
-rw-r--r--	include/linux/fs.h	2
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cdf9495df204..5d2c86bdf5ba 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -635,7 +635,6 @@ struct address_space {
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
 	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
-	unsigned int		truncate_count;	/* Cover race condition with truncate */
 	unsigned long		nrpages;	/* number of total pages */
 	pgoff_t			writeback_index;/* writeback starts here */
 	const struct address_space_operations *a_ops;	/* methods */
@@ -644,7 +643,6 @@ struct address_space {
 	spinlock_t		private_lock;	/* for use by the address_space */
 	struct list_head	private_list;	/* ditto */
 	struct address_space	*assoc_mapping;	/* ditto */
-	struct mutex		unmap_mutex;	/* to protect unmapping */
 } __attribute__((aligned(sizeof(long))));
 	/*
 	 * On most architectures that alignment is already the case; but
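After both hunks are applied, the affected region of struct
address_space reads as follows (reconstructed from the context lines
above; elisions mark fields outside the hunks):

struct address_space {
	...
	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
	unsigned long		nrpages;	/* number of total pages */
	pgoff_t			writeback_index;/* writeback starts here */
	const struct address_space_operations *a_ops;	/* methods */
	...
	spinlock_t		private_lock;	/* for use by the address_space */
	struct list_head	private_list;	/* ditto */
	struct address_space	*assoc_mapping;	/* ditto */
} __attribute__((aligned(sizeof(long))));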