author     Nick Piggin <npiggin@suse.de>                    2008-07-25 22:45:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-07-26 15:00:06 -0400
commit     19fd6231279be3c3bdd02ed99f9b0eb195978064 (patch)
tree       ee09121054262d73c551b57114acd855b82a7a82
parent     a60637c85893e7191faaafa6a72e197c24386727 (diff)
mm: spinlock tree_lock
mapping->tree_lock has no read lockers.  Convert the lock from an rwlock
to a spinlock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
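The conversion is mechanical: with no read lockers, write_lock gives exactly
the exclusion a plain spinlock gives, so every tree_lock site below swaps
write_lock_irq/write_unlock_irq for spin_lock_irq/spin_unlock_irq (likewise
the irqsave variants and the initializers).  A minimal sketch of the pattern,
using a hypothetical demo_mapping struct rather than the real address_space:

	/*
	 * Sketch only, not part of this patch; the struct and function
	 * names are made up.  Only the locking primitives themselves
	 * (rwlock_t/spinlock_t and their lock/unlock helpers) are real
	 * kernel APIs.
	 */
	#include <linux/spinlock.h>

	struct demo_mapping {
		spinlock_t tree_lock;		/* was: rwlock_t tree_lock; */
	};

	static void demo_update_tree(struct demo_mapping *m)
	{
		/* was: write_lock_irq(&m->tree_lock); */
		spin_lock_irq(&m->tree_lock);
		/* ... modify the radix tree; every user is a writer ... */
		/* was: write_unlock_irq(&m->tree_lock); */
		spin_unlock_irq(&m->tree_lock);
	}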
-rw-r--r--  fs/buffer.c                      |  4
-rw-r--r--  fs/inode.c                       |  2
-rw-r--r--  include/asm-arm/cacheflush.h     |  4
-rw-r--r--  include/asm-parisc/cacheflush.h  |  4
-rw-r--r--  include/linux/fs.h               |  2
-rw-r--r--  mm/filemap.c                     | 10
-rw-r--r--  mm/migrate.c                     | 11
-rw-r--r--  mm/page-writeback.c              | 12
-rw-r--r--  mm/swap_state.c                  | 10
-rw-r--r--  mm/swapfile.c                    |  4
-rw-r--r--  mm/truncate.c                    |  6
-rw-r--r--  mm/vmscan.c                      |  8
12 files changed, 38 insertions(+), 39 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index d48caee12e2a..109b261192d9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -706,7 +706,7 @@ static int __set_page_dirty(struct page *page,
 	if (TestSetPageDirty(page))
 		return 0;
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
 
@@ -719,7 +719,7 @@ static int __set_page_dirty(struct page *page,
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	return 1;
diff --git a/fs/inode.c b/fs/inode.c
index c36d9480335c..35b6414522ea 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode)
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	rwlock_init(&inode->i_data.tree_lock);
+	spin_lock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 70b0fe724b62..03cf1ee977b7 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -424,9 +424,9 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define flush_dcache_mmap_lock(mapping) \
-	write_lock_irq(&(mapping)->tree_lock)
+	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	write_unlock_irq(&(mapping)->tree_lock)
+	spin_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_user_range(vma,page,addr,len) \
 	flush_dcache_page(page)
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 2f1e1b05440a..b7ca6dc7fddc 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -45,9 +45,9 @@ void flush_cache_mm(struct mm_struct *mm);
 extern void flush_dcache_page(struct page *page);
 
 #define flush_dcache_mmap_lock(mapping) \
-	write_lock_irq(&(mapping)->tree_lock)
+	spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
-	write_unlock_irq(&(mapping)->tree_lock)
+	spin_unlock_irq(&(mapping)->tree_lock)
 
 #define flush_icache_page(vma,page)	do {		\
 	flush_kernel_dcache_page(page);			\
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 49d8eb7a71be..53d2edb709b3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -499,7 +499,7 @@ struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		tree_lock;	/* and rwlock protecting it */
+	spinlock_t		tree_lock;	/* and lock protecting it */
 	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
 	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
 	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
diff --git a/mm/filemap.c b/mm/filemap.c
index feb8448d8618..2ed8b0389c51 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -109,7 +109,7 @@
 /*
  * Remove a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe. The caller must hold a write_lock on the mapping's tree_lock.
+ * is safe. The caller must hold the mapping's tree_lock.
  */
 void __remove_from_page_cache(struct page *page)
 {
@@ -141,9 +141,9 @@ void remove_from_page_cache(struct page *page)
 
 	BUG_ON(!PageLocked(page));
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 }
 
 static int sync_page(void *word)
@@ -469,7 +469,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		page->mapping = mapping;
 		page->index = offset;
 
-		write_lock_irq(&mapping->tree_lock);
+		spin_lock_irq(&mapping->tree_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (likely(!error)) {
 			mapping->nrpages++;
@@ -480,7 +480,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 			page_cache_release(page);
 		}
 
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	} else
 		mem_cgroup_uncharge_cache_page(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 3ca6392e82cc..153572fb60b8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -323,7 +323,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 		return 0;
 	}
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
@@ -331,12 +331,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	expected_count = 2 + !!PagePrivate(page);
 	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
 	if (!page_freeze_refs(page, expected_count)) {
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
@@ -373,10 +373,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
 
-	write_unlock_irq(&mapping->tree_lock);
-	if (!PageSwapCache(newpage)) {
+	spin_unlock_irq(&mapping->tree_lock);
+	if (!PageSwapCache(newpage))
 		mem_cgroup_uncharge_cache_page(page);
-	}
 
 	return 0;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 94c6d8988ab3..24de8b65fdbd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1088,7 +1088,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 	if (!mapping)
 		return 1;
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	mapping2 = page_mapping(page);
 	if (mapping2) { /* Race with truncate? */
 		BUG_ON(mapping2 != mapping);
@@ -1102,7 +1102,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	if (mapping->host) {
 		/* !PageAnon && !swapper_space */
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1258,7 +1258,7 @@ int test_clear_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
 
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestClearPageWriteback(page);
 		if (ret) {
 			radix_tree_tag_clear(&mapping->page_tree,
@@ -1269,7 +1269,7 @@ int test_clear_page_writeback(struct page *page)
 				__bdi_writeout_inc(bdi);
 			}
 		}
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
@@ -1287,7 +1287,7 @@ int test_set_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
 
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestSetPageWriteback(page);
 		if (!ret) {
 			radix_tree_tag_set(&mapping->page_tree,
@@ -1300,7 +1300,7 @@ int test_set_page_writeback(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3e3381d6c7ee..2c217e33d497 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,7 +39,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
+	.tree_lock	= __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
 	.a_ops		= &swap_aops,
 	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
 	.backing_dev_info = &swap_backing_dev_info,
@@ -80,7 +80,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	SetPageSwapCache(page);
 	set_page_private(page, entry.val);
 
-	write_lock_irq(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	error = radix_tree_insert(&swapper_space.page_tree,
 					entry.val, page);
 	if (likely(!error)) {
@@ -88,7 +88,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 		__inc_zone_page_state(page, NR_FILE_PAGES);
 		INC_CACHE_INFO(add_total);
 	}
-	write_unlock_irq(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 	radix_tree_preload_end();
 
 	if (unlikely(error)) {
@@ -182,9 +182,9 @@ void delete_from_swap_cache(struct page *page)
 
 	entry.val = page_private(page);
 
-	write_lock_irq(&swapper_space.tree_lock);
+	spin_lock_irq(&swapper_space.tree_lock);
 	__delete_from_swap_cache(page);
-	write_unlock_irq(&swapper_space.tree_lock);
+	spin_unlock_irq(&swapper_space.tree_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2f33edb8bee9..af283933c14e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -369,13 +369,13 @@ int remove_exclusive_swap_page(struct page *page)
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the swapcache lock held.. */
-		write_lock_irq(&swapper_space.tree_lock);
+		spin_lock_irq(&swapper_space.tree_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		write_unlock_irq(&swapper_space.tree_lock);
+		spin_unlock_irq(&swapper_space.tree_lock);
 	}
 	spin_unlock(&swap_lock);
 
diff --git a/mm/truncate.c b/mm/truncate.c
index b8961cb63414..e68443d74567 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -349,18 +349,18 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	if (PageDirty(page))
 		goto failed;
 
 	BUG_ON(PagePrivate(page));
 	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
 failed:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	return 0;
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0075eac1cd04..8f71761bc4b7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -399,7 +399,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
-	write_lock_irq(&mapping->tree_lock);
+	spin_lock_irq(&mapping->tree_lock);
 	/*
 	 * The non racy check for a busy page.
 	 *
@@ -436,17 +436,17 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		swap_free(swap);
 	} else {
 		__remove_from_page_cache(page);
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 	}
 
 	return 1;
 
 cannot_free:
-	write_unlock_irq(&mapping->tree_lock);
+	spin_unlock_irq(&mapping->tree_lock);
 	return 0;
 }
 