diff options
author | Hugh Dickins <hugh@veritas.com> | 2005-10-29 21:16:40 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:42 -0400 |
commit | 4c21e2f2441dc5fbb957b030333f5a3f2d02dea7 (patch) | |
tree | 1f76d33bb1d76221c6424bc5fed080a4f91349a6 /mm/shmem.c | |
parent | b38c6845b695141259019e2b7c0fe6c32a6e720d (diff) |
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.
This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)
In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.
Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.
There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/shmem.c')
-rw-r--r-- | mm/shmem.c | 22 |
1 file changed, 10 insertions, 12 deletions
diff --git a/mm/shmem.c b/mm/shmem.c index 37777f4c11f8..dc25565a61e9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -71,9 +71,6 @@ | |||
71 | /* Pretend that each entry is of this size in directory's i_size */ | 71 | /* Pretend that each entry is of this size in directory's i_size */ |
72 | #define BOGO_DIRENT_SIZE 20 | 72 | #define BOGO_DIRENT_SIZE 20 |
73 | 73 | ||
74 | /* Keep swapped page count in private field of indirect struct page */ | ||
75 | #define nr_swapped private | ||
76 | |||
77 | /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ | 74 | /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */ |
78 | enum sgp_type { | 75 | enum sgp_type { |
79 | SGP_QUICK, /* don't try more than file page cache lookup */ | 76 | SGP_QUICK, /* don't try more than file page cache lookup */ |
@@ -324,8 +321,10 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns | |||
324 | 321 | ||
325 | entry->val = value; | 322 | entry->val = value; |
326 | info->swapped += incdec; | 323 | info->swapped += incdec; |
327 | if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) | 324 | if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) { |
328 | kmap_atomic_to_page(entry)->nr_swapped += incdec; | 325 | struct page *page = kmap_atomic_to_page(entry); |
326 | set_page_private(page, page_private(page) + incdec); | ||
327 | } | ||
329 | } | 328 | } |
330 | 329 | ||
331 | /* | 330 | /* |
@@ -368,9 +367,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long | |||
368 | 367 | ||
369 | spin_unlock(&info->lock); | 368 | spin_unlock(&info->lock); |
370 | page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO); | 369 | page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO); |
371 | if (page) { | 370 | if (page) |
372 | page->nr_swapped = 0; | 371 | set_page_private(page, 0); |
373 | } | ||
374 | spin_lock(&info->lock); | 372 | spin_lock(&info->lock); |
375 | 373 | ||
376 | if (!page) { | 374 | if (!page) { |
@@ -561,7 +559,7 @@ static void shmem_truncate(struct inode *inode) | |||
561 | diroff = 0; | 559 | diroff = 0; |
562 | } | 560 | } |
563 | subdir = dir[diroff]; | 561 | subdir = dir[diroff]; |
564 | if (subdir && subdir->nr_swapped) { | 562 | if (subdir && page_private(subdir)) { |
565 | size = limit - idx; | 563 | size = limit - idx; |
566 | if (size > ENTRIES_PER_PAGE) | 564 | if (size > ENTRIES_PER_PAGE) |
567 | size = ENTRIES_PER_PAGE; | 565 | size = ENTRIES_PER_PAGE; |
@@ -572,10 +570,10 @@ static void shmem_truncate(struct inode *inode) | |||
572 | nr_swaps_freed += freed; | 570 | nr_swaps_freed += freed; |
573 | if (offset) | 571 | if (offset) |
574 | spin_lock(&info->lock); | 572 | spin_lock(&info->lock); |
575 | subdir->nr_swapped -= freed; | 573 | set_page_private(subdir, page_private(subdir) - freed); |
576 | if (offset) | 574 | if (offset) |
577 | spin_unlock(&info->lock); | 575 | spin_unlock(&info->lock); |
578 | BUG_ON(subdir->nr_swapped > offset); | 576 | BUG_ON(page_private(subdir) > offset); |
579 | } | 577 | } |
580 | if (offset) | 578 | if (offset) |
581 | offset = 0; | 579 | offset = 0; |
@@ -743,7 +741,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s | |||
743 | dir = shmem_dir_map(subdir); | 741 | dir = shmem_dir_map(subdir); |
744 | } | 742 | } |
745 | subdir = *dir; | 743 | subdir = *dir; |
746 | if (subdir && subdir->nr_swapped) { | 744 | if (subdir && page_private(subdir)) { |
747 | ptr = shmem_swp_map(subdir); | 745 | ptr = shmem_swp_map(subdir); |
748 | size = limit - idx; | 746 | size = limit - idx; |
749 | if (size > ENTRIES_PER_PAGE) | 747 | if (size > ENTRIES_PER_PAGE) |