diff options
author | Dave Chinner <dchinner@redhat.com> | 2013-08-27 20:18:14 -0400 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2013-09-10 18:56:32 -0400 |
commit | 70534a739c12b908789e27b08512d2615ba40f2f (patch) | |
tree | e41d776d76de06a64b08cb787d87590f80e94f7e /arch/x86/kvm | |
parent | ea8352c289294e21ee13bdb105f55dc63497acff (diff) |
shrinker: convert remaining shrinkers to count/scan API
Convert the remaining couple of random shrinkers in the tree to the new
API.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/mmu.c | 25 |
1 file changed, 18 insertions, 7 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 6e2d2c8f230b..dce0df8150df 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -4421,13 +4421,12 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm) | |||
4421 | } | 4421 | } |
4422 | } | 4422 | } |
4423 | 4423 | ||
4424 | static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) | 4424 | static unsigned long |
4425 | mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | ||
4425 | { | 4426 | { |
4426 | struct kvm *kvm; | 4427 | struct kvm *kvm; |
4427 | int nr_to_scan = sc->nr_to_scan; | 4428 | int nr_to_scan = sc->nr_to_scan; |
4428 | 4429 | unsigned long freed = 0; | |
4429 | if (nr_to_scan == 0) | ||
4430 | goto out; | ||
4431 | 4430 | ||
4432 | raw_spin_lock(&kvm_lock); | 4431 | raw_spin_lock(&kvm_lock); |
4433 | 4432 | ||
@@ -4462,25 +4461,37 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) | |||
4462 | goto unlock; | 4461 | goto unlock; |
4463 | } | 4462 | } |
4464 | 4463 | ||
4465 | prepare_zap_oldest_mmu_page(kvm, &invalid_list); | 4464 | if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) |
4465 | freed++; | ||
4466 | kvm_mmu_commit_zap_page(kvm, &invalid_list); | 4466 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
4467 | 4467 | ||
4468 | unlock: | 4468 | unlock: |
4469 | spin_unlock(&kvm->mmu_lock); | 4469 | spin_unlock(&kvm->mmu_lock); |
4470 | srcu_read_unlock(&kvm->srcu, idx); | 4470 | srcu_read_unlock(&kvm->srcu, idx); |
4471 | 4471 | ||
4472 | /* | ||
4473 | * unfair on small ones | ||
4474 | * per-vm shrinkers cry out | ||
4475 | * sadness comes quickly | ||
4476 | */ | ||
4472 | list_move_tail(&kvm->vm_list, &vm_list); | 4477 | list_move_tail(&kvm->vm_list, &vm_list); |
4473 | break; | 4478 | break; |
4474 | } | 4479 | } |
4475 | 4480 | ||
4476 | raw_spin_unlock(&kvm_lock); | 4481 | raw_spin_unlock(&kvm_lock); |
4482 | return freed; | ||
4477 | 4483 | ||
4478 | out: | 4484 | } |
4485 | |||
4486 | static unsigned long | ||
4487 | mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | ||
4488 | { | ||
4479 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); | 4489 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); |
4480 | } | 4490 | } |
4481 | 4491 | ||
4482 | static struct shrinker mmu_shrinker = { | 4492 | static struct shrinker mmu_shrinker = { |
4483 | .shrink = mmu_shrink, | 4493 | .count_objects = mmu_shrink_count, |
4494 | .scan_objects = mmu_shrink_scan, | ||
4484 | .seeks = DEFAULT_SEEKS * 10, | 4495 | .seeks = DEFAULT_SEEKS * 10, |
4485 | }; | 4496 | }; |
4486 | 4497 | ||