aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-12 18:01:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-12 18:01:38 -0400
commit26935fb06ee88f1188789807687c03041f3c70d9 (patch)
tree381c487716540b52348d78bee6555f8fa61d77ef /arch
parent3cc69b638e11bfda5d013c2b75b60934aa0e88a1 (diff)
parentbf2ba3bc185269eca274b458aac46ba1ad7c1121 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 4 from Al Viro: "list_lru pile, mostly" This came out of Andrew's pile, Al ended up doing the merge work so that Andrew didn't have to. Additionally, a few fixes. * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (42 commits) super: fix for destroy lrus list_lru: dynamically adjust node arrays shrinker: Kill old ->shrink API. shrinker: convert remaining shrinkers to count/scan API staging/lustre/libcfs: cleanup linux-mem.h staging/lustre/ptlrpc: convert to new shrinker API staging/lustre/obdclass: convert lu_object shrinker to count/scan API staging/lustre/ldlm: convert to shrinkers to count/scan API hugepage: convert huge zero page shrinker to new shrinker API i915: bail out earlier when shrinker cannot acquire mutex drivers: convert shrinkers to new count/scan API fs: convert fs shrinkers to new scan/count API xfs: fix dquot isolation hang xfs-convert-dquot-cache-lru-to-list_lru-fix xfs: convert dquot cache lru to list_lru xfs: rework buffer dispose list tracking xfs-convert-buftarg-lru-to-generic-code-fix xfs: convert buftarg LRU to generic code fs: convert inode and dentry shrinking to be node aware vmscan: per-node deferred work ...
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kvm/mmu.c25
1 file changed, 18 insertions, 7 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6e2d2c8f230b..dce0df8150df 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4421,13 +4421,12 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
 	}
 }
 
-static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
 	int nr_to_scan = sc->nr_to_scan;
-
-	if (nr_to_scan == 0)
-		goto out;
+	unsigned long freed = 0;
 
 	raw_spin_lock(&kvm_lock);
 
@@ -4462,25 +4461,37 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 			goto unlock;
 		}
 
-		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
+		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+			freed++;
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 unlock:
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);
 
+		/*
+		 * unfair on small ones
+		 * per-vm shrinkers cry out
+		 * sadness comes quickly
+		 */
 		list_move_tail(&kvm->vm_list, &vm_list);
 		break;
 	}
 
 	raw_spin_unlock(&kvm_lock);
+	return freed;
 
-out:
+}
+
+static unsigned long
+mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
 static struct shrinker mmu_shrinker = {
-	.shrink = mmu_shrink,
+	.count_objects = mmu_shrink_count,
+	.scan_objects = mmu_shrink_scan,
 	.seeks = DEFAULT_SEEKS * 10,
 };
 