diff options
author     Linus Torvalds <torvalds@linux-foundation.org>   2016-01-18 19:44:24 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-01-18 19:44:24 -0500
commit     a200dcb34693084e56496960d855afdeaaf9578f (patch)
tree       bf65e4350460b7f98247278469f7600d1808c3fc /mm
parent     d05d82f7110b08fd36178a641b69a1f206e1142b (diff)
parent     43e361f23c49dbddf74f56ddf6cdd85c5dbff6da (diff)
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio barrier rework+fixes from Michael Tsirkin:
"This adds a new kind of barrier, and reworks virtio and xen to use it.
Plus some fixes here and there"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
checkpatch: add virt barriers
checkpatch: check for __smp outside barrier.h
checkpatch.pl: add missing memory barriers
virtio: make find_vqs() checkpatch.pl-friendly
virtio_balloon: fix race between migration and ballooning
virtio_balloon: fix race by fill and leak
s390: more efficient smp barriers
s390: use generic memory barriers
xen/events: use virt_xxx barriers
xen/io: use virt_xxx barriers
xenbus: use virt_xxx barriers
virtio_ring: use virt_store_mb
sh: move xchg_cmpxchg to a header by itself
sh: support 1 and 2 byte xchg
virtio_ring: update weak barriers to use virt_xxx
Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
asm-generic: implement virt_xxx memory barriers
x86: define __smp_xxx
xtensa: define __smp_xxx
tile: define __smp_xxx
...
Diffstat (limited to 'mm')
-rw-r--r--   mm/balloon_compaction.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index d3116be5a00f..300117f1a08f 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 	bool dequeued_page;
 
 	dequeued_page = false;
+	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
 		/*
 		 * Block others from accessing the 'page' while we get around
@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 				continue;
 			}
 #endif
-			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 			balloon_page_delete(page);
 			__count_vm_event(BALLOON_DEFLATE);
-			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 			unlock_page(page);
 			dequeued_page = true;
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
 	if (!dequeued_page) {
 		/*