path: root/include/linux/mmdebug.h
author: Herbert Xu <herbert@gondor.apana.org.au> 2010-01-25 18:51:01 -0500
committer: David S. Miller <davem@davemloft.net> 2010-01-25 18:51:01 -0500
commit: 39d321577405e8e269fd238b278aaf2425fa788a (patch)
tree: 923bded413373b0ee72b0929fa7413953888da12 /include/linux/mmdebug.h
parent: 5a27e86babe79cf5f575394bb1055448458df6c7 (diff)
virtio_net: Make delayed refill more reliable
I have seen RX stalls on a machine that experienced a suspected OOM. After the stall, the RX buffer is empty on the guest side and there are exactly 16 entries available on the host side. As the number of entries is less than that required by a maximal skb, the host cannot proceed. The guest did not have a refill job scheduled.

My diagnosis is that an OOM had occurred, with the delayed refill job scheduled. The job was able to allocate at least one skb, but not enough to overcome the minimum required by the host to proceed. As the refill job would only reschedule itself if it failed completely to allocate any skbs, this would lead to an RX stall.

The following patch removes this stall possibility by always rescheduling the refill job until the ring is totally refilled. Testing has shown that the RX stall no longer occurs, whereas previously it would occur within a day.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
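The rescheduling policy the commit message describes can be sketched in isolation. The following is a minimal userspace illustration, not the actual driver patch: the names (struct ring, try_fill, refill_work, alloc_rx_buffer) and the failure rate are stand-ins for virtio_net's real state and skb allocation under memory pressure. The point it demonstrates is the fix: reschedule whenever the ring is not completely full, rather than only when allocation failed entirely.

/*
 * Sketch of the "always reschedule until totally refilled" policy.
 * In the driver the rescheduling would go through a delayed work item;
 * a plain loop stands in for that here.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 256

struct ring {
        int num;        /* buffers currently posted */
        int max;        /* ring capacity */
};

/* Stand-in for skb allocation under memory pressure: may fail. */
static bool alloc_rx_buffer(void)
{
        return rand() % 4 != 0;         /* fail ~25% of the time */
}

/*
 * Post buffers until the ring is full or an allocation fails.
 * Returns true only when the ring ended up completely full.
 */
static bool try_fill(struct ring *r)
{
        while (r->num < r->max) {
                if (!alloc_rx_buffer())
                        return false;   /* simulated OOM, partial fill */
                r->num++;
        }
        return true;
}

static void refill_work(struct ring *r)
{
        int passes = 0;

        /*
         * The pre-patch behaviour rescheduled only when *zero* buffers
         * were allocated; a partial fill below the host's minimum then
         * stalled RX forever. Rescheduling on any partial fill closes
         * that gap.
         */
        while (!try_fill(r))
                passes++;
        printf("ring full after %d extra passes\n", passes);
}

int main(void)
{
        struct ring r = { .num = 0, .max = RING_SIZE };

        refill_work(&r);
        return 0;
}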
Diffstat (limited to 'include/linux/mmdebug.h')
0 files changed, 0 insertions, 0 deletions
/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT(ret)) |
                        (offset & SWP_OFFSET_MASK(ret));
        return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
        return (entry.val >> SWP_TYPE_SHIFT(entry));
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK(entry);
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
        return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
        swp_entry_t arch_entry;

        BUG_ON(pte_file(pte));
        arch_entry = __pte_to_swp_entry(pte);
        return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
        swp_entry_t arch_entry;

        arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
        BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
        return __swp_entry_to_pte(arch_entry);
}

#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
        BUG_ON(!PageLocked(page));
        return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
                        page_to_pfn(page));
}

static inline int is_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
                        swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_write_migration_entry(swp_entry_t entry)
{
        return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
        struct page *p = pfn_to_page(swp_offset(entry));
        /*
         * Any use of migration entries may only occur while the
         * corresponding page is locked
         */
        BUG_ON(!PageLocked(p));
        return p;
}

static inline void make_migration_entry_read(swp_entry_t *entry)
{
        *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                        unsigned long address);
#else

#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
        return 0;
}
#define migration_entry_to_page(swp) NULL
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
        return 0;
}
#endif
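To make the type+offset packing above concrete, here is a standalone round-trip demo. The 5-bit type split shown is an arbitrary example chosen for illustration; the kernel derives the real SWP_TYPE_SHIFT()/SWP_OFFSET_MASK() values from MAX_SWAPFILES_SHIFT and each architecture's PTE layout, so the constants below are assumptions, not the kernel's definitions.

/*
 * Round-trip demo of swp_entry_t packing: a type in the high bits,
 * an offset in the remaining low bits of one unsigned long.
 */
#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define SWP_TYPE_BITS   5       /* illustrative; see MAX_SWAPFILES_SHIFT */
#define SWP_TYPE_SHIFT  ((sizeof(unsigned long) * 8) - SWP_TYPE_BITS)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)

static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
        swp_entry_t ret;

        ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
        return ret;
}

static unsigned swp_type(swp_entry_t entry)
{
        return entry.val >> SWP_TYPE_SHIFT;
}

static unsigned long swp_offset(swp_entry_t entry)
{
        return entry.val & SWP_OFFSET_MASK;
}

int main(void)
{
        swp_entry_t e = swp_entry(3, 0x12345);

        /* Both fields survive the round trip through the packed word. */
        assert(swp_type(e) == 3);
        assert(swp_offset(e) == 0x12345);
        printf("type=%u offset=%#lx\n", swp_type(e), swp_offset(e));
        return 0;
}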