Diffstat (limited to 'mm')
 mm/memcontrol.c | 12 ++++++++++--
 mm/slub.c       | 10 ++++++----
 2 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f4ec4e7ca4cd..930de9437271 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2091,6 +2091,7 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	(0)
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static DEFINE_MUTEX(percpu_charge_mutex);
 
 /*
  * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -2197,8 +2198,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-		if (mem_cgroup_same_or_subtree(root_mem, stock->cached) &&
-				test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
 			flush_work(&stock->work);
 	}
 out:
@@ -2213,14 +2213,22 @@ out:
  */
 static void drain_all_stock_async(struct mem_cgroup *root_mem)
 {
+	/*
+	 * If someone calls draining, avoid adding more kworker runs.
+	 */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
 	drain_all_stock(root_mem, false);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /* This is a synchronous drain interface. */
 static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
+	mutex_lock(&percpu_charge_mutex);
 	drain_all_stock(root_mem, true);
+	mutex_unlock(&percpu_charge_mutex);
 }
 
 /*
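
Taken together, the memcontrol.c hunks reintroduce percpu_charge_mutex to serialize drainers: the async path bails out when a drain is already in flight rather than queueing more kworker runs, while the sync path (used by force_empty) blocks until it can drain. A minimal sketch of that pattern, not the kernel source itself; do_drain() is a hypothetical stand-in for drain_all_stock():

#include <linux/mutex.h>
#include <linux/types.h>

struct mem_cgroup;
static void do_drain(struct mem_cgroup *root_mem, bool sync);	/* stand-in */

static DEFINE_MUTEX(percpu_charge_mutex);

static void drain_async(struct mem_cgroup *root_mem)
{
	/* Best effort: if a drain is already running, piling on more
	 * asynchronous work buys nothing, so give up immediately. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	do_drain(root_mem, false);
	mutex_unlock(&percpu_charge_mutex);
}

static void drain_sync(struct mem_cgroup *root_mem)
{
	/* Callers must not return before the stocks are drained,
	 * so wait for any drain in progress instead of skipping. */
	mutex_lock(&percpu_charge_mutex);
	do_drain(root_mem, true);
	mutex_unlock(&percpu_charge_mutex);
}

The asymmetry is the point: trylock keeps the opportunistic reclaim path cheap, while the blocking lock preserves the synchronous guarantee.
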
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
 		return check_bytes8(start, value, bytes);
 
 	value64 = value | value << 8 | value << 16 | value << 24;
-	value64 = value64 | value64 << 32;
+	value64 = (value64 & 0xffffffff) | value64 << 32;
 	prefix = 8 - ((unsigned long)start) % 8;
 
 	if (prefix) {
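
The masking matters because value is a u8 that gets promoted to int: for value >= 0x80, value << 24 sets the sign bit, and storing that negative int into a 64-bit variable sign-extends it, leaving 0xffffffff in the upper half instead of the replicated byte pattern. A standalone userspace illustration of the arithmetic (assuming gcc's two's-complement shift behavior, as the kernel does):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t value = 0x80;
	uint64_t value64 = value | value << 8 | value << 16 | value << 24;

	/* sign extension has already polluted the upper 32 bits */
	printf("first step: %016llx\n", (unsigned long long)value64);
	/* -> ffffffff80808080 */

	/* old code: the garbage in the upper half survives the OR */
	printf("old:        %016llx\n",
	       (unsigned long long)(value64 | value64 << 32));
	/* -> ffffffff80808080, not the intended pattern */

	/* fixed code: mask first, then replicate into the upper half */
	printf("fixed:      %016llx\n",
	       (unsigned long long)((value64 & 0xffffffff) | value64 << 32));
	/* -> 8080808080808080 */
	return 0;
}

For any value below 0x80 the two expressions agree, which is why the bug only surfaces when checking bytes with the high bit set, such as some of SLUB's poison and red-zone patterns.
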
@@ -1854,7 +1854,7 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial < s->min_partial)
+	if (!new.inuse && n->nr_partial > s->min_partial)
 		m = M_FREE;
 	else if (new.freelist) {
 		m = M_PARTIAL;
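
min_partial is the reserve of partial slabs each node keeps cached so later allocations can avoid the page allocator; an empty slab should be returned only once the node exceeds that reserve. The flipped comparison does exactly that, whereas the old < test discarded slabs precisely when the reserve was short. Restated as a standalone predicate (a hypothetical helper for illustration, not in the tree):

#include <stdbool.h>

/* Free an empty slab back to the page allocator only when the node
 * already caches more than its configured reserve of partial slabs. */
static inline bool should_free_empty_slab(unsigned long nr_partial,
					  unsigned long min_partial)
{
	return nr_partial > min_partial;
}

With min_partial == 5, for example, an empty slab stays on the partial list while nr_partial <= 5 and is freed only once the list already holds more than 5.
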
@@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 slab_empty:
 	if (prior) {
 		/*
-		 * Slab still on the partial list.
+		 * Slab on the partial list.
 		 */
 		remove_partial(n, page);
 		stat(s, FREE_REMOVE_PARTIAL);
-	}
+	} else
+		/* Slab must be on the full list */
+		remove_full(s, page);
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
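
The slab_empty path is about to hand the page back to the allocator, so the slab must first be unlinked from whichever per-node list tracks it. A non-NULL prior means the slab already had free objects before this free and therefore sat on the partial list; a NULL prior means it was full, and when SLUB debugging tracks full slabs the missing remove_full() call left a stale list entry pointing at a freed page. The corrected bookkeeping, sketched with the surrounding steps compressed (list_lock is held across the removal; discard_slab() is the follow-on in __slab_free()):

	/* Unlink from whichever list currently tracks the slab ... */
	if (prior)
		remove_partial(n, page);	/* had free objects: partial list */
	else
		remove_full(s, page);		/* was full: debug full list */
	/* ... only then is it safe to free the underlying page */
	discard_slab(s, page);
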