Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c      | 2
-rw-r--r--  mm/memory-failure.c  | 2
-rw-r--r--  mm/page-writeback.c  | 4
-rw-r--r--  mm/slub.c            | 6
-rw-r--r--  mm/swap.c            | 2
-rw-r--r--  mm/vmalloc.c         | 2
-rw-r--r--  mm/vmstat.c          | 4
-rw-r--r--  mm/zsmalloc.c        | 2
8 files changed, 12 insertions(+), 12 deletions(-)
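The pattern repeated in every hunk below is the replacement of the deprecated __get_cpu_var() lvalue accessor with this_cpu_ptr() (and, in one slub.c path, of __this_cpu_ptr() with raw_cpu_ptr()). As a minimal sketch of the before/after usage, assuming a kernel build context; struct example_stat and the per-CPU variable example_stat are hypothetical and not part of this patch:

#include <linux/percpu.h>

struct example_stat {
	unsigned long count;
};

/* Hypothetical per-CPU variable, for illustration only. */
static DEFINE_PER_CPU(struct example_stat, example_stat);

static void update_example_stat(void)
{
	struct example_stat *stat;

	/* Before this patch: take the address of the __get_cpu_var() lvalue. */
	stat = &__get_cpu_var(example_stat);

	/* After this patch: this_cpu_ptr() takes the address of the per-CPU symbol. */
	stat = this_cpu_ptr(&example_stat);

	stat->count++;
}

Both forms yield the same pointer to this CPU's instance; the conversion only changes which accessor API is used.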
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6b448881422b..14326935800d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2436,7 +2436,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
  */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6917f799412b..d50f17fb9be2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1298,7 +1298,7 @@ static void memory_failure_work_func(struct work_struct *work)
 	unsigned long proc_flags;
 	int gotten;
 
-	mf_cpu = &__get_cpu_var(memory_failure_cpu);
+	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
 	for (;;) {
 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a4317da60532..b9b8e8204628 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1623,7 +1623,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * 1000+ tasks, all of them start dirtying pages at exactly the same
 	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	p = &__get_cpu_var(bdp_ratelimits);
+	p = this_cpu_ptr(&bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
 	else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1635,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
 	 * the dirty throttling and livelock other long-run dirtiers.
 	 */
-	p = &__get_cpu_var(dirty_throttle_leaks);
+	p = this_cpu_ptr(&dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
 		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2209,7 +2209,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 	page = new_slab(s, flags, node);
 	if (page) {
-		c = __this_cpu_ptr(s->cpu_slab);
+		c = raw_cpu_ptr(s->cpu_slab);
 		if (c->page)
 			flush_slab(s, c);
 
@@ -2425,7 +2425,7 @@ redo:
 	 * and the retrieval of the tid.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
@@ -2681,7 +2681,7 @@ redo:
 	 * during the cmpxchg then the free will succedd.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	tid = c->tid;
 	preempt_enable();
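A note on the two variants used in the mm/slub.c hunks above: this_cpu_ptr() performs a preemption-context check when CONFIG_DEBUG_PREEMPT is enabled, while raw_cpu_ptr() omits that check for callers that already run with preemption or interrupts disabled, or that detect migration by other means (such as the transaction id checked later in the slub fast paths). A minimal sketch of the two usage patterns, assuming a kernel build context; struct demo_state and the per-CPU variable demo_slot are hypothetical and not part of this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

struct demo_state {
	unsigned long tid;
};

static DEFINE_PER_CPU(struct demo_state, demo_slot);

static void checked_lookup(void)
{
	struct demo_state *c;

	preempt_disable();		/* keep the task on this CPU */
	c = this_cpu_ptr(&demo_slot);	/* satisfies the DEBUG_PREEMPT check */
	c->tid++;
	preempt_enable();
}

static void unchecked_lookup(void)
{
	/*
	 * raw_cpu_ptr() performs no preemption check; the caller must
	 * either already be pinned or tolerate migrating afterwards.
	 */
	struct demo_state *c = raw_cpu_ptr(&demo_slot);

	c->tid++;
}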
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -441,7 +441,7 @@ void rotate_reclaimable_page(struct page *page)
 
 	page_cache_get(page);
 	local_irq_save(flags);
-	pvec = &__get_cpu_var(lru_rotate_pvecs);
+	pvec = this_cpu_ptr(&lru_rotate_pvecs);
 	if (!pagevec_add(pvec, page))
 		pagevec_move_tail(pvec);
 	local_irq_restore(flags);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index bf233b283319..ddaf70b21b59 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1496,7 +1496,7 @@ void vfree(const void *addr)
 	if (!addr)
 		return;
 	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
 		if (llist_add((struct llist_node *)addr, &p->list))
 			schedule_work(&p->wq);
 	} else
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 82ce17ce58c4..376bd2d21482 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -489,7 +489,7 @@ static void refresh_cpu_vm_stats(void)
 			continue;
 
 		if (__this_cpu_read(p->pcp.count))
-			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
+			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 #endif
 	}
 	fold_diff(global_diff);
@@ -1230,7 +1230,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 static void vmstat_update(struct work_struct *w)
 {
 	refresh_cpu_vm_stats();
-	schedule_delayed_work(&__get_cpu_var(vmstat_work),
+	schedule_delayed_work(this_cpu_ptr(&vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 36b4591a7a2d..5ae5d85b629d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1082,7 +1082,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	class = &pool->size_class[class_idx];
 	off = obj_idx_to_offset(page, obj_idx, class->size);
 
-	area = &__get_cpu_var(zs_map_area);
+	area = this_cpu_ptr(&zs_map_area);
 	if (off + class->size <= PAGE_SIZE)
 		kunmap_atomic(area->vm_addr);
 	else {