Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c      2
-rw-r--r--  mm/internal.h    13
-rw-r--r--  mm/memcontrol.c   5
-rw-r--r--  mm/page_alloc.c   4
-rw-r--r--  mm/slub.c        87
5 files changed, 19 insertions, 92 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cb1b3a7ecdfc..89e6286a7f57 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -120,6 +120,7 @@ static void free_huge_page(struct page *page)
 	struct address_space *mapping;
 
 	mapping = (struct address_space *) page_private(page);
+	set_page_private(page, 0);
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
@@ -134,7 +135,6 @@ static void free_huge_page(struct page *page)
 	spin_unlock(&hugetlb_lock);
 	if (mapping)
 		hugetlb_put_quota(mapping, 1);
-	set_page_private(page, 0);
 }
 
 /*
diff --git a/mm/internal.h b/mm/internal.h
index 5a9a6200e034..789727309f4d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -47,4 +47,17 @@ static inline unsigned long page_order(struct page *page)
 	VM_BUG_ON(!PageBuddy(page));
 	return page_private(page);
 }
+
+/*
+ * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
+ * so all functions starting at paging_init should be marked __init
+ * in those cases. SPARSEMEM, however, allows for memory hotplug,
+ * and alloc_bootmem_node is not used.
+ */
+#ifdef CONFIG_SPARSEMEM
+#define __paginginit __meminit
+#else
+#define __paginginit __init
+#endif
+
 #endif
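The __paginginit helper added above picks its expansion from the memory model at compile time, so the init functions in mm/page_alloc.c only need one annotation. Below is a minimal userspace sketch of that pattern, not kernel code: the __demo_* attributes, PAGINGINIT_DESC, and demo_free_area_init() are invented stand-ins, since the real __init/__meminit place functions in sections the kernel can discard after boot (or keep around when memory hotplug needs them again).

#include <stdio.h>

#define CONFIG_SPARSEMEM 1	/* pretend this came from the build configuration */

/* Stand-in attributes; the real macros use section placement instead. */
#define __demo_meminit __attribute__((cold))
#define __demo_init    __attribute__((noinline))

#ifdef CONFIG_SPARSEMEM
#define __paginginit	__demo_meminit	/* kept: may run again for memory hotplug */
#define PAGINGINIT_DESC	"__meminit"
#else
#define __paginginit	__demo_init	/* discarded once early boot is done */
#define PAGINGINIT_DESC	"__init"
#endif

/* Written once; the configuration decides the final annotation. */
static void __paginginit demo_free_area_init(void)
{
	printf("__paginginit expanded to %s\n", PAGINGINIT_DESC);
}

int main(void)
{
	demo_free_area_init();
	return 0;
}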
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6bded84c20c8..631002d085d1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -534,7 +534,6 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 		if (scan >= nr_to_scan)
 			break;
 		page = pc->page;
-		VM_BUG_ON(!pc);
 
 		if (unlikely(!PageLRU(page)))
 			continue;
@@ -1101,7 +1100,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
 
 	if (mem == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	res_counter_init(&mem->res);
 
@@ -1117,7 +1116,7 @@ free_out:
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
 		kfree(mem);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
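The two hunks above switch mem_cgroup_create()'s failure returns from NULL to ERR_PTR(-ENOMEM), so the caller can tell an error code apart from a valid pointer. As a rough userspace sketch of that convention, not kernel code: create_stub() and struct mem_cgroup_stub are invented for illustration, and the ERR_PTR/IS_ERR/PTR_ERR helpers are reimplemented here only so the snippet compiles on its own.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno value into a pointer and back. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct mem_cgroup_stub { int dummy; };

/* Hypothetical allocator mirroring the error path in the patch. */
static struct mem_cgroup_stub *create_stub(int fail)
{
	struct mem_cgroup_stub *mem;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* was: return NULL */
	mem = calloc(1, sizeof(*mem));
	if (!mem)
		return ERR_PTR(-ENOMEM);
	return mem;
}

int main(void)
{
	struct mem_cgroup_stub *mem = create_stub(1);

	if (IS_ERR(mem))
		printf("create failed: %ld\n", PTR_ERR(mem));
	else
		free(mem);
	return 0;
}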
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75b979313346..8896e874a67d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3314,7 +3314,7 @@ static inline int pageblock_default_order(unsigned int order)
  * - mark all memory queues empty
  * - clear the memory bitmaps
  */
-static void __meminit free_area_init_core(struct pglist_data *pgdat,
+static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	enum zone_type j;
@@ -3438,7 +3438,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
+void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long node_start_pfn,
 		unsigned long *zholes_size)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 4b3895cb90ee..74c65af0a54f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,13 +149,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-/*
- * Currently fastpath is not supported if preemption is enabled.
- */
-#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
-#define SLUB_FASTPATH
-#endif
-
 #if PAGE_SHIFT <= 12
 
 /*
@@ -1514,11 +1507,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct page *new;
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
 
-	local_irq_save(flags);
-#endif
 	if (!c->page)
 		goto new_slab;
 
@@ -1541,9 +1530,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return object;
 
 another_slab:
@@ -1575,9 +1561,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
+
 	/*
 	 * No memory available.
 	 *
@@ -1619,34 +1603,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-
-/*
- * The SLUB_FASTPATH path is provisional and is currently disabled if the
- * kernel is compiled with preemption or if the arch does not support
- * fast cmpxchg operations. There are a couple of coming changes that will
- * simplify matters and allow preemption. Ultimately we may end up making
- * SLUB_FASTPATH the default.
- *
- * 1. The introduction of the per cpu allocator will avoid array lookups
- *    through get_cpu_slab(). A special register can be used instead.
- *
- * 2. The introduction of per cpu atomic operations (cpu_ops) means that
- *    we can realize the logic here entirely with per cpu atomics. The
- *    per cpu atomic ops will take care of the preemption issues.
- */
-
-#ifdef SLUB_FASTPATH
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	do {
-		object = c->freelist;
-		if (unlikely(is_end(object) || !node_match(c, node))) {
-			object = __slab_alloc(s, gfpflags, node, addr, c);
-			break;
-		}
-		stat(c, ALLOC_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
-							!= object);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1661,7 +1617,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
-#endif
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, c->objsize);
@@ -1698,11 +1653,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
 
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
-
-	local_irq_save(flags);
-#endif
 	c = get_cpu_slab(s, raw_smp_processor_id());
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
@@ -1734,9 +1684,6 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -1749,9 +1696,6 @@ slab_empty:
 	}
 	slab_unlock(page);
 	stat(c, FREE_SLAB);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	discard_slab(s, page);
 	return;
 
@@ -1777,34 +1721,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-
-#ifdef SLUB_FASTPATH
-	void **freelist;
-
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	debug_check_no_locks_freed(object, s->objsize);
-	do {
-		freelist = c->freelist;
-		barrier();
-		/*
-		 * If the compiler would reorder the retrieval of c->page to
-		 * come before c->freelist then an interrupt could
-		 * change the cpu slab before we retrieve c->freelist. We
-		 * could be matching on a page no longer active and put the
-		 * object onto the freelist of the wrong slab.
-		 *
-		 * On the other hand: If we already have the freelist pointer
-		 * then any change of cpu_slab will cause the cmpxchg to fail
-		 * since the freelist pointers are unique per slab.
-		 */
-		if (unlikely(page != c->page || c->node < 0)) {
-			__slab_free(s, page, x, addr, c->offset);
-			break;
-		}
-		object[c->offset] = freelist;
-		stat(c, FREE_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1818,7 +1734,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 		__slab_free(s, page, x, addr, c->offset);
 
 	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
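The bulk of the mm/slub.c changes removes the experimental SLUB_FASTPATH, which popped and pushed objects on the per-cpu freelist with cmpxchg_local() instead of disabling interrupts. The following is a rough userspace sketch of just the allocation side of that pattern, not kernel code: struct cpu_slab_stub and freelist_pop() are invented for illustration, and a generic GCC compare-and-swap builtin stands in for cmpxchg_local() (which in the kernel only has to be atomic against the local CPU).

#include <stdio.h>

/* Stand-in for the per-cpu structure; only the freelist head matters here. */
struct cpu_slab_stub {
	void **freelist;	/* each free object stores the next pointer at offset 0 */
};

/* Lockless pop: read the head, then commit only if nobody changed it meanwhile. */
static void *freelist_pop(struct cpu_slab_stub *c)
{
	void **object;

	do {
		object = c->freelist;
		if (!object)
			return NULL;	/* the real code falls back to __slab_alloc() here */
	} while (!__sync_bool_compare_and_swap(&c->freelist, object,
					       (void **)object[0]));
	return object;
}

int main(void)
{
	/* Three fake objects, linked through their first word like a SLUB freelist. */
	void *obj[3][2] = { { NULL, NULL }, { NULL, NULL }, { NULL, NULL } };
	struct cpu_slab_stub c = { .freelist = (void **)obj[0] };

	obj[0][0] = obj[1];
	obj[1][0] = obj[2];
	obj[2][0] = NULL;

	for (void *p; (p = freelist_pop(&c)) != NULL; )
		printf("popped %p\n", p);
	return 0;
}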