author     Christoph Lameter <clameter@sgi.com>                   2007-05-17 01:10:53 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-17 08:23:03 -0400
commit     4b6f0750457db1f573eb6226960a432da3be8fe2 (patch)
tree       9e71084c892b595e4b7383d152d7ebeea936934b /mm
parent     3ca12ee549f7837b8a685dddc9515f9fc28434ee (diff)
SLUB: Define functions for cpu slab handling instead of using PageActive
Use inline functions to access the per-cpu bit. Introduce the notion of
"freezing" a slab to make things more understandable.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 57
1 file changed, 38 insertions(+), 19 deletions(-)
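The substance of the patch is a set of intention-revealing wrappers around PageActive(). As a minimal userspace model of that pattern (the struct page and flag bit below are simplified stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Simplified stand-ins: the kernel stores the frozen state in the
 * existing PG_active page flag; here a plain bit models it. */
struct page { unsigned long flags; };
#define PG_active (1UL << 0)
#define PageActive(p)      (!!((p)->flags & PG_active))
#define SetPageActive(p)   ((p)->flags |= PG_active)
#define ClearPageActive(p) ((p)->flags &= ~PG_active)

/* The wrappers the patch introduces: same bit, clearer intent. */
static inline int SlabFrozen(struct page *page) { return PageActive(page); }
static inline void SetSlabFrozen(struct page *page) { SetPageActive(page); }
static inline void ClearSlabFrozen(struct page *page) { ClearPageActive(page); }

int main(void)
{
	struct page p = { 0 };
	SetSlabFrozen(&p);
	printf("frozen=%d\n", SlabFrozen(&p));	/* frozen=1 */
	ClearSlabFrozen(&p);
	printf("frozen=%d\n", SlabFrozen(&p));	/* frozen=0 */
	return 0;
}

The point of the indirection is that the flag's storage (a bit reused from LRU management) can change later without touching any call site.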
@@ -78,10 +78,18 @@
  *
  * Overloading of page flags that are otherwise used for LRU management.
  *
- * PageActive		The slab is used as a cpu cache. Allocations
- *			may be performed from the slab. The slab is not
- *			on any slab list and cannot be moved onto one.
- *			The cpu slab may be equipped with an additioanl
+ * PageActive		The slab is frozen and exempt from list processing.
+ *			This means that the slab is dedicated to a purpose
+ *			such as satisfying allocations for a specific
+ *			processor. Objects may be freed in the slab while
+ *			it is frozen but slab_free will then skip the usual
+ *			list operations. It is up to the processor holding
+ *			the slab to integrate the slab into the slab lists
+ *			when the slab is no longer needed.
+ *
+ *			One use of this flag is to mark slabs that are
+ *			used for allocations. Then such a slab becomes a cpu
+ *			slab. The cpu slab may be equipped with an additional
 *			lockless_freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
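The rewritten comment defines a small protocol: a frozen slab is off the lists, frees into it succeed but skip list maintenance, and whoever froze it must put it back. A toy model of that rule, with all names hypothetical and the list handling reduced to a single boolean:

#include <stdbool.h>

struct slab_model {
	bool frozen;		/* dedicated to one processor */
	int  inuse;		/* objects currently allocated */
	bool on_partial;	/* member of the node's partial list */
};

static void model_free(struct slab_model *s)
{
	s->inuse--;
	if (s->frozen)
		return;			/* slab_free skips the list operations */
	if (s->inuse > 0 && !s->on_partial)
		s->on_partial = true;	/* would be moved to the partial list */
}

static void model_unfreeze(struct slab_model *s)
{
	s->frozen = false;
	if (s->inuse > 0)		/* the holder integrates the slab back */
		s->on_partial = true;
}

int main(void)
{
	struct slab_model s = { .frozen = true, .inuse = 2, .on_partial = false };
	model_free(&s);		/* frozen: no list movement happens */
	model_unfreeze(&s);	/* holder reinstates it on the partial list */
	return s.on_partial ? 0 : 1;
}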
@@ -91,6 +99,21 @@
 * the fast path and disables lockless freelists.
 */

+static inline int SlabFrozen(struct page *page)
+{
+	return PageActive(page);
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+	SetPageActive(page);
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+	ClearPageActive(page);
+}
+
 static inline int SlabDebug(struct page *page)
 {
 #ifdef CONFIG_SLUB_DEBUG
@@ -1135,11 +1158,12 @@ static void remove_partial(struct kmem_cache *s,
 *
 * Must hold list_lock.
 */
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
 {
 	if (slab_trylock(page)) {
 		list_del(&page->lru);
 		n->nr_partial--;
+		SetSlabFrozen(page);
 		return 1;
 	}
 	return 0;
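The rename from lock_and_del_slab to lock_and_freeze_slab makes the new side effect visible at the call site: under n->list_lock, a partial slab is unlinked and marked frozen in one critical section, and only if its slab lock could be taken without blocking. A sketch of that trylock-or-skip idiom, with pthreads standing in for the per-slab lock and stand-in types throughout:

#include <pthread.h>

struct slab_stub {
	pthread_mutex_t lock;
	int frozen;
};

/* Returns 1 with the slab locked and frozen; returns 0 immediately
 * if another CPU already holds the lock, so the caller can try the
 * next slab on the partial list instead of blocking. */
static int lock_and_freeze_stub(struct slab_stub *s)
{
	if (pthread_mutex_trylock(&s->lock) == 0) {
		/* ...unlink from the partial list here... */
		s->frozen = 1;
		return 1;
	}
	return 0;
}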
@@ -1163,7 +1187,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)

 	spin_lock(&n->list_lock);
 	list_for_each_entry(page, &n->partial, lru)
-		if (lock_and_del_slab(n, page))
+		if (lock_and_freeze_slab(n, page))
 			goto out;
 	page = NULL;
 out:
@@ -1242,10 +1266,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 *
 * On exit the slab lock will have been dropped.
 */
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

+	ClearSlabFrozen(page);
 	if (page->inuse) {

 		if (page->freelist)
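unfreeze_slab is putback_slab with the flag handling folded in: the frozen bit is cleared first, then the slab is filed according to its occupancy. A reduced model of the placement decision, only as far as this hunk shows it, with hypothetical names and the list bookkeeping elided:

enum placement { PLACE_PARTIAL, PLACE_KEEP, PLACE_DISCARD };

static enum placement after_unfreeze(int inuse, int has_free_objects)
{
	if (inuse)
		return has_free_objects ? PLACE_PARTIAL	/* room left: partial list */
					: PLACE_KEEP;	/* fully allocated */
	return PLACE_DISCARD;	/* no live objects: slab can be freed */
}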
@@ -1296,9 +1321,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
 		page->inuse--;
 	}
 	s->cpu_slab[cpu] = NULL;
-	ClearPageActive(page);
-
-	putback_slab(s, page);
+	unfreeze_slab(s, page);
 }

 static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1389,9 +1412,7 @@ another_slab:
 new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
-have_slab:
 		s->cpu_slab[cpu] = page;
-		SetPageActive(page);
 		goto load_freelist;
 	}

@@ -1421,7 +1442,9 @@ have_slab:
 		flush_slab(s, s->cpu_slab[cpu], cpu);
 	}
 	slab_lock(page);
-	goto have_slab;
+	SetSlabFrozen(page);
+	s->cpu_slab[cpu] = page;
+	goto load_freelist;
 	}
 	return NULL;
 debug:
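This hunk and the previous one replace the backwards "goto have_slab" with a straight-line install: with the slab lock held, the page is frozen and only then published as s->cpu_slab[cpu], so no other path sees an unfrozen cpu slab. A sketch of that ordering under stand-in types (not the kernel's struct page):

struct cpu_slab_stub { int frozen; };

static void install_cpu_slab(struct cpu_slab_stub **cpu_slab_slot,
			     struct cpu_slab_stub *page)
{
	/* caller is assumed to hold the slab lock */
	page->frozen = 1;	/* SetSlabFrozen(page) */
	*cpu_slab_slot = page;	/* s->cpu_slab[cpu] = page */
}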
@@ -1508,11 +1531,7 @@ checks_ok:
 	page->freelist = object;
 	page->inuse--;

-	if (unlikely(PageActive(page)))
-		/*
-		 * Cpu slabs are never on partial lists and are
-		 * never freed.
-		 */
+	if (unlikely(SlabFrozen(page)))
 		goto out_unlock;

 	if (unlikely(!page->inuse))
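The four-line comment could be deleted because the predicate now says the same thing: a frozen slab's list state belongs to its holder, so slab_free returns before any list surgery. The unlikely() around the test survives; in the kernel it is a branch-prediction hint built on GCC's __builtin_expect (the real macro lives in include/linux/compiler.h), roughly:

/* Simplified rendition of the kernel's unlikely() hint. */
#define my_unlikely(x)	__builtin_expect(!!(x), 0)

static int skips_list_ops(int frozen)
{
	if (my_unlikely(frozen))
		return 1;	/* treated as rare: frees to the cpu slab
				 * mostly take the lockless fast path */
	return 0;
}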
@@ -1544,7 +1563,7 @@ slab_empty:
 debug:
 	if (!free_object_checks(s, page, x))
 		goto out_unlock;
-	if (!PageActive(page) && !page->freelist)
+	if (!SlabFrozen(page) && !page->freelist)
 		remove_full(s, page);
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, x, TRACK_FREE, addr);