author    Linus Torvalds <torvalds@linux-foundation.org>  2011-07-30 14:21:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-07-30 14:21:48 -0400
commit    c11abbbaa3252875c5740a6880b9a1a6f1e2a870 (patch)
tree      692143f7edd1157ef499bff21143e0d6df7cace5 /include
parent    1d3fe4a75b691285cded47c9f1a91b30d25287b0 (diff)
parent    9e577e8b46ab0c38970c0f0cd7eae62e6dffddee (diff)
Merge branch 'slub/lockless' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slub/lockless' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (21 commits)
slub: When allocating a new slab also prep the first object
slub: disable interrupts in cmpxchg_double_slab when falling back to pagelock
Avoid duplicate _count variables in page_struct
Revert "SLUB: Fix build breakage in linux/mm_types.h"
SLUB: Fix build breakage in linux/mm_types.h
slub: slabinfo update for cmpxchg handling
slub: Not necessary to check for empty slab on load_freelist
slub: fast release on full slab
slub: Add statistics for the case that the current slab does not match the node
slub: Get rid of the another_slab label
slub: Avoid disabling interrupts in free slowpath
slub: Disable interrupts in free_debug processing
slub: Invert locking and avoid slab lock
slub: Rework allocator fastpaths
slub: Pass kmem_cache struct to lock and freeze slab
slub: explicit list_lock taking
slub: Add cmpxchg_double_slab()
mm: Rearrange struct page
slub: Move page->frozen handling near where the page->freelist handling occurs
slub: Do not use frozen page flag but a bit in the page counters
...
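The common thread in the series above is replacing the per-slab page lock on the allocation and free paths with a double-word compare-and-exchange over the (freelist, counters) pair in struct page. Below is a minimal user-space sketch of that retry pattern, using C11 atomics on a two-word struct as a stand-in for the kernel's cmpxchg_double_slab()/this_cpu_cmpxchg_double(); the names here (slab_state, alloc_object) are illustrative only, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

struct slab_state {             /* stand-in for the (freelist, counters) pair */
	void *freelist;         /* first free object, or NULL */
	unsigned long counters; /* inuse/objects/frozen packed into one word */
};

static _Atomic struct slab_state slab;

/* Lockless allocation fastpath: read both words, compute the successor
 * state, and publish it only if nobody raced with us in between. */
static void *alloc_object(void)
{
	struct slab_state old, new;

	do {
		old = atomic_load(&slab);
		if (!old.freelist)
			return NULL;               /* no free objects left */
		/* each free object stores the pointer to the next one */
		new.freelist = *(void **)old.freelist;
		new.counters = old.counters + 1;   /* inuse sits in the low bits */
	} while (!atomic_compare_exchange_weak(&slab, &old, new));

	return old.freelist;
}

int main(void)
{
	/* seed a tiny two-object freelist so the sketch is runnable */
	static void *objs[2] = { &objs[1], NULL };
	atomic_store(&slab, (struct slab_state){ .freelist = &objs[0] });

	printf("%p %p %p\n", alloc_object(), alloc_object(), alloc_object());
	return 0;
}

The loop either succeeds in one shot (the common case) or re-reads and retries, which is why the series adds failure statistics for the cmpxchg rather than lock-contention counters.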
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h   | 89
-rw-r--r--  include/linux/page-flags.h |  5
-rw-r--r--  include/linux/slub_def.h   |  3
3 files changed, 64 insertions(+), 33 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 027935c86c68..774b8952deb4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -30,23 +30,61 @@ struct address_space;
  * moment. Note that we have no way to track which tasks are using
  * a page, though if it is a pagecache page, rmap structures can tell us
  * who is mapping it.
+ *
+ * The objects in struct page are organized in double word blocks in
+ * order to allows us to use atomic double word operations on portions
+ * of struct page. That is currently only used by slub but the arrangement
+ * allows the use of atomic double word operations on the flags/mapping
+ * and lru list pointers also.
  */
 struct page {
+	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
 					 * updated asynchronously */
-	atomic_t _count;		/* Usage count, see below. */
-	union {
-		atomic_t _mapcount;	/* Count of ptes mapped in mms,
-					 * to show when page is mapped
-					 * & limit reverse map searches.
+	struct address_space *mapping;	/* If low bit clear, points to
+					 * inode address_space, or NULL.
+					 * If page mapped as anonymous
+					 * memory, low bit is set, and
+					 * it points to anon_vma object:
+					 * see PAGE_MAPPING_ANON below.
 					 */
-		struct {		/* SLUB */
-			u16 inuse;
-			u16 objects;
+	/* Second double word */
+	struct {
+		union {
+			pgoff_t index;		/* Our offset within mapping. */
+			void *freelist;		/* slub first free object */
+		};
+
+		union {
+			/* Used for cmpxchg_double in slub */
+			unsigned long counters;
+
+			struct {
+
+				union {
+					atomic_t _mapcount;	/* Count of ptes mapped in mms,
+					 * to show when page is mapped
+					 * & limit reverse map searches.
+					 */
+
+					struct {
+						unsigned inuse:16;
+						unsigned objects:15;
+						unsigned frozen:1;
+					};
+				};
+				atomic_t _count;		/* Usage count, see below. */
+			};
 		};
 	};
+
+	/* Third double word block */
+	struct list_head lru;		/* Pageout list, eg. active_list
+					 * protected by zone->lru_lock !
+					 */
+
+	/* Remainder is not double word aligned */
 	union {
-		struct {
 		unsigned long private;		/* Mapping-private opaque data:
 					 * usually used for buffer_heads
 					 * if PagePrivate set; used for
@@ -54,27 +92,13 @@ struct page {
 					 * indicates order in the buddy
 					 * system if PG_buddy is set.
 					 */
-		struct address_space *mapping;	/* If low bit clear, points to
-						 * inode address_space, or NULL.
-						 * If page mapped as anonymous
-						 * memory, low bit is set, and
-						 * it points to anon_vma object:
-						 * see PAGE_MAPPING_ANON below.
-						 */
-	};
 #if USE_SPLIT_PTLOCKS
 	spinlock_t ptl;
 #endif
 	struct kmem_cache *slab;	/* SLUB: Pointer to slab */
 	struct page *first_page;	/* Compound tail pages */
-	};
-	union {
-		pgoff_t index;		/* Our offset within mapping. */
-		void *freelist;		/* SLUB: freelist req. slab lock */
 	};
-	struct list_head lru;		/* Pageout list, eg. active_list
-					 * protected by zone->lru_lock !
-					 */
+
 	/*
 	 * On machines where all RAM is mapped into kernel address space,
 	 * we can simply calculate the virtual address. On machines with
@@ -100,7 +124,16 @@ struct page {
 	 */
 	void *shadow;
 #endif
-};
+}
+/*
+ * If another subsystem starts using the double word pairing for atomic
+ * operations on struct page then it must change the #if to ensure
+ * proper alignment of the page struct.
+ */
+#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
+	__attribute__((__aligned__(2*sizeof(unsigned long))))
+#endif
+;
 
 typedef unsigned long __nocast vm_flags_t;
 
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3e5a1b189a41..e90a673be67e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -124,9 +124,6 @@ enum pageflags {
 
 	/* SLOB */
 	PG_slob_free = PG_private,
-
-	/* SLUB */
-	PG_slub_frozen = PG_active,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -212,8 +209,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobFree, slob_free)
 
-__PAGEFLAG(SlubFrozen, slub_frozen)
-
 /*
  * Private page markings that may be used by the filesystem that owns the page
  * for its own purposes.
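With the frozen state now a bit inside the counters word rather than a page flag, freezing a slab no longer needs a separate atomic bitop on page->flags; it can ride along in the same double-word cmpxchg that takes the freelist. Below is a self-contained user-space analogue of that pattern; the names are hypothetical, and the kernel's real equivalent is the acquire_slab()/cmpxchg_double_slab() path in mm/slub.c.

#include <stdatomic.h>
#include <stddef.h>

union counters {
	unsigned long word;
	struct {
		unsigned inuse:16;
		unsigned objects:15;
		unsigned frozen:1;
	};
};

struct slab_state {
	void *freelist;
	unsigned long counters;
};

static _Atomic struct slab_state slab;

/* Freeze the slab and take its whole freelist in one atomic step:
 * the frozen bit flips together with the freelist, so there is no
 * window where another CPU sees one change without the other. */
static void *freeze_and_take(void)
{
	struct slab_state old, new;
	union counters c;

	do {
		old = atomic_load(&slab);
		c.word = old.counters;
		c.frozen = 1;		/* replaces the __SetPageSlubFrozen() bitop */
		new.freelist = NULL;	/* this CPU now owns the objects */
		new.counters = c.word;
	} while (!atomic_compare_exchange_weak(&slab, &old, new));

	return old.freelist;
}

int main(void)
{
	return freeze_and_take() == NULL;  /* empty slab: nothing to take */
}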
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4b35c06dfbc5..f58d6413d230 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -24,6 +24,7 @@ enum stat_item {
 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
 	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
 	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
+	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
 	FREE_SLAB,		/* Slab freed to the page allocator */
 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
@@ -31,8 +32,10 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	DEACTIVATE_BYPASS,	/* Implicit deactivation */
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
+	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
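The new stat_item entries are per-cpu counters; on kernels built with CONFIG_SLUB_STATS they are exported through sysfs under /sys/kernel/slab/<cache>/, one file per item named after the lowercased enum entry (e.g. alloc_node_mismatch, deactivate_bypass, cmpxchg_double_fail). A small reader sketch follows; the cache name kmalloc-64 is only an example, and the first field of each file is the sum across CPUs.

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/slab/kmalloc-64/alloc_node_mismatch";
	unsigned long total;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* kernel likely lacks CONFIG_SLUB_STATS */
		return 1;
	}
	/* first field is the total; per-cpu counts follow as C<n>=... */
	if (fscanf(f, "%lu", &total) == 1)
		printf("node-mismatch allocations: %lu\n", total);
	fclose(f);
	return 0;
}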