diff options
| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | Christoph Lameter <cl@linux-foundation.org>      | 2010-01-21 18:43:35 -0500 |
| committer | Pekka Enberg <penberg@cs.helsinki.fi>            | 2010-01-22 11:33:38 -0500 |
| commit    | 91efd773c74bb26b5409c85ad755d536448e229c (patch) |                           |
| tree      | b812dadb615ecff08e4d3ebe97483f192d0be27d         |                           |
| parent    | 7738dd9e8f2bc1c249e00c9c20e018448fac0084 (diff)  |                           |
dma kmalloc handling fixes
1. We need kmalloc_percpu for all of the now extended kmalloc caches
array not just for each shift value.
2. init_kmem_cache_nodes() must assume node 0 locality for statically
allocated dma kmem_cache structures even after boot is complete.
Reported-and-tested-by: Alex Chiang <achiang@hp.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
mm/slub.c | 5 +++--

1 file changed, 3 insertions(+), 2 deletions(-)
```diff
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2062,7 +2062,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
@@ -2148,7 +2148,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int node;
 	int local_node;
 
-	if (slab_state >= UP)
+	if (slab_state >= UP && (s < kmalloc_caches ||
+			s > kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
```
