path: root/arch/sparc64/mm/init.c
author	Russell King <rmk@dyn-67.arm.linux.org.uk>	2005-07-28 04:30:20 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-07-28 04:30:20 -0400
commit	661299d9d0437a0ff72240f3d60016ac3a361a6e (patch)
tree	765512576314fc3612b503f182b9ae4e60fcf849 /arch/sparc64/mm/init.c
parent	05caac585f8abd6c0113856bc8858e3ef214d8a6 (diff)
parent	41c018b7ecb60b1c2c4d5dee0cd37d32a94c45af (diff)
Merge with Linus' 2.6 tree
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--	arch/sparc64/mm/init.c	| 23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8fc413cb6acd..3fbaf342a452 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -121,15 +121,24 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 }
 
 #define PG_dcache_dirty		PG_arch_1
+#define PG_dcache_cpu_shift	24
+#define PG_dcache_cpu_mask	(256 - 1)
+
+#if NR_CPUS > 256
+#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
+#endif
 
 #define dcache_dirty_cpu(page) \
-	(((page)->flags >> 24) & (NR_CPUS - 1UL))
+	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 
 static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
 {
 	unsigned long mask = this_cpu;
-	unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
-	mask = (mask << 24) | (1UL << PG_dcache_dirty);
+	unsigned long non_cpu_bits;
+
+	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
+	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
+
 	__asm__ __volatile__("1:\n\t"
 			     "ldx	[%2], %%g7\n\t"
 			     "and	%%g7, %1, %%g1\n\t"
@@ -151,7 +160,7 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
 			     "1:\n\t"
 			     "ldx	[%2], %%g7\n\t"
-			     "srlx	%%g7, 24, %%g1\n\t"
+			     "srlx	%%g7, %4, %%g1\n\t"
 			     "and	%%g1, %3, %%g1\n\t"
 			     "cmp	%%g1, %0\n\t"
 			     "bne,pn	%%icc, 2f\n\t"
@@ -164,7 +173,8 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 			     "2:"
 			     : /* no outputs */
 			     : "r" (cpu), "r" (mask), "r" (&page->flags),
-			       "i" (NR_CPUS - 1UL)
+			       "i" (PG_dcache_cpu_mask),
+			       "i" (PG_dcache_cpu_shift)
 			     : "g1", "g7");
 }
 
@@ -180,7 +190,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	if (pfn_valid(pfn) &&
 	    (page = pfn_to_page(pfn), page_mapping(page)) &&
 	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
-		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
+			   PG_dcache_cpu_mask);
 		int this_cpu = get_cpu();
 
 		/* This is just to optimize away some function calls
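
The net effect of the change: the CPU number recorded in page->flags now lives in a fixed 8-bit field behind named macros (hence the build-time NR_CPUS > 256 guard), instead of a bare shift of 24 and a mask derived from NR_CPUS, so the writer in set_dcache_dirty() and the readers can never disagree about the field's layout. A minimal user-space sketch of that encoding follows; the DEMO_* names are made up for illustration, and the real kernel updates page->flags with the atomic compare-and-swap loops shown in the diff rather than a plain variable:

#include <stdio.h>

#define DEMO_DCACHE_DIRTY	20		/* stand-in for PG_dcache_dirty */
#define DEMO_CPU_SHIFT		24		/* PG_dcache_cpu_shift */
#define DEMO_CPU_MASK		(256UL - 1)	/* PG_dcache_cpu_mask: 8 bits */

/* Record "this cpu dirtied the page's D-cache" in one flags word. */
static unsigned long set_dcache_dirty_demo(unsigned long flags, int cpu)
{
	flags &= ~(DEMO_CPU_MASK << DEMO_CPU_SHIFT);	/* drop old owner */
	flags |= ((unsigned long)cpu << DEMO_CPU_SHIFT) |
		 (1UL << DEMO_DCACHE_DIRTY);
	return flags;
}

static int dcache_dirty_cpu_demo(unsigned long flags)
{
	return (flags >> DEMO_CPU_SHIFT) & DEMO_CPU_MASK;
}

int main(void)
{
	unsigned long flags = set_dcache_dirty_demo(0, 131);

	/* prints: dirty=1 cpu=131 */
	printf("dirty=%lu cpu=%d\n",
	       (flags >> DEMO_DCACHE_DIRTY) & 1UL,
	       dcache_dirty_cpu_demo(flags));
	return 0;
}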
com"> * Both the root cache and the child caches will have it. For the root cache, * this will hold a dynamically allocated array large enough to hold * information about the currently limited memcgs in the system. * * Child caches will hold extra metadata needed for its operation. Fields are: * * @memcg: pointer to the memcg this cache belongs to * @list: list_head for the list of all caches in this memcg * @root_cache: pointer to the global, root cache, this cache was derived from * @dead: set to true after the memcg dies; the cache may still be around. * @nr_pages: number of pages that belongs to this cache. * @destroy: worker to be called whenever we are ready, or believe we may be * ready, to destroy this cache. */ struct memcg_cache_params { bool is_root_cache; union { struct kmem_cache *memcg_caches[0]; struct { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; bool dead; atomic_t nr_pages; struct work_struct destroy; }; }; }; int memcg_update_all_caches(int num_memcgs); struct seq_file; int cache_show(struct kmem_cache *s, struct seq_file *m); void print_slabinfo_header(struct seq_file *m); /* * Common kmalloc functions provided by all allocators */ void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); void kzfree(const void *); size_t ksize(const void *); /* * Allocator specific definitions. These are mainly used to establish optimized * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by * selecting the appropriate general cache at compile time. * * Allocators must define at least: * * kmem_cache_alloc() * __kmalloc() * kmalloc() * * Those wishing to support NUMA must also define: * * kmem_cache_alloc_node() * kmalloc_node() * * See each allocator definition file for additional comments and * implementation notes. */ #ifdef CONFIG_SLUB #include <linux/slub_def.h> #elif defined(CONFIG_SLOB) #include <linux/slob_def.h> #else #include <linux/slab_def.h> #endif /** * kmalloc_array - allocate memory for an array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate. * * The @flags argument may be one of: * * %GFP_USER - Allocate memory on behalf of user. May sleep. * * %GFP_KERNEL - Allocate normal kernel ram. May sleep. * * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools. * For example, use this inside interrupt handlers. * * %GFP_HIGHUSER - Allocate pages from high memory. * * %GFP_NOIO - Do not do any I/O at all while trying to get memory. * * %GFP_NOFS - Do not make any fs calls while trying to get memory. * * %GFP_NOWAIT - Allocation will not sleep. * * %GFP_THISNODE - Allocate node-local memory only. * * %GFP_DMA - Allocation suitable for DMA. * Should only be used for kmalloc() caches. Otherwise, use a * slab created with SLAB_DMA. * * Also it is possible to set different flags by OR'ing * in one or more of the following additional @flags: * * %__GFP_COLD - Request cache-cold pages instead of * trying to return cache-warm pages. * * %__GFP_HIGH - This allocation has high priority and may use emergency pools. * * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail * (think twice before using). * * %__GFP_NORETRY - If memory is not immediately available, * then give up at once. * * %__GFP_NOWARN - If allocation fails, don't issue any warnings. * * %__GFP_REPEAT - If allocation fails initially, try once more before failing. 
/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					  gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}
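/*
 * Illustration only, not part of slab.h: the track-caller variants above
 * exist for thin wrappers like this hypothetical one. With plain kmalloc(),
 * every slab-debug report would blame my_buf_alloc() itself; with
 * kmalloc_track_caller(), the recorded _RET_IP_ points at whoever called
 * my_buf_alloc().
 */
static inline void *my_buf_alloc(size_t len, gfp_t flags)
{
	return kmalloc_track_caller(len, flags);
}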
/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */
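
The _RET_IP_ plumbing behind kmalloc_track_caller() is easiest to see outside the kernel. Below is a minimal user-space sketch (the tracked_alloc/track_alloc/buf_alloc names are made up for illustration); in the kernel, _RET_IP_ is likewise defined as (unsigned long)__builtin_return_address(0):

#include <stdio.h>
#include <stdlib.h>

/* Record the call site of each allocation, much as
 * __kmalloc_track_caller() does with the address passed via _RET_IP_. */
static void *tracked_alloc(size_t size, unsigned long caller)
{
	void *p = malloc(size);

	printf("%zu bytes for call site %#lx\n", size, caller);
	return p;
}

/* Expands at the point of use: inside buf_alloc() below it records
 * buf_alloc()'s return address, i.e. it blames buf_alloc()'s caller. */
#define track_alloc(size) \
	tracked_alloc(size, (unsigned long)__builtin_return_address(0))

/* Hypothetical thin wrapper, analogous to a widely-used kernel helper. */
static void *buf_alloc(size_t size)
{
	return track_alloc(size);
}

int main(void)
{
	void *p = buf_alloc(64);	/* the report points here, not at buf_alloc() */

	free(p);
	return 0;
}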