author		David S. Miller <davem@sunset.davemloft.net>	2006-03-18 21:12:42 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:16:39 -0500
commit		9b4006dcf6a8c43bd482b9c1ec576f0ed270ef23 (patch)
tree		e04ac039a49f37ee5e8eca3fd654df0649a1806c /arch/sparc64/mm
parent		b52439c22c63dbbefd5395f2151c0ef4f667e949 (diff)
[SPARC64]: Use SLAB caches for TSB tables.
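Instead of allocating each TSB with alloc_pages() at order get_order(size),
create eight SLAB caches at boot, one per power-of-two TSB size from 8KB to
1MB, and allocate and free TSBs through them.

A hedged illustration, not part of the patch (plain userspace C;
tsb_cache_index() is a hypothetical helper): the cache index for a given
TSB size is the number of doublings above the 8KB base, which tsb_grow()
below tracks incrementally in new_cache_index.

	#include <assert.h>
	#include <stdio.h>

	/* Map a power-of-two TSB size (8KB..1MB) to its slab cache
	 * index, mirroring the new_cache_index bookkeeping in
	 * tsb_grow().
	 */
	static unsigned long tsb_cache_index(unsigned long size)
	{
		unsigned long index = 0;

		while ((8192UL << index) < size)
			index++;
		return index;
	}

	int main(void)
	{
		unsigned long size;

		for (size = 8192UL; size <= (1UL << 20); size <<= 1UL)
			printf("%7lu bytes -> tsb_caches[%lu]\n",
			       size, tsb_cache_index(size));
		assert(tsb_cache_index(1UL << 20) == 7);
		return 0;
	}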
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--	arch/sparc64/mm/init.c	 5
-rw-r--r--	arch/sparc64/mm/tsb.c	86
2 files changed, 66 insertions(+), 25 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index d703b67bc7b9..a1a364e537c7 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -165,6 +165,8 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
 	clear_page(addr);
 }
 
+extern void tsb_cache_init(void);
+
 void pgtable_cache_init(void)
 {
 	pgtable_cache = kmem_cache_create("pgtable_cache",
@@ -174,9 +176,10 @@ void pgtable_cache_init(void)
 					  zero_ctor,
 					  NULL);
 	if (!pgtable_cache) {
-		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_printf("Could not create pgtable_cache\n");
 		prom_halt();
 	}
+	tsb_cache_init();
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
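An aside, not part of the patch text: tsb_cache_init() is hooked into
pgtable_cache_init() above so that all eight TSB caches exist before the
first tsb_grow() call. In the tsb.c hunk below the caches are created with
align == size, so every TSB comes back naturally aligned to its own size,
just as the old order-N page allocations were. A minimal userspace sketch
of that size-equals-alignment idea, using posix_memalign() purely for
illustration:

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Allocate a TSB-sized buffer aligned to its own size, as the
	 * new slab caches do by passing align == size to
	 * kmem_cache_create().
	 */
	static void *alloc_size_aligned(size_t size)
	{
		void *p = NULL;

		if (posix_memalign(&p, size, size) != 0)
			return NULL;
		return p;
	}

	int main(void)
	{
		size_t size = 64 * 1024;	/* the tsb_64KB class */
		void *p = alloc_size_aligned(size);

		assert(p && ((uintptr_t)p & (size - 1)) == 0);
		free(p);
		return 0;
	}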
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 3eb8670282fd..1af797a0a092 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -11,6 +11,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tsb.h>
+#include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
@@ -207,6 +208,39 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	}
 }
 
+static kmem_cache_t *tsb_caches[8] __read_mostly;
+
+static const char *tsb_cache_names[8] = {
+	"tsb_8KB",
+	"tsb_16KB",
+	"tsb_32KB",
+	"tsb_64KB",
+	"tsb_128KB",
+	"tsb_256KB",
+	"tsb_512KB",
+	"tsb_1MB",
+};
+
+void __init tsb_cache_init(void)
+{
+	unsigned long i;
+
+	for (i = 0; i < 8; i++) {
+		unsigned long size = 8192 << i;
+		const char *name = tsb_cache_names[i];
+
+		tsb_caches[i] = kmem_cache_create(name,
+						  size, size,
+						  SLAB_HWCACHE_ALIGN |
+						  SLAB_MUST_HWCACHE_ALIGN,
+						  NULL, NULL);
+		if (!tsb_caches[i]) {
+			prom_printf("Could not create %s cache\n", name);
+			prom_halt();
+		}
+	}
+}
+
 /* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
  * do_sparc64_fault() invokes this routine to try and grow the TSB.
  *
@@ -226,45 +260,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 void tsb_grow(struct mm_struct *mm, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
-	unsigned long size, old_size, flags;
-	struct page *page;
+	unsigned long new_size, old_size, flags;
 	struct tsb *old_tsb, *new_tsb;
-	unsigned long order, new_rss_limit;
+	unsigned long new_cache_index, old_cache_index;
+	unsigned long new_rss_limit;
 	gfp_t gfp_flags;
 
 	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
 		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
 
-	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
-		unsigned long n_entries = size / sizeof(struct tsb);
+	new_cache_index = 0;
+	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
+		unsigned long n_entries = new_size / sizeof(struct tsb);
 
 		n_entries = (n_entries * 3) / 4;
 		if (n_entries > rss)
 			break;
+
+		new_cache_index++;
 	}
 
-	if (size == max_tsb_size)
+	if (new_size == max_tsb_size)
 		new_rss_limit = ~0UL;
 	else
-		new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4;
+		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;
 
-retry_page_alloc:
-	order = get_order(size);
+retry_tsb_alloc:
 	gfp_flags = GFP_KERNEL;
-	if (order > 1)
+	if (new_size > (PAGE_SIZE * 2))
 		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
 
-	page = alloc_pages(gfp_flags, order);
-	if (unlikely(!page)) {
+	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
+	if (unlikely(!new_tsb)) {
 		/* Not being able to fork due to a high-order TSB
 		 * allocation failure is very bad behavior.  Just back
 		 * down to a 0-order allocation and force no TSB
 		 * growing for this address space.
 		 */
-		if (mm->context.tsb == NULL && order > 0) {
-			size = PAGE_SIZE;
+		if (mm->context.tsb == NULL && new_cache_index > 0) {
+			new_cache_index = 0;
+			new_size = 8192;
 			new_rss_limit = ~0UL;
-			goto retry_page_alloc;
+			goto retry_tsb_alloc;
 		}
 
 		/* If we failed on a TSB grow, we are under serious
@@ -276,8 +313,7 @@ retry_page_alloc:
 	}
 
 	/* Mark all tags as invalid.  */
-	new_tsb = page_address(page);
-	memset(new_tsb, 0x40, size);
+	memset(new_tsb, 0x40, new_size);
 
 	/* Ok, we are about to commit the changes.  If we are
 	 * growing an existing TSB the locking is very tricky,
@@ -304,8 +340,10 @@ retry_page_alloc:
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 	old_tsb = mm->context.tsb;
+	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
 	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
 
+
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
@@ -313,7 +351,7 @@ retry_page_alloc:
 	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
-		free_pages((unsigned long) new_tsb, get_order(size));
+		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
@@ -331,11 +369,11 @@ retry_page_alloc:
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
 	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, size);
+	setup_tsb_params(mm, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 
@@ -350,7 +388,7 @@ retry_page_alloc:
 		smp_tsb_sync(mm);
 
 		/* Now it is safe to free the old tsb.  */
-		free_pages((unsigned long) old_tsb, get_order(old_size));
+		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
 	}
 }
 
@@ -379,10 +417,10 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 void destroy_context(struct mm_struct *mm)
 {
-	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
-	unsigned long flags;
+	unsigned long flags, cache_index;
 
-	free_pages((unsigned long) mm->context.tsb, get_order(size));
+	cache_index = (mm->context.tsb_reg_val & 0x7UL);
+	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
 
 	/* We can remove these later, but for now it's useful
 	 * to catch any bogus post-destroy_context() references
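An aside on the freeing paths, not part of the patch text: both
destroy_context() and the grow path recover which cache a TSB came from by
masking the low three bits of mm->context.tsb_reg_val, the size-class
field that setup_tsb_params() is assumed to encode when the TSB is
installed. A tiny sketch of that encode/decode round-trip, assuming only
what the diff shows (the index lives in bits 2:0; encode_index() and
decode_index() are hypothetical names):

	#include <assert.h>

	/* Stash a 0-7 size-class index in the low bits of a register
	 * value and recover it later, as tsb_reg_val & 0x7UL does.
	 */
	static unsigned long encode_index(unsigned long reg,
					  unsigned long index)
	{
		return (reg & ~0x7UL) | (index & 0x7UL);
	}

	static unsigned long decode_index(unsigned long reg)
	{
		return reg & 0x7UL;
	}

	int main(void)
	{
		unsigned long reg = encode_index(0x10000UL, 5);

		assert(decode_index(reg) == 5);
		return 0;
	}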