aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/mm/tlb.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/ia64/mm/tlb.c')
-rw-r--r--  arch/ia64/mm/tlb.c | 33 ++++++++++++++++++++-------------
1 files changed, 20 insertions, 13 deletions
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..5dfd916e9ea6 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -22,6 +22,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 
 #include <asm/delay.h>
 #include <asm/mmu_context.h>
@@ -48,7 +49,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +430,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+					  sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -444,7 +451,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -459,16 +466,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
@@ -488,7 +495,7 @@ found:
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -497,7 +504,7 @@ found:
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -528,7 +535,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +544,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +553,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;