author     Michael Ellerman <mpe@ellerman.id.au>              2014-07-09 22:29:19 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-07-28 00:10:22 -0400
commit     376af5947c0e441ccbf98f0212d4ffbf171528f6 (patch)
tree       8d80f48caea9448257de40a2a9c7279cc80e60b0 /arch
parent     468a33028edd62549ad3c4dcbc23dd0311c67832 (diff)
powerpc: Remove STAB code
Old cpus didn't have a Segment Lookaside Buffer (SLB), instead they had
a Segment Table (STAB). Now that we've dropped support for those cpus,
we can remove the STAB support entirely.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
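[Illustration only, not part of the patch: a condensed C sketch of what the 64-bit hash MMU paths reduce to once the STAB fallback is gone, simplified from the mmu_context.h and hash_utils_64.c hunks below. The wrapper name switch_mmu_hash64() is hypothetical; switch_slb(), slb_initialize(), htab_initialize() and mmu_has_feature() are the real symbols touched by this diff.]

	/* Hypothetical condensed view of the post-patch flow, not literal kernel code. */
	static inline void switch_mmu_hash64(struct task_struct *tsk, struct mm_struct *next)
	{
		/* The SLB is the only segment mechanism left; the old
		 * "else switch_stab(tsk, next)" fallback is deleted below. */
		if (mmu_has_feature(MMU_FTR_SLB))
			switch_slb(tsk, next);
	}

	void __init early_init_mmu(void)
	{
		/* Hash table setup still runs first, since the page size
		 * encoding is obtained there. */
		htab_initialize();

		/* stab_initialize(), stabs_alloc() and the paca stab_real/
		 * stab_addr fields no longer exist after this patch. */
		if (mmu_has_feature(MMU_FTR_SLB))
			slb_initialize();
	}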
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h      22
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h      3
-rw-r--r--  arch/powerpc/include/asm/paca.h             4
-rw-r--r--  arch/powerpc/include/asm/reg.h              2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c           2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S      155
-rw-r--r--  arch/powerpc/kernel/head_64.S               8
-rw-r--r--  arch/powerpc/kernel/setup_64.c              3
-rw-r--r--  arch/powerpc/mm/Makefile                    4
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c            18
-rw-r--r--  arch/powerpc/mm/stab.c                    286
-rw-r--r--  arch/powerpc/xmon/xmon.c                   26
12 files changed, 11 insertions(+), 522 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c2b4dcf23d03..d76514487d6f 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -25,26 +25,6 @@
 #include <asm/processor.h>

 /*
- * Segment table
- */
-
-#define STE_ESID_V		0x80
-#define STE_ESID_KS		0x20
-#define STE_ESID_KP		0x10
-#define STE_ESID_N		0x08
-
-#define STE_VSID_SHIFT		12
-
-/* Location of cpu0's segment table */
-#define STAB0_PAGE		0x8
-#define STAB0_OFFSET		(STAB0_PAGE << 12)
-#define STAB0_PHYS_ADDR		(STAB0_OFFSET + PHYSICAL_START)
-
-#ifndef __ASSEMBLY__
-extern char initial_stab[];
-#endif /* ! __ASSEMBLY */
-
-/*
  * SLB
  */

@@ -370,10 +350,8 @@ extern void hpte_init_lpar(void);
 extern void hpte_init_beat(void);
 extern void hpte_init_beat_v3(void);

-extern void stabs_alloc(void);
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
-extern void stab_initialize(unsigned long stab);

 extern void slb_vmalloc_update(void);
 extern void slb_set_size(u16 size);
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b467530e2485..f5690e2689c7 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -18,7 +18,6 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);

 extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
-extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 extern void set_context(unsigned long id, pgd_t *pgd);

@@ -79,8 +78,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_PPC_STD_MMU_64
 	if (mmu_has_feature(MMU_FTR_SLB))
 		switch_slb(tsk, next);
-	else
-		switch_stab(tsk, next);
 #else
 	/* Out of line for now */
 	switch_mmu_context(prev, next);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index bb0bd25f20d0..5abde4e223bb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -78,10 +78,6 @@ struct paca_struct {
 	u64 kernel_toc;			/* Kernel TOC address */
 	u64 kernelbase;			/* Base address of kernel */
 	u64 kernel_msr;			/* MSR while running in kernel */
-#ifdef CONFIG_PPC_STD_MMU_64
-	u64 stab_real;			/* Absolute address of segment table */
-	u64 stab_addr;			/* Virtual address of segment table */
-#endif /* CONFIG_PPC_STD_MMU_64 */
 	void *emergency_sp;		/* pointer to emergency stack */
 	u64 data_offset;		/* per cpu data offset */
 	s16 hw_cpu_id;			/* Physical processor number */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bffd89d27301..f7b97b895708 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,7 +254,7 @@
 #define DSISR_PROTFAULT		0x08000000	/* protection fault */
 #define DSISR_ISSTORE		0x02000000	/* access was a store */
 #define DSISR_DABRMATCH		0x00400000	/* hit data breakpoint */
-#define DSISR_NOSEGMENT		0x00200000	/* STAB/SLB miss */
+#define DSISR_NOSEGMENT		0x00200000	/* SLB miss */
 #define DSISR_KEYFAULT		0x00200000	/* Key fault */
 #define SPRN_TBRL	0x10C	/* Time Base Read Lower Register (user, R/O) */
 #define SPRN_TBRU	0x10D	/* Time Base Read Upper Register (user, R/O) */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f5995a912213..e35054054c32 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -216,8 +216,6 @@ int main(void)
 #endif /* CONFIG_PPC_BOOK3E */

 #ifdef CONFIG_PPC_STD_MMU_64
-	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
-	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
 	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
 	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
 	DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a7d36b19221d..b859b3665be6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -188,10 +188,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 data_access_pSeries:
 	HMT_MEDIUM_PPR_DISCARD
 	SET_SCRATCH0(r13)
-BEGIN_FTR_SECTION
-	b	data_access_check_stab
-data_access_not_stab:
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
 				 KVMTEST, 0x300)

@@ -514,34 +510,6 @@ machine_check_pSeries_0:
 	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
 	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
 	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
-
-	/* moved from 0x300 */
-data_access_check_stab:
-	GET_PACA(r13)
-	std	r9,PACA_EXSLB+EX_R9(r13)
-	std	r10,PACA_EXSLB+EX_R10(r13)
-	mfspr	r10,SPRN_DAR
-	mfspr	r9,SPRN_DSISR
-	srdi	r10,r10,60
-	rlwimi	r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
-	lbz	r9,HSTATE_IN_GUEST(r13)
-	rlwimi	r10,r9,8,0x300
-#endif
-	mfcr	r9
-	cmpwi	r10,0x2c
-	beq	do_stab_bolted_pSeries
-	mtcrf	0x80,r9
-	ld	r9,PACA_EXSLB+EX_R9(r13)
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	b	data_access_not_stab
-do_stab_bolted_pSeries:
-	std	r11,PACA_EXSLB+EX_R11(r13)
-	std	r12,PACA_EXSLB+EX_R12(r13)
-	GET_SCRATCH0(r10)
-	std	r10,PACA_EXSLB+EX_R13(r13)
-	EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD)
-
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
 	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
@@ -1338,12 +1306,6 @@ fwnmi_data_area:
 	. = 0x8000
 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

-/* Space for CPU0's segment table */
-	.balign	4096
-	.globl	initial_stab
-initial_stab:
-	.space	4096
-
 #ifdef CONFIG_PPC_POWERNV
 _GLOBAL(opal_mc_secondary_handler)
 	HMT_MEDIUM_PPR_DISCARD
@@ -1594,12 +1556,6 @@ do_hash_page:
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
 	andis.	r0,r4,DSISR_DABRMATCH@h
 	bne-	handle_dabr_fault
-
-BEGIN_FTR_SECTION
-	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
-	bne-	do_ste_alloc		/* If so handle it */
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
-
 	CURRENT_THREAD_INFO(r11, r1)
 	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
@@ -1680,114 +1636,3 @@ handle_dabr_fault:
 	li	r5,SIGSEGV
 	bl	bad_page_fault
 	b	ret_from_except
-
-	/* here we have a segment miss */
-do_ste_alloc:
-	bl	ste_allocate		/* try to insert stab entry */
-	cmpdi	r3,0
-	bne-	handle_page_fault
-	b	fast_exception_return
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r9 - r13 are saved in paca->exslb.
- * We assume we aren't going to take any exceptions during this procedure.
- * We assume (DAR >> 60) == 0xc.
- */
-	.align	7
-do_stab_bolted:
-	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
-	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
-	mfspr	r11,SPRN_DAR			/* ea */
-
-	/*
-	 * check for bad kernel/user address
-	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
-	 */
-	rldicr. r9,r11,4,(63 - 46 - 4)
-	li	r9,0	/* VSID = 0 for bad address */
-	bne-	0f
-
-	/*
-	 * Calculate VSID:
-	 * This is the kernel vsid, we take the top for context from
-	 * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
-	 * Here we know that (ea >> 60) == 0xc
-	 */
-	lis	r9,(MAX_USER_CONTEXT + 1)@ha
-	addi	r9,r9,(MAX_USER_CONTEXT + 1)@l
-
-	srdi	r10,r11,SID_SHIFT
-	rldimi	r10,r9,ESID_BITS,0 /* proto vsid */
-	ASM_VSID_SCRAMBLE(r10, r9, 256M)
-	rldic	r9,r10,12,16	/* r9 = vsid << 12 */
-
-0:
-	/* Hash to the primary group */
-	ld	r10,PACASTABVIRT(r13)
-	srdi	r11,r11,SID_SHIFT
-	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
-
-	/* Search the primary group for a free entry */
-1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
-	andi.	r11,r11,0x80
-	beq	2f
-	addi	r10,r10,16
-	andi.	r11,r10,0x70
-	bne	1b
-
-	/* Stick for only searching the primary group for now. */
-	/* At least for now, we use a very simple random castout scheme */
-	/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
-	mftb	r11
-	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
-	ori	r11,r11,0x10
-
-	/* r10 currently points to an ste one past the group of interest */
-	/* make it point to the randomly selected entry */
-	subi	r10,r10,128
-	or	r10,r10,r11	/* r10 is the entry to invalidate */
-
-	isync			/* mark the entry invalid */
-	ld	r11,0(r10)
-	rldicl	r11,r11,56,1	/* clear the valid bit */
-	rotldi	r11,r11,8
-	std	r11,0(r10)
-	sync
-
-	clrrdi	r11,r11,28	/* Get the esid part of the ste */
-	slbie	r11
-
-2:	std	r9,8(r10)	/* Store the vsid part of the ste */
-	eieio
-
-	mfspr	r11,SPRN_DAR		/* Get the new esid */
-	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
-	ori	r11,r11,0x90	/* Turn on valid and kp */
-	std	r11,0(r10)	/* Put new entry back into the stab */
-
-	sync
-
-	/* All done -- return from exception. */
-	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
-	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
-
-	andi.	r10,r12,MSR_RI
-	beq-	unrecov_slb
-
-	mtcrf	0x80,r9			/* restore CR */
-
-	mfmsr	r10
-	clrrdi	r10,r10,2
-	mtmsrd	r10,1
-
-	mtspr	SPRN_SRR0,r11
-	mtspr	SPRN_SRR1,r12
-	ld	r9,PACA_EXSLB+EX_R9(r13)
-	ld	r10,PACA_EXSLB+EX_R10(r13)
-	ld	r11,PACA_EXSLB+EX_R11(r13)
-	ld	r12,PACA_EXSLB+EX_R12(r13)
-	ld	r13,PACA_EXSLB+EX_R13(r13)
-	rfid
-	b	.	/* prevent speculative execution */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index a95145d7f61b..41ab76c3a94a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -618,7 +618,7 @@ __secondary_start:
 	addi	r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
 	std	r14,PACAKSAVE(r13)

-	/* Do early setup for that CPU (stab, slb, hash table pointer) */
+	/* Do early setup for that CPU (SLB and hash table pointer) */
 	bl	early_setup_secondary

 	/*
@@ -771,8 +771,10 @@ start_here_multiplatform:
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

-	/* Do very early kernel initializations, including initial hash table,
-	 * stab and slb setup before we turn on relocation. */
+	/*
+	 * Do very early kernel initializations, including initial hash table
+	 * and SLB setup before we turn on relocation.
+	 */

 	/* Restore parameters passed from prom_init/kexec */
 	mr	r3,r31
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ee082d771178..694339043b56 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -673,9 +673,6 @@ void __init setup_arch(char **cmdline_p)
 	exc_lvl_early_init();
 	emergency_stack_init();

-#ifdef CONFIG_PPC_STD_MMU_64
-	stabs_alloc();
-#endif
 	/* set up the bootmem stuff with available memory */
 	do_init_bootmem();
 	sparse_init();
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 51230ee6a407..d0130fff20e5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -13,9 +13,7 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
 				tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(CONFIG_WORD_SIZE)e.o
 hash64-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
-obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o \
-				   slb_low.o slb.o stab.o \
-				   $(hash64-y)
+obj-$(CONFIG_PPC_STD_MMU_64)	+= hash_utils_64.o slb_low.o slb.o $(hash64-y)
 obj-$(CONFIG_PPC_STD_MMU_32)	+= ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
 				tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 88fdd9d25077..fb8bea71327d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -821,21 +821,15 @@ static void __init htab_initialize(void)

 void __init early_init_mmu(void)
 {
-	/* Setup initial STAB address in the PACA */
-	get_paca()->stab_real = __pa((u64)&initial_stab);
-	get_paca()->stab_addr = (u64)&initial_stab;
-
 	/* Initialize the MMU Hash table and create the linear mapping
-	 * of memory. Has to be done before stab/slb initialization as
-	 * this is currently where the page size encoding is obtained
+	 * of memory. Has to be done before SLB initialization as this is
+	 * currently where the page size encoding is obtained.
 	 */
 	htab_initialize();

-	/* Initialize stab / SLB management */
+	/* Initialize SLB management */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
-	else
-		stab_initialize(get_paca()->stab_real);
 }

 #ifdef CONFIG_SMP
@@ -845,13 +839,9 @@ void early_init_mmu_secondary(void)
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		mtspr(SPRN_SDR1, _SDR1);

-	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries.
-	 */
+	/* Initialize SLB */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
-	else
-		stab_initialize(get_paca()->stab_addr);
 }
 #endif /* CONFIG_SMP */

diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
deleted file mode 100644
index 3f8efa6f2997..000000000000
--- a/arch/powerpc/mm/stab.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * PowerPC64 Segment Translation Support.
- *
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- *    Copyright (c) 2001 Dave Engebretsen
- *
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/memblock.h>
-
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/mmu_context.h>
-#include <asm/paca.h>
-#include <asm/cputable.h>
-#include <asm/prom.h>
-
-struct stab_entry {
-	unsigned long esid_data;
-	unsigned long vsid_data;
-};
-
-#define NR_STAB_CACHE_ENTRIES 8
-static DEFINE_PER_CPU(long, stab_cache_ptr);
-static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);
-
-/*
- * Create a segment table entry for the given esid/vsid pair.
- */
-static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
-{
-	unsigned long esid_data, vsid_data;
-	unsigned long entry, group, old_esid, castout_entry, i;
-	unsigned int global_entry;
-	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
-
-	vsid_data = vsid << STE_VSID_SHIFT;
-	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
-	if (! kernel_segment)
-		esid_data |= STE_ESID_KS;
-
-	/* Search the primary group first. */
-	global_entry = (esid & 0x1f) << 3;
-	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
-
-	/* Find an empty entry, if one exists. */
-	for (group = 0; group < 2; group++) {
-		for (entry = 0; entry < 8; entry++, ste++) {
-			if (!(ste->esid_data & STE_ESID_V)) {
-				ste->vsid_data = vsid_data;
-				eieio();
-				ste->esid_data = esid_data;
-				return (global_entry | entry);
-			}
-		}
-		/* Now search the secondary group. */
-		global_entry = ((~esid) & 0x1f) << 3;
-		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
-	}
-
-	/*
-	 * Could not find empty entry, pick one with a round robin selection.
-	 * Search all entries in the two groups.
-	 */
-	castout_entry = get_paca()->stab_rr;
-	for (i = 0; i < 16; i++) {
-		if (castout_entry < 8) {
-			global_entry = (esid & 0x1f) << 3;
-			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
-			castout_ste = ste + castout_entry;
-		} else {
-			global_entry = ((~esid) & 0x1f) << 3;
-			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
-			castout_ste = ste + (castout_entry - 8);
-		}
-
-		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
-			break;
-
-		castout_entry = (castout_entry + 1) & 0xf;
-	}
-
-	get_paca()->stab_rr = (castout_entry + 1) & 0xf;
-
-	/* Modify the old entry to the new value. */
-
-	/* Force previous translations to complete. DRENG */
-	asm volatile("isync" : : : "memory");
-
-	old_esid = castout_ste->esid_data >> SID_SHIFT;
-	castout_ste->esid_data = 0;		/* Invalidate old entry */
-
-	asm volatile("sync" : : : "memory");	/* Order update */
-
-	castout_ste->vsid_data = vsid_data;
-	eieio();				/* Order update */
-	castout_ste->esid_data = esid_data;
-
-	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
-	/* Ensure completion of slbie */
-	asm volatile("sync" : : : "memory");
-
-	return (global_entry | (castout_entry & 0x7));
-}
-
-/*
- * Allocate a segment table entry for the given ea and mm
- */
-static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
-{
-	unsigned long vsid;
-	unsigned char stab_entry;
-	unsigned long offset;
-
-	/* Kernel or user address? */
-	if (is_kernel_addr(ea)) {
-		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
-	} else {
-		if ((ea >= TASK_SIZE_USER64) || (! mm))
-			return 1;
-
-		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
-	}
-
-	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
-
-	if (!is_kernel_addr(ea)) {
-		offset = __get_cpu_var(stab_cache_ptr);
-		if (offset < NR_STAB_CACHE_ENTRIES)
-			__get_cpu_var(stab_cache[offset++]) = stab_entry;
-		else
-			offset = NR_STAB_CACHE_ENTRIES+1;
-		__get_cpu_var(stab_cache_ptr) = offset;
-
-		/* Order update */
-		asm volatile("sync":::"memory");
-	}
-
-	return 0;
-}
-
-int ste_allocate(unsigned long ea)
-{
-	return __ste_allocate(ea, current->mm);
-}
-
-/*
- * Do the segment table work for a context switch: flush all user
- * entries from the table, then preload some probably useful entries
- * for the new task
- */
-void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
-{
-	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
-	struct stab_entry *ste;
-	unsigned long offset;
-	unsigned long pc = KSTK_EIP(tsk);
-	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
-
-	/* Force previous translations to complete. DRENG */
-	asm volatile("isync" : : : "memory");
-
-	/*
-	 * We need interrupts hard-disabled here, not just soft-disabled,
-	 * so that a PMU interrupt can't occur, which might try to access
-	 * user memory (to get a stack trace) and possible cause an STAB miss
-	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
-	 */
-	hard_irq_disable();
-
-	offset = __get_cpu_var(stab_cache_ptr);
-	if (offset <= NR_STAB_CACHE_ENTRIES) {
-		int i;
-
-		for (i = 0; i < offset; i++) {
-			ste = stab + __get_cpu_var(stab_cache[i]);
-			ste->esid_data = 0; /* invalidate entry */
-		}
-	} else {
-		unsigned long entry;
-
-		/* Invalidate all entries. */
-		ste = stab;
-
-		/* Never flush the first entry. */
-		ste += 1;
-		for (entry = 1;
-		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
-		     entry++, ste++) {
-			unsigned long ea;
-			ea = ste->esid_data & ESID_MASK;
-			if (!is_kernel_addr(ea)) {
-				ste->esid_data = 0;
-			}
-		}
-	}
-
-	asm volatile("sync; slbia; sync":::"memory");
-
-	__get_cpu_var(stab_cache_ptr) = 0;
-
-	/* Now preload some entries for the new task */
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
-
-	__ste_allocate(pc, mm);
-
-	if (GET_ESID(pc) == GET_ESID(stack))
-		return;
-
-	__ste_allocate(stack, mm);
-
-	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
-		return;
-
-	__ste_allocate(unmapped_base, mm);
-
-	/* Order update */
-	asm volatile("sync" : : : "memory");
-}
-
-/*
- * Allocate segment tables for secondary CPUs. These must all go in
- * the first (bolted) segment, so that do_stab_bolted won't get a
- * recursive segment miss on the segment table itself.
- */
-void __init stabs_alloc(void)
-{
-	int cpu;
-
-	if (mmu_has_feature(MMU_FTR_SLB))
-		return;
-
-	for_each_possible_cpu(cpu) {
-		unsigned long newstab;
-
-		if (cpu == 0)
-			continue; /* stab for CPU 0 is statically allocated */
-
-		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
-					      1<<SID_SHIFT);
-		newstab = (unsigned long)__va(newstab);
-
-		memset((void *)newstab, 0, HW_PAGE_SIZE);
-
-		paca[cpu].stab_addr = newstab;
-		paca[cpu].stab_real = __pa(newstab);
-		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
-		       "virtual, 0x%llx absolute\n",
-		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
-	}
-}
-
-/*
- * Build an entry for the base kernel segment and put it into
- * the segment table or SLB. All other segment table or SLB
- * entries are faulted in.
- */
-void stab_initialize(unsigned long stab)
-{
-	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
-	unsigned long stabreal;
-
-	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
-
-	/* Order update */
-	asm volatile("sync":::"memory");
-
-	/* Set ASR */
-	stabreal = get_paca()->stab_real | 0x1ul;
-
-	mtspr(SPRN_ASR, stabreal);
-}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d199bfa2f1fa..dc8cf285c3ff 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2058,10 +2058,6 @@ static void dump_one_paca(int cpu)
 	DUMP(p, kernel_toc, "lx");
 	DUMP(p, kernelbase, "lx");
 	DUMP(p, kernel_msr, "lx");
-#ifdef CONFIG_PPC_STD_MMU_64
-	DUMP(p, stab_real, "lx");
-	DUMP(p, stab_addr, "lx");
-#endif
 	DUMP(p, emergency_sp, "p");
 #ifdef CONFIG_PPC_BOOK3S_64
 	DUMP(p, mc_emergency_sp, "p");
@@ -2727,32 +2723,10 @@ static void dump_slb(void)
 	}
 }

-static void dump_stab(void)
-{
-	int i;
-	unsigned long *tmp = (unsigned long *)local_paca->stab_addr;
-
-	printf("Segment table contents of cpu 0x%x\n", smp_processor_id());
-
-	for (i = 0; i < PAGE_SIZE/16; i++) {
-		unsigned long a, b;
-
-		a = *tmp++;
-		b = *tmp++;
-
-		if (a || b) {
-			printf("%03d %016lx ", i, a);
-			printf("%016lx\n", b);
-		}
-	}
-}
-
 void dump_segments(void)
 {
 	if (mmu_has_feature(MMU_FTR_SLB))
 		dump_slb();
-	else
-		dump_stab();
 }
 #endif
