path: root/arch/mips/mm/tlbex.c
author	David Daney <david.daney@cavium.com>	2011-07-05 19:34:46 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2011-07-26 01:47:47 -0400
commit	bf28607fbe529e20180080c4a0295b0a47834fde (patch)
tree	101ca3c89186e1feb07ceb61ba91a9150d19218f /arch/mips/mm/tlbex.c
parent	f0daaaf5236297ea81ec7732cd0df5dbd84a5042 (diff)
MIPS: Close races in TLB modify handlers.
Page table entries are made invalid by writing a zero into the PTE slot
in a page table. This creates a race condition with the TLB modify
handlers when they are updating the PTE.

	CPU0				CPU1
	Test for _PAGE_PRESENT		.
	.				set to not _PAGE_PRESENT (zero)
	Set to _PAGE_VALID		.

So now the page-not-present value (zero) is suddenly valid and user
space programs have access to physical page zero.

We close the race by putting the test for _PAGE_PRESENT and the setting
of _PAGE_VALID into an atomic LL/SC section. This requires more
registers than just K0 and K1 in the handlers, so we need to save some
registers to a save area and then restore them when we are done.

The save area is an array of cacheline aligned structures that should
not suffer cache line bouncing as they are CPU private.

[ralf@linux-mips.org: Fix !defined(CONFIG_MIPS_PGD_C0_CONTEXT) build error.]

Signed-off-by: David Daney <david.daney@cavium.com>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2577/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
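As a rough illustration of the fix (not part of the patch): the LL/SC section the handlers now emit behaves approximately like the C helper sketched below. The helper name is hypothetical; the real handlers generate MIPS assembly at boot time through uasm.

/*
 * Illustrative sketch only, assuming a hypothetical helper name.
 * The generated code tests _PAGE_PRESENT and sets _PAGE_VALID in one
 * atomic read-modify-write, so a concurrent zeroing of the PTE can no
 * longer be turned back into a valid mapping.
 */
static inline int pte_mark_valid_atomic(unsigned long *ptep)
{
	unsigned long old, new;

	do {
		old = *ptep;			/* LL: load-linked the PTE value    */
		if (!(old & _PAGE_PRESENT))	/* another CPU cleared it meanwhile */
			return 0;		/* bail out to the page fault path  */
		new = old | _PAGE_VALID;
	} while (cmpxchg(ptep, old, new) != old);	/* SC failed, retry */

	return 1;
}

The extra register pressure this creates is why the patch adds build_get_work_registers()/build_restore_work_registers(): when no spare KScratch register is available, $1 and $2 are spilled to the per-CPU, cacheline-aligned handler_reg_save area.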
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--	arch/mips/mm/tlbex.c	292
1 file changed, 194 insertions(+), 98 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 424ed4b92e6d..b6e1cff50667 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -42,6 +42,18 @@
 extern void tlb_do_page_fault_0(void);
 extern void tlb_do_page_fault_1(void);
 
+struct work_registers {
+	int r1;
+	int r2;
+	int r3;
+};
+
+struct tlb_reg_save {
+	unsigned long a;
+	unsigned long b;
+} ____cacheline_aligned_in_smp;
+
+static struct tlb_reg_save handler_reg_save[NR_CPUS];
 
 static inline int r45k_bvahwbug(void)
 {
@@ -248,6 +260,73 @@ static int scratch_reg __cpuinitdata;
 static int pgd_reg __cpuinitdata;
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
+static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+{
+	struct work_registers r;
+
+	int smp_processor_id_reg;
+	int smp_processor_id_sel;
+	int smp_processor_id_shift;
+
+	if (scratch_reg > 0) {
+		/* Save in CPU local C0_KScratch? */
+		UASM_i_MTC0(p, 1, 31, scratch_reg);
+		r.r1 = K0;
+		r.r2 = K1;
+		r.r3 = 1;
+		return r;
+	}
+
+	if (num_possible_cpus() > 1) {
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+		smp_processor_id_shift = 51;
+		smp_processor_id_reg = 20; /* XContext */
+		smp_processor_id_sel = 0;
+#else
+# ifdef CONFIG_32BIT
+		smp_processor_id_shift = 25;
+		smp_processor_id_reg = 4; /* Context */
+		smp_processor_id_sel = 0;
+# endif
+# ifdef CONFIG_64BIT
+		smp_processor_id_shift = 26;
+		smp_processor_id_reg = 4; /* Context */
+		smp_processor_id_sel = 0;
+# endif
+#endif
+		/* Get smp_processor_id */
+		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
+		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+
+		/* handler_reg_save index in K0 */
+		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+
+		UASM_i_LA(p, K1, (long)&handler_reg_save);
+		UASM_i_ADDU(p, K0, K0, K1);
+	} else {
+		UASM_i_LA(p, K0, (long)&handler_reg_save);
+	}
+	/* K0 now points to save area, save $1 and $2 */
+	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+
+	r.r1 = K1;
+	r.r2 = 1;
+	r.r3 = 2;
+	return r;
+}
+
+static void __cpuinit build_restore_work_registers(u32 **p)
+{
+	if (scratch_reg > 0) {
+		UASM_i_MFC0(p, 1, 31, scratch_reg);
+		return;
+	}
+	/* K0 already points to save area, restore $1 and $2 */
+	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+}
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 
 /*
@@ -1160,9 +1239,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 	memset(final_handler, 0, sizeof(final_handler));
 
-	if (scratch_reg == 0)
-		scratch_reg = allocate_kscratch();
-
 	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
 		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
 							  scratch_reg);
@@ -1462,22 +1538,28 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  */
 static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
-		  unsigned int pte, unsigned int ptr, enum label_id lid)
+		  int pte, int ptr, int scratch, enum label_id lid)
 {
+	int t = scratch >= 0 ? scratch : pte;
+
 	if (kernel_uses_smartmips_rixi) {
 		if (use_bbit_insns()) {
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
 		} else {
-			uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
-			uasm_il_beqz(p, r, pte, lid);
-			iPTE_LW(p, pte, ptr);
+			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+			uasm_il_beqz(p, r, t, lid);
+			if (pte == t)
+				/* You lose the SMP race :-(*/
+				iPTE_LW(p, pte, ptr);
 		}
 	} else {
-		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-		uasm_il_bnez(p, r, pte, lid);
-		iPTE_LW(p, pte, ptr);
+		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, t, lid);
+		if (pte == t)
+			/* You lose the SMP race :-(*/
+			iPTE_LW(p, pte, ptr);
 	}
 }
 
@@ -1497,19 +1579,19 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  */
 static void __cpuinit
 build_pte_writable(u32 **p, struct uasm_reloc **r,
-		   unsigned int pte, unsigned int ptr, enum label_id lid)
+		   unsigned int pte, unsigned int ptr, int scratch,
+		   enum label_id lid)
 {
-	if (use_bbit_insns()) {
-		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
-		uasm_i_nop(p);
-		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
-		uasm_i_nop(p);
-	} else {
-		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-		uasm_il_bnez(p, r, pte, lid);
+	int t = scratch >= 0 ? scratch : pte;
+
+	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
+	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+	uasm_il_bnez(p, r, t, lid);
+	if (pte == t)
+		/* You lose the SMP race :-(*/
 		iPTE_LW(p, pte, ptr);
-	}
+	else
+		uasm_i_nop(p);
 }
 
 /* Make PTE writable, update software status bits as well, then store
@@ -1531,15 +1613,19 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  */
 static void __cpuinit
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
-		     unsigned int pte, unsigned int ptr, enum label_id lid)
+		     unsigned int pte, unsigned int ptr, int scratch,
+		     enum label_id lid)
 {
 	if (use_bbit_insns()) {
 		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
 		uasm_i_nop(p);
 	} else {
-		uasm_i_andi(p, pte, pte, _PAGE_WRITE);
-		uasm_il_beqz(p, r, pte, lid);
-		iPTE_LW(p, pte, ptr);
+		int t = scratch >= 0 ? scratch : pte;
+		uasm_i_andi(p, t, pte, _PAGE_WRITE);
+		uasm_il_beqz(p, r, t, lid);
+		if (pte == t)
+			/* You lose the SMP race :-(*/
+			iPTE_LW(p, pte, ptr);
 	}
 }
 
@@ -1619,7 +1705,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
 	uasm_i_nop(&p); /* load delay */
 	build_make_valid(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1649,7 +1735,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1673,13 +1759,14 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 	u32 *p = handle_tlbm;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbm, 0, sizeof(handle_tlbm));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1702,15 +1789,16 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static void __cpuinit
+static struct work_registers __cpuinit
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
-				   struct uasm_reloc **r, unsigned int pte,
-				   unsigned int ptr)
+				   struct uasm_reloc **r)
 {
+	struct work_registers wr = build_get_work_registers(p);
+
 #ifdef CONFIG_64BIT
-	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
+	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
 #else
-	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
+	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -1719,21 +1807,22 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
 	 * see if we need to jump to huge tlb processing.
 	 */
-	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
 #endif
 
-	UASM_i_MFC0(p, pte, C0_BADVADDR);
-	UASM_i_LW(p, ptr, 0, ptr);
-	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
-	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
-	UASM_i_ADDU(p, ptr, ptr, pte);
+	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
+	UASM_i_LW(p, wr.r2, 0, wr.r2);
+	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
 
 #ifdef CONFIG_SMP
 	uasm_l_smp_pgtable_change(l, *p);
 #endif
-	iPTE_LW(p, pte, ptr); /* get even pte */
+	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
 	if (!m4kc_tlbp_war())
 		build_tlb_probe_entry(p);
+	return wr;
 }
 
 static void __cpuinit
@@ -1746,6 +1835,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	build_update_entries(p, tmp, ptr);
 	build_tlb_write_entry(p, l, r, tlb_indexed);
 	uasm_l_leave(l, *p);
+	build_restore_work_registers(p);
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
@@ -1758,6 +1848,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	u32 *p = handle_tlbl;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbl, 0, sizeof(handle_tlbl));
 	memset(labels, 0, sizeof(labels));
@@ -1777,8 +1868,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		/* No need for uasm_i_nop */
 	}
 
-	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 
@@ -1788,44 +1879,43 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * have triggered it. Skip the expensive test..
 		 */
 		if (use_bbit_insns()) {
-			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
 				      label_tlbl_goaround1);
 		} else {
-			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
 		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine entrylo 0 or 1 based on ptr. */
 		if (use_bbit_insns()) {
-			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
 		} else {
-			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-			uasm_i_beqz(&p, K0, 8);
+			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+			uasm_i_beqz(&p, wr.r3, 8);
 		}
-
-		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
-		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/* load it in the delay slot*/
+		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+		/* load it if ptr is odd */
+		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
 		/*
-		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
 		if (use_bbit_insns()) {
-			uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
-			/* Reload the PTE value */
-			iPTE_LW(&p, K0, K1);
+			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
+			uasm_i_nop(&p);
 			uasm_l_tlbl_goaround1(&l, p);
 		} else {
-			uasm_i_andi(&p, K0, K0, 2);
-			uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
-			uasm_l_tlbl_goaround1(&l, p);
-			/* Reload the PTE value */
-			iPTE_LW(&p, K0, K1);
+			uasm_i_andi(&p, wr.r3, wr.r3, 2);
+			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
+			uasm_i_nop(&p);
 		}
+		uasm_l_tlbl_goaround1(&l, p);
 	}
-	build_make_valid(&p, &r, K0, K1);
-	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+	build_make_valid(&p, &r, wr.r1, wr.r2);
+	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/*
@@ -1833,8 +1923,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	 * spots a huge page.
 	 */
 	uasm_l_tlb_huge_update(&l, p);
-	iPTE_LW(&p, K0, K1);
-	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	iPTE_LW(&p, wr.r1, wr.r2);
+	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
 
 	if (kernel_uses_smartmips_rixi) {
@@ -1843,50 +1933,51 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * have triggered it. Skip the expensive test..
 		 */
 		if (use_bbit_insns()) {
-			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
 				      label_tlbl_goaround2);
 		} else {
-			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
 		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine entrylo 0 or 1 based on ptr. */
 		if (use_bbit_insns()) {
-			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
 		} else {
-			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-			uasm_i_beqz(&p, K0, 8);
+			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+			uasm_i_beqz(&p, wr.r3, 8);
 		}
-		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
-		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/* load it in the delay slot*/
+		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+		/* load it if ptr is odd */
+		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
 		/*
-		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
 		if (use_bbit_insns()) {
-			uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
+			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
 		} else {
-			uasm_i_andi(&p, K0, K0, 2);
-			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+			uasm_i_andi(&p, wr.r3, wr.r3, 2);
+			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
 		}
-		/* Reload the PTE value */
-		iPTE_LW(&p, K0, K1);
 
 		/*
 		 * We clobbered C0_PAGEMASK, restore it. On the other branch
 		 * it is restored in build_huge_tlb_write_entry.
 		 */
-		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);
+		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
 
 		uasm_l_tlbl_goaround2(&l, p);
 	}
-	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
-	build_huge_handler_tail(&p, &r, &l, K0, K1);
+	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbl(&l, p);
+	build_restore_work_registers(&p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -1905,17 +1996,18 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	u32 *p = handle_tlbs;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbs, 0, sizeof(handle_tlbs));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
-	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
-	build_make_write(&p, &r, K0, K1);
-	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/*
@@ -1923,15 +2015,16 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	 * build_r4000_tlbchange_handler_head spots a huge page.
 	 */
 	uasm_l_tlb_huge_update(&l, p);
-	iPTE_LW(&p, K0, K1);
-	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	iPTE_LW(&p, wr.r1, wr.r2);
+	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
 	build_tlb_probe_entry(&p);
-	uasm_i_ori(&p, K0, K0,
+	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, K0, K1);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbs(&l, p);
+	build_restore_work_registers(&p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -1950,18 +2043,19 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	u32 *p = handle_tlbm;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbm, 0, sizeof(handle_tlbm));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
-	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	/* Present and writable bits set, set accessed and dirty bits. */
-	build_make_write(&p, &r, K0, K1);
-	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/*
@@ -1969,15 +2063,16 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	 * build_r4000_tlbchange_handler_head spots a huge page.
 	 */
 	uasm_l_tlb_huge_update(&l, p);
-	iPTE_LW(&p, K0, K1);
-	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	iPTE_LW(&p, wr.r1, wr.r2);
+	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
 	build_tlb_probe_entry(&p);
-	uasm_i_ori(&p, K0, K0,
+	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, K0, K1);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbm(&l, p);
+	build_restore_work_registers(&p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -2036,6 +2131,7 @@ void __cpuinit build_tlb_refill_handler(void)
 
 	default:
 		if (!run_once) {
+			scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 			build_r4000_setup_pgd();
 #endif