about summary refs log tree commit diff stats
path: root/arch/tile/kernel/intvec_32.S
diff options
context:
space:
mode:
Diffstat (limited to 'arch/tile/kernel/intvec_32.S')
-rw-r--r--  arch/tile/kernel/intvec_32.S  69
1 file changed, 3 insertions(+), 66 deletions(-)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index f084f1c7afde..088d5c141e68 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -32,12 +32,6 @@
32 32
33#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR) 33#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
34 34
35#if !CHIP_HAS_WH64()
36 /* By making this an empty macro, we can use wh64 in the code. */
37 .macro wh64 reg
38 .endm
39#endif
40
41 .macro push_reg reg, ptr=sp, delta=-4 35 .macro push_reg reg, ptr=sp, delta=-4
42 { 36 {
43 sw \ptr, \reg 37 sw \ptr, \reg
@@ -325,18 +319,14 @@ intvec_\vecname:
325 movei r3, -1 /* not used, but set for consistency */ 319 movei r3, -1 /* not used, but set for consistency */
326 } 320 }
327 .else 321 .else
328#if CHIP_HAS_AUX_PERF_COUNTERS()
329 .ifc \c_routine, op_handle_aux_perf_interrupt 322 .ifc \c_routine, op_handle_aux_perf_interrupt
330 { 323 {
331 mfspr r2, AUX_PERF_COUNT_STS 324 mfspr r2, AUX_PERF_COUNT_STS
332 movei r3, -1 /* not used, but set for consistency */ 325 movei r3, -1 /* not used, but set for consistency */
333 } 326 }
334 .else 327 .else
335#endif
336 movei r3, 0 328 movei r3, 0
337#if CHIP_HAS_AUX_PERF_COUNTERS()
338 .endif 329 .endif
339#endif
340 .endif 330 .endif
341 .endif 331 .endif
342 .endif 332 .endif
@@ -561,7 +551,6 @@ intvec_\vecname:
561 .endif 551 .endif
562 mtspr INTERRUPT_CRITICAL_SECTION, zero 552 mtspr INTERRUPT_CRITICAL_SECTION, zero
563 553
564#if CHIP_HAS_WH64()
565 /* 554 /*
566 * Prepare the first 256 stack bytes to be rapidly accessible 555 * Prepare the first 256 stack bytes to be rapidly accessible
567 * without having to fetch the background data. We don't really 556 * without having to fetch the background data. We don't really
@@ -582,7 +571,6 @@ intvec_\vecname:
582 addi r52, r52, -64 571 addi r52, r52, -64
583 } 572 }
584 wh64 r52 573 wh64 r52
585#endif
586 574
587#ifdef CONFIG_TRACE_IRQFLAGS 575#ifdef CONFIG_TRACE_IRQFLAGS
588 .ifnc \function,handle_nmi 576 .ifnc \function,handle_nmi
@@ -1533,12 +1521,10 @@ STD_ENTRY(_sys_clone)
1533 __HEAD 1521 __HEAD
1534 .align 64 1522 .align 64
1535 /* Align much later jump on the start of a cache line. */ 1523 /* Align much later jump on the start of a cache line. */
1536#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
1537 nop 1524 nop
1538#if PAGE_SIZE >= 0x10000 1525#if PAGE_SIZE >= 0x10000
1539 nop 1526 nop
1540#endif 1527#endif
1541#endif
1542ENTRY(sys_cmpxchg) 1528ENTRY(sys_cmpxchg)
1543 1529
1544 /* 1530 /*
@@ -1572,45 +1558,6 @@ ENTRY(sys_cmpxchg)
1572# error Code here assumes PAGE_OFFSET can be loaded with just hi16() 1558# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
1573#endif 1559#endif
1574 1560
1575#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
1576 {
1577 /* Check for unaligned input. */
1578 bnz sp, .Lcmpxchg_badaddr
1579 mm r25, r0, zero, 3, PAGE_SHIFT-1
1580 }
1581 {
1582 crc32_32 r25, zero, r25
1583 moveli r21, lo16(atomic_lock_ptr)
1584 }
1585 {
1586 auli r21, r21, ha16(atomic_lock_ptr)
1587 auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
1588 }
1589 {
1590 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
1591 slt_u r23, r0, r23
1592 lw r26, r0 /* see comment in the "#else" for the "lw r26". */
1593 }
1594 {
1595 s2a r21, r20, r21
1596 bbns r23, .Lcmpxchg_badaddr
1597 }
1598 {
1599 lw r21, r21
1600 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
1601 andi r25, r25, ATOMIC_HASH_L2_SIZE - 1
1602 }
1603 {
1604 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
1605 bbs r23, .Lcmpxchg64
1606 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1607 }
1608 {
1609 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1610 j .Lcmpxchg32_tns /* see comment in the #else for the jump. */
1611 }
1612
1613#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1614 { 1561 {
1615 /* Check for unaligned input. */ 1562 /* Check for unaligned input. */
1616 bnz sp, .Lcmpxchg_badaddr 1563 bnz sp, .Lcmpxchg_badaddr
@@ -1635,12 +1582,9 @@ ENTRY(sys_cmpxchg)
1635 1582
1636 /* 1583 /*
1637 * Ensure that the TLB is loaded before we take out the lock. 1584 * Ensure that the TLB is loaded before we take out the lock.
1638 * On tilepro, this will start fetching the value all the way 1585 * This will start fetching the value all the way into our L1
1639 * into our L1 as well (and if it gets modified before we 1586 * as well (and if it gets modified before we grab the lock,
1640 * grab the lock, it will be invalidated from our cache 1587 * it will be invalidated from our cache before we reload it).
1641 * before we reload it). On tile64, we'll start fetching it
1642 * into our L1 if we're the home, and if we're not, we'll
1643 * still at least start fetching it into the home's L2.
1644 */ 1588 */
1645 lw r26, r0 1589 lw r26, r0
1646 } 1590 }
@@ -1683,8 +1627,6 @@ ENTRY(sys_cmpxchg)
1683 j .Lcmpxchg32_tns 1627 j .Lcmpxchg32_tns
1684 } 1628 }
1685 1629
1686#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1687
1688/* Symbol for do_page_fault_ics() to use to compare against the PC. */ 1630/* Symbol for do_page_fault_ics() to use to compare against the PC. */
1689.global __sys_cmpxchg_grab_lock 1631.global __sys_cmpxchg_grab_lock
1690__sys_cmpxchg_grab_lock: 1632__sys_cmpxchg_grab_lock:
@@ -1822,9 +1764,6 @@ __sys_cmpxchg_grab_lock:
1822 .align 64 1764 .align 64
1823.Lcmpxchg64: 1765.Lcmpxchg64:
1824 { 1766 {
1825#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
1826 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1827#endif
1828 bzt r23, .Lcmpxchg64_tns 1767 bzt r23, .Lcmpxchg64_tns
1829 } 1768 }
1830 j .Lcmpxchg_badaddr 1769 j .Lcmpxchg_badaddr
@@ -1959,10 +1898,8 @@ int_unalign:
1959 do_page_fault 1898 do_page_fault
1960 int_hand INT_SN_CPL, SN_CPL, bad_intr 1899 int_hand INT_SN_CPL, SN_CPL, bad_intr
1961 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap 1900 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
1962#if CHIP_HAS_AUX_PERF_COUNTERS()
1963 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ 1901 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
1964 op_handle_aux_perf_interrupt, handle_nmi 1902 op_handle_aux_perf_interrupt, handle_nmi
1965#endif
1966 1903
1967 /* Synthetic interrupt delivered only by the simulator */ 1904 /* Synthetic interrupt delivered only by the simulator */
1968 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint 1905 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint