author     Jeff Garzik <jgarzik@pobox.com>   2005-07-13 16:23:51 -0400
committer  Jeff Garzik <jgarzik@pobox.com>   2005-07-13 16:23:51 -0400
commit     327309e899662b482c58cf25f574513d38b5788c (patch)
tree       069de438aa0e92dd9b6ba28e6b207e2cd07151a5 /arch/sparc64/kernel
parent     0c168775709faa74c1b87f1e61046e0c51ade7f3 (diff)
parent     c32511e2718618f0b53479eb36e07439aa363a74 (diff)
Merge upstream 2.6.13-rc3 into ieee80211 branch of netdev-2.6.
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/dtlb_backend.S  |   2
-rw-r--r--  arch/sparc64/kernel/entry.S         |  63
-rw-r--r--  arch/sparc64/kernel/irq.c           | 588
-rw-r--r--  arch/sparc64/kernel/kprobes.c       |   5
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c    |   3
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c     |  46
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c    |  78
-rw-r--r--  arch/sparc64/kernel/power.c         |   3
-rw-r--r--  arch/sparc64/kernel/ptrace.c        |  46
-rw-r--r--  arch/sparc64/kernel/smp.c           |  10
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c |   2
-rw-r--r--  arch/sparc64/kernel/sys32.S         |   2
-rw-r--r--  arch/sparc64/kernel/systbls.S       |   8
-rw-r--r--  arch/sparc64/kernel/time.c          |  26
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S   |   2
15 files changed, 372 insertions, 512 deletions
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index b73a3c858770..538522848ad4 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -16,7 +16,7 @@
 #elif PAGE_SHIFT == 19
 #define SZ_BITS		_PAGE_SZ512K
 #elif PAGE_SHIFT == 22
-#define SZ_BITS		_PAGE_SZ4M
+#define SZ_BITS		_PAGE_SZ4MB
 #endif
 
 #define VALID_SZ_BITS	(_PAGE_VALID | SZ_BITS)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index eee516a71c14..d781f10adc52 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -22,8 +22,6 @@ | |||
22 | #include <asm/estate.h> | 22 | #include <asm/estate.h> |
23 | #include <asm/auxio.h> | 23 | #include <asm/auxio.h> |
24 | 24 | ||
25 | /* #define SYSCALL_TRACING 1 */ | ||
26 | |||
27 | #define curptr g6 | 25 | #define curptr g6 |
28 | 26 | ||
29 | #define NR_SYSCALLS 284 /* Each OS is different... */ | 27 | #define NR_SYSCALLS 284 /* Each OS is different... */ |
@@ -553,13 +551,11 @@ do_ivec: | |||
553 | sllx %g3, 5, %g3 | 551 | sllx %g3, 5, %g3 |
554 | or %g2, %lo(ivector_table), %g2 | 552 | or %g2, %lo(ivector_table), %g2 |
555 | add %g2, %g3, %g3 | 553 | add %g2, %g3, %g3 |
556 | ldx [%g3 + 0x08], %g2 /* irq_info */ | ||
557 | ldub [%g3 + 0x04], %g4 /* pil */ | 554 | ldub [%g3 + 0x04], %g4 /* pil */ |
558 | brz,pn %g2, do_ivec_spurious | 555 | mov 1, %g2 |
559 | mov 1, %g2 | ||
560 | |||
561 | sllx %g2, %g4, %g2 | 556 | sllx %g2, %g4, %g2 |
562 | sllx %g4, 2, %g4 | 557 | sllx %g4, 2, %g4 |
558 | |||
563 | lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ | 559 | lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ |
564 | stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ | 560 | stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ |
565 | stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ | 561 | stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ |
@@ -567,9 +563,9 @@ do_ivec: | |||
567 | retry | 563 | retry |
568 | do_ivec_xcall: | 564 | do_ivec_xcall: |
569 | mov 0x50, %g1 | 565 | mov 0x50, %g1 |
570 | |||
571 | ldxa [%g1 + %g0] ASI_INTR_R, %g1 | 566 | ldxa [%g1 + %g0] ASI_INTR_R, %g1 |
572 | srl %g3, 0, %g3 | 567 | srl %g3, 0, %g3 |
568 | |||
573 | mov 0x60, %g7 | 569 | mov 0x60, %g7 |
574 | ldxa [%g7 + %g0] ASI_INTR_R, %g7 | 570 | ldxa [%g7 + %g0] ASI_INTR_R, %g7 |
575 | stxa %g0, [%g0] ASI_INTR_RECEIVE | 571 | stxa %g0, [%g0] ASI_INTR_RECEIVE |
@@ -581,19 +577,6 @@ do_ivec_xcall: | |||
581 | 1: jmpl %g3, %g0 | 577 | 1: jmpl %g3, %g0 |
582 | nop | 578 | nop |
583 | 579 | ||
584 | do_ivec_spurious: | ||
585 | stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */ | ||
586 | rdpr %pstate, %g5 | ||
587 | |||
588 | wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate | ||
589 | sethi %hi(109f), %g7 | ||
590 | ba,pt %xcc, etrap | ||
591 | 109: or %g7, %lo(109b), %g7 | ||
592 | call catch_disabled_ivec | ||
593 | add %sp, PTREGS_OFF, %o0 | ||
594 | ba,pt %xcc, rtrap | ||
595 | clr %l6 | ||
596 | |||
597 | .globl save_alternate_globals | 580 | .globl save_alternate_globals |
598 | save_alternate_globals: /* %o0 = save_area */ | 581 | save_alternate_globals: /* %o0 = save_area */ |
599 | rdpr %pstate, %o5 | 582 | rdpr %pstate, %o5 |
@@ -1569,11 +1552,12 @@ sys_ptrace: add %sp, PTREGS_OFF, %o0 | |||
1569 | nop | 1552 | nop |
1570 | .align 32 | 1553 | .align 32 |
1571 | 1: ldx [%curptr + TI_FLAGS], %l5 | 1554 | 1: ldx [%curptr + TI_FLAGS], %l5 |
1572 | andcc %l5, _TIF_SYSCALL_TRACE, %g0 | 1555 | andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 |
1573 | be,pt %icc, rtrap | 1556 | be,pt %icc, rtrap |
1574 | clr %l6 | 1557 | clr %l6 |
1558 | add %sp, PTREGS_OFF, %o0 | ||
1575 | call syscall_trace | 1559 | call syscall_trace |
1576 | nop | 1560 | mov 1, %o1 |
1577 | 1561 | ||
1578 | ba,pt %xcc, rtrap | 1562 | ba,pt %xcc, rtrap |
1579 | clr %l6 | 1563 | clr %l6 |
@@ -1657,18 +1641,20 @@ linux_sparc_ni_syscall: | |||
1657 | or %l7, %lo(sys_ni_syscall), %l7 | 1641 | or %l7, %lo(sys_ni_syscall), %l7 |
1658 | 1642 | ||
1659 | linux_syscall_trace32: | 1643 | linux_syscall_trace32: |
1644 | add %sp, PTREGS_OFF, %o0 | ||
1660 | call syscall_trace | 1645 | call syscall_trace |
1661 | nop | 1646 | clr %o1 |
1662 | srl %i0, 0, %o0 | 1647 | srl %i0, 0, %o0 |
1663 | mov %i4, %o4 | 1648 | srl %i4, 0, %o4 |
1664 | srl %i1, 0, %o1 | 1649 | srl %i1, 0, %o1 |
1665 | srl %i2, 0, %o2 | 1650 | srl %i2, 0, %o2 |
1666 | b,pt %xcc, 2f | 1651 | b,pt %xcc, 2f |
1667 | srl %i3, 0, %o3 | 1652 | srl %i3, 0, %o3 |
1668 | 1653 | ||
1669 | linux_syscall_trace: | 1654 | linux_syscall_trace: |
1655 | add %sp, PTREGS_OFF, %o0 | ||
1670 | call syscall_trace | 1656 | call syscall_trace |
1671 | nop | 1657 | clr %o1 |
1672 | mov %i0, %o0 | 1658 | mov %i0, %o0 |
1673 | mov %i1, %o1 | 1659 | mov %i1, %o1 |
1674 | mov %i2, %o2 | 1660 | mov %i2, %o2 |
@@ -1686,11 +1672,6 @@ linux_sparc_syscall32: | |||
1686 | bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI | 1672 | bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI |
1687 | srl %i0, 0, %o0 ! IEU0 | 1673 | srl %i0, 0, %o0 ! IEU0 |
1688 | sll %g1, 2, %l4 ! IEU0 Group | 1674 | sll %g1, 2, %l4 ! IEU0 Group |
1689 | #ifdef SYSCALL_TRACING | ||
1690 | call syscall_trace_entry | ||
1691 | add %sp, PTREGS_OFF, %o0 | ||
1692 | srl %i0, 0, %o0 | ||
1693 | #endif | ||
1694 | srl %i4, 0, %o4 ! IEU1 | 1675 | srl %i4, 0, %o4 ! IEU1 |
1695 | lduw [%l7 + %l4], %l7 ! Load | 1676 | lduw [%l7 + %l4], %l7 ! Load |
1696 | srl %i1, 0, %o1 ! IEU0 Group | 1677 | srl %i1, 0, %o1 ! IEU0 Group |
@@ -1698,7 +1679,7 @@ linux_sparc_syscall32: | |||
1698 | 1679 | ||
1699 | srl %i5, 0, %o5 ! IEU1 | 1680 | srl %i5, 0, %o5 ! IEU1 |
1700 | srl %i2, 0, %o2 ! IEU0 Group | 1681 | srl %i2, 0, %o2 ! IEU0 Group |
1701 | andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU0 Group | 1682 | andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 |
1702 | bne,pn %icc, linux_syscall_trace32 ! CTI | 1683 | bne,pn %icc, linux_syscall_trace32 ! CTI |
1703 | mov %i0, %l5 ! IEU1 | 1684 | mov %i0, %l5 ! IEU1 |
1704 | call %l7 ! CTI Group brk forced | 1685 | call %l7 ! CTI Group brk forced |
@@ -1714,11 +1695,6 @@ linux_sparc_syscall: | |||
1714 | bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI | 1695 | bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI |
1715 | mov %i0, %o0 ! IEU0 | 1696 | mov %i0, %o0 ! IEU0 |
1716 | sll %g1, 2, %l4 ! IEU0 Group | 1697 | sll %g1, 2, %l4 ! IEU0 Group |
1717 | #ifdef SYSCALL_TRACING | ||
1718 | call syscall_trace_entry | ||
1719 | add %sp, PTREGS_OFF, %o0 | ||
1720 | mov %i0, %o0 | ||
1721 | #endif | ||
1722 | mov %i1, %o1 ! IEU1 | 1698 | mov %i1, %o1 ! IEU1 |
1723 | lduw [%l7 + %l4], %l7 ! Load | 1699 | lduw [%l7 + %l4], %l7 ! Load |
1724 | 4: mov %i2, %o2 ! IEU0 Group | 1700 | 4: mov %i2, %o2 ! IEU0 Group |
@@ -1726,7 +1702,7 @@ linux_sparc_syscall: | |||
1726 | 1702 | ||
1727 | mov %i3, %o3 ! IEU1 | 1703 | mov %i3, %o3 ! IEU1 |
1728 | mov %i4, %o4 ! IEU0 Group | 1704 | mov %i4, %o4 ! IEU0 Group |
1729 | andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU1 Group+1 bubble | 1705 | andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 |
1730 | bne,pn %icc, linux_syscall_trace ! CTI Group | 1706 | bne,pn %icc, linux_syscall_trace ! CTI Group |
1731 | mov %i0, %l5 ! IEU0 | 1707 | mov %i0, %l5 ! IEU0 |
1732 | 2: call %l7 ! CTI Group brk forced | 1708 | 2: call %l7 ! CTI Group brk forced |
@@ -1735,12 +1711,6 @@ linux_sparc_syscall: | |||
1735 | 1711 | ||
1736 | 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] | 1712 | 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] |
1737 | ret_sys_call: | 1713 | ret_sys_call: |
1738 | #ifdef SYSCALL_TRACING | ||
1739 | mov %o0, %o1 | ||
1740 | call syscall_trace_exit | ||
1741 | add %sp, PTREGS_OFF, %o0 | ||
1742 | mov %o1, %o0 | ||
1743 | #endif | ||
1744 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 | 1714 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 |
1745 | ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc | 1715 | ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc |
1746 | sra %o0, 0, %o0 | 1716 | sra %o0, 0, %o0 |
@@ -1760,7 +1730,7 @@ ret_sys_call: | |||
1760 | 1: | 1730 | 1: |
1761 | cmp %o0, -ERESTART_RESTARTBLOCK | 1731 | cmp %o0, -ERESTART_RESTARTBLOCK |
1762 | bgeu,pn %xcc, 1f | 1732 | bgeu,pn %xcc, 1f |
1763 | andcc %l0, _TIF_SYSCALL_TRACE, %l6 | 1733 | andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 |
1764 | 80: | 1734 | 80: |
1765 | /* System call success, clear Carry condition code. */ | 1735 | /* System call success, clear Carry condition code. */ |
1766 | andn %g3, %g2, %g3 | 1736 | andn %g3, %g2, %g3 |
@@ -1775,7 +1745,7 @@ ret_sys_call: | |||
1775 | /* System call failure, set Carry condition code. | 1745 | /* System call failure, set Carry condition code. |
1776 | * Also, get abs(errno) to return to the process. | 1746 | * Also, get abs(errno) to return to the process. |
1777 | */ | 1747 | */ |
1778 | andcc %l0, _TIF_SYSCALL_TRACE, %l6 | 1748 | andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 |
1779 | sub %g0, %o0, %o0 | 1749 | sub %g0, %o0, %o0 |
1780 | or %g3, %g2, %g3 | 1750 | or %g3, %g2, %g3 |
1781 | stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] | 1751 | stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] |
@@ -1788,8 +1758,9 @@ ret_sys_call: | |||
1788 | b,pt %xcc, rtrap | 1758 | b,pt %xcc, rtrap |
1789 | stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] | 1759 | stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] |
1790 | linux_syscall_trace2: | 1760 | linux_syscall_trace2: |
1761 | add %sp, PTREGS_OFF, %o0 | ||
1791 | call syscall_trace | 1762 | call syscall_trace |
1792 | nop | 1763 | mov 1, %o1 |
1793 | stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] | 1764 | stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] |
1794 | ba,pt %xcc, rtrap | 1765 | ba,pt %xcc, rtrap |
1795 | stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] | 1766 | stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] |
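The entry.S changes above do two things: they drop the ad-hoc SYSCALL_TRACING debug hooks and the do_ivec_spurious path, and they widen every syscall-tracing test from _TIF_SYSCALL_TRACE alone to the combined _TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT mask, passing the pt_regs pointer plus an entry/exit flag to syscall_trace(). A rough C rendering of the new calling convention follows; it is illustrative only, the helper names are hypothetical, and only syscall_trace() and the TIF flags come from the diff.

/* Illustrative sketch of the tracing hooks wired up in entry.S above.
 * Assumes kernel headers providing the TIF flags and pt_regs. */
#define _TIF_SYSCALL_WORK \
	(_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT)

static inline void trace_this_syscall_entry(struct pt_regs *regs,
					    unsigned long ti_flags)
{
	if (ti_flags & _TIF_SYSCALL_WORK)
		syscall_trace(regs, 0);	/* %o1 = 0: syscall entry */
}

static inline void trace_this_syscall_exit(struct pt_regs *regs,
					   unsigned long ti_flags)
{
	if (ti_flags & _TIF_SYSCALL_WORK)
		syscall_trace(regs, 1);	/* %o1 = 1: syscall exit */
}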
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 424712577307..daa2fb93052c 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -71,31 +71,7 @@ struct irq_work_struct { | |||
71 | struct irq_work_struct __irq_work[NR_CPUS]; | 71 | struct irq_work_struct __irq_work[NR_CPUS]; |
72 | #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) | 72 | #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) |
73 | 73 | ||
74 | #ifdef CONFIG_PCI | 74 | static struct irqaction *irq_action[NR_IRQS+1]; |
75 | /* This is a table of physical addresses used to deal with IBF_DMA_SYNC. | ||
76 | * It is used for PCI only to synchronize DMA transfers with IRQ delivery | ||
77 | * for devices behind busses other than APB on Sabre systems. | ||
78 | * | ||
79 | * Currently these physical addresses are just config space accesses | ||
80 | * to the command register for that device. | ||
81 | */ | ||
82 | unsigned long pci_dma_wsync; | ||
83 | unsigned long dma_sync_reg_table[256]; | ||
84 | unsigned char dma_sync_reg_table_entry = 0; | ||
85 | #endif | ||
86 | |||
87 | /* This is based upon code in the 32-bit Sparc kernel written mostly by | ||
88 | * David Redman (djhr@tadpole.co.uk). | ||
89 | */ | ||
90 | #define MAX_STATIC_ALLOC 4 | ||
91 | static struct irqaction static_irqaction[MAX_STATIC_ALLOC]; | ||
92 | static int static_irq_count; | ||
93 | |||
94 | /* This is exported so that fast IRQ handlers can get at it... -DaveM */ | ||
95 | struct irqaction *irq_action[NR_IRQS+1] = { | ||
96 | NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, | ||
97 | NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL | ||
98 | }; | ||
99 | 75 | ||
100 | /* This only synchronizes entities which modify IRQ handler | 76 | /* This only synchronizes entities which modify IRQ handler |
101 | * state and some selected user-level spots that want to | 77 | * state and some selected user-level spots that want to |
@@ -241,17 +217,22 @@ void disable_irq(unsigned int irq) | |||
241 | * the CPU %tick register and not by some normal vectored interrupt | 217 | * the CPU %tick register and not by some normal vectored interrupt |
242 | * source. To handle this special case, we use this dummy INO bucket. | 218 | * source. To handle this special case, we use this dummy INO bucket. |
243 | */ | 219 | */ |
220 | static struct irq_desc pil0_dummy_desc; | ||
244 | static struct ino_bucket pil0_dummy_bucket = { | 221 | static struct ino_bucket pil0_dummy_bucket = { |
245 | 0, /* irq_chain */ | 222 | .irq_info = &pil0_dummy_desc, |
246 | 0, /* pil */ | ||
247 | 0, /* pending */ | ||
248 | 0, /* flags */ | ||
249 | 0, /* __unused */ | ||
250 | NULL, /* irq_info */ | ||
251 | 0UL, /* iclr */ | ||
252 | 0UL, /* imap */ | ||
253 | }; | 223 | }; |
254 | 224 | ||
225 | static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup, | ||
226 | unsigned long iclr, unsigned long imap, | ||
227 | struct ino_bucket *bucket) | ||
228 | { | ||
229 | prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> " | ||
230 | "(%d:%d:%016lx:%016lx), halting...\n", | ||
231 | ino, bucket->pil, bucket->iclr, bucket->imap, | ||
232 | pil, inofixup, iclr, imap); | ||
233 | prom_halt(); | ||
234 | } | ||
235 | |||
255 | unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) | 236 | unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) |
256 | { | 237 | { |
257 | struct ino_bucket *bucket; | 238 | struct ino_bucket *bucket; |
@@ -280,28 +261,35 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long | |||
280 | prom_halt(); | 261 | prom_halt(); |
281 | } | 262 | } |
282 | 263 | ||
283 | /* Ok, looks good, set it up. Don't touch the irq_chain or | ||
284 | * the pending flag. | ||
285 | */ | ||
286 | bucket = &ivector_table[ino]; | 264 | bucket = &ivector_table[ino]; |
287 | if ((bucket->flags & IBF_ACTIVE) || | 265 | if (bucket->flags & IBF_ACTIVE) |
288 | (bucket->irq_info != NULL)) { | 266 | build_irq_error("IRQ: Trying to build active INO bucket.\n", |
289 | /* This is a gross fatal error if it happens here. */ | 267 | ino, pil, inofixup, iclr, imap, bucket); |
290 | prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n"); | 268 | |
291 | prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n", | 269 | if (bucket->irq_info) { |
292 | ino, pil, inofixup, iclr, imap); | 270 | if (bucket->imap != imap || bucket->iclr != iclr) |
293 | prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n", | 271 | build_irq_error("IRQ: Trying to reinit INO bucket.\n", |
294 | bucket->pil, bucket->iclr, bucket->imap); | 272 | ino, pil, inofixup, iclr, imap, bucket); |
295 | prom_printf("IRQ: Cannot continue, halting...\n"); | 273 | |
274 | goto out; | ||
275 | } | ||
276 | |||
277 | bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); | ||
278 | if (!bucket->irq_info) { | ||
279 | prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); | ||
296 | prom_halt(); | 280 | prom_halt(); |
297 | } | 281 | } |
282 | memset(bucket->irq_info, 0, sizeof(struct irq_desc)); | ||
283 | |||
284 | /* Ok, looks good, set it up. Don't touch the irq_chain or | ||
285 | * the pending flag. | ||
286 | */ | ||
298 | bucket->imap = imap; | 287 | bucket->imap = imap; |
299 | bucket->iclr = iclr; | 288 | bucket->iclr = iclr; |
300 | bucket->pil = pil; | 289 | bucket->pil = pil; |
301 | bucket->flags = 0; | 290 | bucket->flags = 0; |
302 | 291 | ||
303 | bucket->irq_info = NULL; | 292 | out: |
304 | |||
305 | return __irq(bucket); | 293 | return __irq(bucket); |
306 | } | 294 | } |
307 | 295 | ||
@@ -319,26 +307,65 @@ static void atomic_bucket_insert(struct ino_bucket *bucket) | |||
319 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); | 307 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); |
320 | } | 308 | } |
321 | 309 | ||
310 | static int check_irq_sharing(int pil, unsigned long irqflags) | ||
311 | { | ||
312 | struct irqaction *action, *tmp; | ||
313 | |||
314 | action = *(irq_action + pil); | ||
315 | if (action) { | ||
316 | if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) { | ||
317 | for (tmp = action; tmp->next; tmp = tmp->next) | ||
318 | ; | ||
319 | } else { | ||
320 | return -EBUSY; | ||
321 | } | ||
322 | } | ||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | static void append_irq_action(int pil, struct irqaction *action) | ||
327 | { | ||
328 | struct irqaction **pp = irq_action + pil; | ||
329 | |||
330 | while (*pp) | ||
331 | pp = &((*pp)->next); | ||
332 | *pp = action; | ||
333 | } | ||
334 | |||
335 | static struct irqaction *get_action_slot(struct ino_bucket *bucket) | ||
336 | { | ||
337 | struct irq_desc *desc = bucket->irq_info; | ||
338 | int max_irq, i; | ||
339 | |||
340 | max_irq = 1; | ||
341 | if (bucket->flags & IBF_PCI) | ||
342 | max_irq = MAX_IRQ_DESC_ACTION; | ||
343 | for (i = 0; i < max_irq; i++) { | ||
344 | struct irqaction *p = &desc->action[i]; | ||
345 | u32 mask = (1 << i); | ||
346 | |||
347 | if (desc->action_active_mask & mask) | ||
348 | continue; | ||
349 | |||
350 | desc->action_active_mask |= mask; | ||
351 | return p; | ||
352 | } | ||
353 | return NULL; | ||
354 | } | ||
355 | |||
322 | int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), | 356 | int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), |
323 | unsigned long irqflags, const char *name, void *dev_id) | 357 | unsigned long irqflags, const char *name, void *dev_id) |
324 | { | 358 | { |
325 | struct irqaction *action, *tmp = NULL; | 359 | struct irqaction *action; |
326 | struct ino_bucket *bucket = __bucket(irq); | 360 | struct ino_bucket *bucket = __bucket(irq); |
327 | unsigned long flags; | 361 | unsigned long flags; |
328 | int pending = 0; | 362 | int pending = 0; |
329 | 363 | ||
330 | if ((bucket != &pil0_dummy_bucket) && | 364 | if (unlikely(!handler)) |
331 | (bucket < &ivector_table[0] || | ||
332 | bucket >= &ivector_table[NUM_IVECS])) { | ||
333 | unsigned int *caller; | ||
334 | |||
335 | __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); | ||
336 | printk(KERN_CRIT "request_irq: Old style IRQ registry attempt " | ||
337 | "from %p, irq %08x.\n", caller, irq); | ||
338 | return -EINVAL; | 365 | return -EINVAL; |
339 | } | 366 | |
340 | if (!handler) | 367 | if (unlikely(!bucket->irq_info)) |
341 | return -EINVAL; | 368 | return -ENODEV; |
342 | 369 | ||
343 | if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { | 370 | if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { |
344 | /* | 371 | /* |
@@ -356,93 +383,20 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ | |||
356 | 383 | ||
357 | spin_lock_irqsave(&irq_action_lock, flags); | 384 | spin_lock_irqsave(&irq_action_lock, flags); |
358 | 385 | ||
359 | action = *(bucket->pil + irq_action); | 386 | if (check_irq_sharing(bucket->pil, irqflags)) { |
360 | if (action) { | 387 | spin_unlock_irqrestore(&irq_action_lock, flags); |
361 | if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) | 388 | return -EBUSY; |
362 | for (tmp = action; tmp->next; tmp = tmp->next) | ||
363 | ; | ||
364 | else { | ||
365 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
366 | return -EBUSY; | ||
367 | } | ||
368 | action = NULL; /* Or else! */ | ||
369 | } | 389 | } |
370 | 390 | ||
371 | /* If this is flagged as statically allocated then we use our | 391 | action = get_action_slot(bucket); |
372 | * private struct which is never freed. | ||
373 | */ | ||
374 | if (irqflags & SA_STATIC_ALLOC) { | ||
375 | if (static_irq_count < MAX_STATIC_ALLOC) | ||
376 | action = &static_irqaction[static_irq_count++]; | ||
377 | else | ||
378 | printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed " | ||
379 | "using kmalloc\n", irq, name); | ||
380 | } | ||
381 | if (action == NULL) | ||
382 | action = (struct irqaction *)kmalloc(sizeof(struct irqaction), | ||
383 | GFP_ATOMIC); | ||
384 | |||
385 | if (!action) { | 392 | if (!action) { |
386 | spin_unlock_irqrestore(&irq_action_lock, flags); | 393 | spin_unlock_irqrestore(&irq_action_lock, flags); |
387 | return -ENOMEM; | 394 | return -ENOMEM; |
388 | } | 395 | } |
389 | 396 | ||
390 | if (bucket == &pil0_dummy_bucket) { | 397 | bucket->flags |= IBF_ACTIVE; |
391 | bucket->irq_info = action; | 398 | pending = 0; |
392 | bucket->flags |= IBF_ACTIVE; | 399 | if (bucket != &pil0_dummy_bucket) { |
393 | } else { | ||
394 | if ((bucket->flags & IBF_ACTIVE) != 0) { | ||
395 | void *orig = bucket->irq_info; | ||
396 | void **vector = NULL; | ||
397 | |||
398 | if ((bucket->flags & IBF_PCI) == 0) { | ||
399 | printk("IRQ: Trying to share non-PCI bucket.\n"); | ||
400 | goto free_and_ebusy; | ||
401 | } | ||
402 | if ((bucket->flags & IBF_MULTI) == 0) { | ||
403 | vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC); | ||
404 | if (vector == NULL) | ||
405 | goto free_and_enomem; | ||
406 | |||
407 | /* We might have slept. */ | ||
408 | if ((bucket->flags & IBF_MULTI) != 0) { | ||
409 | int ent; | ||
410 | |||
411 | kfree(vector); | ||
412 | vector = (void **)bucket->irq_info; | ||
413 | for(ent = 0; ent < 4; ent++) { | ||
414 | if (vector[ent] == NULL) { | ||
415 | vector[ent] = action; | ||
416 | break; | ||
417 | } | ||
418 | } | ||
419 | if (ent == 4) | ||
420 | goto free_and_ebusy; | ||
421 | } else { | ||
422 | vector[0] = orig; | ||
423 | vector[1] = action; | ||
424 | vector[2] = NULL; | ||
425 | vector[3] = NULL; | ||
426 | bucket->irq_info = vector; | ||
427 | bucket->flags |= IBF_MULTI; | ||
428 | } | ||
429 | } else { | ||
430 | int ent; | ||
431 | |||
432 | vector = (void **)orig; | ||
433 | for (ent = 0; ent < 4; ent++) { | ||
434 | if (vector[ent] == NULL) { | ||
435 | vector[ent] = action; | ||
436 | break; | ||
437 | } | ||
438 | } | ||
439 | if (ent == 4) | ||
440 | goto free_and_ebusy; | ||
441 | } | ||
442 | } else { | ||
443 | bucket->irq_info = action; | ||
444 | bucket->flags |= IBF_ACTIVE; | ||
445 | } | ||
446 | pending = bucket->pending; | 400 | pending = bucket->pending; |
447 | if (pending) | 401 | if (pending) |
448 | bucket->pending = 0; | 402 | bucket->pending = 0; |
@@ -456,10 +410,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ | |||
456 | put_ino_in_irqaction(action, irq); | 410 | put_ino_in_irqaction(action, irq); |
457 | put_smpaff_in_irqaction(action, CPU_MASK_NONE); | 411 | put_smpaff_in_irqaction(action, CPU_MASK_NONE); |
458 | 412 | ||
459 | if (tmp) | 413 | append_irq_action(bucket->pil, action); |
460 | tmp->next = action; | ||
461 | else | ||
462 | *(bucket->pil + irq_action) = action; | ||
463 | 414 | ||
464 | enable_irq(irq); | 415 | enable_irq(irq); |
465 | 416 | ||
@@ -468,147 +419,103 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_ | |||
468 | atomic_bucket_insert(bucket); | 419 | atomic_bucket_insert(bucket); |
469 | set_softint(1 << bucket->pil); | 420 | set_softint(1 << bucket->pil); |
470 | } | 421 | } |
422 | |||
471 | spin_unlock_irqrestore(&irq_action_lock, flags); | 423 | spin_unlock_irqrestore(&irq_action_lock, flags); |
472 | if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC))) | 424 | |
425 | if (bucket != &pil0_dummy_bucket) | ||
473 | register_irq_proc(__irq_ino(irq)); | 426 | register_irq_proc(__irq_ino(irq)); |
474 | 427 | ||
475 | #ifdef CONFIG_SMP | 428 | #ifdef CONFIG_SMP |
476 | distribute_irqs(); | 429 | distribute_irqs(); |
477 | #endif | 430 | #endif |
478 | return 0; | 431 | return 0; |
479 | |||
480 | free_and_ebusy: | ||
481 | kfree(action); | ||
482 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
483 | return -EBUSY; | ||
484 | |||
485 | free_and_enomem: | ||
486 | kfree(action); | ||
487 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
488 | return -ENOMEM; | ||
489 | } | 432 | } |
490 | 433 | ||
491 | EXPORT_SYMBOL(request_irq); | 434 | EXPORT_SYMBOL(request_irq); |
492 | 435 | ||
493 | void free_irq(unsigned int irq, void *dev_id) | 436 | static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id) |
494 | { | 437 | { |
495 | struct irqaction *action; | 438 | struct ino_bucket *bucket = __bucket(irq); |
496 | struct irqaction *tmp = NULL; | 439 | struct irqaction *action, **pp; |
497 | unsigned long flags; | ||
498 | struct ino_bucket *bucket = __bucket(irq), *bp; | ||
499 | 440 | ||
500 | if ((bucket != &pil0_dummy_bucket) && | 441 | pp = irq_action + bucket->pil; |
501 | (bucket < &ivector_table[0] || | 442 | action = *pp; |
502 | bucket >= &ivector_table[NUM_IVECS])) { | 443 | if (unlikely(!action)) |
503 | unsigned int *caller; | 444 | return NULL; |
504 | 445 | ||
505 | __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); | 446 | if (unlikely(!action->handler)) { |
506 | printk(KERN_CRIT "free_irq: Old style IRQ removal attempt " | ||
507 | "from %p, irq %08x.\n", caller, irq); | ||
508 | return; | ||
509 | } | ||
510 | |||
511 | spin_lock_irqsave(&irq_action_lock, flags); | ||
512 | |||
513 | action = *(bucket->pil + irq_action); | ||
514 | if (!action->handler) { | ||
515 | printk("Freeing free IRQ %d\n", bucket->pil); | 447 | printk("Freeing free IRQ %d\n", bucket->pil); |
516 | return; | 448 | return NULL; |
517 | } | ||
518 | if (dev_id) { | ||
519 | for ( ; action; action = action->next) { | ||
520 | if (action->dev_id == dev_id) | ||
521 | break; | ||
522 | tmp = action; | ||
523 | } | ||
524 | if (!action) { | ||
525 | printk("Trying to free free shared IRQ %d\n", bucket->pil); | ||
526 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
527 | return; | ||
528 | } | ||
529 | } else if (action->flags & SA_SHIRQ) { | ||
530 | printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil); | ||
531 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
532 | return; | ||
533 | } | 449 | } |
534 | 450 | ||
535 | if (action->flags & SA_STATIC_ALLOC) { | 451 | while (action && action->dev_id != dev_id) { |
536 | printk("Attempt to free statically allocated IRQ %d (%s)\n", | 452 | pp = &action->next; |
537 | bucket->pil, action->name); | 453 | action = *pp; |
538 | spin_unlock_irqrestore(&irq_action_lock, flags); | ||
539 | return; | ||
540 | } | 454 | } |
541 | 455 | ||
542 | if (action && tmp) | 456 | if (likely(action)) |
543 | tmp->next = action->next; | 457 | *pp = action->next; |
544 | else | 458 | |
545 | *(bucket->pil + irq_action) = action->next; | 459 | return action; |
460 | } | ||
461 | |||
462 | void free_irq(unsigned int irq, void *dev_id) | ||
463 | { | ||
464 | struct irqaction *action; | ||
465 | struct ino_bucket *bucket; | ||
466 | unsigned long flags; | ||
467 | |||
468 | spin_lock_irqsave(&irq_action_lock, flags); | ||
469 | |||
470 | action = unlink_irq_action(irq, dev_id); | ||
546 | 471 | ||
547 | spin_unlock_irqrestore(&irq_action_lock, flags); | 472 | spin_unlock_irqrestore(&irq_action_lock, flags); |
548 | 473 | ||
474 | if (unlikely(!action)) | ||
475 | return; | ||
476 | |||
549 | synchronize_irq(irq); | 477 | synchronize_irq(irq); |
550 | 478 | ||
551 | spin_lock_irqsave(&irq_action_lock, flags); | 479 | spin_lock_irqsave(&irq_action_lock, flags); |
552 | 480 | ||
481 | bucket = __bucket(irq); | ||
553 | if (bucket != &pil0_dummy_bucket) { | 482 | if (bucket != &pil0_dummy_bucket) { |
483 | struct irq_desc *desc = bucket->irq_info; | ||
554 | unsigned long imap = bucket->imap; | 484 | unsigned long imap = bucket->imap; |
555 | void **vector, *orig; | 485 | int ent, i; |
556 | int ent; | ||
557 | |||
558 | orig = bucket->irq_info; | ||
559 | vector = (void **)orig; | ||
560 | |||
561 | if ((bucket->flags & IBF_MULTI) != 0) { | ||
562 | int other = 0; | ||
563 | void *orphan = NULL; | ||
564 | for (ent = 0; ent < 4; ent++) { | ||
565 | if (vector[ent] == action) | ||
566 | vector[ent] = NULL; | ||
567 | else if (vector[ent] != NULL) { | ||
568 | orphan = vector[ent]; | ||
569 | other++; | ||
570 | } | ||
571 | } | ||
572 | 486 | ||
573 | /* Only free when no other shared irq | 487 | for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { |
574 | * uses this bucket. | 488 | struct irqaction *p = &desc->action[i]; |
575 | */ | 489 | |
576 | if (other) { | 490 | if (p == action) { |
577 | if (other == 1) { | 491 | desc->action_active_mask &= ~(1 << i); |
578 | /* Convert back to non-shared bucket. */ | 492 | break; |
579 | bucket->irq_info = orphan; | ||
580 | bucket->flags &= ~(IBF_MULTI); | ||
581 | kfree(vector); | ||
582 | } | ||
583 | goto out; | ||
584 | } | 493 | } |
585 | } else { | ||
586 | bucket->irq_info = NULL; | ||
587 | } | 494 | } |
588 | 495 | ||
589 | /* This unique interrupt source is now inactive. */ | 496 | if (!desc->action_active_mask) { |
590 | bucket->flags &= ~IBF_ACTIVE; | 497 | /* This unique interrupt source is now inactive. */ |
498 | bucket->flags &= ~IBF_ACTIVE; | ||
591 | 499 | ||
592 | /* See if any other buckets share this bucket's IMAP | 500 | /* See if any other buckets share this bucket's IMAP |
593 | * and are still active. | 501 | * and are still active. |
594 | */ | 502 | */ |
595 | for (ent = 0; ent < NUM_IVECS; ent++) { | 503 | for (ent = 0; ent < NUM_IVECS; ent++) { |
596 | bp = &ivector_table[ent]; | 504 | struct ino_bucket *bp = &ivector_table[ent]; |
597 | if (bp != bucket && | 505 | if (bp != bucket && |
598 | bp->imap == imap && | 506 | bp->imap == imap && |
599 | (bp->flags & IBF_ACTIVE) != 0) | 507 | (bp->flags & IBF_ACTIVE) != 0) |
600 | break; | 508 | break; |
601 | } | 509 | } |
602 | 510 | ||
603 | /* Only disable when no other sub-irq levels of | 511 | /* Only disable when no other sub-irq levels of |
604 | * the same IMAP are active. | 512 | * the same IMAP are active. |
605 | */ | 513 | */ |
606 | if (ent == NUM_IVECS) | 514 | if (ent == NUM_IVECS) |
607 | disable_irq(irq); | 515 | disable_irq(irq); |
516 | } | ||
608 | } | 517 | } |
609 | 518 | ||
610 | out: | ||
611 | kfree(action); | ||
612 | spin_unlock_irqrestore(&irq_action_lock, flags); | 519 | spin_unlock_irqrestore(&irq_action_lock, flags); |
613 | } | 520 | } |
614 | 521 | ||
@@ -647,99 +554,55 @@ void synchronize_irq(unsigned int irq) | |||
647 | } | 554 | } |
648 | #endif /* CONFIG_SMP */ | 555 | #endif /* CONFIG_SMP */ |
649 | 556 | ||
650 | void catch_disabled_ivec(struct pt_regs *regs) | 557 | static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) |
651 | { | 558 | { |
652 | int cpu = smp_processor_id(); | 559 | struct irq_desc *desc = bp->irq_info; |
653 | struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0)); | 560 | unsigned char flags = bp->flags; |
561 | u32 action_mask, i; | ||
562 | int random; | ||
654 | 563 | ||
655 | /* We can actually see this on Ultra/PCI PCI cards, which are bridges | 564 | bp->flags |= IBF_INPROGRESS; |
656 | * to other devices. Here a single IMAP enabled potentially multiple | ||
657 | * unique interrupt sources (which each do have a unique ICLR register. | ||
658 | * | ||
659 | * So what we do is just register that the IVEC arrived, when registered | ||
660 | * for real the request_irq() code will check the bit and signal | ||
661 | * a local CPU interrupt for it. | ||
662 | */ | ||
663 | #if 0 | ||
664 | printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n", | ||
665 | bucket - &ivector_table[0], regs->tpc); | ||
666 | #endif | ||
667 | *irq_work(cpu, 0) = 0; | ||
668 | bucket->pending = 1; | ||
669 | } | ||
670 | |||
671 | /* Tune this... */ | ||
672 | #define FORWARD_VOLUME 12 | ||
673 | |||
674 | #ifdef CONFIG_SMP | ||
675 | |||
676 | static inline void redirect_intr(int cpu, struct ino_bucket *bp) | ||
677 | { | ||
678 | /* Ok, here is what is going on: | ||
679 | * 1) Retargeting IRQs on Starfire is very | ||
680 | * expensive so just forget about it on them. | ||
681 | * 2) Moving around very high priority interrupts | ||
682 | * is a losing game. | ||
683 | * 3) If the current cpu is idle, interrupts are | ||
684 | * useful work, so keep them here. But do not | ||
685 | * pass to our neighbour if he is not very idle. | ||
686 | * 4) If sysadmin explicitly asks for directed intrs, | ||
687 | * Just Do It. | ||
688 | */ | ||
689 | struct irqaction *ap = bp->irq_info; | ||
690 | cpumask_t cpu_mask; | ||
691 | unsigned int buddy, ticks; | ||
692 | 565 | ||
693 | cpu_mask = get_smpaff_in_irqaction(ap); | 566 | if (unlikely(!(flags & IBF_ACTIVE))) { |
694 | cpus_and(cpu_mask, cpu_mask, cpu_online_map); | 567 | bp->pending = 1; |
695 | if (cpus_empty(cpu_mask)) | ||
696 | cpu_mask = cpu_online_map; | ||
697 | |||
698 | if (this_is_starfire != 0 || | ||
699 | bp->pil >= 10 || current->pid == 0) | ||
700 | goto out; | 568 | goto out; |
701 | |||
702 | /* 'cpu' is the MID (ie. UPAID), calculate the MID | ||
703 | * of our buddy. | ||
704 | */ | ||
705 | buddy = cpu + 1; | ||
706 | if (buddy >= NR_CPUS) | ||
707 | buddy = 0; | ||
708 | |||
709 | ticks = 0; | ||
710 | while (!cpu_isset(buddy, cpu_mask)) { | ||
711 | if (++buddy >= NR_CPUS) | ||
712 | buddy = 0; | ||
713 | if (++ticks > NR_CPUS) { | ||
714 | put_smpaff_in_irqaction(ap, CPU_MASK_NONE); | ||
715 | goto out; | ||
716 | } | ||
717 | } | 569 | } |
718 | 570 | ||
719 | if (buddy == cpu) | 571 | if (desc->pre_handler) |
720 | goto out; | 572 | desc->pre_handler(bp, |
573 | desc->pre_handler_arg1, | ||
574 | desc->pre_handler_arg2); | ||
721 | 575 | ||
722 | /* Voo-doo programming. */ | 576 | action_mask = desc->action_active_mask; |
723 | if (cpu_data(buddy).idle_volume < FORWARD_VOLUME) | 577 | random = 0; |
724 | goto out; | 578 | for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { |
579 | struct irqaction *p = &desc->action[i]; | ||
580 | u32 mask = (1 << i); | ||
725 | 581 | ||
726 | /* This just so happens to be correct on Cheetah | 582 | if (!(action_mask & mask)) |
727 | * at the moment. | 583 | continue; |
728 | */ | ||
729 | buddy <<= 26; | ||
730 | 584 | ||
731 | /* Push it to our buddy. */ | 585 | action_mask &= ~mask; |
732 | upa_writel(buddy | IMAP_VALID, bp->imap); | ||
733 | 586 | ||
587 | if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED) | ||
588 | random |= p->flags; | ||
589 | |||
590 | if (!action_mask) | ||
591 | break; | ||
592 | } | ||
593 | if (bp->pil != 0) { | ||
594 | upa_writel(ICLR_IDLE, bp->iclr); | ||
595 | /* Test and add entropy */ | ||
596 | if (random & SA_SAMPLE_RANDOM) | ||
597 | add_interrupt_randomness(irq); | ||
598 | } | ||
734 | out: | 599 | out: |
735 | return; | 600 | bp->flags &= ~IBF_INPROGRESS; |
736 | } | 601 | } |
737 | 602 | ||
738 | #endif | ||
739 | |||
740 | void handler_irq(int irq, struct pt_regs *regs) | 603 | void handler_irq(int irq, struct pt_regs *regs) |
741 | { | 604 | { |
742 | struct ino_bucket *bp, *nbp; | 605 | struct ino_bucket *bp; |
743 | int cpu = smp_processor_id(); | 606 | int cpu = smp_processor_id(); |
744 | 607 | ||
745 | #ifndef CONFIG_SMP | 608 | #ifndef CONFIG_SMP |
@@ -757,8 +620,6 @@ void handler_irq(int irq, struct pt_regs *regs) | |||
757 | clear_softint(clr_mask); | 620 | clear_softint(clr_mask); |
758 | } | 621 | } |
759 | #else | 622 | #else |
760 | int should_forward = 0; | ||
761 | |||
762 | clear_softint(1 << irq); | 623 | clear_softint(1 << irq); |
763 | #endif | 624 | #endif |
764 | 625 | ||
@@ -773,63 +634,12 @@ void handler_irq(int irq, struct pt_regs *regs) | |||
773 | #else | 634 | #else |
774 | bp = __bucket(xchg32(irq_work(cpu, irq), 0)); | 635 | bp = __bucket(xchg32(irq_work(cpu, irq), 0)); |
775 | #endif | 636 | #endif |
776 | for ( ; bp != NULL; bp = nbp) { | 637 | while (bp) { |
777 | unsigned char flags = bp->flags; | 638 | struct ino_bucket *nbp = __bucket(bp->irq_chain); |
778 | unsigned char random = 0; | ||
779 | 639 | ||
780 | nbp = __bucket(bp->irq_chain); | ||
781 | bp->irq_chain = 0; | 640 | bp->irq_chain = 0; |
782 | 641 | process_bucket(irq, bp, regs); | |
783 | bp->flags |= IBF_INPROGRESS; | 642 | bp = nbp; |
784 | |||
785 | if ((flags & IBF_ACTIVE) != 0) { | ||
786 | #ifdef CONFIG_PCI | ||
787 | if ((flags & IBF_DMA_SYNC) != 0) { | ||
788 | upa_readl(dma_sync_reg_table[bp->synctab_ent]); | ||
789 | upa_readq(pci_dma_wsync); | ||
790 | } | ||
791 | #endif | ||
792 | if ((flags & IBF_MULTI) == 0) { | ||
793 | struct irqaction *ap = bp->irq_info; | ||
794 | int ret; | ||
795 | |||
796 | ret = ap->handler(__irq(bp), ap->dev_id, regs); | ||
797 | if (ret == IRQ_HANDLED) | ||
798 | random |= ap->flags; | ||
799 | } else { | ||
800 | void **vector = (void **)bp->irq_info; | ||
801 | int ent; | ||
802 | for (ent = 0; ent < 4; ent++) { | ||
803 | struct irqaction *ap = vector[ent]; | ||
804 | if (ap != NULL) { | ||
805 | int ret; | ||
806 | |||
807 | ret = ap->handler(__irq(bp), | ||
808 | ap->dev_id, | ||
809 | regs); | ||
810 | if (ret == IRQ_HANDLED) | ||
811 | random |= ap->flags; | ||
812 | } | ||
813 | } | ||
814 | } | ||
815 | /* Only the dummy bucket lacks IMAP/ICLR. */ | ||
816 | if (bp->pil != 0) { | ||
817 | #ifdef CONFIG_SMP | ||
818 | if (should_forward) { | ||
819 | redirect_intr(cpu, bp); | ||
820 | should_forward = 0; | ||
821 | } | ||
822 | #endif | ||
823 | upa_writel(ICLR_IDLE, bp->iclr); | ||
824 | |||
825 | /* Test and add entropy */ | ||
826 | if (random & SA_SAMPLE_RANDOM) | ||
827 | add_interrupt_randomness(irq); | ||
828 | } | ||
829 | } else | ||
830 | bp->pending = 1; | ||
831 | |||
832 | bp->flags &= ~IBF_INPROGRESS; | ||
833 | } | 643 | } |
834 | irq_exit(); | 644 | irq_exit(); |
835 | } | 645 | } |
@@ -959,7 +769,10 @@ static void distribute_irqs(void) | |||
959 | */ | 769 | */ |
960 | for (level = 1; level < NR_IRQS; level++) { | 770 | for (level = 1; level < NR_IRQS; level++) { |
961 | struct irqaction *p = irq_action[level]; | 771 | struct irqaction *p = irq_action[level]; |
962 | if (level == 12) continue; | 772 | |
773 | if (level == 12) | ||
774 | continue; | ||
775 | |||
963 | while(p) { | 776 | while(p) { |
964 | cpu = retarget_one_irq(p, cpu); | 777 | cpu = retarget_one_irq(p, cpu); |
965 | p = p->next; | 778 | p = p->next; |
@@ -1104,7 +917,8 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off, | |||
1104 | int count, int *eof, void *data) | 917 | int count, int *eof, void *data) |
1105 | { | 918 | { |
1106 | struct ino_bucket *bp = ivector_table + (long)data; | 919 | struct ino_bucket *bp = ivector_table + (long)data; |
1107 | struct irqaction *ap = bp->irq_info; | 920 | struct irq_desc *desc = bp->irq_info; |
921 | struct irqaction *ap = desc->action; | ||
1108 | cpumask_t mask; | 922 | cpumask_t mask; |
1109 | int len; | 923 | int len; |
1110 | 924 | ||
@@ -1122,11 +936,13 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off, | |||
1122 | static inline void set_intr_affinity(int irq, cpumask_t hw_aff) | 936 | static inline void set_intr_affinity(int irq, cpumask_t hw_aff) |
1123 | { | 937 | { |
1124 | struct ino_bucket *bp = ivector_table + irq; | 938 | struct ino_bucket *bp = ivector_table + irq; |
939 | struct irq_desc *desc = bp->irq_info; | ||
940 | struct irqaction *ap = desc->action; | ||
1125 | 941 | ||
1126 | /* Users specify affinity in terms of hw cpu ids. | 942 | /* Users specify affinity in terms of hw cpu ids. |
1127 | * As soon as we do this, handler_irq() might see and take action. | 943 | * As soon as we do this, handler_irq() might see and take action. |
1128 | */ | 944 | */ |
1129 | put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff); | 945 | put_smpaff_in_irqaction(ap, hw_aff); |
1130 | 946 | ||
1131 | /* Migration is simply done by the next cpu to service this | 947 | /* Migration is simply done by the next cpu to service this |
1132 | * interrupt. | 948 | * interrupt. |
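The irq.c rewrite above replaces the old shared-IRQ scheme (a kmalloc'd vector of four void pointers plus the IBF_MULTI flag) with a per-bucket struct irq_desc holding a fixed action array, a bitmask of live slots, and an optional pre_handler run before the handlers. The structure itself is defined in an asm-sparc64 header that is not part of this diff; the sketch below is inferred from the call sites (check_irq_sharing, get_action_slot, process_bucket) and is an assumption, not the verbatim definition.

/* Assumed layout of struct irq_desc, reconstructed from how irq.c above
 * uses it.  The real definition lives in an asm-sparc64 header and may
 * differ in field order or the exact MAX_IRQ_DESC_ACTION value. */
#define MAX_IRQ_DESC_ACTION	4	/* assumption: mirrors the old 4-entry vector */

struct irq_desc {
	/* Optional chip-specific hook run before the actions, e.g. the
	 * Sabre/Tomatillo DMA write-sync handlers installed later in
	 * this commit. */
	void		(*pre_handler)(struct ino_bucket *bucket,
				       void *arg1, void *arg2);
	void		*pre_handler_arg1;
	void		*pre_handler_arg2;

	u32		action_active_mask;	/* bit i set => action[i] in use */
	struct irqaction action[MAX_IRQ_DESC_ACTION];
};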
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index bdac631cf011..bbf11f85dab1 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -433,3 +433,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
+/* architecture specific initialization */
+int arch_init_kprobes(void)
+{
+	return 0;
+}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 534320ef0db2..91ab466d6c66 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1303,8 +1303,7 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
 {
 	u64 tmp;
 
-	/* PROM sets the IRQ retry value too low, increase it. */
-	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5);
 
 	/* Enable arbiter for all PCI slots. */
 	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 53d333b4a4e8..52bf3431a422 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -595,6 +595,23 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
595 | return ret; | 595 | return ret; |
596 | } | 596 | } |
597 | 597 | ||
598 | /* When a device lives behind a bridge deeper in the PCI bus topology | ||
599 | * than APB, a special sequence must run to make sure all pending DMA | ||
600 | * transfers at the time of IRQ delivery are visible in the coherency | ||
601 | * domain by the cpu. This sequence is to perform a read on the far | ||
602 | * side of the non-APB bridge, then perform a read of Sabre's DMA | ||
603 | * write-sync register. | ||
604 | */ | ||
605 | static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) | ||
606 | { | ||
607 | struct pci_dev *pdev = _arg1; | ||
608 | unsigned long sync_reg = (unsigned long) _arg2; | ||
609 | u16 _unused; | ||
610 | |||
611 | pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused); | ||
612 | sabre_read(sync_reg); | ||
613 | } | ||
614 | |||
598 | static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, | 615 | static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, |
599 | struct pci_dev *pdev, | 616 | struct pci_dev *pdev, |
600 | unsigned int ino) | 617 | unsigned int ino) |
@@ -639,24 +656,14 @@ static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, | |||
639 | if (pdev) { | 656 | if (pdev) { |
640 | struct pcidev_cookie *pcp = pdev->sysdata; | 657 | struct pcidev_cookie *pcp = pdev->sysdata; |
641 | 658 | ||
642 | /* When a device lives behind a bridge deeper in the | ||
643 | * PCI bus topology than APB, a special sequence must | ||
644 | * run to make sure all pending DMA transfers at the | ||
645 | * time of IRQ delivery are visible in the coherency | ||
646 | * domain by the cpu. This sequence is to perform | ||
647 | * a read on the far side of the non-APB bridge, then | ||
648 | * perform a read of Sabre's DMA write-sync register. | ||
649 | * | ||
650 | * Currently, the PCI_CONFIG register for the device | ||
651 | * is used for this read from the far side of the bridge. | ||
652 | */ | ||
653 | if (pdev->bus->number != pcp->pbm->pci_first_busno) { | 659 | if (pdev->bus->number != pcp->pbm->pci_first_busno) { |
654 | bucket->flags |= IBF_DMA_SYNC; | 660 | struct pci_controller_info *p = pcp->pbm->parent; |
655 | bucket->synctab_ent = dma_sync_reg_table_entry++; | 661 | struct irq_desc *d = bucket->irq_info; |
656 | dma_sync_reg_table[bucket->synctab_ent] = | 662 | |
657 | (unsigned long) sabre_pci_config_mkaddr( | 663 | d->pre_handler = sabre_wsync_handler; |
658 | pcp->pbm, | 664 | d->pre_handler_arg1 = pdev; |
659 | pdev->bus->number, pdev->devfn, PCI_COMMAND); | 665 | d->pre_handler_arg2 = (void *) |
666 | p->pbm_A.controller_regs + SABRE_WRSYNC; | ||
660 | } | 667 | } |
661 | } | 668 | } |
662 | return __irq(bucket); | 669 | return __irq(bucket); |
@@ -1626,10 +1633,9 @@ void __init sabre_init(int pnode, char *model_name) | |||
1626 | */ | 1633 | */ |
1627 | p->pbm_A.controller_regs = pr_regs[0].phys_addr; | 1634 | p->pbm_A.controller_regs = pr_regs[0].phys_addr; |
1628 | p->pbm_B.controller_regs = pr_regs[0].phys_addr; | 1635 | p->pbm_B.controller_regs = pr_regs[0].phys_addr; |
1629 | pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC; | ||
1630 | 1636 | ||
1631 | printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n", | 1637 | printk("PCI: Found SABRE, main regs at %016lx\n", |
1632 | p->pbm_A.controller_regs, pci_dma_wsync); | 1638 | p->pbm_A.controller_regs); |
1633 | 1639 | ||
1634 | /* Clear interrupts */ | 1640 | /* Clear interrupts */ |
1635 | 1641 | ||
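sabre_wsync_handler() above is installed as the bucket's pre_handler, so it runs from process_bucket() (see the irq.c changes earlier in this commit) before any of the device handlers for that interrupt. Below is a condensed sketch of that dispatch order, with locking, entropy accounting, and error handling omitted; dispatch_one_bucket is a hypothetical name used only for illustration.

/* Condensed from process_bucket() in the irq.c hunk earlier in this
 * commit; not the verbatim kernel code. */
static void dispatch_one_bucket(struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	int i;

	/* DMA write-sync first, e.g. sabre_wsync_handler(): read the far
	 * side of the non-APB bridge, then Sabre's write-sync register. */
	if (desc->pre_handler)
		desc->pre_handler(bp, desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	/* Then run every registered action for this bucket. */
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		if (!(desc->action_active_mask & (1 << i)))
			continue;
		desc->action[i].handler(__irq(bp), desc->action[i].dev_id, regs);
	}

	/* Finally retire the interrupt (the dummy PIL0 bucket has no ICLR). */
	if (bp->pil != 0)
		upa_writel(ICLR_IDLE, bp->iclr);
}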
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 5753175b94e6..6a182bb66281 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/iommu.h> | 15 | #include <asm/iommu.h> |
16 | #include <asm/irq.h> | 16 | #include <asm/irq.h> |
17 | #include <asm/upa.h> | 17 | #include <asm/upa.h> |
18 | #include <asm/pstate.h> | ||
18 | 19 | ||
19 | #include "pci_impl.h" | 20 | #include "pci_impl.h" |
20 | #include "iommu_common.h" | 21 | #include "iommu_common.h" |
@@ -326,6 +327,44 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) | |||
326 | return ret; | 327 | return ret; |
327 | } | 328 | } |
328 | 329 | ||
330 | static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) | ||
331 | { | ||
332 | unsigned long sync_reg = (unsigned long) _arg2; | ||
333 | u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO); | ||
334 | u64 val; | ||
335 | int limit; | ||
336 | |||
337 | schizo_write(sync_reg, mask); | ||
338 | |||
339 | limit = 100000; | ||
340 | val = 0; | ||
341 | while (--limit) { | ||
342 | val = schizo_read(sync_reg); | ||
343 | if (!(val & mask)) | ||
344 | break; | ||
345 | } | ||
346 | if (limit <= 0) { | ||
347 | printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n", | ||
348 | val, mask); | ||
349 | } | ||
350 | |||
351 | if (_arg1) { | ||
352 | static unsigned char cacheline[64] | ||
353 | __attribute__ ((aligned (64))); | ||
354 | |||
355 | __asm__ __volatile__("rd %%fprs, %0\n\t" | ||
356 | "or %0, %4, %1\n\t" | ||
357 | "wr %1, 0x0, %%fprs\n\t" | ||
358 | "stda %%f0, [%5] %6\n\t" | ||
359 | "wr %0, 0x0, %%fprs\n\t" | ||
360 | "membar #Sync" | ||
361 | : "=&r" (mask), "=&r" (val) | ||
362 | : "0" (mask), "1" (val), | ||
363 | "i" (FPRS_FEF), "r" (&cacheline[0]), | ||
364 | "i" (ASI_BLK_COMMIT_P)); | ||
365 | } | ||
366 | } | ||
367 | |||
329 | static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, | 368 | static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, |
330 | struct pci_dev *pdev, | 369 | struct pci_dev *pdev, |
331 | unsigned int ino) | 370 | unsigned int ino) |
@@ -369,6 +408,15 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, | |||
369 | bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap)); | 408 | bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap)); |
370 | bucket->flags |= IBF_PCI; | 409 | bucket->flags |= IBF_PCI; |
371 | 410 | ||
411 | if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) { | ||
412 | struct irq_desc *p = bucket->irq_info; | ||
413 | |||
414 | p->pre_handler = tomatillo_wsync_handler; | ||
415 | p->pre_handler_arg1 = ((pbm->chip_version <= 4) ? | ||
416 | (void *) 1 : (void *) 0); | ||
417 | p->pre_handler_arg2 = (void *) pbm->sync_reg; | ||
418 | } | ||
419 | |||
372 | return __irq(bucket); | 420 | return __irq(bucket); |
373 | } | 421 | } |
374 | 422 | ||
@@ -885,6 +933,7 @@ static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
885 | 933 | ||
886 | #define SCHIZO_PCI_CTRL (0x2000UL) | 934 | #define SCHIZO_PCI_CTRL (0x2000UL) |
887 | #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ | 935 | #define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ |
936 | #define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */ | ||
888 | #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ | 937 | #define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ |
889 | #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ | 938 | #define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ |
890 | #define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ | 939 | #define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ |
@@ -1887,37 +1936,27 @@ static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm) | |||
1887 | { | 1936 | { |
1888 | u64 tmp; | 1937 | u64 tmp; |
1889 | 1938 | ||
1890 | /* Set IRQ retry to infinity. */ | 1939 | schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5); |
1891 | schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, | ||
1892 | SCHIZO_IRQ_RETRY_INF); | ||
1893 | 1940 | ||
1894 | /* Enable arbiter for all PCI slots. Also, disable PCI interval | ||
1895 | * timer so that DTO (Discard TimeOuts) are not reported because | ||
1896 | * some Schizo revisions report them erroneously. | ||
1897 | */ | ||
1898 | tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL); | 1941 | tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL); |
1899 | if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS && | ||
1900 | pbm->chip_version == 0x5 && | ||
1901 | pbm->chip_revision == 0x1) | ||
1902 | tmp |= 0x0f; | ||
1903 | else | ||
1904 | tmp |= 0xff; | ||
1905 | 1942 | ||
1906 | tmp &= ~SCHIZO_PCICTRL_PTO; | 1943 | /* Enable arbiter for all PCI slots. */ |
1944 | tmp |= 0xff; | ||
1945 | |||
1907 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && | 1946 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && |
1908 | pbm->chip_version >= 0x2) | 1947 | pbm->chip_version >= 0x2) |
1909 | tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; | 1948 | tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; |
1910 | else | ||
1911 | tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT; | ||
1912 | 1949 | ||
1913 | if (!prom_getbool(pbm->prom_node, "no-bus-parking")) | 1950 | if (!prom_getbool(pbm->prom_node, "no-bus-parking")) |
1914 | tmp |= SCHIZO_PCICTRL_PARK; | 1951 | tmp |= SCHIZO_PCICTRL_PARK; |
1952 | else | ||
1953 | tmp &= ~SCHIZO_PCICTRL_PARK; | ||
1915 | 1954 | ||
1916 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && | 1955 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && |
1917 | pbm->chip_version <= 0x1) | 1956 | pbm->chip_version <= 0x1) |
1918 | tmp |= (1UL << 61); | 1957 | tmp |= SCHIZO_PCICTRL_DTO_INT; |
1919 | else | 1958 | else |
1920 | tmp &= ~(1UL << 61); | 1959 | tmp &= ~SCHIZO_PCICTRL_DTO_INT; |
1921 | 1960 | ||
1922 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) | 1961 | if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) |
1923 | tmp |= (SCHIZO_PCICTRL_MRM_PREF | | 1962 | tmp |= (SCHIZO_PCICTRL_MRM_PREF | |
@@ -2015,6 +2054,9 @@ static void __init schizo_pbm_init(struct pci_controller_info *p, | |||
2015 | pbm->pbm_regs = pr_regs[0].phys_addr; | 2054 | pbm->pbm_regs = pr_regs[0].phys_addr; |
2016 | pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL; | 2055 | pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL; |
2017 | 2056 | ||
2057 | if (chip_type == PBM_CHIP_TYPE_TOMATILLO) | ||
2058 | pbm->sync_reg = pr_regs[3].phys_addr + 0x1a18UL; | ||
2059 | |||
2018 | sprintf(pbm->name, | 2060 | sprintf(pbm->name, |
2019 | (chip_type == PBM_CHIP_TYPE_TOMATILLO ? | 2061 | (chip_type == PBM_CHIP_TYPE_TOMATILLO ? |
2020 | "TOMATILLO%d PBM%c" : | 2062 | "TOMATILLO%d PBM%c" : |
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 52f14e399b1c..533104c7907d 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -4,6 +4,8 @@
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
  */
 
+#define __KERNEL_SYSCALLS__
+
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -17,7 +19,6 @@
 #include <asm/ebus.h>
 #include <asm/auxio.h>
 
-#define __KERNEL_SYSCALLS__
 #include <linux/unistd.h>
 
 /*
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 80a76e2ad732..23ad839d113f 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
21 | #include <linux/security.h> | 21 | #include <linux/security.h> |
22 | #include <linux/seccomp.h> | ||
23 | #include <linux/audit.h> | ||
22 | #include <linux/signal.h> | 24 | #include <linux/signal.h> |
23 | 25 | ||
24 | #include <asm/asi.h> | 26 | #include <asm/asi.h> |
@@ -628,15 +630,27 @@ out: | |||
628 | unlock_kernel(); | 630 | unlock_kernel(); |
629 | } | 631 | } |
630 | 632 | ||
631 | asmlinkage void syscall_trace(void) | 633 | asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p) |
632 | { | 634 | { |
633 | #ifdef DEBUG_PTRACE | 635 | /* do the secure computing check first */ |
634 | printk("%s [%d]: syscall_trace\n", current->comm, current->pid); | 636 | secure_computing(regs->u_regs[UREG_G1]); |
635 | #endif | 637 | |
636 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 638 | if (unlikely(current->audit_context) && syscall_exit_p) { |
637 | return; | 639 | unsigned long tstate = regs->tstate; |
640 | int result = AUDITSC_SUCCESS; | ||
641 | |||
642 | if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) | ||
643 | result = AUDITSC_FAILURE; | ||
644 | |||
645 | audit_syscall_exit(current, result, regs->u_regs[UREG_I0]); | ||
646 | } | ||
647 | |||
638 | if (!(current->ptrace & PT_PTRACED)) | 648 | if (!(current->ptrace & PT_PTRACED)) |
639 | return; | 649 | goto out; |
650 | |||
651 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
652 | goto out; | ||
653 | |||
640 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 654 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) |
641 | ? 0x80 : 0)); | 655 | ? 0x80 : 0)); |
642 | 656 | ||
@@ -645,12 +659,20 @@ asmlinkage void syscall_trace(void) | |||
645 | * for normal use. strace only continues with a signal if the | 659 | * for normal use. strace only continues with a signal if the |
646 | * stopping signal is not SIGTRAP. -brl | 660 | * stopping signal is not SIGTRAP. -brl |
647 | */ | 661 | */ |
648 | #ifdef DEBUG_PTRACE | ||
649 | printk("%s [%d]: syscall_trace exit= %x\n", current->comm, | ||
650 | current->pid, current->exit_code); | ||
651 | #endif | ||
652 | if (current->exit_code) { | 662 | if (current->exit_code) { |
653 | send_sig (current->exit_code, current, 1); | 663 | send_sig(current->exit_code, current, 1); |
654 | current->exit_code = 0; | 664 | current->exit_code = 0; |
655 | } | 665 | } |
666 | |||
667 | out: | ||
668 | if (unlikely(current->audit_context) && !syscall_exit_p) | ||
669 | audit_syscall_entry(current, | ||
670 | (test_thread_flag(TIF_32BIT) ? | ||
671 | AUDIT_ARCH_SPARC : | ||
672 | AUDIT_ARCH_SPARC64), | ||
673 | regs->u_regs[UREG_G1], | ||
674 | regs->u_regs[UREG_I0], | ||
675 | regs->u_regs[UREG_I1], | ||
676 | regs->u_regs[UREG_I2], | ||
677 | regs->u_regs[UREG_I3]); | ||
656 | } | 678 | } |
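In the rewritten syscall_trace() above, audit success or failure on the exit path is derived from the carry bits saved in %tstate rather than from an error return value. A hedged C sketch of just that test (the helper name is invented for illustration; the kernel code does it inline exactly as the hunk shows):

    /* Illustration only: on sparc64 a failing syscall sets the 32-bit or
     * 64-bit carry bit in the saved %tstate, so the audit result can be
     * decided from the condition codes alone. */
    static int audit_result_from_tstate(unsigned long tstate)
    {
            if (tstate & (TSTATE_XCARRY | TSTATE_ICARRY))
                    return AUDITSC_FAILURE;
            return AUDITSC_SUCCESS;
    }

On the entry path (syscall_exit_p == 0) the function runs secure_computing() first and records the arguments with audit_syscall_entry() at the out: label, so auditing still happens when the task is not being ptraced.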
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index e5b9c7a27789..7e8e2919e186 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -45,8 +45,8 @@ extern void calibrate_delay(void); | |||
45 | /* Please don't make this stuff initdata!!! --DaveM */ | 45 | /* Please don't make this stuff initdata!!! --DaveM */ |
46 | static unsigned char boot_cpu_id; | 46 | static unsigned char boot_cpu_id; |
47 | 47 | ||
48 | cpumask_t cpu_online_map = CPU_MASK_NONE; | 48 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; |
49 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | 49 | cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; |
50 | static cpumask_t smp_commenced_mask; | 50 | static cpumask_t smp_commenced_mask; |
51 | static cpumask_t cpu_callout_map; | 51 | static cpumask_t cpu_callout_map; |
52 | 52 | ||
@@ -155,7 +155,7 @@ void cpu_panic(void) | |||
155 | panic("SMP bolixed\n"); | 155 | panic("SMP bolixed\n"); |
156 | } | 156 | } |
157 | 157 | ||
158 | static unsigned long current_tick_offset; | 158 | static unsigned long current_tick_offset __read_mostly; |
159 | 159 | ||
160 | /* This tick register synchronization scheme is taken entirely from | 160 | /* This tick register synchronization scheme is taken entirely from |
161 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. | 161 | * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit. |
@@ -1193,8 +1193,8 @@ void smp_send_stop(void) | |||
1193 | { | 1193 | { |
1194 | } | 1194 | } |
1195 | 1195 | ||
1196 | unsigned long __per_cpu_base; | 1196 | unsigned long __per_cpu_base __read_mostly; |
1197 | unsigned long __per_cpu_shift; | 1197 | unsigned long __per_cpu_shift __read_mostly; |
1198 | 1198 | ||
1199 | EXPORT_SYMBOL(__per_cpu_base); | 1199 | EXPORT_SYMBOL(__per_cpu_base); |
1200 | EXPORT_SYMBOL(__per_cpu_shift); | 1200 | EXPORT_SYMBOL(__per_cpu_shift); |
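The smp.c changes above (and the similar time.c ones further down) only add the __read_mostly annotation to existing globals. A minimal sketch of what that annotation expands to, assuming the usual definition in the architecture's cache.h; the section it names is created by the vmlinux.lds.S hunk at the end of this diff:

    /* Assumed definition (arch cache.h): the point is to gather
     * rarely-written globals into their own cacheline-aligned section so
     * they do not share (and ping-pong) cache lines with data that is
     * written on every tick or IPI. */
    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

    /* Usage exactly as in the hunk above: */
    static unsigned long current_tick_offset __read_mostly;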
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 56cd96f4a5cd..9202d925a9ce 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -79,7 +79,7 @@ extern void linux_sparc_syscall(void); | |||
79 | extern void rtrap(void); | 79 | extern void rtrap(void); |
80 | extern void show_regs(struct pt_regs *); | 80 | extern void show_regs(struct pt_regs *); |
81 | extern void solaris_syscall(void); | 81 | extern void solaris_syscall(void); |
82 | extern void syscall_trace(void); | 82 | extern void syscall_trace(struct pt_regs *, int); |
83 | extern u32 sunos_sys_table[], sys_call_table32[]; | 83 | extern u32 sunos_sys_table[], sys_call_table32[]; |
84 | extern void tl0_solaris(void); | 84 | extern void tl0_solaris(void); |
85 | extern void sys_sigsuspend(void); | 85 | extern void sys_sigsuspend(void); |
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S index 5a95e98c5317..5f9e4fae612e 100644 --- a/arch/sparc64/kernel/sys32.S +++ b/arch/sparc64/kernel/sys32.S | |||
@@ -135,6 +135,8 @@ SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1) | |||
135 | SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2) | 135 | SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2) |
136 | SIGN1(sys32_getpeername, sys_getpeername, %o0) | 136 | SIGN1(sys32_getpeername, sys_getpeername, %o0) |
137 | SIGN1(sys32_getsockname, sys_getsockname, %o0) | 137 | SIGN1(sys32_getsockname, sys_getsockname, %o0) |
138 | SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1) | ||
139 | SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2) | ||
138 | 140 | ||
139 | .globl sys32_mmap2 | 141 | .globl sys32_mmap2 |
140 | sys32_mmap2: | 142 | sys32_mmap2: |
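The SIGN2/SIGN3 lines added above generate compat stubs that sign-extend the listed %o registers before falling through to the native handler. Roughly what the generated sys32_ioprio_get stub amounts to, written in C purely for illustration (the real stub is assembly emitted by the SIGN2 macro):

    /* Illustrative C equivalent, not kernel source: 32-bit userspace
     * passes int arguments whose upper register halves are undefined, so
     * they must be rebuilt by sign extension before the 64-bit handler
     * sees them. */
    asmlinkage long sys32_ioprio_get(u32 which, u32 who)
    {
            return sys_ioprio_get((int)(s32)which, (int)(s32)who);
    }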
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index a5e36a4c8924..bceb91a8a2bd 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S | |||
@@ -59,11 +59,11 @@ sys_call_table32: | |||
59 | /*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall | 59 | /*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall |
60 | .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname | 60 | .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname |
61 | /*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl | 61 | /*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl |
62 | .word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask | 62 | .word sys32_epoll_wait, sys32_ioprio_set, sys_getppid, sys32_sigaction, sys_sgetmask |
63 | /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir | 63 | /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir |
64 | .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 | 64 | .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 |
65 | /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo | 65 | /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo |
66 | .word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex | 66 | .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, sys32_adjtimex |
67 | /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid | 67 | /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid |
68 | .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 | 68 | .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 |
69 | /*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 | 69 | /*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 |
@@ -125,11 +125,11 @@ sys_call_table: | |||
125 | /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall | 125 | /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall |
126 | .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname | 126 | .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname |
127 | /*190*/ .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl | 127 | /*190*/ .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl |
128 | .word sys_epoll_wait, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask | 128 | .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask |
129 | /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall | 129 | /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall |
130 | .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 | 130 | .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 |
131 | /*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo | 131 | /*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo |
132 | .word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex | 132 | .word sys_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex |
133 | /*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid | 133 | /*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid |
134 | .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid | 134 | .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid |
135 | /*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64 | 135 | /*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64 |
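Counting through the rows above, ioprio_set lands in slot 196 and ioprio_get in slot 218 of both tables, replacing sys_nis_syscall placeholders. A hedged userspace sketch of invoking the new entry by number (the numbers are read off the table and are sparc-specific; IOPRIO_WHO_PROCESS is assumed from the generic ioprio interface, and no libc wrapper is assumed to exist):

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    #define NR_ioprio_get       218   /* slot in sys_call_table above */
    #define IOPRIO_WHO_PROCESS  1     /* assumed from the ioprio interface */

    int main(void)
    {
            /* I/O priority of the current task (who == 0 means self). */
            long prio = syscall(NR_ioprio_get, IOPRIO_WHO_PROCESS, 0);
            printf("ioprio_get -> %ld\n", prio);
            return 0;
    }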
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 71b4e3807694..362b9c26871b 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -73,7 +73,7 @@ static __initdata struct sparc64_tick_ops dummy_tick_ops = { | |||
73 | .get_tick = dummy_get_tick, | 73 | .get_tick = dummy_get_tick, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct sparc64_tick_ops *tick_ops = &dummy_tick_ops; | 76 | struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops; |
77 | 77 | ||
78 | #define TICK_PRIV_BIT (1UL << 63) | 78 | #define TICK_PRIV_BIT (1UL << 63) |
79 | 79 | ||
@@ -195,7 +195,7 @@ static unsigned long tick_add_tick(unsigned long adj, unsigned long offset) | |||
195 | return new_tick; | 195 | return new_tick; |
196 | } | 196 | } |
197 | 197 | ||
198 | static struct sparc64_tick_ops tick_operations = { | 198 | static struct sparc64_tick_ops tick_operations __read_mostly = { |
199 | .init_tick = tick_init_tick, | 199 | .init_tick = tick_init_tick, |
200 | .get_tick = tick_get_tick, | 200 | .get_tick = tick_get_tick, |
201 | .get_compare = tick_get_compare, | 201 | .get_compare = tick_get_compare, |
@@ -276,7 +276,7 @@ static unsigned long stick_add_compare(unsigned long adj) | |||
276 | return new_compare; | 276 | return new_compare; |
277 | } | 277 | } |
278 | 278 | ||
279 | static struct sparc64_tick_ops stick_operations = { | 279 | static struct sparc64_tick_ops stick_operations __read_mostly = { |
280 | .init_tick = stick_init_tick, | 280 | .init_tick = stick_init_tick, |
281 | .get_tick = stick_get_tick, | 281 | .get_tick = stick_get_tick, |
282 | .get_compare = stick_get_compare, | 282 | .get_compare = stick_get_compare, |
@@ -422,7 +422,7 @@ static unsigned long hbtick_add_compare(unsigned long adj) | |||
422 | return val; | 422 | return val; |
423 | } | 423 | } |
424 | 424 | ||
425 | static struct sparc64_tick_ops hbtick_operations = { | 425 | static struct sparc64_tick_ops hbtick_operations __read_mostly = { |
426 | .init_tick = hbtick_init_tick, | 426 | .init_tick = hbtick_init_tick, |
427 | .get_tick = hbtick_get_tick, | 427 | .get_tick = hbtick_get_tick, |
428 | .get_compare = hbtick_get_compare, | 428 | .get_compare = hbtick_get_compare, |
@@ -437,10 +437,9 @@ static struct sparc64_tick_ops hbtick_operations = { | |||
437 | * NOTE: On SUN5 systems the ticker interrupt comes in using 2 | 437 | * NOTE: On SUN5 systems the ticker interrupt comes in using 2 |
438 | * interrupts, one at level14 and one with softint bit 0. | 438 | * interrupts, one at level14 and one with softint bit 0. |
439 | */ | 439 | */ |
440 | unsigned long timer_tick_offset; | 440 | unsigned long timer_tick_offset __read_mostly; |
441 | unsigned long timer_tick_compare; | ||
442 | 441 | ||
443 | static unsigned long timer_ticks_per_nsec_quotient; | 442 | static unsigned long timer_ticks_per_nsec_quotient __read_mostly; |
444 | 443 | ||
445 | #define TICK_SIZE (tick_nsec / 1000) | 444 | #define TICK_SIZE (tick_nsec / 1000) |
446 | 445 | ||
@@ -464,7 +463,7 @@ static inline void timer_check_rtc(void) | |||
464 | 463 | ||
465 | static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) | 464 | static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) |
466 | { | 465 | { |
467 | unsigned long ticks, pstate; | 466 | unsigned long ticks, compare, pstate; |
468 | 467 | ||
469 | write_seqlock(&xtime_lock); | 468 | write_seqlock(&xtime_lock); |
470 | 469 | ||
@@ -483,14 +482,14 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
483 | : "=r" (pstate) | 482 | : "=r" (pstate) |
484 | : "i" (PSTATE_IE)); | 483 | : "i" (PSTATE_IE)); |
485 | 484 | ||
486 | timer_tick_compare = tick_ops->add_compare(timer_tick_offset); | 485 | compare = tick_ops->add_compare(timer_tick_offset); |
487 | ticks = tick_ops->get_tick(); | 486 | ticks = tick_ops->get_tick(); |
488 | 487 | ||
489 | /* Restore PSTATE_IE. */ | 488 | /* Restore PSTATE_IE. */ |
490 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" | 489 | __asm__ __volatile__("wrpr %0, 0x0, %%pstate" |
491 | : /* no outputs */ | 490 | : /* no outputs */ |
492 | : "r" (pstate)); | 491 | : "r" (pstate)); |
493 | } while (time_after_eq(ticks, timer_tick_compare)); | 492 | } while (time_after_eq(ticks, compare)); |
494 | 493 | ||
495 | timer_check_rtc(); | 494 | timer_check_rtc(); |
496 | 495 | ||
@@ -506,11 +505,6 @@ void timer_tick_interrupt(struct pt_regs *regs) | |||
506 | 505 | ||
507 | do_timer(regs); | 506 | do_timer(regs); |
508 | 507 | ||
509 | /* | ||
510 | * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR. | ||
511 | */ | ||
512 | timer_tick_compare = tick_ops->get_compare() + timer_tick_offset; | ||
513 | |||
514 | timer_check_rtc(); | 508 | timer_check_rtc(); |
515 | 509 | ||
516 | write_sequnlock(&xtime_lock); | 510 | write_sequnlock(&xtime_lock); |
@@ -973,7 +967,7 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg | |||
973 | int err; | 967 | int err; |
974 | 968 | ||
975 | /* Register IRQ handler. */ | 969 | /* Register IRQ handler. */ |
976 | err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC, | 970 | err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0, |
977 | "timer", NULL); | 971 | "timer", NULL); |
978 | 972 | ||
979 | if (err) { | 973 | if (err) { |
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 382fd6798bb9..950423da8a6a 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S | |||
@@ -32,6 +32,8 @@ SECTIONS | |||
32 | .data1 : { *(.data1) } | 32 | .data1 : { *(.data1) } |
33 | . = ALIGN(64); | 33 | . = ALIGN(64); |
34 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | 34 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } |
35 | . = ALIGN(64); | ||
36 | .data.read_mostly : { *(.data.read_mostly) } | ||
35 | _edata = .; | 37 | _edata = .; |
36 | PROVIDE (edata = .); | 38 | PROVIDE (edata = .); |
37 | .fixup : { *(.fixup) } | 39 | .fixup : { *(.fixup) } |