Diffstat (limited to 'arch/sparc64/kernel')

 -rw-r--r--  arch/sparc64/kernel/cpu.c          |  10
 -rw-r--r--  arch/sparc64/kernel/ds.c           |   3
 -rw-r--r--  arch/sparc64/kernel/entry.S        |  30
 -rw-r--r--  arch/sparc64/kernel/entry.h        | 196
 -rw-r--r--  arch/sparc64/kernel/head.S         |   8
 -rw-r--r--  arch/sparc64/kernel/iommu.c        |  20
 -rw-r--r--  arch/sparc64/kernel/iommu_common.h |  18
 -rw-r--r--  arch/sparc64/kernel/irq.c          |  21
 -rw-r--r--  arch/sparc64/kernel/pci_sun4v.c    |  12
 -rw-r--r--  arch/sparc64/kernel/process.c      |   3
 -rw-r--r--  arch/sparc64/kernel/ptrace.c       |  20
 -rw-r--r--  arch/sparc64/kernel/setup.c        |   5
 -rw-r--r--  arch/sparc64/kernel/signal.c       |   3
 -rw-r--r--  arch/sparc64/kernel/smp.c          |  20
 -rw-r--r--  arch/sparc64/kernel/stacktrace.c   |   4
 -rw-r--r--  arch/sparc64/kernel/sys_sparc.c    |  14
 -rw-r--r--  arch/sparc64/kernel/sys_sparc32.c  |   3
 -rw-r--r--  arch/sparc64/kernel/systbls.h      |  53
 -rw-r--r--  arch/sparc64/kernel/time.c         |  66
 -rw-r--r--  arch/sparc64/kernel/trampoline.S   | 188
 -rw-r--r--  arch/sparc64/kernel/traps.c        |  49

 21 files changed, 480 insertions, 266 deletions
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index dd5d28e3d79..0097c08dc60 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -15,6 +15,8 @@
 #include <asm/spitfire.h>
 #include <asm/oplib.h>
 
+#include "entry.h"
+
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
 
 struct cpu_iu_info {
@@ -65,8 +67,6 @@ static struct cpu_iu_info linux_sparc_chips[] = {
 char *sparc_cpu_type;
 char *sparc_fpu_type;
 
-unsigned int fsr_storage;
-
 static void __init sun4v_cpu_probe(void)
 {
 	switch (sun4v_chip_type) {
@@ -94,8 +94,10 @@ void __init cpu_probe(void)
 	unsigned long ver, fpu_vers, manuf, impl, fprs;
 	int i;
 
-	if (tlb_type == hypervisor)
-		return sun4v_cpu_probe();
+	if (tlb_type == hypervisor) {
+		sun4v_cpu_probe();
+		return;
+	}
 
 	fprs = fprs_read();
 	fprs_write(FPRS_FEF);
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index bd76482077b..edb74f5a118 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -972,8 +972,7 @@ static void process_ds_work(void)
 	LIST_HEAD(todo);
 
 	spin_lock_irqsave(&ds_lock, flags);
-	list_splice(&ds_work_list, &todo);
-	INIT_LIST_HEAD(&ds_work_list);
+	list_splice_init(&ds_work_list, &todo);
 	spin_unlock_irqrestore(&ds_lock, flags);
 
 	list_for_each_entry_safe(qp, tmp, &todo, list) {
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 6be4d2d2904..49eca4b1cf2 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1705,6 +1705,36 @@ __flushw_user:
 2:	retl
 	 nop
 
+	/* Flush %fp and %i7 to the stack for all register
+	 * windows active inside of the cpu.  This allows
+	 * show_stack_trace() to avoid using an expensive
+	 * 'flushw'.
+	 */
+	.globl		stack_trace_flush
+	.type		stack_trace_flush,#function
+stack_trace_flush:
+	rdpr		%pstate, %o0
+	wrpr		%o0, PSTATE_IE, %pstate
+
+	rdpr		%cwp, %g1
+	rdpr		%canrestore, %g2
+	sub		%g1, 1, %g3
+
+1:	brz,pn		%g2, 2f
+	 sub		%g2, 1, %g2
+	wrpr		%g3, %cwp
+	stx		%fp, [%sp + STACK_BIAS + RW_V9_I6]
+	stx		%i7, [%sp + STACK_BIAS + RW_V9_I7]
+	ba,pt		%xcc, 1b
+	 sub		%g3, 1, %g3
+
+2:	wrpr		%g1, %cwp
+	wrpr		%o0, %pstate
+
+	retl
+	 nop
+	.size		stack_trace_flush,.-stack_trace_flush
+
 #ifdef CONFIG_SMP
 	.globl		hard_smp_processor_id
 hard_smp_processor_id:
diff --git a/arch/sparc64/kernel/entry.h b/arch/sparc64/kernel/entry.h
new file mode 100644
index 00000000000..4a91e9c6d31
--- /dev/null
+++ b/arch/sparc64/kernel/entry.h
@@ -0,0 +1,196 @@
+#ifndef _ENTRY_H
+#define _ENTRY_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+extern char *sparc_cpu_type;
+extern char *sparc_fpu_type;
+
+extern void __init per_cpu_patch(void);
+extern void __init sun4v_patch(void);
+extern void __init boot_cpu_id_too_large(int cpu);
+extern unsigned int dcache_parity_tl1_occurred;
+extern unsigned int icache_parity_tl1_occurred;
+
+extern asmlinkage void update_perfctrs(void);
+extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+extern void timer_interrupt(int irq, struct pt_regs *regs);
+
+extern void do_notify_resume(struct pt_regs *regs,
+			     unsigned long orig_i0,
+			     int restart_syscall,
+			     unsigned long thread_info_flags);
+
+extern asmlinkage void syscall_trace(struct pt_regs *regs,
+				     int syscall_exit_p);
+
+extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
+
+extern void do_fpe_common(struct pt_regs *regs);
+extern void do_fpieee(struct pt_regs *regs);
+extern void do_fpother(struct pt_regs *regs);
+extern void do_tof(struct pt_regs *regs);
+extern void do_div0(struct pt_regs *regs);
+extern void do_illegal_instruction(struct pt_regs *regs);
+extern void mem_address_unaligned(struct pt_regs *regs,
+				  unsigned long sfar,
+				  unsigned long sfsr);
+extern void sun4v_do_mna(struct pt_regs *regs,
+			 unsigned long addr,
+			 unsigned long type_ctx);
+extern void do_privop(struct pt_regs *regs);
+extern void do_privact(struct pt_regs *regs);
+extern void do_cee(struct pt_regs *regs);
+extern void do_cee_tl1(struct pt_regs *regs);
+extern void do_dae_tl1(struct pt_regs *regs);
+extern void do_iae_tl1(struct pt_regs *regs);
+extern void do_div0_tl1(struct pt_regs *regs);
+extern void do_fpdis_tl1(struct pt_regs *regs);
+extern void do_fpieee_tl1(struct pt_regs *regs);
+extern void do_fpother_tl1(struct pt_regs *regs);
+extern void do_ill_tl1(struct pt_regs *regs);
+extern void do_irq_tl1(struct pt_regs *regs);
+extern void do_lddfmna_tl1(struct pt_regs *regs);
+extern void do_stdfmna_tl1(struct pt_regs *regs);
+extern void do_paw(struct pt_regs *regs);
+extern void do_paw_tl1(struct pt_regs *regs);
+extern void do_vaw(struct pt_regs *regs);
+extern void do_vaw_tl1(struct pt_regs *regs);
+extern void do_tof_tl1(struct pt_regs *regs);
+extern void do_getpsr(struct pt_regs *regs);
+
+extern void spitfire_insn_access_exception(struct pt_regs *regs,
+					   unsigned long sfsr,
+					   unsigned long sfar);
+extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
+					       unsigned long sfsr,
+					       unsigned long sfar);
+extern void spitfire_data_access_exception(struct pt_regs *regs,
+					   unsigned long sfsr,
+					   unsigned long sfar);
+extern void spitfire_data_access_exception_tl1(struct pt_regs *regs,
+					       unsigned long sfsr,
+					       unsigned long sfar);
+extern void spitfire_access_error(struct pt_regs *regs,
+				  unsigned long status_encoded,
+				  unsigned long afar);
+
+extern void cheetah_fecc_handler(struct pt_regs *regs,
+				 unsigned long afsr,
+				 unsigned long afar);
+extern void cheetah_cee_handler(struct pt_regs *regs,
+				unsigned long afsr,
+				unsigned long afar);
+extern void cheetah_deferred_handler(struct pt_regs *regs,
+				     unsigned long afsr,
+				     unsigned long afar);
+extern void cheetah_plus_parity_error(int type, struct pt_regs *regs);
+
+extern void sun4v_insn_access_exception(struct pt_regs *regs,
+					unsigned long addr,
+					unsigned long type_ctx);
+extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
+					    unsigned long addr,
+					    unsigned long type_ctx);
+extern void sun4v_data_access_exception(struct pt_regs *regs,
+					unsigned long addr,
+					unsigned long type_ctx);
+extern void sun4v_data_access_exception_tl1(struct pt_regs *regs,
+					    unsigned long addr,
+					    unsigned long type_ctx);
+extern void sun4v_resum_error(struct pt_regs *regs,
+			      unsigned long offset);
+extern void sun4v_resum_overflow(struct pt_regs *regs);
+extern void sun4v_nonresum_error(struct pt_regs *regs,
+				 unsigned long offset);
+extern void sun4v_nonresum_overflow(struct pt_regs *regs);
+
+extern unsigned long sun4v_err_itlb_vaddr;
+extern unsigned long sun4v_err_itlb_ctx;
+extern unsigned long sun4v_err_itlb_pte;
+extern unsigned long sun4v_err_itlb_error;
+
+extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
+
+extern unsigned long sun4v_err_dtlb_vaddr;
+extern unsigned long sun4v_err_dtlb_ctx;
+extern unsigned long sun4v_err_dtlb_pte;
+extern unsigned long sun4v_err_dtlb_error;
+
+extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
+extern void hypervisor_tlbop_error(unsigned long err,
+				   unsigned long op);
+extern void hypervisor_tlbop_error_xcall(unsigned long err,
+					 unsigned long op);
+
+/* WARNING: The error trap handlers in assembly know the precise
+ * layout of the following structure.
+ *
+ * C-level handlers in traps.c use this information to log the
+ * error and then determine how to recover (if possible).
+ */
+struct cheetah_err_info {
+/*0x00*/u64 afsr;
+/*0x08*/u64 afar;
+
+	/* D-cache state */
+/*0x10*/u64 dcache_data[4];	/* The actual data */
+/*0x30*/u64 dcache_index;	/* D-cache index */
+/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
+/*0x40*/u64 dcache_utag;	/* D-cache microtag */
+/*0x48*/u64 dcache_stag;	/* D-cache snooptag */
+
+	/* I-cache state */
+/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
+/*0x90*/u64 icache_index;	/* I-cache index */
+/*0x98*/u64 icache_tag;		/* I-cache phys tag */
+/*0xa0*/u64 icache_utag;	/* I-cache microtag */
+/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
+/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
+/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */
+
+	/* E-cache state */
+/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
+/*0xe0*/u64 ecache_index;	/* E-cache index */
+/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */
+
+/*0xf0*/u64 __pad[32 - 30];
+};
+#define CHAFSR_INVALID		((u64)-1L)
+
+/* This is allocated at boot time based upon the largest hardware
+ * cpu ID in the system.  We allocate two entries per cpu, one for
+ * TL==0 logging and one for TL >= 1 logging.
+ */
+extern struct cheetah_err_info *cheetah_error_log;
+
+/* UPA nodes send interrupt packet to UltraSparc with first data reg
+ * value low 5 (7 on Starfire) bits holding the IRQ identifier being
+ * delivered.  We must translate this into a non-vector IRQ so we can
+ * set the softint on this cpu.
+ *
+ * To make processing these packets efficient and race free we use
+ * an array of irq buckets below.  The interrupt vector handler in
+ * entry.S feeds incoming packets into per-cpu pil-indexed lists.
+ *
+ * If you make changes to ino_bucket, please update hand coded assembler
+ * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
+ */
+struct ino_bucket {
+/*0x00*/unsigned long __irq_chain_pa;
+
+	/* Virtual interrupt number assigned to this INO. */
+/*0x08*/unsigned int __virt_irq;
+/*0x0c*/unsigned int __pad;
+};
+
+extern struct ino_bucket *ivector_table;
+extern unsigned long ivector_table_pa;
+
+extern void handler_irq(int irq, struct pt_regs *regs);
+extern void init_irqwork_curcpu(void);
+extern void __cpuinit sun4v_register_mondo_queues(int this_cpu);
+
+#endif /* _ENTRY_H */
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 44b105c04dd..34f8ff57c56 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -288,8 +288,12 @@ sun4v_chip_type:
 	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
 	mov	-1, %l3
 	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
-	sethi	%hi(8 * 1024 * 1024), %l3
-	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
+	/* 4MB align the kernel image size. */
+	set	(_end - KERNBASE), %l3
+	set	((4 * 1024 * 1024) - 1), %l4
+	add	%l3, %l4, %l3
+	andn	%l3, %l4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: roundup(ksize, 4MB)
 	sethi	%hi(KERNBASE), %l3
 	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
 	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
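The add/andn pair above is the standard power-of-two round-up: the kernel image size (_end - KERNBASE) is rounded up to a whole number of 4MB mappings before being handed to OBP as the mapping size, instead of the old fixed 8MB. A minimal userspace C sketch of the same arithmetic (the image size below is an illustrative stand-in, not a value taken from the kernel):

#include <stdio.h>

/* Round size up to the next multiple of align (a power of two) --
 * the same add-then-mask idiom the hunk above performs with
 * 'add' and 'andn'.
 */
static unsigned long roundup_pow2(unsigned long size, unsigned long align)
{
	unsigned long mask = align - 1;

	return (size + mask) & ~mask;
}

int main(void)
{
	unsigned long four_mb = 4UL * 1024 * 1024;
	unsigned long ksize = 6UL * 1024 * 1024 + 12345;	/* stand-in for (_end - KERNBASE) */

	printf("rounded: %lu bytes (%lu x 4MB)\n",
	       roundup_pow2(ksize, four_mb),
	       roundup_pow2(ksize, four_mb) / four_mb);
	return 0;
}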
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index fbaab3497bf..756fa24eeef 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -516,9 +516,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot, ctx;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct strbuf *strbuf;
 	struct iommu *iommu;
+	unsigned long base_shift;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -549,8 +551,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	outs->dma_length = 0;
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 		iopte_t *base;
 
 		slen = s->length;
@@ -593,7 +598,9 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 		 *   - allocated dma_addr isn't contiguous to previous allocation
 		 */
 		if ((dma_addr != dma_next) ||
-		    (outs->dma_length + s->length > max_seg_size)) {
+		    (outs->dma_length + s->length > max_seg_size) ||
+		    (is_span_boundary(out_entry, base_shift,
+				      seg_boundary_size, outs, s))) {
 			/* Can't merge: create a new segment */
 			segstart = s;
 			outcount++;
@@ -607,6 +614,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
@@ -626,7 +634,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 iommu_map_failed:
 	for_each_sg(sglist, s, nelems, i) {
 		if (s->dma_length != 0) {
-			unsigned long vaddr, npages, entry, i;
+			unsigned long vaddr, npages, entry, j;
 			iopte_t *base;
 
 			vaddr = s->dma_address & IO_PAGE_MASK;
@@ -637,8 +645,8 @@ iommu_map_failed:
 				>> IO_PAGE_SHIFT;
 			base = iommu->page_table + entry;
 
-			for (i = 0; i < npages; i++)
-				iopte_make_dummy(iommu, base + i);
+			for (j = 0; j < npages; j++)
+				iopte_make_dummy(iommu, base + j);
 
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -803,7 +811,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-const struct dma_ops sun4u_dma_ops = {
+static const struct dma_ops sun4u_dma_ops = {
 	.alloc_coherent		= dma_4u_alloc_coherent,
 	.free_coherent		= dma_4u_free_coherent,
 	.map_single		= dma_4u_map_single,
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 0713bd58499..f3575a614fa 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/device.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
@@ -45,17 +46,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
 	return npages;
 }
 
-static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
+static inline int is_span_boundary(unsigned long entry,
+				   unsigned long shift,
+				   unsigned long boundary_size,
+				   struct scatterlist *outs,
+				   struct scatterlist *sg)
 {
-	unsigned long i, npages = 0;
-	struct scatterlist *sg;
+	unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
+	int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
 
-	for_each_sg(sglist, sg, nelems, i) {
-		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-		npages += iommu_num_pages(paddr, sg->length);
-	}
-
-	return npages;
+	return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
 extern unsigned long iommu_range_alloc(struct device *dev,
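The new is_span_boundary() helper asks whether merging scatterlist entry sg into the segment currently being built (outs) would create a DMA mapping that crosses the device's segment boundary; it converts the combined length into IOMMU pages and defers to iommu_is_span_boundary() from <linux/iommu-helper.h>. A rough userspace model of that boundary test, assuming the boundary size (in pages) is a power of two -- an illustration of the idea, not the kernel's implementation:

#include <stdio.h>

/* Starting at IOMMU page index 'entry' (offset by 'shift', the index of
 * the first mappable page), would 'nr' pages spill across a boundary of
 * 'boundary_size' pages?
 */
static int span_crosses_boundary(unsigned long entry, unsigned long nr,
				 unsigned long shift, unsigned long boundary_size)
{
	unsigned long start = (entry + shift) & (boundary_size - 1);

	return start + nr > boundary_size;
}

int main(void)
{
	unsigned long boundary_pages = 8;	/* e.g. a 64K boundary in 8K IOMMU pages */

	printf("%d\n", span_crosses_boundary(6, 4, 0, boundary_pages));	/* 1: crosses */
	printf("%d\n", span_crosses_boundary(0, 8, 0, boundary_pages));	/* 0: fits    */
	return 0;
}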
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 5ec06c8c7fe..eb88bd6e674 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -44,27 +44,10 @@
 #include <asm/hypervisor.h>
 #include <asm/cacheflush.h>
 
-/* UPA nodes send interrupt packet to UltraSparc with first data reg
- * value low 5 (7 on Starfire) bits holding the IRQ identifier being
- * delivered.  We must translate this into a non-vector IRQ so we can
- * set the softint on this cpu.
- *
- * To make processing these packets efficient and race free we use
- * an array of irq buckets below.  The interrupt vector handler in
- * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- *
- * If you make changes to ino_bucket, please update hand coded assembler
- * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
- */
-struct ino_bucket {
-/*0x00*/unsigned long __irq_chain_pa;
-
-	/* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
-/*0x0c*/unsigned int __pad;
-};
+#include "entry.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
+
 struct ino_bucket *ivector_table;
 unsigned long ivector_table_pa;
 
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index ddca6c6c0b4..01839706bd5 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -335,8 +335,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
@@ -362,8 +364,11 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
 		slen = s->length;
 		/* Sanity check */
@@ -406,7 +411,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 		 *   - allocated dma_addr isn't contiguous to previous allocation
 		 */
 		if ((dma_addr != dma_next) ||
-		    (outs->dma_length + s->length > max_seg_size)) {
+		    (outs->dma_length + s->length > max_seg_size) ||
+		    (is_span_boundary(out_entry, base_shift,
+				      seg_boundary_size, outs, s))) {
 			/* Can't merge: create a new segment */
 			segstart = s;
 			outcount++;
@@ -420,6 +427,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index e116e38b160..acf8c5250aa 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 9a1ba1fe859..aaae865e793 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -35,6 +35,9 @@
 #include <asm/spitfire.h>
 #include <asm/page.h>
 #include <asm/cpudata.h>
+#include <asm/cacheflush.h>
+
+#include "entry.h"
 
 /* #define ALLOW_INIT_TRACING */
 
@@ -67,6 +70,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if (tlb_type == hypervisor)
 		return;
 
+	preempt_disable();
+
 #ifdef DCACHE_ALIASING_POSSIBLE
 	/* If bit 13 of the kernel address we used to access the
 	 * user page is the same as the virtual address that page
@@ -105,6 +110,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		for (; start < end; start += icache_line_size)
 			flushi(start);
 	}
+
+	preempt_enable();
 }
 
 enum sparc_regset {
@@ -382,6 +389,7 @@ static const struct user_regset_view user_sparc64_view = {
 	.regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
 };
 
+#ifdef CONFIG_COMPAT
 static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
@@ -676,14 +684,18 @@ static const struct user_regset_view user_sparc32_view = {
 	.name = "sparc", .e_machine = EM_SPARC,
 	.regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
 };
+#endif /* CONFIG_COMPAT */
 
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 {
+#ifdef CONFIG_COMPAT
 	if (test_tsk_thread_flag(task, TIF_32BIT))
 		return &user_sparc32_view;
+#endif
 	return &user_sparc64_view;
 }
 
+#ifdef CONFIG_COMPAT
 struct compat_fps {
 	unsigned int regs[32];
 	unsigned int fsr;
@@ -798,6 +810,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 	return ret;
 }
+#endif /* CONFIG_COMPAT */
 
 struct fps {
 	unsigned int regs[64];
@@ -807,11 +820,14 @@ struct fps {
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
 	const struct user_regset_view *view = task_user_regset_view(child);
-	struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
 	unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
-	struct fps __user *fps = (struct fps __user *) addr;
+	struct pt_regs __user *pregs;
+	struct fps __user *fps;
 	int ret;
 
+	pregs = (struct pt_regs __user *) (unsigned long) addr;
+	fps = (struct fps __user *) (unsigned long) addr;
+
 	switch (request) {
 	case PTRACE_PEEKUSR:
 		ret = (addr != 0) ? -EIO : 0;
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index d036dbe7286..6acb4c51cfe 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -51,6 +51,8 @@
 #include <net/ipconfig.h>
 #endif
 
+#include "entry.h"
+
 /* Used to synchronize accesses to NatSemi SUPER I/O chip configure
  * operations in asm/ns87303.h
  */
@@ -335,9 +337,6 @@ void __init setup_arch(char **cmdline_p)
 
 /* BUFFER is PAGE_SIZE bytes long. */
 
-extern char *sparc_cpu_type;
-extern char *sparc_fpu_type;
-
 extern void smp_info(struct seq_file *);
 extern void smp_bogo(struct seq_file *);
 extern void mmu_info(struct seq_file *);
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index fb13775b368..94a9d64208e 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -32,6 +32,9 @@
 #include <asm/siginfo.h>
 #include <asm/visasm.h>
 
+#include "entry.h"
+#include "systbls.h"
+
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index cc454731d87..59f020d69d4 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1,6 +1,6 @@
 /* smp.c: Sparc64 SMP support.
  *
- * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
  */
 
 #include <linux/module.h>
@@ -30,6 +30,7 @@
 #include <asm/cpudata.h>
 #include <asm/hvtramp.h>
 #include <asm/io.h>
+#include <asm/timer.h>
 
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
@@ -284,14 +285,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 {
 	extern unsigned long sparc64_ttable_tl0;
 	extern unsigned long kern_locked_tte_data;
-	extern int bigkernel;
 	struct hvtramp_descr *hdesc;
 	unsigned long trampoline_ra;
 	struct trap_per_cpu *tb;
 	u64 tte_vaddr, tte_data;
 	unsigned long hv_err;
+	int i;
 
-	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+	hdesc = kzalloc(sizeof(*hdesc) +
+			(sizeof(struct hvtramp_mapping) *
+			 num_kernel_image_mappings - 1),
+			GFP_KERNEL);
 	if (!hdesc) {
 		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 		       "hvtramp_descr.\n");
@@ -299,7 +303,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	}
 
 	hdesc->cpu = cpu;
-	hdesc->num_mappings = (bigkernel ? 2 : 1);
+	hdesc->num_mappings = num_kernel_image_mappings;
 
 	tb = &trap_block[cpu];
 	tb->hdesc = hdesc;
@@ -312,13 +316,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	tte_vaddr = (unsigned long) KERNBASE;
 	tte_data = kern_locked_tte_data;
 
-	hdesc->maps[0].vaddr = tte_vaddr;
-	hdesc->maps[0].tte = tte_data;
-	if (bigkernel) {
+	for (i = 0; i < hdesc->num_mappings; i++) {
+		hdesc->maps[i].vaddr = tte_vaddr;
+		hdesc->maps[i].tte = tte_data;
 		tte_vaddr += 0x400000;
 		tte_data += 0x400000;
-		hdesc->maps[1].vaddr = tte_vaddr;
-		hdesc->maps[1].tte = tte_data;
 	}
 
 	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
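struct hvtramp_descr ends in a one-element maps[] array, so the kzalloc() above sizes the allocation for num_kernel_image_mappings entries and the loop then fills one hvtramp_mapping per 4MB slice of the kernel image. A simplified, self-contained C sketch of that allocation pattern (structure layout and values are illustrative; current kernels would use a flexible array member):

#include <stdio.h>
#include <stdlib.h>

struct hvtramp_mapping {
	unsigned long vaddr;
	unsigned long tte;
};

struct hvtramp_descr {
	unsigned int cpu;
	unsigned int num_mappings;
	struct hvtramp_mapping maps[1];	/* really num_mappings entries */
};

int main(void)
{
	unsigned long tte_vaddr = 0x400000;		/* illustrative KERNBASE */
	unsigned long tte_data = 0x8000000000000000UL;	/* illustrative TTE data */
	unsigned int num_kernel_image_mappings = 3;
	struct hvtramp_descr *hdesc;
	unsigned int i;

	/* Allocate the descriptor plus room for the extra mappings. */
	hdesc = calloc(1, sizeof(*hdesc) +
			  sizeof(struct hvtramp_mapping) *
			  (num_kernel_image_mappings - 1));
	if (!hdesc)
		return 1;

	hdesc->num_mappings = num_kernel_image_mappings;
	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr + i * 0x400000UL;
		hdesc->maps[i].tte   = tte_data + i * 0x400000UL;
	}

	for (i = 0; i < hdesc->num_mappings; i++)
		printf("map %u: vaddr=%#lx tte=%#lx\n",
		       i, hdesc->maps[i].vaddr, hdesc->maps[i].tte);

	free(hdesc);
	return 0;
}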
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c
index 47f92a59be1..84d39e873e8 100644
--- a/arch/sparc64/kernel/stacktrace.c
+++ b/arch/sparc64/kernel/stacktrace.c
@@ -2,13 +2,15 @@
 #include <linux/stacktrace.h>
 #include <linux/thread_info.h>
 #include <asm/ptrace.h>
+#include <asm/stacktrace.h>
 
 void save_stack_trace(struct stack_trace *trace)
 {
 	unsigned long ksp, fp, thread_base;
 	struct thread_info *tp = task_thread_info(current);
 
-	flushw_all();
+	stack_trace_flush();
+
 	__asm__ __volatile__(
 		"mov %%fp, %0"
 		: "=r" (ksp)
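save_stack_trace() now uses the cheap stack_trace_flush() added in entry.S above instead of a full register-window flush, then walks the frame-pointer chain on the kernel stack. A rough, self-contained model of that kind of walk (the real sparc64 walker additionally applies STACK_BIAS and checks that every frame stays within the task's kernel stack):

#include <stdio.h>

/* Each frame records the caller's frame pointer and return address; the
 * walker follows the chain and records return addresses until the trace
 * buffer is full.
 */
struct fake_frame {
	struct fake_frame *next_fp;	/* saved frame pointer (%fp slot) */
	unsigned long ret_addr;		/* saved return address (%i7 slot) */
};

static unsigned int walk(struct fake_frame *fp, unsigned long *entries,
			 unsigned int max_entries)
{
	unsigned int n = 0;

	while (fp && n < max_entries) {
		entries[n++] = fp->ret_addr;
		fp = fp->next_fp;
	}
	return n;
}

int main(void)
{
	struct fake_frame f3 = { NULL, 0x401230 };
	struct fake_frame f2 = { &f3, 0x401180 };
	struct fake_frame f1 = { &f2, 0x401020 };
	unsigned long entries[8];
	unsigned int i, n = walk(&f1, entries, 8);

	for (i = 0; i < n; i++)
		printf("frame %u: %#lx\n", i, entries[i]);
	return 0;
}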
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 134d801579f..f952745d0f3 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -1,5 +1,4 @@
-/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
- * linux/arch/sparc64/kernel/sys_sparc.c
+/* linux/arch/sparc64/kernel/sys_sparc.c
  *
  * This file contains various random system calls that
  * have a non-standard calling sequence on the Linux/sparc
@@ -30,6 +29,9 @@
 #include <asm/perfctr.h>
 #include <asm/unistd.h>
 
+#include "entry.h"
+#include "systbls.h"
+
 /* #define DEBUG_UNIMP_SYSCALL */
 
 asmlinkage unsigned long sys_getpagesize(void)
@@ -445,7 +447,8 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
 		goto out;
 	case SEMTIMEDOP:
 		err = sys_semtimedop(first, ptr, (unsigned)second,
-			(const struct timespec __user *) fifth);
+			(const struct timespec __user *)
+			(unsigned long) fifth);
 		goto out;
 	case SEMGET:
 		err = sys_semget(first, (int)second, (int)third);
@@ -788,7 +791,7 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
 	} else {
 		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
 		    current_thread_info()->utraps[0] > 1) {
-			long *p = current_thread_info()->utraps;
+			unsigned long *p = current_thread_info()->utraps;
 
 			current_thread_info()->utraps =
 				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
@@ -816,7 +819,8 @@ asmlinkage long sys_utrap_install(utrap_entry_t type,
 	return 0;
 }
 
-long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
+asmlinkage long sparc_memory_ordering(unsigned long model,
+				      struct pt_regs *regs)
 {
 	if (model >= 3)
 		return -EINVAL;
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index deaba2bd053..2455fa49887 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs)
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;
diff --git a/arch/sparc64/kernel/systbls.h b/arch/sparc64/kernel/systbls.h
new file mode 100644
index 00000000000..8a0d20a35d0
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.h
@@ -0,0 +1,53 @@
+#ifndef _SYSTBLS_H
+#define _SYSTBLS_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <asm/utrap.h>
+#include <asm/signal.h>
+
+extern asmlinkage unsigned long sys_getpagesize(void);
+extern asmlinkage unsigned long sparc_brk(unsigned long brk);
+extern asmlinkage long sparc_pipe(struct pt_regs *regs);
+extern asmlinkage long sys_ipc(unsigned int call, int first,
+			       unsigned long second,
+			       unsigned long third,
+			       void __user *ptr, long fifth);
+extern asmlinkage long sparc64_newuname(struct new_utsname __user *name);
+extern asmlinkage long sparc64_personality(unsigned long personality);
+extern asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+					 unsigned long prot, unsigned long flags,
+					 unsigned long fd, unsigned long off);
+extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
+extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
+					     unsigned long old_len,
+					     unsigned long new_len,
+					     unsigned long flags,
+					     unsigned long new_addr);
+extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
+extern asmlinkage long sys_getdomainname(char __user *name, int len);
+extern asmlinkage long solaris_syscall(struct pt_regs *regs);
+extern asmlinkage long sunos_syscall(struct pt_regs *regs);
+extern asmlinkage long sys_utrap_install(utrap_entry_t type,
+					 utrap_handler_t new_p,
+					 utrap_handler_t new_d,
+					 utrap_handler_t __user *old_p,
+					 utrap_handler_t __user *old_d);
+extern asmlinkage long sparc_memory_ordering(unsigned long model,
+					     struct pt_regs *regs);
+extern asmlinkage long sys_rt_sigaction(int sig,
+					const struct sigaction __user *act,
+					struct sigaction __user *oact,
+					void __user *restorer,
+					size_t sigsetsize);
+extern asmlinkage long sys_perfctr(int opcode, unsigned long arg0,
+				   unsigned long arg1, unsigned long arg2);
+
+extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
+extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
+extern asmlinkage long sys_sigpause(unsigned int set);
+extern asmlinkage long sys_sigsuspend(old_sigset_t set);
+extern void do_rt_sigreturn(struct pt_regs *regs);
+
+#endif /* _SYSTBLS_H */
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index d204f1ab1d4..e5d238970c7 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1,7 +1,6 @@
-/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
- * time.c: UltraSparc timer and TOD clock support.
+/* time.c: UltraSparc timer and TOD clock support.
  *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
  *
  * Based largely on code which is:
@@ -48,6 +47,8 @@
 #include <asm/uaccess.h>
 #include <asm/irq_regs.h>
 
+#include "entry.h"
+
 DEFINE_SPINLOCK(mostek_lock);
 DEFINE_SPINLOCK(rtc_lock);
 void __iomem *mstk48t02_regs = NULL;
@@ -508,6 +509,37 @@ static int __init has_low_battery(void)
 	return (data1 == data2);	/* Was the write blocked? */
 }
 
+static void __init mostek_set_system_time(void __iomem *mregs)
+{
+	unsigned int year, mon, day, hour, min, sec;
+	u8 tmp;
+
+	spin_lock_irq(&mostek_lock);
+
+	/* Traditional Mostek chip. */
+	tmp = mostek_read(mregs + MOSTEK_CREG);
+	tmp |= MSTK_CREG_READ;
+	mostek_write(mregs + MOSTEK_CREG, tmp);
+
+	sec = MSTK_REG_SEC(mregs);
+	min = MSTK_REG_MIN(mregs);
+	hour = MSTK_REG_HOUR(mregs);
+	day = MSTK_REG_DOM(mregs);
+	mon = MSTK_REG_MONTH(mregs);
+	year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
+
+	xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+	set_normalized_timespec(&wall_to_monotonic,
+				-xtime.tv_sec, -xtime.tv_nsec);
+
+	tmp = mostek_read(mregs + MOSTEK_CREG);
+	tmp &= ~MSTK_CREG_READ;
+	mostek_write(mregs + MOSTEK_CREG, tmp);
+
+	spin_unlock_irq(&mostek_lock);
+}
+
 /* Probe for the real time clock chip. */
 static void __init set_system_time(void)
 {
@@ -520,7 +552,6 @@ static void __init set_system_time(void)
 	unsigned long dregs = 0UL;
 	void __iomem *bregs = 0UL;
 #endif
-	u8 tmp;
 
 	if (!mregs && !dregs && !bregs) {
 		prom_printf("Something wrong, clock regs not mapped yet.\n");
@@ -528,20 +559,11 @@
 	}
 
 	if (mregs) {
-		spin_lock_irq(&mostek_lock);
-
-		/* Traditional Mostek chip. */
-		tmp = mostek_read(mregs + MOSTEK_CREG);
-		tmp |= MSTK_CREG_READ;
-		mostek_write(mregs + MOSTEK_CREG, tmp);
+		mostek_set_system_time(mregs);
+		return;
+	}
 
-		sec = MSTK_REG_SEC(mregs);
-		min = MSTK_REG_MIN(mregs);
-		hour = MSTK_REG_HOUR(mregs);
-		day = MSTK_REG_DOM(mregs);
-		mon = MSTK_REG_MONTH(mregs);
-		year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
-	} else if (bregs) {
+	if (bregs) {
 		unsigned char val = readb(bregs + 0x0e);
 		unsigned int century;
 
@@ -596,14 +618,6 @@ static void __init set_system_time(void)
 	xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
-
-	if (mregs) {
-		tmp = mostek_read(mregs + MOSTEK_CREG);
-		tmp &= ~MSTK_CREG_READ;
-		mostek_write(mregs + MOSTEK_CREG, tmp);
-
-		spin_unlock_irq(&mostek_lock);
-	}
 }
 
 /* davem suggests we keep this within the 4M locked kernel image */
@@ -1027,7 +1041,7 @@ void __init time_init(void)
 	setup_clockevent_multiplier(clock);
 
 	sparc64_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
+		clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
 	sparc64_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &sparc64_clockevent);
 
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 4ae2e525d68..56ff5521134 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -105,7 +105,7 @@ startup_continue:
 	wr	%g2, 0, %tick_cmpr
 
 	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
-	 * We lock 2 consequetive entries if we are 'bigkernel'.
+	 * We lock 'num_kernel_image_mappings' consequetive entries.
 	 */
 	sethi	%hi(prom_entry_lock), %g2
 1:	ldstub	[%g2 + %lo(prom_entry_lock)], %g1
@@ -119,6 +119,29 @@ startup_continue:
 	add	%l2, -(192 + 128), %sp
 	flushw
 
+	/* Setup the loop variables:
+	 * %l3: VADDR base
+	 * %l4: TTE base
+	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+	 * %l6: Number of TTE entries to map
+	 * %l7: Highest TTE entry number, we count down
+	 */
+	sethi	%hi(KERNBASE), %l3
+	sethi	%hi(kern_locked_tte_data), %l4
+	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr	%l5
+	sethi	%hi(num_kernel_image_mappings), %l6
+	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add	%l6, 1, %l6
+
+	mov	15, %l7
+	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+	mov	63, %l7
+2:
+
+3:
+	/* Lock into I-MMU */
 	sethi	%hi(call_method), %g2
 	or	%g2, %lo(call_method), %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x00]
@@ -132,63 +155,26 @@ startup_continue:
 	sethi	%hi(prom_mmu_ihandle_cache), %g2
 	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
 
-	mov	63, %g2
-1:
-	stx	%g2, [%sp + 2047 + 128 + 0x38]
-	sethi	%hi(p1275buf), %g2
-	or	%g2, %lo(p1275buf), %g2
-	ldx	[%g2 + 0x08], %o1
-	call	%o1
-	 add	%sp, (2047 + 128), %o0
+	/* Each TTE maps 4MB, convert index to offset. */
+	sllx	%l5, 22, %g1
 
-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, do_dtlb
-	 nop
+	add	%l3, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add	%l4, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE
 
-	sethi	%hi(call_method), %g2
-	or	%g2, %lo(call_method), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x00]
-	mov	5, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x08]
-	mov	1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x10]
-	sethi	%hi(itlb_load), %g2
-	or	%g2, %lo(itlb_load), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x18]
-	sethi	%hi(prom_mmu_ihandle_cache), %g2
-	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE + 0x400000), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi	%hi(0x400000), %g1
-	add	%g2, %g1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	62, %g2
-1:
+	/* TTE index is highest minus loop index. */
+	sub	%l7, %l5, %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x38]
+
 	sethi	%hi(p1275buf), %g2
 	or	%g2, %lo(p1275buf), %g2
 	ldx	[%g2 + 0x08], %o1
 	call	%o1
 	 add	%sp, (2047 + 128), %o0
 
-do_dtlb:
+	/* Lock into D-MMU */
 	sethi	%hi(call_method), %g2
 	or	%g2, %lo(call_method), %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x00]
@@ -202,65 +188,30 @@ do_dtlb:
 	sethi	%hi(prom_mmu_ihandle_cache), %g2
 	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
 
-	mov	15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+	/* Each TTE maps 4MB, convert index to offset. */
+	sllx	%l5, 22, %g1
 
-	mov	63, %g2
-1:
+	add	%l3, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add	%l4, %g1, %g2
+	stx	%g2, [%sp + 2047 + 128 + 0x30]	! TTE
 
+	/* TTE index is highest minus loop index. */
+	sub	%l7, %l5, %g2
 	stx	%g2, [%sp + 2047 + 128 + 0x38]
+
 	sethi	%hi(p1275buf), %g2
 	or	%g2, %lo(p1275buf), %g2
 	ldx	[%g2 + 0x08], %o1
 	call	%o1
 	 add	%sp, (2047 + 128), %o0
 
-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, do_unlock
+	add	%l5, 1, %l5
+	cmp	%l5, %l6
+	bne,pt	%xcc, 3b
 	 nop
 
-	sethi	%hi(call_method), %g2
-	or	%g2, %lo(call_method), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x00]
-	mov	5, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x08]
-	mov	1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x10]
-	sethi	%hi(dtlb_load), %g2
-	or	%g2, %lo(dtlb_load), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x18]
-	sethi	%hi(prom_mmu_ihandle_cache), %g2
-	lduw	[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x20]
-	sethi	%hi(KERNBASE + 0x400000), %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x28]
-	sethi	%hi(kern_locked_tte_data), %g2
-	ldx	[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi	%hi(0x400000), %g1
-	add	%g2, %g1, %g2
-	stx	%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov	14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov	62, %g2
-1:
-
-	stx	%g2, [%sp + 2047 + 128 + 0x38]
-	sethi	%hi(p1275buf), %g2
-	or	%g2, %lo(p1275buf), %g2
-	ldx	[%g2 + 0x08], %o1
-	call	%o1
-	 add	%sp, (2047 + 128), %o0
-
-do_unlock:
 	sethi	%hi(prom_entry_lock), %g2
 	stb	%g0, [%g2 + %lo(prom_entry_lock)]
 	membar	#StoreStore | #StoreLoad
@@ -269,47 +220,36 @@ do_unlock:
 	 nop
 
 niagara_lock_tlb:
+	sethi	%hi(KERNBASE), %l3
+	sethi	%hi(kern_locked_tte_data), %l4
+	ldx	[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr	%l5
+	sethi	%hi(num_kernel_image_mappings), %l6
+	lduw	[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add	%l6, 1, %l6
+
+1:
 	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE), %o0
+	sllx	%l5, 22, %g2
+	add	%l3, %g2, %o0
 	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	add	%l4, %g2, %o2
 	mov	HV_MMU_IMMU, %o3
 	ta	HV_FAST_TRAP
 
 	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE), %o0
+	sllx	%l5, 22, %g2
+	add	%l3, %g2, %o0
 	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
+	add	%l4, %g2, %o2
 	mov	HV_MMU_DMMU, %o3
 	ta	HV_FAST_TRAP
 
-	sethi	%hi(bigkernel), %g2
-	lduw	[%g2 + %lo(bigkernel)], %g2
-	brz,pt	%g2, after_lock_tlb
+	add	%l5, 1, %l5
+	cmp	%l5, %l6
+	bne,pt	%xcc, 1b
 	 nop
 
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE + 0x400000), %o0
-	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi	%hi(0x400000), %o3
-	add	%o2, %o3, %o2
-	mov	HV_MMU_IMMU, %o3
-	ta	HV_FAST_TRAP
-
-	mov	HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi	%hi(KERNBASE + 0x400000), %o0
-	clr	%o1
-	sethi	%hi(kern_locked_tte_data), %o2
-	ldx	[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi	%hi(0x400000), %o3
-	add	%o2, %o3, %o2
-	mov	HV_MMU_DMMU, %o3
-	ta	HV_FAST_TRAP
-
 after_lock_tlb:
 	wrpr	%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
 	wr	%g0, 0, %fprs
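Both the OBP path and the Niagara hypervisor path now share the same loop shape: for each 4MB slice of the kernel image, add i * 4MB to the base virtual address and TTE data and lock the pair into the I-TLB and D-TLB. A C rendering of that structure is sketched below; lock_tlb_entry() is an illustrative stand-in for the hand-built OBP call-method sequence (or the HV_FAST_MMU_MAP_PERM_ADDR hypercall), not a real kernel function.

#include <stdio.h>

static void lock_tlb_entry(const char *which, int entry,
			   unsigned long vaddr, unsigned long tte)
{
	/* Stand-in for the OBP/hypervisor call that locks one translation. */
	printf("%s entry %2d: vaddr=%#lx tte=%#lx\n", which, entry, vaddr, tte);
}

int main(void)
{
	unsigned long kernbase = 0x400000;		/* illustrative KERNBASE */
	unsigned long tte_base = 0x8000000000000000UL;	/* illustrative TTE data */
	unsigned int num_kernel_image_mappings = 2;
	int highest = 15;	/* 15 on Cheetah, 63 on other sun4u chips, per the asm */
	unsigned int i;

	/* The assembly iterates num_kernel_image_mappings + 1 times (note the
	 * "add %l6, 1, %l6" above); each pass maps one 4MB slice, and the
	 * sun4u path hands OBP a TLB entry number that counts down.
	 */
	for (i = 0; i < num_kernel_image_mappings + 1; i++) {
		unsigned long off = (unsigned long)i << 22;	/* i * 4MB */

		lock_tlb_entry("I-TLB", highest - i, kernbase + off, tte_base + off);
		lock_tlb_entry("D-TLB", highest - i, kernbase + off, tte_base + off);
	}
	return 0;
}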
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 007f5317c0d..96da847023f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -42,6 +42,7 @@
 #endif
 #include <asm/prom.h>
 
+#include "entry.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
@@ -77,11 +78,6 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
 	}
 }
 
-void do_call_debug(struct pt_regs *regs)
-{
-	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
-}
-
 void bad_trap(struct pt_regs *regs, long lvl)
 {
 	char buffer[32];
@@ -550,41 +546,6 @@ static unsigned long ecache_flush_physbase;
 static unsigned long ecache_flush_linesize;
 static unsigned long ecache_flush_size;
 
-/* WARNING: The error trap handlers in assembly know the precise
- * layout of the following structure.
- *
- * C-level handlers below use this information to log the error
- * and then determine how to recover (if possible).
- */
-struct cheetah_err_info {
-/*0x00*/u64 afsr;
-/*0x08*/u64 afar;
-
-	/* D-cache state */
-/*0x10*/u64 dcache_data[4];	/* The actual data */
-/*0x30*/u64 dcache_index;	/* D-cache index */
-/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
-/*0x40*/u64 dcache_utag;	/* D-cache microtag */
-/*0x48*/u64 dcache_stag;	/* D-cache snooptag */
-
-	/* I-cache state */
-/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
-/*0x90*/u64 icache_index;	/* I-cache index */
-/*0x98*/u64 icache_tag;		/* I-cache phys tag */
-/*0xa0*/u64 icache_utag;	/* I-cache microtag */
-/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
-/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
-/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */
-
-	/* E-cache state */
-/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
-/*0xe0*/u64 ecache_index;	/* E-cache index */
-/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */
-
-/*0xf0*/u64 __pad[32 - 30];
-};
-#define CHAFSR_INVALID		((u64)-1L)
-
 /* This table is ordered in priority of errors and matches the
  * AFAR overwrite policy as well.
  */
@@ -758,10 +719,6 @@ static struct afsr_error_table __jalapeno_error_table[] = {
 static struct afsr_error_table *cheetah_error_table;
 static unsigned long cheetah_afsr_errors;
 
-/* This is allocated at boot time based upon the largest hardware
- * cpu ID in the system.  We allocate two entries per cpu, one for
- * TL==0 logging and one for TL >= 1 logging.
- */
 struct cheetah_err_info *cheetah_error_log;
 
 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
@@ -2102,7 +2059,7 @@ void do_div0(struct pt_regs *regs)
 	force_sig_info(SIGFPE, &info, current);
 }
 
-void instruction_dump (unsigned int *pc)
+static void instruction_dump(unsigned int *pc)
 {
 	int i;
 
@@ -2115,7 +2072,7 @@ void instruction_dump (unsigned int *pc)
 	printk("\n");
 }
 
-static void user_instruction_dump (unsigned int __user *pc)
+static void user_instruction_dump(unsigned int __user *pc)
 {
 	int i;
 	unsigned int buf[9];