Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/Kconfig                     |  15
-rw-r--r--  arch/sparc/Makefile                    |  12
-rw-r--r--  arch/sparc/boot/Makefile               |   3
-rw-r--r--  arch/sparc/include/asm/asi.h           |   4
-rw-r--r--  arch/sparc/include/asm/device.h        |   3
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h   | 145
-rw-r--r--  arch/sparc/include/asm/irq_64.h        |   4
-rw-r--r--  arch/sparc/include/asm/leon.h          | 362
-rw-r--r--  arch/sparc/include/asm/leon_amba.h     | 263
-rw-r--r--  arch/sparc/include/asm/machines.h      |   6
-rw-r--r--  arch/sparc/include/asm/nmi.h           |   5
-rw-r--r--  arch/sparc/include/asm/pci.h           |   3
-rw-r--r--  arch/sparc/include/asm/pci_32.h        | 105
-rw-r--r--  arch/sparc/include/asm/pci_64.h        |  88
-rw-r--r--  arch/sparc/include/asm/perf_counter.h  |  14
-rw-r--r--  arch/sparc/include/asm/pgtsrmmu.h      |   4
-rw-r--r--  arch/sparc/include/asm/prom.h          |   3
-rw-r--r--  arch/sparc/include/asm/socket.h        |   3
-rw-r--r--  arch/sparc/include/asm/spinlock_32.h   |  12
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h   |  28
-rw-r--r--  arch/sparc/include/asm/system_32.h     |   1
-rw-r--r--  arch/sparc/include/asm/system_64.h     |   4
-rw-r--r--  arch/sparc/include/asm/types.h         |  27
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h    |   2
-rw-r--r--  arch/sparc/include/asm/unistd.h        |   3
-rw-r--r--  arch/sparc/kernel/Makefile             |   7
-rw-r--r--  arch/sparc/kernel/cpu.c                |   5
-rw-r--r--  arch/sparc/kernel/dma.c                | 175
-rw-r--r--  arch/sparc/kernel/dma.h                |  14
-rw-r--r--  arch/sparc/kernel/head_32.S            |   5
-rw-r--r--  arch/sparc/kernel/idprom.c             |   2
-rw-r--r--  arch/sparc/kernel/iommu.c              |  20
-rw-r--r--  arch/sparc/kernel/ioport.c             | 222
-rw-r--r--  arch/sparc/kernel/irq_32.c             |   5
-rw-r--r--  arch/sparc/kernel/leon_kernel.c        | 203
-rw-r--r--  arch/sparc/kernel/nmi.c                |  72
-rw-r--r--  arch/sparc/kernel/of_device_32.c       |  40
-rw-r--r--  arch/sparc/kernel/pci.c                |   2
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c          |  30
-rw-r--r--  arch/sparc/kernel/pcr.c                |  14
-rw-r--r--  arch/sparc/kernel/perf_counter.c       | 557
-rw-r--r--  arch/sparc/kernel/process_64.c         |   4
-rw-r--r--  arch/sparc/kernel/prom_32.c            |  33
-rw-r--r--  arch/sparc/kernel/prom_common.c        |  10
-rw-r--r--  arch/sparc/kernel/setup_32.c           |   5
-rw-r--r--  arch/sparc/kernel/signal_32.c          |   2
-rw-r--r--  arch/sparc/kernel/signal_64.c          |   3
-rw-r--r--  arch/sparc/kernel/sys32.S              |   2
-rw-r--r--  arch/sparc/kernel/sysfs.c              |   1
-rw-r--r--  arch/sparc/kernel/systbls_32.S         |   2
-rw-r--r--  arch/sparc/kernel/systbls_64.S         |   4
-rw-r--r--  arch/sparc/mm/Makefile                 |   1
-rw-r--r--  arch/sparc/mm/init_32.c                |   4
-rw-r--r--  arch/sparc/mm/leon_mm.c                | 260
-rw-r--r--  arch/sparc/mm/loadmmu.c                |   1
-rw-r--r--  arch/sparc/mm/srmmu.c                  |  53
-rw-r--r--  arch/sparc/oprofile/init.c             |   4
57 files changed, 2171 insertions, 710 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3f8b6a92eabd..2bd5c287538a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,6 +25,9 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_PERF_COUNTERS
+	select HAVE_DMA_ATTRS
+	select HAVE_DMA_API_DEBUG
 
 config SPARC32
 	def_bool !64BIT
@@ -44,6 +47,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
+	select HAVE_PERF_COUNTERS
 
 config ARCH_DEFCONFIG
 	string
@@ -437,6 +441,17 @@ config SERIAL_CONSOLE
 
 	  If unsure, say N.
 
+config SPARC_LEON
+	bool "Sparc Leon processor family"
+	depends on SPARC32
+	---help---
+	  Say Y here if you are running on a SPARC-LEON processor.
+	  The LEON processor is a synthesizable VHDL model of the
+	  SPARC-v8 standard. LEON is part of the GRLIB collection of
+	  IP cores that are distributed under GPL. GRLIB can be downloaded
+	  from www.gaisler.com, along with a sparc-linux cross-compilation
+	  toolchain.
+
 endmenu
 
 menu "Bus options (PCI etc.)"
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 2003ded054c2..467221dd5702 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -38,10 +38,6 @@ CPPFLAGS_vmlinux.lds += -m32
 # Actual linking is done with "make image".
 LDFLAGS_vmlinux = -r
 
-# Default target
-all: zImage
-
-
 else
 #####
 # sparc64
@@ -91,6 +87,9 @@ endif
 
 boot := arch/sparc/boot
 
+# Default target
+all: zImage
+
 image zImage tftpboot.img vmlinux.aout: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
@@ -109,8 +108,9 @@ define archhelp
 endef
 else
 define archhelp
-  echo '* vmlinux      - Standard sparc64 kernel'
+  echo '* vmlinux      - standard sparc64 kernel'
+  echo '* zImage       - stripped and compressed sparc64 kernel ($(boot)/zImage)'
   echo '  vmlinux.aout - a.out kernel for sparc64'
   echo '  tftpboot.img - image prepared for tftp'
 endef
 endif
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 1ff0fd924756..97e3feb9ff1b 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -79,6 +79,9 @@ $(obj)/image: vmlinux FORCE
 	$(call if_changed,strip)
 	@echo '  kernel: $@ is ready'
 
+$(obj)/zImage: $(obj)/image
+	$(call if_changed,gzip)
+
 $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
 	$(call if_changed,elftoaout)
 	$(call if_changed,piggy)
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index 74703c5ef985..b2e3db63a64b 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -40,7 +40,11 @@
 #define ASI_M_UNA01        0x01   /* Same here... */
 #define ASI_M_MXCC         0x02   /* Access to TI VIKING MXCC registers */
 #define ASI_M_FLUSH_PROBE  0x03   /* Reference MMU Flush/Probe; rw, ss */
+#ifndef CONFIG_SPARC_LEON
 #define ASI_M_MMUREGS      0x04   /* MMU Registers; rw, ss */
+#else
+#define ASI_M_MMUREGS      0x19
+#endif /* CONFIG_SPARC_LEON */
 #define ASI_M_TLBDIAG      0x05   /* MMU TLB only Diagnostics */
 #define ASI_M_DIAGS        0x06   /* Reference MMU Diagnostics */
 #define ASI_M_IODIAG       0x07   /* MMU I/O TLB only Diagnostics */
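
The split has to be a compile-time #ifdef because SRMMU register accesses compile down to lda/sta alternate-space instructions whose ASI field is an immediate operand. As a minimal illustration of what such an access looks like in C (the helper name here is hypothetical; compare the leon_srmmu_disabletlb()/leon_load_reg() helpers added in leon.h further down):

/* Hypothetical helper: read the SRMMU control register through whichever
 * ASI was selected above (0x04 on standard SRMMU implementations, 0x19 on
 * LEON).  "lda [addr] asi, dst" loads from the alternate address space
 * named by the immediate ASI value.
 */
static inline unsigned long srmmu_read_control(void)
{
	unsigned long val;

	__asm__ __volatile__("lda [%%g0] %1, %0"
			     : "=r" (val)
			     : "i" (ASI_M_MMUREGS));
	return val;
}
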
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index 3702e087df2c..f3b85b6b0b76 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -32,4 +32,7 @@ dev_archdata_get_node(const struct dev_archdata *ad)
 	return ad->prom_node;
 }
 
+struct pdev_archdata {
+};
+
 #endif /* _ASM_SPARC_DEVICE_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 204e4bf64438..5a8c308e2b5c 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/mm.h>
+#include <linux/dma-debug.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #define dma_is_consistent(d, h)	(1)
 
-struct dma_ops {
-	void *(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_page)(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction direction);
-	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
-			   size_t size,
-			   enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-			 int nhwentries,
-			 enum dma_data_direction direction);
-	void (*sync_single_for_cpu)(struct device *dev,
-				    dma_addr_t dma_handle, size_t size,
-				    enum dma_data_direction direction);
-	void (*sync_single_for_device)(struct device *dev,
-				       dma_addr_t dma_handle,
-				       size_t size,
-				       enum dma_data_direction direction);
-	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
-				int nelems,
-				enum dma_data_direction direction);
-	void (*sync_sg_for_device)(struct device *dev,
-				   struct scatterlist *sg, int nents,
-				   enum dma_data_direction dir);
-};
-extern const struct dma_ops *dma_ops;
+extern struct dma_map_ops *dma_ops, pci32_dma_ops;
+extern struct bus_type pci_bus_type;
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
-				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
-				 direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_ops->unmap_page(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_ops->map_page(dev, page, offset, size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_ops->unmap_page(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction)
-{
-	dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction direction)
-{
-	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+	if (dev->bus == &pci_bus_type)
+		return &pci32_dma_ops;
+#endif
+	return dma_ops;
 }
 
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction direction)
-{
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(dev, dma_handle, size,
-						direction);
-}
+#include <asm-generic/dma-mapping-common.h>
 
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction direction)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
 {
-	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
 
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction direction)
-{
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction dir)
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
 {
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
-}
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free_coherent(dev, size, cpu_addr, dma_handle);
 }
 
-
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return (dma_addr == DMA_ERROR_CODE);
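
With this rework the sparc-private struct dma_ops is gone: both sparc32 and sparc64 dispatch through the generic struct dma_map_ops via asm-generic/dma-mapping-common.h, and on sparc32 a device sitting on the PCI bus transparently gets pci32_dma_ops. Callers are unchanged; a sketch of a driver-side user under those assumptions (example_setup_ring() is illustrative, not part of this patch):

#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* dispatches through get_dma_ops(dev)->alloc_coherent() */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... hand ring_dma to the hardware ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
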
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf513..a0b443cb3c1f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
 	return retval;
 }
 
-void __trigger_all_cpu_backtrace(void);
-#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
new file mode 100644
index 000000000000..28a42b73f64f
--- /dev/null
+++ b/arch/sparc/include/asm/leon.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com) Gaisler Research
+ * Copyright (C) 2004 Stefan Holst (mail@s-holst.de) Uni-Stuttgart
+ * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
+ */
+
+#ifndef LEON_H_INCLUDE
+#define LEON_H_INCLUDE
+
+#ifdef CONFIG_SPARC_LEON
+
+#define ASI_LEON_NOCACHE	0x01
+
+#define ASI_LEON_DCACHE_MISS	0x1
+
+#define ASI_LEON_CACHEREGS	0x02
+#define ASI_LEON_IFLUSH		0x10
+#define ASI_LEON_DFLUSH		0x11
+
+#define ASI_LEON_MMUFLUSH	0x18
+#define ASI_LEON_MMUREGS	0x19
+#define ASI_LEON_BYPASS		0x1c
+#define ASI_LEON_FLUSH_PAGE	0x10
+
+/* mmu register access, ASI_LEON_MMUREGS */
+#define LEON_CNR_CTRL		0x000
+#define LEON_CNR_CTXP		0x100
+#define LEON_CNR_CTX		0x200
+#define LEON_CNR_F		0x300
+#define LEON_CNR_FADDR		0x400
+
+#define LEON_CNR_CTX_NCTX	256	/*number of MMU ctx */
+
+#define LEON_CNR_CTRL_TLBDIS	0x80000000
+
+#define LEON_MMUTLB_ENT_MAX	64
+
+/*
+ * diagnostic access from mmutlb.vhd:
+ * 0: pte address
+ * 4: pte
+ * 8: additional flags
+ */
+#define LEON_DIAGF_LVL		0x3
+#define LEON_DIAGF_WR		0x8
+#define LEON_DIAGF_WR_SHIFT	3
+#define LEON_DIAGF_HIT		0x10
+#define LEON_DIAGF_HIT_SHIFT	4
+#define LEON_DIAGF_CTX		0x1fe0
+#define LEON_DIAGF_CTX_SHIFT	5
+#define LEON_DIAGF_VALID	0x2000
+#define LEON_DIAGF_VALID_SHIFT	13
+
+/*
+ * Interrupt Sources
+ *
+ * The interrupt source numbers directly map to the trap type and to
+ * the bits used in the Interrupt Clear, Interrupt Force, Interrupt Mask,
+ * and the Interrupt Pending Registers.
+ */
+#define LEON_INTERRUPT_CORRECTABLE_MEMORY_ERROR	1
+#define LEON_INTERRUPT_UART_1_RX_TX		2
+#define LEON_INTERRUPT_UART_0_RX_TX		3
+#define LEON_INTERRUPT_EXTERNAL_0		4
+#define LEON_INTERRUPT_EXTERNAL_1		5
+#define LEON_INTERRUPT_EXTERNAL_2		6
+#define LEON_INTERRUPT_EXTERNAL_3		7
+#define LEON_INTERRUPT_TIMER1			8
+#define LEON_INTERRUPT_TIMER2			9
+#define LEON_INTERRUPT_EMPTY1			10
+#define LEON_INTERRUPT_EMPTY2			11
+#define LEON_INTERRUPT_OPEN_ETH			12
+#define LEON_INTERRUPT_EMPTY4			13
+#define LEON_INTERRUPT_EMPTY5			14
+#define LEON_INTERRUPT_EMPTY6			15
+
+/* irq masks */
+#define LEON_HARD_INT(x)	(1 << (x))	/* irq 0-15 */
+#define LEON_IRQMASK_R		0x0000fffe	/* bit 15- 1 of lregs.irqmask */
+#define LEON_IRQPRIO_R		0xfffe0000	/* bit 31-17 of lregs.irqmask */
+
+/* leon uart register definitions */
+#define LEON_OFF_UDATA	0x0
+#define LEON_OFF_USTAT	0x4
+#define LEON_OFF_UCTRL	0x8
+#define LEON_OFF_USCAL	0xc
+
+#define LEON_UCTRL_RE	0x01
+#define LEON_UCTRL_TE	0x02
+#define LEON_UCTRL_RI	0x04
+#define LEON_UCTRL_TI	0x08
+#define LEON_UCTRL_PS	0x10
+#define LEON_UCTRL_PE	0x20
+#define LEON_UCTRL_FL	0x40
+#define LEON_UCTRL_LB	0x80
+
+#define LEON_USTAT_DR	0x01
+#define LEON_USTAT_TS	0x02
+#define LEON_USTAT_TH	0x04
+#define LEON_USTAT_BR	0x08
+#define LEON_USTAT_OV	0x10
+#define LEON_USTAT_PE	0x20
+#define LEON_USTAT_FE	0x40
+
+#define LEON_MCFG2_SRAMDIS		0x00002000
+#define LEON_MCFG2_SDRAMEN		0x00004000
+#define LEON_MCFG2_SRAMBANKSZ		0x00001e00	/* [12-9] */
+#define LEON_MCFG2_SRAMBANKSZ_SHIFT	9
+#define LEON_MCFG2_SDRAMBANKSZ		0x03800000	/* [25-23] */
+#define LEON_MCFG2_SDRAMBANKSZ_SHIFT	23
+
+#define LEON_TCNT0_MASK	0x7fffff
+
+#define LEON_USTAT_ERROR (LEON_USTAT_OV | LEON_USTAT_PE | LEON_USTAT_FE)
+/* no break yet */
+
+#define ASI_LEON3_SYSCTRL		0x02
+#define ASI_LEON3_SYSCTRL_ICFG		0x08
+#define ASI_LEON3_SYSCTRL_DCFG		0x0c
+#define ASI_LEON3_SYSCTRL_CFG_SNOOPING	(1 << 27)
+#define ASI_LEON3_SYSCTRL_CFG_SSIZE(c)	(1 << ((c >> 20) & 0xf))
+
+#ifndef __ASSEMBLY__
+
+/* do a virtual address read without cache */
+static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
+{
+	unsigned long retval;
+	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+			     "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
+	return retval;
+}
+
+/* do a physical address bypass write, i.e. for 0x80000000 */
+static inline void leon_store_reg(unsigned long paddr, unsigned long value)
+{
+	__asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r"(value), "r"(paddr),
+			     "i"(ASI_LEON_BYPASS) : "memory");
+}
+
+/* do a physical address bypass load, i.e. for 0x80000000 */
+static inline unsigned long leon_load_reg(unsigned long paddr)
+{
+	unsigned long retval;
+	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+			     "=r"(retval) : "r"(paddr), "i"(ASI_LEON_BYPASS));
+	return retval;
+}
+
+extern inline void leon_srmmu_disabletlb(void)
+{
+	unsigned int retval;
+	__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
+			     "i"(ASI_LEON_MMUREGS));
+	retval |= LEON_CNR_CTRL_TLBDIS;
+	__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
+			     "i"(ASI_LEON_MMUREGS) : "memory");
+}
+
+extern inline void leon_srmmu_enabletlb(void)
+{
+	unsigned int retval;
+	__asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
+			     "i"(ASI_LEON_MMUREGS));
+	retval = retval & ~LEON_CNR_CTRL_TLBDIS;
+	__asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
+			     "i"(ASI_LEON_MMUREGS) : "memory");
+}
+
+/* macro access for leon_load_reg() and leon_store_reg() */
+#define LEON3_BYPASS_LOAD_PA(x)	    (leon_load_reg((unsigned long)(x)))
+#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
+#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
+#define LEON3_BYPASS_ORIN_PA(x, v)  LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
+#define LEON_BYPASS_LOAD_PA(x)      leon_load_reg((unsigned long)(x))
+#define LEON_BYPASS_STORE_PA(x, v)  leon_store_reg((unsigned long)(x), (unsigned long)(v))
+#define LEON_REGLOAD_PA(x)          leon_load_reg((unsigned long)(x)+LEON_PREGS)
+#define LEON_REGSTORE_PA(x, v)      leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
+#define LEON_REGSTORE_OR_PA(x, v)   LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
+#define LEON_REGSTORE_AND_PA(x, v)  LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
+
+/* macro access for leon_readnobuffer_reg() */
+#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
+
+extern void sparc_leon_eirq_register(int eirq);
+extern void leon_init(void);
+extern void leon_switch_mm(void);
+extern void leon_init_IRQ(void);
+
+extern unsigned long last_valid_pfn;
+
+extern inline unsigned long sparc_leon3_get_dcachecfg(void)
+{
+	unsigned int retval;
+	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+			     "=r"(retval) :
+			     "r"(ASI_LEON3_SYSCTRL_DCFG),
+			     "i"(ASI_LEON3_SYSCTRL));
+	return retval;
+}
+
+/* enable snooping */
+extern inline void sparc_leon3_enable_snooping(void)
+{
+	__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
+			      "set 0x800000, %%l2\n\t"
+			      "or %%l2, %%l1, %%l2\n\t"
+			      "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
+};
+
+extern inline void sparc_leon3_disable_cache(void)
+{
+	__asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t"
+			      "set 0x00000f, %%l2\n\t"
+			      "andn %%l2, %%l1, %%l2\n\t"
+			      "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2");
+};
+
+#endif /*!__ASSEMBLY__*/
+
+#ifdef CONFIG_SMP
+# define LEON3_IRQ_RESCHEDULE		13
+# define LEON3_IRQ_TICKER		(leon_percpu_timer_dev[0].irq)
+# define LEON3_IRQ_CROSS_CALL		15
+#endif
+
+#if defined(PAGE_SIZE_LEON_8K)
+#define LEON_PAGE_SIZE_LEON 1
+#elif defined(PAGE_SIZE_LEON_16K)
+#define LEON_PAGE_SIZE_LEON 2
+#else
+#define LEON_PAGE_SIZE_LEON 0
+#endif
+
+#if LEON_PAGE_SIZE_LEON == 0
+/* [ 8, 6, 6 ] + 12 */
+#define LEON_PGD_SH    24
+#define LEON_PGD_M     0xff
+#define LEON_PMD_SH    18
+#define LEON_PMD_SH_V  (LEON_PGD_SH-2)
+#define LEON_PMD_M     0x3f
+#define LEON_PTE_SH    12
+#define LEON_PTE_M     0x3f
+#elif LEON_PAGE_SIZE_LEON == 1
+/* [ 7, 6, 6 ] + 13 */
+#define LEON_PGD_SH    25
+#define LEON_PGD_M     0x7f
+#define LEON_PMD_SH    19
+#define LEON_PMD_SH_V  (LEON_PGD_SH-1)
+#define LEON_PMD_M     0x3f
+#define LEON_PTE_SH    13
+#define LEON_PTE_M     0x3f
+#elif LEON_PAGE_SIZE_LEON == 2
+/* [ 6, 6, 6 ] + 14 */
+#define LEON_PGD_SH    26
+#define LEON_PGD_M     0x3f
+#define LEON_PMD_SH    20
+#define LEON_PMD_SH_V  (LEON_PGD_SH-0)
+#define LEON_PMD_M     0x3f
+#define LEON_PTE_SH    14
+#define LEON_PTE_M     0x3f
+#elif LEON_PAGE_SIZE_LEON == 3
+/* [ 4, 7, 6 ] + 15 */
+#define LEON_PGD_SH    28
+#define LEON_PGD_M     0x0f
+#define LEON_PMD_SH    21
+#define LEON_PMD_SH_V  (LEON_PGD_SH-0)
+#define LEON_PMD_M     0x7f
+#define LEON_PTE_SH    15
+#define LEON_PTE_M     0x3f
+#else
+#error cannot determine LEON_PAGE_SIZE_LEON
+#endif
+
+#define PAGE_MIN_SHIFT   (12)
+#define PAGE_MIN_SIZE    (1UL << PAGE_MIN_SHIFT)
+
+#define LEON3_XCCR_SETS_MASK  0x07000000UL
+#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
+
+#define LEON2_CCR_DSETS_MASK 0x03000000UL
+#define LEON2_CFG_SSIZE_MASK 0x00007000UL
+
+#ifndef __ASSEMBLY__
+extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern void leon_flush_icache_all(void);
+extern void leon_flush_dcache_all(void);
+extern void leon_flush_cache_all(void);
+extern void leon_flush_tlb_all(void);
+extern int leon_flush_during_switch;
+extern int leon_flush_needed(void);
+
+struct vm_area_struct;
+extern void leon_flush_icache_all(void);
+extern void leon_flush_dcache_all(void);
+extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
+extern void leon_flush_cache_all(void);
+extern void leon_flush_tlb_all(void);
+extern int leon_flush_during_switch;
+extern int leon_flush_needed(void);
+extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
+
+/* struct that hold LEON3 cache configuration registers */
+struct leon3_cacheregs {
+	unsigned long ccr;	/* 0x00 - Cache Control Register  */
+	unsigned long iccr;	/* 0x08 - Instruction Cache Configuration Register */
+	unsigned long dccr;	/* 0x0c - Data Cache Configuration Register */
+};
+
+/* struct that hold LEON2 cache configuration register
+ * & configuration register
+ */
+struct leon2_cacheregs {
+	unsigned long ccr, cfg;
+};
+
+#ifdef __KERNEL__
+
+#include <linux/interrupt.h>
+
+struct device_node;
+extern int sparc_leon_eirq_get(int eirq, int cpu);
+extern irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id);
+extern void sparc_leon_eirq_register(int eirq);
+extern void leon_clear_clock_irq(void);
+extern void leon_load_profile_irq(int cpu, unsigned int limit);
+extern void leon_init_timers(irq_handler_t counter_fn);
+extern void leon_clear_clock_irq(void);
+extern void leon_load_profile_irq(int cpu, unsigned int limit);
+extern void leon_trans_init(struct device_node *dp);
+extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
+extern void leon_init_IRQ(void);
+extern void leon_init(void);
+extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr);
+extern void init_leon(void);
+extern void poke_leonsparc(void);
+extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
+extern int leon_flush_needed(void);
+extern void leon_switch_mm(void);
+extern int srmmu_swprobe_trace;
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+/* macros used in leon_mm.c */
+#define PFN(x)           ((x) >> PAGE_SHIFT)
+#define _pfn_valid(pfn)	 ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
+#define _SRMMU_PTE_PMASK_LEON 0xffffffff
+
+#else /* defined(CONFIG_SPARC_LEON) */
+
+/* nop definitions for !LEON case */
+#define leon_init() do {} while (0)
+#define leon_switch_mm() do {} while (0)
+#define leon_init_IRQ() do {} while (0)
+#define init_leon() do {} while (0)
+
+#endif /* !defined(CONFIG_SPARC_LEON) */
+
+#endif
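
A usage sketch for the bypass accessors above (EXAMPLE_REG_PA is a made-up address; as the comments note, 0x80000000 is the typical LEON2-style register area). The BYPASS macros wrap leon_load_reg()/leon_store_reg(), whose lda/sta with ASI_LEON_BYPASS hit the physical address directly, skipping both MMU translation and the caches:

#define EXAMPLE_REG_PA	0x80000000UL	/* hypothetical device register */

static void example_set_enable_bit(void)
{
	unsigned long v;

	v = LEON3_BYPASS_LOAD_PA(EXAMPLE_REG_PA);	/* lda ... ASI_LEON_BYPASS */
	LEON3_BYPASS_STORE_PA(EXAMPLE_REG_PA, v | 1);	/* sta ... ASI_LEON_BYPASS */

	/* or, equivalently, with the read-modify-write helper: */
	LEON3_BYPASS_ORIN_PA(EXAMPLE_REG_PA, 1);
}
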
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
new file mode 100644
index 000000000000..618e88821795
--- /dev/null
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -0,0 +1,263 @@
+/*
+*Copyright (C) 2004 Konrad Eisele (eiselekd@web.de,konrad@gaisler.com), Gaisler Research
+*Copyright (C) 2004 Stefan Holst (mail@s-holst.de), Uni-Stuttgart
+*Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com),Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
+*/
+
+#ifndef LEON_AMBA_H_INCLUDE
+#define LEON_AMBA_H_INCLUDE
+
+#ifndef __ASSEMBLY__
+
+struct amba_prom_registers {
+	unsigned int phys_addr;	/* The physical address of this register */
+	unsigned int reg_size;	/* How many bytes does this register take up? */
+};
+
+#endif
+
+/*
+ * The following defines the bits in the LEON UART Status Registers.
+ */
+
+#define LEON_REG_UART_STATUS_DR		0x00000001 /* Data Ready */
+#define LEON_REG_UART_STATUS_TSE	0x00000002 /* TX Send Register Empty */
+#define LEON_REG_UART_STATUS_THE	0x00000004 /* TX Hold Register Empty */
+#define LEON_REG_UART_STATUS_BR		0x00000008 /* Break Error */
+#define LEON_REG_UART_STATUS_OE		0x00000010 /* RX Overrun Error */
+#define LEON_REG_UART_STATUS_PE		0x00000020 /* RX Parity Error */
+#define LEON_REG_UART_STATUS_FE		0x00000040 /* RX Framing Error */
+#define LEON_REG_UART_STATUS_ERR	0x00000078 /* Error Mask */
+
+/*
+ * The following defines the bits in the LEON UART Ctrl Registers.
+ */
+
+#define LEON_REG_UART_CTRL_RE	0x00000001 /* Receiver enable */
+#define LEON_REG_UART_CTRL_TE	0x00000002 /* Transmitter enable */
+#define LEON_REG_UART_CTRL_RI	0x00000004 /* Receiver interrupt enable */
+#define LEON_REG_UART_CTRL_TI	0x00000008 /* Transmitter irq */
+#define LEON_REG_UART_CTRL_PS	0x00000010 /* Parity select */
+#define LEON_REG_UART_CTRL_PE	0x00000020 /* Parity enable */
+#define LEON_REG_UART_CTRL_FL	0x00000040 /* Flow control enable */
+#define LEON_REG_UART_CTRL_LB	0x00000080 /* Loop Back enable */
+
+#define LEON3_GPTIMER_EN	1
+#define LEON3_GPTIMER_RL	2
+#define LEON3_GPTIMER_LD	4
+#define LEON3_GPTIMER_IRQEN	8
+#define LEON3_GPTIMER_SEPIRQ	8
+
+#define LEON23_REG_TIMER_CONTROL_EN	0x00000001 /* 1 = enable counting */
+						   /* 0 = hold scalar and counter */
+#define LEON23_REG_TIMER_CONTROL_RL	0x00000002 /* 1 = reload at 0 */
+						   /* 0 = stop at 0 */
+#define LEON23_REG_TIMER_CONTROL_LD	0x00000004 /* 1 = load counter */
+						   /* 0 = no function */
+#define LEON23_REG_TIMER_CONTROL_IQ	0x00000008 /* 1 = irq enable */
+						   /* 0 = no function */
+
+/*
+ * The following defines the bits in the LEON PS/2 Status Registers.
+ */
+
+#define LEON_REG_PS2_STATUS_DR	0x00000001 /* Data Ready */
+#define LEON_REG_PS2_STATUS_PE	0x00000002 /* Parity error */
+#define LEON_REG_PS2_STATUS_FE	0x00000004 /* Framing error */
+#define LEON_REG_PS2_STATUS_KI	0x00000008 /* Keyboard inhibit */
+#define LEON_REG_PS2_STATUS_RF	0x00000010 /* RX buffer full */
+#define LEON_REG_PS2_STATUS_TF	0x00000020 /* TX buffer full */
+
+/*
+ * The following defines the bits in the LEON PS/2 Ctrl Registers.
+ */
+
+#define LEON_REG_PS2_CTRL_RE	0x00000001 /* Receiver enable */
+#define LEON_REG_PS2_CTRL_TE	0x00000002 /* Transmitter enable */
+#define LEON_REG_PS2_CTRL_RI	0x00000004 /* Keyboard receive irq */
+#define LEON_REG_PS2_CTRL_TI	0x00000008 /* Keyboard transmit irq */
+
+#define LEON3_IRQMPSTATUS_CPUNR		28
+#define LEON3_IRQMPSTATUS_BROADCAST	27
+
+#define GPTIMER_CONFIG_IRQNT(a)		(((a) >> 3) & 0x1f)
+#define GPTIMER_CONFIG_ISSEP(a)		((a) & (1 << 8))
+#define GPTIMER_CONFIG_NTIMERS(a)	((a) & (0x7))
+#define LEON3_GPTIMER_CTRL_PENDING	0x10
+#define LEON3_GPTIMER_CONFIG_NRTIMERS(c)	((c)->config & 0x7)
+#define LEON3_GPTIMER_CTRL_ISPENDING(r)	(((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
+
+#ifdef CONFIG_SPARC_LEON
+
+#ifndef __ASSEMBLY__
+
+struct leon3_irqctrl_regs_map {
+	u32 ilevel;
+	u32 ipend;
+	u32 iforce;
+	u32 iclear;
+	u32 mpstatus;
+	u32 mpbroadcast;
+	u32 notused02;
+	u32 notused03;
+	u32 notused10;
+	u32 notused11;
+	u32 notused12;
+	u32 notused13;
+	u32 notused20;
+	u32 notused21;
+	u32 notused22;
+	u32 notused23;
+	u32 mask[16];
+	u32 force[16];
+	/* Extended IRQ registers */
+	u32 intid[16];	/* 0xc0 */
+};
+
+struct leon3_apbuart_regs_map {
+	u32 data;
+	u32 status;
+	u32 ctrl;
+	u32 scaler;
+};
+
+struct leon3_gptimerelem_regs_map {
+	u32 val;
+	u32 rld;
+	u32 ctrl;
+	u32 unused;
+};
+
+struct leon3_gptimer_regs_map {
+	u32 scalar;
+	u32 scalar_reload;
+	u32 config;
+	u32 unused;
+	struct leon3_gptimerelem_regs_map e[8];
+};
+
+/*
+ * Types and structure used for AMBA Plug & Play bus scanning
+ */
+
+#define AMBA_MAXAPB_DEVS 64
+#define AMBA_MAXAPB_DEVS_PERBUS 16
+
+struct amba_device_table {
+	int devnr;		   /* number of devices on AHB or APB bus */
+	unsigned int *addr[16];	   /* addresses to the devices configuration tables */
+	unsigned int allocbits[1]; /* 0=unallocated, 1=allocated driver */
+};
+
+struct amba_apbslv_device_table {
+	int devnr;				  /* number of devices on AHB or APB bus */
+	unsigned int *addr[AMBA_MAXAPB_DEVS];	  /* addresses to the devices configuration tables */
+	unsigned int apbmst[AMBA_MAXAPB_DEVS];	  /* apb master if a entry is a apb slave */
+	unsigned int apbmstidx[AMBA_MAXAPB_DEVS]; /* apb master idx if a entry is a apb slave */
+	unsigned int allocbits[4];		  /* 0=unallocated, 1=allocated driver */
+};
+
+struct amba_confarea_type {
+	struct amba_confarea_type *next;	/* next bus in chain */
+	struct amba_device_table ahbmst;
+	struct amba_device_table ahbslv;
+	struct amba_apbslv_device_table apbslv;
+	unsigned int apbmst;
+};
+
+/* collect apb slaves */
+struct amba_apb_device {
+	unsigned int start, irq, bus_id;
+	struct amba_confarea_type *bus;
+};
+
+/* collect ahb slaves */
+struct amba_ahb_device {
+	unsigned int start[4], irq, bus_id;
+	struct amba_confarea_type *bus;
+};
+
+struct device_node;
+void _amba_init(struct device_node *dp, struct device_node ***nextp);
+
+extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
+extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
+extern struct amba_apb_device leon_percpu_timer_dev[16];
+extern int leondebug_irq_disable;
+extern int leon_debug_irqout;
+extern unsigned long leon3_gptimer_irq;
+extern unsigned int sparc_leon_eirq;
+
+#endif /* __ASSEMBLY__ */
+
+#define LEON3_IO_AREA	0xfff00000
+#define LEON3_CONF_AREA	0xff000
+#define LEON3_AHB_SLAVE_CONF_AREA (1 << 11)
+
+#define LEON3_AHB_CONF_WORDS	8
+#define LEON3_APB_CONF_WORDS	2
+#define LEON3_AHB_MASTERS	16
+#define LEON3_AHB_SLAVES	16
+#define LEON3_APB_SLAVES	16
+#define LEON3_APBUARTS		8
+
+/* Vendor codes */
+#define VENDOR_GAISLER		1
+#define VENDOR_PENDER		2
+#define VENDOR_ESA		4
+#define VENDOR_OPENCORES	8
+
+/* Gaisler Research device id's */
+#define GAISLER_LEON3		0x003
+#define GAISLER_LEON3DSU	0x004
+#define GAISLER_ETHAHB		0x005
+#define GAISLER_APBMST		0x006
+#define GAISLER_AHBUART		0x007
+#define GAISLER_SRCTRL		0x008
+#define GAISLER_SDCTRL		0x009
+#define GAISLER_APBUART		0x00C
+#define GAISLER_IRQMP		0x00D
+#define GAISLER_AHBRAM		0x00E
+#define GAISLER_GPTIMER		0x011
+#define GAISLER_PCITRG		0x012
+#define GAISLER_PCISBRG		0x013
+#define GAISLER_PCIFBRG		0x014
+#define GAISLER_PCITRACE	0x015
+#define GAISLER_PCIDMA		0x016
+#define GAISLER_AHBTRACE	0x017
+#define GAISLER_ETHDSU		0x018
+#define GAISLER_PIOPORT		0x01A
+#define GAISLER_GRGPIO		0x01A
+#define GAISLER_AHBJTAG		0x01c
+#define GAISLER_ETHMAC		0x01D
+#define GAISLER_AHB2AHB		0x020
+#define GAISLER_USBDC		0x021
+#define GAISLER_ATACTRL		0x024
+#define GAISLER_DDRSPA		0x025
+#define GAISLER_USBEHC		0x026
+#define GAISLER_USBUHC		0x027
+#define GAISLER_I2CMST		0x028
+#define GAISLER_SPICTRL		0x02D
+#define GAISLER_DDR2SPA		0x02E
+#define GAISLER_SPIMCTRL	0x045
+#define GAISLER_LEON4		0x048
+#define GAISLER_LEON4DSU	0x049
+#define GAISLER_AHBSTAT		0x052
+#define GAISLER_FTMCTRL		0x054
+#define GAISLER_KBD		0x060
+#define GAISLER_VGA		0x061
+#define GAISLER_SVGA		0x063
+#define GAISLER_GRSYSMON	0x066
+#define GAISLER_GRACECTRL	0x067
+
+#define GAISLER_L2TIME	0xffd	/* internal device: leon2 timer */
+#define GAISLER_L2C	0xffe	/* internal device: leon2compat */
+#define GAISLER_PLUGPLAY	0xfff	/* internal device: plug & play configarea */
+
+#define amba_vendor(x)	(((x) >> 24) & 0xff)
+
+#define amba_device(x)	(((x) >> 12) & 0xfff)
+
+#endif /* !defined(CONFIG_SPARC_LEON) */
+
+#endif
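
The amba_vendor()/amba_device() accessors at the end decode an AMBA plug&play identification word scanned from the LEON3_CONF_AREA: vendor in bits 31:24, device id in bits 23:12. A small sketch of how a bus scanner would match a device against these tables (example_is_apbuart() is illustrative only):

static int example_is_apbuart(unsigned int idreg)
{
	/* idreg would be the first configuration word of a slave entry */
	return amba_vendor(idreg) == VENDOR_GAISLER &&
	       amba_device(idreg) == GAISLER_APBUART;
}
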
diff --git a/arch/sparc/include/asm/machines.h b/arch/sparc/include/asm/machines.h
index c28c2f248794..cd9c099567e4 100644
--- a/arch/sparc/include/asm/machines.h
+++ b/arch/sparc/include/asm/machines.h
@@ -15,7 +15,7 @@ struct Sun_Machine_Models {
 /* Current number of machines we know about that has an IDPROM
  * machtype entry including one entry for the 0x80 OBP machines.
  */
-#define NUM_SUN_MACHINES   15
+#define NUM_SUN_MACHINES   16
 
 /* The machine type in the idprom area looks like this:
  *
@@ -30,6 +30,7 @@ struct Sun_Machine_Models {
 
 #define SM_ARCH_MASK  0xf0
 #define SM_SUN4       0x20
+#define M_LEON        0x30
 #define SM_SUN4C      0x50
 #define SM_SUN4M      0x70
 #define SM_SUN4M_OBP  0x80
@@ -41,6 +42,9 @@ struct Sun_Machine_Models {
 #define SM_4_330      0x03    /* Sun 4/300 series */
 #define SM_4_470      0x04    /* Sun 4/400 series */
 
+/* Leon machines */
+#define M_LEON3_SOC   0x02    /* Leon3 SoC */
+
 /* Sun4c machines                Full Name              - PROM NAME */
 #define SM_4C_SS1     0x01    /* Sun4c SparcStation 1   - Sun 4/60  */
 #define SM_4C_IPC     0x02    /* Sun4c SparcStation IPC - Sun 4/40  */
diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h
index fbd546dd4feb..72e6500e7ab0 100644
--- a/arch/sparc/include/asm/nmi.h
+++ b/arch/sparc/include/asm/nmi.h
@@ -5,6 +5,9 @@ extern int __init nmi_init(void);
 extern void perfctr_irq(int irq, struct pt_regs *regs);
 extern void nmi_adjust_hz(unsigned int new_hz);
 
-extern int nmi_usable;
+extern atomic_t nmi_active;
+
+extern void start_nmi_watchdog(void *unused);
+extern void stop_nmi_watchdog(void *unused);
 
 #endif /* __NMI_H */
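
Replacing the nmi_usable flag with an atomic_t count plus explicit start/stop hooks lets other users of the performance counters (the perf_counter code added in this same series) take the counters away from the NMI watchdog and hand them back. A hypothetical caller, sketching the intended pattern (example_borrow_counters() is not part of this patch):

#include <linux/smp.h>

static void example_borrow_counters(void)
{
	/* stop the watchdog on every CPU if it is currently running */
	if (atomic_read(&nmi_active) > 0)
		on_each_cpu(stop_nmi_watchdog, NULL, 1);

	/* ... program and use the performance counters ... */

	on_each_cpu(start_nmi_watchdog, NULL, 1);
}
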
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd179335..d9c031f9910f 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
 #else
 #include <asm/pci_32.h>
 #endif
+
+#include <asm-generic/pci-dma-compat.h>
+
 #endif
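
Pulling in asm-generic/pci-dma-compat.h is what allows the hand-rolled pci_* DMA wrappers to be deleted from pci_32.h and pci_64.h below: the compat header expresses each pci_* call in terms of the generic dma_* API. Its wrappers look roughly like this (see include/asm-generic/pci-dma-compat.h for the real definitions):

static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	/* forward to the dma_map_ops-based API added above */
	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
			      ptr, size, (enum dma_data_direction)direction);
}
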
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..ac0e8369fd97 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-#include <asm/scatterlist.h>
-
 struct pci_dev;
 
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
- *
- * References to the memory and mappings assosciated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-/* Map a single buffer of the indicated size for DMA in streaming mode.
- * The 32-bit bus address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory
- * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
- */
-extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
-
-/* Unmap a single streaming mode DMA translation.  The dma_addr and size
- * must match what was provided for in a previous pci_map_single call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-/*
- * Same as above, only with pages instead of mapped addresses.
- */
-extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
-			unsigned long offset, size_t size, int direction);
-extern void pci_unmap_page(struct pci_dev *hwdev,
-			dma_addr_t dma_address, size_t size, int direction);
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scather-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the device
- * again owns the buffer.
- */
-extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
-	return 1;
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
-#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return (dma_addr == PCI_DMA_ERROR_CODE);
-}
-
 struct device_node;
 extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
 
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..5cc9f6aa5494 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
  */
 #define PCI_DMA_BUS_IS_PHYS	(0)
 
-static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
-					 dma_addr_t *dma_handle)
-{
-	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
-				       void *vaddr, dma_addr_t dma_handle)
-{
-	return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
-					size_t size, int direction)
-{
-	return dma_map_single(&pdev->dev, ptr, size,
-			      (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
-				    size_t size, int direction)
-{
-	dma_unmap_single(&pdev->dev, dma_addr, size,
-			 (enum dma_data_direction) direction);
-}
-
-#define pci_map_page(dev, page, off, size, dir) \
-	pci_map_single(dev, (page_address(page) + (off)), size, dir)
-#define pci_unmap_page(dev,addr,sz,dir) \
-	pci_unmap_single(dev,addr,sz,dir)
-
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
 
-static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
-			     int nents, int direction)
-{
-	return dma_map_sg(&pdev->dev, sg, nents,
-			  (enum dma_data_direction) direction);
-}
-
-static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
-				int nents, int direction)
-{
-	dma_unmap_sg(&pdev->dev, sg, nents,
-		     (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
-					       dma_addr_t dma_handle,
-					       size_t size, int direction)
-{
-	dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
-				(enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
-						  dma_addr_t dma_handle,
-						  size_t size, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
-					   struct scatterlist *sg,
-					   int nents, int direction)
-{
-	dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
-			    (enum dma_data_direction) direction);
-}
-
-static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
-					      struct scatterlist *sg,
-					      int nelems, int direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-/* Return whether the given PCI device DMA address mask can
- * be supported properly.  For example, if your device can
- * only drive the low 24-bits during PCI bus mastering, then
- * you would pass 0x00ffffff as the mask to this function.
- */
-extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
-
 /* PCI IOMMU mapping bypass support. */
 
 /* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
 #define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE		0xfffc000000000000UL
 
-static inline int pci_dma_mapping_error(struct pci_dev *pdev,
-					dma_addr_t dma_addr)
-{
-	return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h
new file mode 100644
index 000000000000..5d7a8ca0e491
--- /dev/null
+++ b/arch/sparc/include/asm/perf_counter.h
@@ -0,0 +1,14 @@
1#ifndef __ASM_SPARC_PERF_COUNTER_H
2#define __ASM_SPARC_PERF_COUNTER_H
3
4extern void set_perf_counter_pending(void);
5
6#define PERF_COUNTER_INDEX_OFFSET 0
7
8#ifdef CONFIG_PERF_COUNTERS
9extern void init_hw_perf_counters(void);
10#else
11static inline void init_hw_perf_counters(void) { }
12#endif
13
14#endif
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 808555fc1d58..1407c07bdade 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -267,6 +267,7 @@ static inline void srmmu_flush_tlb_page(unsigned long page)
 
 }
 
+#ifndef CONFIG_SPARC_LEON
 static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
 {
 	unsigned long retval;
@@ -278,6 +279,9 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
 
 	return retval;
 }
+#else
+#define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK)
+#endif
 
 static inline int
 srmmu_get_pte (unsigned long addr)
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index be8d7aaeb60d..82a190d7efc1 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -118,5 +118,8 @@ extern struct device_node *of_console_device;
 extern char *of_console_path;
 extern char *of_console_options;
 
+extern void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
+extern char *build_full_name(struct device_node *dp);
+
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h
index 982a12f959f4..3a5ae3d12088 100644
--- a/arch/sparc/include/asm/socket.h
+++ b/arch/sparc/include/asm/socket.h
@@ -29,6 +29,9 @@
 #define SO_RCVBUFFORCE	0x100b
 #define SO_ERROR	0x1007
 #define SO_TYPE		0x1008
+#define SO_PROTOCOL	0x1028
+#define SO_DOMAIN	0x1029
+
 
 /* Linux specific, keep the same. */
 #define SO_NO_CHECK	0x000b
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 46f91ab66a50..857630cff636 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
-static inline void __read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
 #define __raw_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__read_lock(lock); \
+	arch_read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-static inline void __read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
 #define __raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__read_unlock(lock); \
+	arch_read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return (val == 0);
 }
 
-static inline int __read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
 ({	unsigned long flags; \
 	int res; \
 	local_irq_save(flags); \
-	res = __read_trylock(lock); \
+	res = arch_read_trylock(lock); \
 	local_irq_restore(flags); \
 	res; \
 })
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index f6b2b92ad8d2..43e514783582 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 
 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
 
-static void inline __read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(raw_rwlock_t *lock)
 {
 	int tmp1, tmp2;
 
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
 	return tmp1;
 }
 
-static void inline __read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(raw_rwlock_t *lock)
 {
 	unsigned long tmp1, tmp2;
 
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2;
 
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static void inline __write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(raw_rwlock_t *lock)
 {
 	__asm__ __volatile__(
 "	stw		%%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
 	: "memory");
 }
 
-static int inline __write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(raw_rwlock_t *lock)
 {
 	unsigned long mask, tmp1, tmp2, result;
 
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
210 return result; 210 return result;
211} 211}
212 212
213#define __raw_read_lock(p) __read_lock(p) 213#define __raw_read_lock(p) arch_read_lock(p)
214#define __raw_read_lock_flags(p, f) __read_lock(p) 214#define __raw_read_lock_flags(p, f) arch_read_lock(p)
215#define __raw_read_trylock(p) __read_trylock(p) 215#define __raw_read_trylock(p) arch_read_trylock(p)
216#define __raw_read_unlock(p) __read_unlock(p) 216#define __raw_read_unlock(p) arch_read_unlock(p)
217#define __raw_write_lock(p) __write_lock(p) 217#define __raw_write_lock(p) arch_write_lock(p)
218#define __raw_write_lock_flags(p, f) __write_lock(p) 218#define __raw_write_lock_flags(p, f) arch_write_lock(p)
219#define __raw_write_unlock(p) __write_unlock(p) 219#define __raw_write_unlock(p) arch_write_unlock(p)
220#define __raw_write_trylock(p) __write_trylock(p) 220#define __raw_write_trylock(p) arch_write_trylock(p)
221 221
222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
223#define __raw_write_can_lock(rw) (!(rw)->lock) 223#define __raw_write_can_lock(rw) (!(rw)->lock)
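
The sparc64 side gets the same rename; the __raw_* macros above remain the per-arch entry points that the generic API lands on. A minimal usage sketch under that layering (demo_lock and demo_shared are hypothetical; the call-chain comments reflect the 2009-era locking stack):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);
static int demo_shared;

static int demo_read(void)
{
	int v;

	read_lock(&demo_lock);		/* -> __raw_read_lock -> arch_read_lock */
	v = demo_shared;
	read_unlock(&demo_lock);	/* -> __raw_read_unlock -> arch_read_unlock */
	return v;
}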
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 751c8c17f5a0..890036b3689a 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -32,6 +32,7 @@ enum sparc_cpu {
32 sun4u = 0x05, /* V8 ploos ploos */ 32 sun4u = 0x05, /* V8 ploos ploos */
33 sun_unknown = 0x06, 33 sun_unknown = 0x06,
34 ap1000 = 0x07, /* almost a sun4m */ 34 ap1000 = 0x07, /* almost a sun4m */
35 sparc_leon = 0x08, /* Leon SoC */
35}; 36};
36 37
37/* Really, userland should not be looking at any of this... */ 38/* Really, userland should not be looking at any of this... */
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 6c077816ab28..25e848f0cad7 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -29,6 +29,10 @@ enum sparc_cpu {
29/* This cannot ever be a sun4c :) That's just history. */ 29/* This cannot ever be a sun4c :) That's just history. */
30#define ARCH_SUN4C 0 30#define ARCH_SUN4C 0
31 31
32extern const char *sparc_cpu_type;
33extern const char *sparc_fpu_type;
34extern const char *sparc_pmu_type;
35
32extern char reboot_command[]; 36extern char reboot_command[];
33 37
34/* These are here in an effort to more fully work around Spitfire Errata 38/* These are here in an effort to more fully work around Spitfire Errata
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index de671d73baed..09c79a9c8516 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -8,9 +8,8 @@
8 * need to be careful to avoid name clashes. 8 * need to be careful to avoid name clashes.
9 */ 9 */
10 10
11#if defined(__sparc__) && defined(__arch64__) 11#if defined(__sparc__)
12 12
13/*** SPARC 64 bit ***/
14#include <asm-generic/int-ll64.h> 13#include <asm-generic/int-ll64.h>
15 14
16#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
@@ -26,33 +25,21 @@ typedef unsigned short umode_t;
26/* Dma addresses come in generic and 64-bit flavours. */ 25/* Dma addresses come in generic and 64-bit flavours. */
27 26
28typedef u32 dma_addr_t; 27typedef u32 dma_addr_t;
29typedef u64 dma64_addr_t;
30 28
31#endif /* __ASSEMBLY__ */ 29#if defined(__arch64__)
32 30
33#endif /* __KERNEL__ */ 31/*** SPARC 64 bit ***/
32typedef u64 dma64_addr_t;
34#else 33#else
35
36/*** SPARC 32 bit ***/ 34/*** SPARC 32 bit ***/
37#include <asm-generic/int-ll64.h>
38
39#ifndef __ASSEMBLY__
40
41typedef unsigned short umode_t;
42
43#endif /* __ASSEMBLY__ */
44
45#ifdef __KERNEL__
46
47#ifndef __ASSEMBLY__
48
49typedef u32 dma_addr_t;
50typedef u32 dma64_addr_t; 35typedef u32 dma64_addr_t;
51 36
37#endif /* defined(__arch64__) */
38
52#endif /* __ASSEMBLY__ */ 39#endif /* __ASSEMBLY__ */
53 40
54#endif /* __KERNEL__ */ 41#endif /* __KERNEL__ */
55 42
56#endif /* defined(__sparc__) && defined(__arch64__) */ 43#endif /* defined(__sparc__) */
57 44
58#endif /* defined(_SPARC_TYPES_H) */ 45#endif /* defined(_SPARC_TYPES_H) */
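
Assembled from the hunks above, the rewritten types.h reduces to roughly the following shape (the untouched middle of the file is elided; only the dma typedef core is shown):

#if defined(__sparc__)

#include <asm-generic/int-ll64.h>	/* one int-type header for 32- and 64-bit */
/* ... umode_t and other shared typedefs ... */

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

typedef u32 dma_addr_t;			/* DVMA addresses fit 32 bits on both */

#if defined(__arch64__)
typedef u64 dma64_addr_t;		/* sparc64 can express 64-bit bus addresses */
#else
typedef u32 dma64_addr_t;
#endif /* defined(__arch64__) */

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* defined(__sparc__) */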
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index a38c03238918..9ea271e19c70 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -7,8 +7,8 @@
7 7
8#ifdef __KERNEL__ 8#ifdef __KERNEL__
9#include <linux/compiler.h> 9#include <linux/compiler.h>
10#include <linux/sched.h>
11#include <linux/string.h> 10#include <linux/string.h>
11#include <linux/thread_info.h>
12#include <asm/asi.h> 12#include <asm/asi.h>
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/spitfire.h> 14#include <asm/spitfire.h>
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b2c406de7d4f..706df669f3b8 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -395,8 +395,9 @@
395#define __NR_preadv 324 395#define __NR_preadv 324
396#define __NR_pwritev 325 396#define __NR_pwritev 325
397#define __NR_rt_tgsigqueueinfo 326 397#define __NR_rt_tgsigqueueinfo 326
398#define __NR_perf_counter_open 327
398 399
399#define NR_SYSCALLS 327 400#define NR_SYSCALLS 328
400 401
401#ifdef __32bit_syscall_numbers__ 402#ifdef __32bit_syscall_numbers__
402/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, 403/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 475ce4696acd..247cc620cee5 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -41,6 +41,8 @@ obj-y += of_device_common.o
41obj-y += of_device_$(BITS).o 41obj-y += of_device_$(BITS).o
42obj-$(CONFIG_SPARC64) += prom_irqtrans.o 42obj-$(CONFIG_SPARC64) += prom_irqtrans.o
43 43
44obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
45
44obj-$(CONFIG_SPARC64) += reboot.o 46obj-$(CONFIG_SPARC64) += reboot.o
45obj-$(CONFIG_SPARC64) += sysfs.o 47obj-$(CONFIG_SPARC64) += sysfs.o
46obj-$(CONFIG_SPARC64) += iommu.o 48obj-$(CONFIG_SPARC64) += iommu.o
@@ -61,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
61obj-$(CONFIG_SPARC32) += devres.o 63obj-$(CONFIG_SPARC32) += devres.o
62devres-y := ../../../kernel/irq/devres.o 64devres-y := ../../../kernel/irq/devres.o
63 65
64obj-$(CONFIG_SPARC32) += dma.o 66obj-y += dma.o
65 67
66obj-$(CONFIG_SPARC32_PCI) += pcic.o 68obj-$(CONFIG_SPARC32_PCI) += pcic.o
67 69
@@ -101,3 +103,6 @@ obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
101obj-$(CONFIG_AUDIT) += audit.o 103obj-$(CONFIG_AUDIT) += audit.o
102audit--$(CONFIG_AUDIT) := compat_audit.o 104audit--$(CONFIG_AUDIT) := compat_audit.o
103obj-$(CONFIG_COMPAT) += $(audit--y) 105obj-$(CONFIG_COMPAT) += $(audit--y)
106
107pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
108obj-$(CONFIG_SPARC64) += $(pc--y)
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index d85c3dc4953a..1446df90ef85 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -312,7 +312,12 @@ void __cpuinit cpu_probe(void)
312 312
313 psr = get_psr(); 313 psr = get_psr();
314 put_psr(psr | PSR_EF); 314 put_psr(psr | PSR_EF);
315#ifdef CONFIG_SPARC_LEON
316 fpu_vers = 7;
317#else
315 fpu_vers = ((get_fsr() >> 17) & 0x7); 318 fpu_vers = ((get_fsr() >> 17) & 0x7);
319#endif
320
316 put_psr(psr); 321 put_psr(psr);
317 322
318 set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); 323 set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index 524c32f97c55..e1ba8ee21b9a 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -1,178 +1,13 @@
1/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
2 *
3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h> 1#include <linux/kernel.h>
7#include <linux/module.h> 2#include <linux/module.h>
8#include <linux/dma-mapping.h> 3#include <linux/dma-mapping.h>
9#include <linux/scatterlist.h> 4#include <linux/dma-debug.h>
10#include <linux/mm.h>
11
12#ifdef CONFIG_PCI
13#include <linux/pci.h>
14#endif
15 5
16#include "dma.h" 6#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
17 7
18int dma_supported(struct device *dev, u64 mask) 8static int __init dma_init(void)
19{ 9{
20#ifdef CONFIG_PCI 10 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
21 if (dev->bus == &pci_bus_type)
22 return pci_dma_supported(to_pci_dev(dev), mask);
23#endif
24 return 0; 11 return 0;
25} 12}
26EXPORT_SYMBOL(dma_supported); 13fs_initcall(dma_init);
27
28int dma_set_mask(struct device *dev, u64 dma_mask)
29{
30#ifdef CONFIG_PCI
31 if (dev->bus == &pci_bus_type)
32 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
33#endif
34 return -EOPNOTSUPP;
35}
36EXPORT_SYMBOL(dma_set_mask);
37
38static void *dma32_alloc_coherent(struct device *dev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flag)
40{
41#ifdef CONFIG_PCI
42 if (dev->bus == &pci_bus_type)
43 return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
44#endif
45 return sbus_alloc_consistent(dev, size, dma_handle);
46}
47
48static void dma32_free_coherent(struct device *dev, size_t size,
49 void *cpu_addr, dma_addr_t dma_handle)
50{
51#ifdef CONFIG_PCI
52 if (dev->bus == &pci_bus_type) {
53 pci_free_consistent(to_pci_dev(dev), size,
54 cpu_addr, dma_handle);
55 return;
56 }
57#endif
58 sbus_free_consistent(dev, size, cpu_addr, dma_handle);
59}
60
61static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
62 unsigned long offset, size_t size,
63 enum dma_data_direction direction)
64{
65#ifdef CONFIG_PCI
66 if (dev->bus == &pci_bus_type)
67 return pci_map_page(to_pci_dev(dev), page, offset,
68 size, (int)direction);
69#endif
70 return sbus_map_single(dev, page_address(page) + offset,
71 size, (int)direction);
72}
73
74static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
75 size_t size, enum dma_data_direction direction)
76{
77#ifdef CONFIG_PCI
78 if (dev->bus == &pci_bus_type) {
79 pci_unmap_page(to_pci_dev(dev), dma_address,
80 size, (int)direction);
81 return;
82 }
83#endif
84 sbus_unmap_single(dev, dma_address, size, (int)direction);
85}
86
87static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
88 int nents, enum dma_data_direction direction)
89{
90#ifdef CONFIG_PCI
91 if (dev->bus == &pci_bus_type)
92 return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
93#endif
94 return sbus_map_sg(dev, sg, nents, direction);
95}
96
97void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
98 int nents, enum dma_data_direction direction)
99{
100#ifdef CONFIG_PCI
101 if (dev->bus == &pci_bus_type) {
102 pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
103 return;
104 }
105#endif
106 sbus_unmap_sg(dev, sg, nents, (int)direction);
107}
108
109static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
110 size_t size,
111 enum dma_data_direction direction)
112{
113#ifdef CONFIG_PCI
114 if (dev->bus == &pci_bus_type) {
115 pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
116 size, (int)direction);
117 return;
118 }
119#endif
120 sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
121}
122
123static void dma32_sync_single_for_device(struct device *dev,
124 dma_addr_t dma_handle, size_t size,
125 enum dma_data_direction direction)
126{
127#ifdef CONFIG_PCI
128 if (dev->bus == &pci_bus_type) {
129 pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
130 size, (int)direction);
131 return;
132 }
133#endif
134 sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
135}
136
137static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
138 int nelems, enum dma_data_direction direction)
139{
140#ifdef CONFIG_PCI
141 if (dev->bus == &pci_bus_type) {
142 pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
143 nelems, (int)direction);
144 return;
145 }
146#endif
147 BUG();
148}
149
150static void dma32_sync_sg_for_device(struct device *dev,
151 struct scatterlist *sg, int nelems,
152 enum dma_data_direction direction)
153{
154#ifdef CONFIG_PCI
155 if (dev->bus == &pci_bus_type) {
156 pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
157 nelems, (int)direction);
158 return;
159 }
160#endif
161 BUG();
162}
163
164static const struct dma_ops dma32_dma_ops = {
165 .alloc_coherent = dma32_alloc_coherent,
166 .free_coherent = dma32_free_coherent,
167 .map_page = dma32_map_page,
168 .unmap_page = dma32_unmap_page,
169 .map_sg = dma32_map_sg,
170 .unmap_sg = dma32_unmap_sg,
171 .sync_single_for_cpu = dma32_sync_single_for_cpu,
172 .sync_single_for_device = dma32_sync_single_for_device,
173 .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
174 .sync_sg_for_device = dma32_sync_sg_for_device,
175};
176
177const struct dma_ops *dma_ops = &dma32_dma_ops;
178EXPORT_SYMBOL(dma_ops);
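
With the PCI/SBUS dispatch replaced by per-bus dma_map_ops tables (see ioport.c below), dma.c shrinks to one job: seeding the generic DMA-API debug pool that the new HAVE_DMA_API_DEBUG select enables. A hedged illustration of the class of driver bug that instrumentation then catches at runtime (device, buffer and sizes are made up):

#include <linux/dma-mapping.h>

static void demo_buggy_unmap(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* BUG: direction differs from the map; dma-debug logs a warning,
	 * matching the mapping against its table of preallocated entries
	 * (1 << 15 of them after this patch). */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}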
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index f8d8951adb53..000000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
1void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
2void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
3dma_addr_t sbus_map_single(struct device *dev, void *va,
4 size_t len, int direction);
5void sbus_unmap_single(struct device *dev, dma_addr_t ba,
6 size_t n, int direction);
7int sbus_map_sg(struct device *dev, struct scatterlist *sg,
8 int n, int direction);
9void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
10 int n, int direction);
11void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
12 size_t size, int direction);
13void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
14 size_t size, int direction);
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 6b4d8acc4c83..439d82a95ac9 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -809,6 +809,11 @@ found_version:
809 nop 809 nop
810 810
811got_prop: 811got_prop:
812#ifdef CONFIG_SPARC_LEON
813 /* no cpu-type check is needed; this is a SPARC-LEON */
814 ba sun4c_continue_boot
815 nop
816#endif
812 set cputypval, %o2 817 set cputypval, %o2
813 ldub [%o2 + 0x4], %l1 818 ldub [%o2 + 0x4], %l1
814 819
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index 57922f69c3f7..52a15fe2db19 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -31,6 +31,8 @@ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
31{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, 31{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
32{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, 32{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
33{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, 33{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
34/* Now Leon */
35{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) },
34/* Now, Sun4c's */ 36/* Now, Sun4c's */
35{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, 37{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
36{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, 38{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b9..7690cc219ecc 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
353 353
354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, 354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355 unsigned long offset, size_t sz, 355 unsigned long offset, size_t sz,
356 enum dma_data_direction direction) 356 enum dma_data_direction direction,
357 struct dma_attrs *attrs)
357{ 358{
358 struct iommu *iommu; 359 struct iommu *iommu;
359 struct strbuf *strbuf; 360 struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
474} 475}
475 476
476static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, 477static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
477 size_t sz, enum dma_data_direction direction) 478 size_t sz, enum dma_data_direction direction,
479 struct dma_attrs *attrs)
478{ 480{
479 struct iommu *iommu; 481 struct iommu *iommu;
480 struct strbuf *strbuf; 482 struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
520} 522}
521 523
522static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 524static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
523 int nelems, enum dma_data_direction direction) 525 int nelems, enum dma_data_direction direction,
526 struct dma_attrs *attrs)
524{ 527{
525 struct scatterlist *s, *outs, *segstart; 528 struct scatterlist *s, *outs, *segstart;
526 unsigned long flags, handle, prot, ctx; 529 unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
691} 694}
692 695
693static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, 696static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
694 int nelems, enum dma_data_direction direction) 697 int nelems, enum dma_data_direction direction,
698 struct dma_attrs *attrs)
695{ 699{
696 unsigned long flags, ctx; 700 unsigned long flags, ctx;
697 struct scatterlist *sg; 701 struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
822 spin_unlock_irqrestore(&iommu->lock, flags); 826 spin_unlock_irqrestore(&iommu->lock, flags);
823} 827}
824 828
825static const struct dma_ops sun4u_dma_ops = { 829static struct dma_map_ops sun4u_dma_ops = {
826 .alloc_coherent = dma_4u_alloc_coherent, 830 .alloc_coherent = dma_4u_alloc_coherent,
827 .free_coherent = dma_4u_free_coherent, 831 .free_coherent = dma_4u_free_coherent,
828 .map_page = dma_4u_map_page, 832 .map_page = dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
833 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, 837 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
834}; 838};
835 839
836const struct dma_ops *dma_ops = &sun4u_dma_ops; 840struct dma_map_ops *dma_ops = &sun4u_dma_ops;
837EXPORT_SYMBOL(dma_ops); 841EXPORT_SYMBOL(dma_ops);
838 842
843extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844
839int dma_supported(struct device *dev, u64 device_mask) 845int dma_supported(struct device *dev, u64 device_mask)
840{ 846{
841 struct iommu *iommu = dev->archdata.iommu; 847 struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
849 855
850#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
851 if (dev->bus == &pci_bus_type) 857 if (dev->bus == &pci_bus_type)
852 return pci_dma_supported(to_pci_dev(dev), device_mask); 858 return pci64_dma_supported(to_pci_dev(dev), device_mask);
853#endif 859#endif
854 860
855 return 0; 861 return 0;
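
The signature churn above is the point of the conversion: the generic struct dma_map_ops methods take an extra struct dma_attrs * argument, so every sun4u hook grows one. A stripped-down skeleton of an ops table in the new shape (identity-mapping stubs, purely illustrative, not sun4u behaviour):

#include <linux/dma-mapping.h>
#include <asm/page.h>

static dma_addr_t demo_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t sz,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	return page_to_phys(page) + offset;	/* no IOMMU: bus == physical */
}

static void demo_unmap_page(struct device *dev, dma_addr_t addr, size_t sz,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	/* nothing to tear down for an identity mapping */
}

static struct dma_map_ops demo_dma_ops = {
	.map_page	= demo_map_page,
	.unmap_page	= demo_unmap_page,
};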
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 87ea0d03d975..9f61fd8cbb7b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -35,6 +35,7 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/pci.h> /* struct pci_dev */ 36#include <linux/pci.h> /* struct pci_dev */
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/seq_file.h>
38#include <linux/scatterlist.h> 39#include <linux/scatterlist.h>
39#include <linux/of_device.h> 40#include <linux/of_device.h>
40 41
@@ -48,8 +49,6 @@
48#include <asm/iommu.h> 49#include <asm/iommu.h>
49#include <asm/io-unit.h> 50#include <asm/io-unit.h>
50 51
51#include "dma.h"
52
53#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ 52#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
54 53
55static struct resource *_sparc_find_resource(struct resource *r, 54static struct resource *_sparc_find_resource(struct resource *r,
@@ -246,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
246 * Typically devices use them for control blocks. 245 * Typically devices use them for control blocks.
247 * CPU may access them without any explicit flushing. 246 * CPU may access them without any explicit flushing.
248 */ 247 */
249void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) 248static void *sbus_alloc_coherent(struct device *dev, size_t len,
249 dma_addr_t *dma_addrp, gfp_t gfp)
250{ 250{
251 struct of_device *op = to_of_device(dev); 251 struct of_device *op = to_of_device(dev);
252 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 252 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -299,7 +299,8 @@ err_nopages:
299 return NULL; 299 return NULL;
300} 300}
301 301
302void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) 302static void sbus_free_coherent(struct device *dev, size_t n, void *p,
303 dma_addr_t ba)
303{ 304{
304 struct resource *res; 305 struct resource *res;
305 struct page *pgv; 306 struct page *pgv;
@@ -317,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
317 318
318 n = (n + PAGE_SIZE-1) & PAGE_MASK; 319 n = (n + PAGE_SIZE-1) & PAGE_MASK;
319 if ((res->end-res->start)+1 != n) { 320 if ((res->end-res->start)+1 != n) {
320 printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", 321 printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
321 (long)((res->end-res->start)+1), n); 322 (long)((res->end-res->start)+1), n);
322 return; 323 return;
323 } 324 }
@@ -337,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
337 * CPU view of this memory may be inconsistent with 338 * CPU view of this memory may be inconsistent with
338 * a device view and explicit flushing is necessary. 339 * a device view and explicit flushing is necessary.
339 */ 340 */
340dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) 341static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
342 unsigned long offset, size_t len,
343 enum dma_data_direction dir,
344 struct dma_attrs *attrs)
341{ 345{
346 void *va = page_address(page) + offset;
347
342 /* XXX why are some lengths signed, others unsigned? */ 348 /* XXX why are some lengths signed, others unsigned? */
343 if (len <= 0) { 349 if (len <= 0) {
344 return 0; 350 return 0;
@@ -350,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
350 return mmu_get_scsi_one(dev, va, len); 356 return mmu_get_scsi_one(dev, va, len);
351} 357}
352 358
353void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) 359static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
360 enum dma_data_direction dir, struct dma_attrs *attrs)
354{ 361{
355 mmu_release_scsi_one(dev, ba, n); 362 mmu_release_scsi_one(dev, ba, n);
356} 363}
357 364
358int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) 365static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
366 enum dma_data_direction dir, struct dma_attrs *attrs)
359{ 367{
360 mmu_get_scsi_sgl(dev, sg, n); 368 mmu_get_scsi_sgl(dev, sg, n);
361 369
@@ -366,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
366 return n; 374 return n;
367} 375}
368 376
369void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) 377static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
378 enum dma_data_direction dir, struct dma_attrs *attrs)
370{ 379{
371 mmu_release_scsi_sgl(dev, sg, n); 380 mmu_release_scsi_sgl(dev, sg, n);
372} 381}
373 382
374void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) 383static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
384 int n, enum dma_data_direction dir)
375{ 385{
386 BUG();
376} 387}
377 388
378void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) 389static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
390 int n, enum dma_data_direction dir)
379{ 391{
392 BUG();
380} 393}
381 394
395struct dma_map_ops sbus_dma_ops = {
396 .alloc_coherent = sbus_alloc_coherent,
397 .free_coherent = sbus_free_coherent,
398 .map_page = sbus_map_page,
399 .unmap_page = sbus_unmap_page,
400 .map_sg = sbus_map_sg,
401 .unmap_sg = sbus_unmap_sg,
402 .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
403 .sync_sg_for_device = sbus_sync_sg_for_device,
404};
405
406struct dma_map_ops *dma_ops = &sbus_dma_ops;
407EXPORT_SYMBOL(dma_ops);
408
382static int __init sparc_register_ioport(void) 409static int __init sparc_register_ioport(void)
383{ 410{
384 register_proc_sparc_ioport(); 411 register_proc_sparc_ioport();
@@ -395,7 +422,8 @@ arch_initcall(sparc_register_ioport);
395/* Allocate and map kernel buffer using consistent mode DMA for a device. 422/* Allocate and map kernel buffer using consistent mode DMA for a device.
396 * hwdev should be valid struct pci_dev pointer for PCI devices. 423 * hwdev should be valid struct pci_dev pointer for PCI devices.
397 */ 424 */
398void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) 425static void *pci32_alloc_coherent(struct device *dev, size_t len,
426 dma_addr_t *pba, gfp_t gfp)
399{ 427{
400 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 428 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
401 unsigned long va; 429 unsigned long va;
@@ -439,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
439 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ 467 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
440 return (void *) res->start; 468 return (void *) res->start;
441} 469}
442EXPORT_SYMBOL(pci_alloc_consistent);
443 470
444/* Free and unmap a consistent DMA buffer. 471/* Free and unmap a consistent DMA buffer.
445 * cpu_addr is what was returned from pci_alloc_consistent, 472 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -449,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
449 * References to the memory and mappings associated with cpu_addr/dma_addr 476 * References to the memory and mappings associated with cpu_addr/dma_addr
450 * past this call are illegal. 477 * past this call are illegal.
451 */ 478 */
452void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) 479static void pci32_free_coherent(struct device *dev, size_t n, void *p,
480 dma_addr_t ba)
453{ 481{
454 struct resource *res; 482 struct resource *res;
455 unsigned long pgp; 483 unsigned long pgp;
@@ -481,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
481 509
482 free_pages(pgp, get_order(n)); 510 free_pages(pgp, get_order(n));
483} 511}
484EXPORT_SYMBOL(pci_free_consistent);
485
486/* Map a single buffer of the indicated size for DMA in streaming mode.
487 * The 32-bit bus address to use is returned.
488 *
489 * Once the device is given the dma address, the device owns this memory
490 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
491 */
492dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
493 int direction)
494{
495 BUG_ON(direction == PCI_DMA_NONE);
496 /* IIep is write-through, not flushing. */
497 return virt_to_phys(ptr);
498}
499EXPORT_SYMBOL(pci_map_single);
500
501/* Unmap a single streaming mode DMA translation. The dma_addr and size
502 * must match what was provided for in a previous pci_map_single call. All
503 * other usages are undefined.
504 *
505 * After this call, reads by the cpu to the buffer are guaranteed to see
506 * whatever the device wrote there.
507 */
508void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
509 int direction)
510{
511 BUG_ON(direction == PCI_DMA_NONE);
512 if (direction != PCI_DMA_TODEVICE) {
513 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
514 (size + PAGE_SIZE-1) & PAGE_MASK);
515 }
516}
517EXPORT_SYMBOL(pci_unmap_single);
518 512
519/* 513/*
520 * Same as pci_map_single, but with pages. 514 * Same as pci_map_single, but with pages.
521 */ 515 */
522dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, 516static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
523 unsigned long offset, size_t size, int direction) 517 unsigned long offset, size_t size,
518 enum dma_data_direction dir,
519 struct dma_attrs *attrs)
524{ 520{
525 BUG_ON(direction == PCI_DMA_NONE);
526 /* IIep is write-through, not flushing. */ 521 /* IIep is write-through, not flushing. */
527 return page_to_phys(page) + offset; 522 return page_to_phys(page) + offset;
528} 523}
529EXPORT_SYMBOL(pci_map_page);
530
531void pci_unmap_page(struct pci_dev *hwdev,
532 dma_addr_t dma_address, size_t size, int direction)
533{
534 BUG_ON(direction == PCI_DMA_NONE);
535 /* mmu_inval_dma_area XXX */
536}
537EXPORT_SYMBOL(pci_unmap_page);
538 524
539/* Map a set of buffers described by scatterlist in streaming 525/* Map a set of buffers described by scatterlist in streaming
540 * mode for DMA. This is the scatter-gather version of the 526 * mode for DMA. This is the scatter-gather version of the
@@ -551,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
551 * Device ownership issues as mentioned above for pci_map_single are 537 * Device ownership issues as mentioned above for pci_map_single are
552 * the same here. 538 * the same here.
553 */ 539 */
554int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, 540static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
555 int direction) 541 int nents, enum dma_data_direction dir,
542 struct dma_attrs *attrs)
556{ 543{
557 struct scatterlist *sg; 544 struct scatterlist *sg;
558 int n; 545 int n;
559 546
560 BUG_ON(direction == PCI_DMA_NONE);
561 /* IIep is write-through, not flushing. */ 547 /* IIep is write-through, not flushing. */
562 for_each_sg(sgl, sg, nents, n) { 548 for_each_sg(sgl, sg, nents, n) {
563 BUG_ON(page_address(sg_page(sg)) == NULL); 549 BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -566,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
566 } 552 }
567 return nents; 553 return nents;
568} 554}
569EXPORT_SYMBOL(pci_map_sg);
570 555
571/* Unmap a set of streaming mode DMA translations. 556/* Unmap a set of streaming mode DMA translations.
572 * Again, cpu read rules concerning calls here are the same as for 557 * Again, cpu read rules concerning calls here are the same as for
573 * pci_unmap_single() above. 558 * pci_unmap_single() above.
574 */ 559 */
575void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, 560static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
576 int direction) 561 int nents, enum dma_data_direction dir,
562 struct dma_attrs *attrs)
577{ 563{
578 struct scatterlist *sg; 564 struct scatterlist *sg;
579 int n; 565 int n;
580 566
581 BUG_ON(direction == PCI_DMA_NONE); 567 if (dir != PCI_DMA_TODEVICE) {
582 if (direction != PCI_DMA_TODEVICE) {
583 for_each_sg(sgl, sg, nents, n) { 568 for_each_sg(sgl, sg, nents, n) {
584 BUG_ON(page_address(sg_page(sg)) == NULL); 569 BUG_ON(page_address(sg_page(sg)) == NULL);
585 mmu_inval_dma_area( 570 mmu_inval_dma_area(
@@ -588,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
588 } 573 }
589 } 574 }
590} 575}
591EXPORT_SYMBOL(pci_unmap_sg);
592 576
593/* Make physical memory consistent for a single 577/* Make physical memory consistent for a single
594 * streaming mode DMA translation before or after a transfer. 578 * streaming mode DMA translation before or after a transfer.
@@ -600,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
600 * must first perform a pci_dma_sync_for_device, and then the 584 * must first perform a pci_dma_sync_for_device, and then the
601 * device again owns the buffer. 585 * device again owns the buffer.
602 */ 586 */
603void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 587static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
588 size_t size, enum dma_data_direction dir)
604{ 589{
605 BUG_ON(direction == PCI_DMA_NONE); 590 if (dir != PCI_DMA_TODEVICE) {
606 if (direction != PCI_DMA_TODEVICE) {
607 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 591 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
608 (size + PAGE_SIZE-1) & PAGE_MASK); 592 (size + PAGE_SIZE-1) & PAGE_MASK);
609 } 593 }
610} 594}
611EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
612 595
613void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 596static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
597 size_t size, enum dma_data_direction dir)
614{ 598{
615 BUG_ON(direction == PCI_DMA_NONE); 599 if (dir != PCI_DMA_TODEVICE) {
616 if (direction != PCI_DMA_TODEVICE) {
617 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 600 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
618 (size + PAGE_SIZE-1) & PAGE_MASK); 601 (size + PAGE_SIZE-1) & PAGE_MASK);
619 } 602 }
620} 603}
621EXPORT_SYMBOL(pci_dma_sync_single_for_device);
622 604
623/* Make physical memory consistent for a set of streaming 605/* Make physical memory consistent for a set of streaming
624 * mode DMA translations after a transfer. 606 * mode DMA translations after a transfer.
@@ -626,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
626 * The same as pci_dma_sync_single_* but for a scatter-gather list, 608 * The same as pci_dma_sync_single_* but for a scatter-gather list,
627 * same rules and usage. 609 * same rules and usage.
628 */ 610 */
629void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) 611static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
612 int nents, enum dma_data_direction dir)
630{ 613{
631 struct scatterlist *sg; 614 struct scatterlist *sg;
632 int n; 615 int n;
633 616
634 BUG_ON(direction == PCI_DMA_NONE); 617 if (dir != PCI_DMA_TODEVICE) {
635 if (direction != PCI_DMA_TODEVICE) {
636 for_each_sg(sgl, sg, nents, n) { 618 for_each_sg(sgl, sg, nents, n) {
637 BUG_ON(page_address(sg_page(sg)) == NULL); 619 BUG_ON(page_address(sg_page(sg)) == NULL);
638 mmu_inval_dma_area( 620 mmu_inval_dma_area(
@@ -641,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
641 } 623 }
642 } 624 }
643} 625}
644EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
645 626
646void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) 627static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
628 int nents, enum dma_data_direction dir)
647{ 629{
648 struct scatterlist *sg; 630 struct scatterlist *sg;
649 int n; 631 int n;
650 632
651 BUG_ON(direction == PCI_DMA_NONE); 633 if (dir != PCI_DMA_TODEVICE) {
652 if (direction != PCI_DMA_TODEVICE) {
653 for_each_sg(sgl, sg, nents, n) { 634 for_each_sg(sgl, sg, nents, n) {
654 BUG_ON(page_address(sg_page(sg)) == NULL); 635 BUG_ON(page_address(sg_page(sg)) == NULL);
655 mmu_inval_dma_area( 636 mmu_inval_dma_area(
@@ -658,31 +639,78 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
658 } 639 }
659 } 640 }
660} 641}
661EXPORT_SYMBOL(pci_dma_sync_sg_for_device); 642
643struct dma_map_ops pci32_dma_ops = {
644 .alloc_coherent = pci32_alloc_coherent,
645 .free_coherent = pci32_free_coherent,
646 .map_page = pci32_map_page,
647 .map_sg = pci32_map_sg,
648 .unmap_sg = pci32_unmap_sg,
649 .sync_single_for_cpu = pci32_sync_single_for_cpu,
650 .sync_single_for_device = pci32_sync_single_for_device,
651 .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
652 .sync_sg_for_device = pci32_sync_sg_for_device,
653};
654EXPORT_SYMBOL(pci32_dma_ops);
655
662#endif /* CONFIG_PCI */ 656#endif /* CONFIG_PCI */
663 657
658/*
659 * Return whether the given PCI device DMA address mask can be
660 * supported properly. For example, if your device can only drive the
661 * low 24-bits during PCI bus mastering, then you would pass
662 * 0x00ffffff as the mask to this function.
663 */
664int dma_supported(struct device *dev, u64 mask)
665{
666#ifdef CONFIG_PCI
667 if (dev->bus == &pci_bus_type)
668 return 1;
669#endif
670 return 0;
671}
672EXPORT_SYMBOL(dma_supported);
673
674int dma_set_mask(struct device *dev, u64 dma_mask)
675{
676#ifdef CONFIG_PCI
677 if (dev->bus == &pci_bus_type)
678 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
679#endif
680 return -EOPNOTSUPP;
681}
682EXPORT_SYMBOL(dma_set_mask);
683
684
664#ifdef CONFIG_PROC_FS 685#ifdef CONFIG_PROC_FS
665 686
666static int 687static int sparc_io_proc_show(struct seq_file *m, void *v)
667_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
668 void *data)
669{ 688{
670 char *p = buf, *e = buf + length; 689 struct resource *root = m->private, *r;
671 struct resource *r;
672 const char *nm; 690 const char *nm;
673 691
674 for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { 692 for (r = root->child; r != NULL; r = r->sibling) {
675 if (p + 32 >= e) /* Better than nothing */
676 break;
677 if ((nm = r->name) == 0) nm = "???"; 693 if ((nm = r->name) == 0) nm = "???";
678 p += sprintf(p, "%016llx-%016llx: %s\n", 694 seq_printf(m, "%016llx-%016llx: %s\n",
679 (unsigned long long)r->start, 695 (unsigned long long)r->start,
680 (unsigned long long)r->end, nm); 696 (unsigned long long)r->end, nm);
681 } 697 }
682 698
683 return p-buf; 699 return 0;
684} 700}
685 701
702static int sparc_io_proc_open(struct inode *inode, struct file *file)
703{
704 return single_open(file, sparc_io_proc_show, PDE(inode)->data);
705}
706
707static const struct file_operations sparc_io_proc_fops = {
708 .owner = THIS_MODULE,
709 .open = sparc_io_proc_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
686#endif /* CONFIG_PROC_FS */ 714#endif /* CONFIG_PROC_FS */
687 715
688/* 716/*
@@ -707,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root,
707static void register_proc_sparc_ioport(void) 735static void register_proc_sparc_ioport(void)
708{ 736{
709#ifdef CONFIG_PROC_FS 737#ifdef CONFIG_PROC_FS
710 create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); 738 proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
711 create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); 739 proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
712#endif 740#endif
713} 741}
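
Besides folding the SBUS and PCI32 paths into dma_map_ops tables, the file trades the old bounded-sprintf /proc read routine for the seq_file single_open idiom, which does its own buffering and cannot overflow. The same pattern in isolation (names are hypothetical; PDE(inode)->data carries the last argument of proc_create_data(), exactly as in the hunk above):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "resource root: %s\n", (char *)m->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, PDE(inode)->data);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registered with: proc_create_data("demo", 0, NULL, &demo_fops, "root"); */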
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index ad800b80c718..e1af43728329 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -45,6 +45,7 @@
45#include <asm/pcic.h> 45#include <asm/pcic.h>
46#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <asm/irq_regs.h> 47#include <asm/irq_regs.h>
48#include <asm/leon.h>
48 49
49#include "kernel.h" 50#include "kernel.h"
50#include "irq.h" 51#include "irq.h"
@@ -661,6 +662,10 @@ void __init init_IRQ(void)
661 sun4d_init_IRQ(); 662 sun4d_init_IRQ();
662 break; 663 break;
663 664
665 case sparc_leon:
666 leon_init_IRQ();
667 break;
668
664 default: 669 default:
665 prom_printf("Cannot initialize IRQs on this Sun machine..."); 670 prom_printf("Cannot initialize IRQs on this Sun machine...");
666 break; 671 break;
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
new file mode 100644
index 000000000000..54d8a5bd4824
--- /dev/null
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
3 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/errno.h>
9#include <linux/mutex.h>
10#include <linux/slab.h>
11#include <linux/of.h>
12#include <linux/of_platform.h>
13#include <linux/interrupt.h>
14#include <linux/of_device.h>
15#include <asm/oplib.h>
16#include <asm/timer.h>
17#include <asm/prom.h>
18#include <asm/leon.h>
19#include <asm/leon_amba.h>
20
21#include "prom.h"
22#include "irq.h"
23
24struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */
25struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */
26struct amba_apb_device leon_percpu_timer_dev[16];
27
28int leondebug_irq_disable;
29int leon_debug_irqout;
30static int dummy_master_l10_counter;
31
32unsigned long leon3_gptimer_irq; /* timer irq number, initialized by amba_init() */
33unsigned int sparc_leon_eirq;
34#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0]))
35
36/* Return the IRQ of the pending IRQ on the extended IRQ controller */
37int sparc_leon_eirq_get(int eirq, int cpu)
38{
39 return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
40}
41
42irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id)
43{
44 printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n");
45 return IRQ_HANDLED;
46}
47
48/* The extended IRQ controller has been found, this function registers it */
49void sparc_leon_eirq_register(int eirq)
50{
51 int irq;
52
53 /* Register a "BAD" handler for this interrupt, it should never happen */
54 irq = request_irq(eirq, sparc_leon_eirq_isr,
55 (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL);
56
57 if (irq) {
58 printk(KERN_ERR
59 "sparc_leon_eirq_register: unable to attach IRQ%d\n",
60 eirq);
61 } else {
62 sparc_leon_eirq = eirq;
63 }
64
65}
66
67static inline unsigned long get_irqmask(unsigned int irq)
68{
69 unsigned long mask;
70
71 if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
72 || ((irq > 0x1f) && sparc_leon_eirq)) {
73 printk(KERN_ERR
74 "leon_get_irqmask: false irq number: %d\n", irq);
75 mask = 0;
76 } else {
77 mask = LEON_HARD_INT(irq);
78 }
79 return mask;
80}
81
82static void leon_enable_irq(unsigned int irq_nr)
83{
84 unsigned long mask, flags;
85 mask = get_irqmask(irq_nr);
86 local_irq_save(flags);
87 LEON3_BYPASS_STORE_PA(LEON_IMASK,
88 (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask)));
89 local_irq_restore(flags);
90}
91
92static void leon_disable_irq(unsigned int irq_nr)
93{
94 unsigned long mask, flags;
95 mask = get_irqmask(irq_nr);
96 local_irq_save(flags);
97 LEON3_BYPASS_STORE_PA(LEON_IMASK,
98 (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask)));
99 local_irq_restore(flags);
100
101}
102
103void __init leon_init_timers(irq_handler_t counter_fn)
104{
105 int irq;
106
107 leondebug_irq_disable = 0;
108 leon_debug_irqout = 0;
109 master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
110 dummy_master_l10_counter = 0;
111
112 if (leon3_gptimer_regs && leon3_irqctrl_regs) {
113 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0);
114 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld,
115 (((1000000 / 100) - 1)));
116 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0);
117
118 } else {
119 printk(KERN_ERR "No Timer/irqctrl found\n");
120 BUG();
121 }
122
123 irq = request_irq(leon3_gptimer_irq,
124 counter_fn,
125 (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
126
127 if (irq) {
128 printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n",
129 (int)leon3_gptimer_irq);
130 prom_halt();
131 }
132
133 if (leon3_gptimer_regs) {
134 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl,
135 LEON3_GPTIMER_EN |
136 LEON3_GPTIMER_RL |
137 LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN);
138 }
139}
140
141void leon_clear_clock_irq(void)
142{
143}
144
145void leon_load_profile_irq(int cpu, unsigned int limit)
146{
147 BUG();
148}
149
150
151
152
153void __init leon_trans_init(struct device_node *dp)
154{
155 if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
156 struct property *p;
157 p = of_find_property(dp, "mid", (void *)0);
158 if (p) {
159 int mid;
160 dp->name = prom_early_alloc(5 + 1);
161 memcpy(&mid, p->value, p->length);
162 sprintf((char *)dp->name, "cpu%.2d", mid);
163 }
164 }
165}
166
167void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0;
168
169void __init leon_node_init(struct device_node *dp, struct device_node ***nextp)
170{
171 if (prom_amba_init &&
172 strcmp(dp->type, "ambapp") == 0 &&
173 strcmp(dp->name, "ambapp0") == 0) {
174 prom_amba_init(dp, nextp);
175 }
176}
177
178void __init leon_init_IRQ(void)
179{
180 sparc_init_timers = leon_init_timers;
181
182 BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
183 BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
184 BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM);
185 BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM);
186
187 BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq,
188 BTFIXUPCALL_NORM);
189 BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq,
190 BTFIXUPCALL_NOP);
191
192#ifdef CONFIG_SMP
193 BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM);
194 BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM);
195 BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM);
196#endif
197
198}
199
200void __init leon_init(void)
201{
202 prom_build_more = &leon_node_init;
203}
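
In the new file, get_irqmask() converts an IRQ number to a bit for the controller's mask register, and enable/disable are read-modify-write cycles on LEON_IMASK with local interrupts off. A hedged sketch of that core, assuming LEON_HARD_INT() is a one-bit shift (the real definition lives in asm/leon.h) and using a plain pointer in place of the LEON3_BYPASS_*_PA physical-address accessors:

#include <linux/irqflags.h>

#define DEMO_HARD_INT(x)	(1UL << (x))	/* assumed shape of LEON_HARD_INT */

static void demo_unmask_irq(volatile unsigned long *imask, unsigned int irq)
{
	unsigned long flags, mask = DEMO_HARD_INT(irq);

	local_irq_save(flags);	/* the RMW must not be interrupted */
	*imask |= mask;		/* stands in for LEON3_BYPASS_STORE_PA */
	local_irq_restore(flags);
}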
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b75bf502cd42..378eb53e0776 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,6 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21 21
22#include <asm/perf_counter.h>
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23#include <asm/local.h> 24#include <asm/local.h>
24#include <asm/pcr.h> 25#include <asm/pcr.h>
@@ -31,13 +32,19 @@
31 * level 14 as our IRQ off level. 32 * level 14 as our IRQ off level.
32 */ 33 */
33 34
34static int nmi_watchdog_active;
35static int panic_on_timeout; 35static int panic_on_timeout;
36 36
37int nmi_usable; 37/* nmi_active:
38EXPORT_SYMBOL_GPL(nmi_usable); 38 * >0: the NMI watchdog is active, but can be disabled
39 * <0: the NMI watchdog has not been set up, and cannot be enabled
40 * 0: the NMI watchdog is disabled, but can be enabled
41 */
42atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */
43EXPORT_SYMBOL(nmi_active);
39 44
40static unsigned int nmi_hz = HZ; 45static unsigned int nmi_hz = HZ;
46static DEFINE_PER_CPU(short, wd_enabled);
47static int endflag __initdata;
41 48
42static DEFINE_PER_CPU(unsigned int, last_irq_sum); 49static DEFINE_PER_CPU(unsigned int, last_irq_sum);
43static DEFINE_PER_CPU(local_t, alert_counter); 50static DEFINE_PER_CPU(local_t, alert_counter);
@@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch);
45 52
46void touch_nmi_watchdog(void) 53void touch_nmi_watchdog(void)
47{ 54{
48 if (nmi_watchdog_active) { 55 if (atomic_read(&nmi_active)) {
49 int cpu; 56 int cpu;
50 57
51 for_each_present_cpu(cpu) { 58 for_each_present_cpu(cpu) {
@@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
78 if (do_panic || panic_on_oops) 85 if (do_panic || panic_on_oops)
79 panic("Non maskable interrupt"); 86 panic("Non maskable interrupt");
80 87
88 nmi_exit();
81 local_irq_enable(); 89 local_irq_enable();
82 do_exit(SIGBUS); 90 do_exit(SIGBUS);
83} 91}
@@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
92 100
93 local_cpu_data().__nmi_count++; 101 local_cpu_data().__nmi_count++;
94 102
103 nmi_enter();
104
95 if (notify_die(DIE_NMI, "nmi", regs, 0, 105 if (notify_die(DIE_NMI, "nmi", regs, 0,
96 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) 106 pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
97 touched = 1; 107 touched = 1;
@@ -110,10 +120,12 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
110 __get_cpu_var(last_irq_sum) = sum; 120 __get_cpu_var(last_irq_sum) = sum;
111 local_set(&__get_cpu_var(alert_counter), 0); 121 local_set(&__get_cpu_var(alert_counter), 0);
112 } 122 }
113 if (nmi_usable) { 123 if (__get_cpu_var(wd_enabled)) {
114 write_pic(picl_value(nmi_hz)); 124 write_pic(picl_value(nmi_hz));
115 pcr_ops->write(pcr_enable); 125 pcr_ops->write(pcr_enable);
116 } 126 }
127
128 nmi_exit();
117} 129}
118 130
119static inline unsigned int get_nmi_count(int cpu) 131static inline unsigned int get_nmi_count(int cpu)
@@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu)
121 return cpu_data(cpu).__nmi_count; 133 return cpu_data(cpu).__nmi_count;
122} 134}
123 135
124static int endflag __initdata;
125
126static __init void nmi_cpu_busy(void *data) 136static __init void nmi_cpu_busy(void *data)
127{ 137{
128 local_irq_enable_in_hardirq(); 138 local_irq_enable_in_hardirq();
@@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
143 printk(KERN_WARNING 153 printk(KERN_WARNING
144 "and attach the output of the 'dmesg' command.\n"); 154 "and attach the output of the 'dmesg' command.\n");
145 155
146 nmi_usable = 0; 156 per_cpu(wd_enabled, cpu) = 0;
157 atomic_dec(&nmi_active);
147} 158}
148 159
149static void stop_watchdog(void *unused) 160void stop_nmi_watchdog(void *unused)
150{ 161{
151 pcr_ops->write(PCR_PIC_PRIV); 162 pcr_ops->write(PCR_PIC_PRIV);
163 __get_cpu_var(wd_enabled) = 0;
164 atomic_dec(&nmi_active);
152} 165}
153 166
154static int __init check_nmi_watchdog(void) 167static int __init check_nmi_watchdog(void)
@@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void)
156 unsigned int *prev_nmi_count; 169 unsigned int *prev_nmi_count;
157 int cpu, err; 170 int cpu, err;
158 171
172 if (!atomic_read(&nmi_active))
173 return 0;
174
159 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); 175 prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL);
160 if (!prev_nmi_count) { 176 if (!prev_nmi_count) {
161 err = -ENOMEM; 177 err = -ENOMEM;
@@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void)
172 mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ 188 mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
173 189
174 for_each_online_cpu(cpu) { 190 for_each_online_cpu(cpu) {
191 if (!per_cpu(wd_enabled, cpu))
192 continue;
175 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) 193 if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
176 report_broken_nmi(cpu, prev_nmi_count); 194 report_broken_nmi(cpu, prev_nmi_count);
177 } 195 }
178 endflag = 1; 196 endflag = 1;
179 if (!nmi_usable) { 197 if (!atomic_read(&nmi_active)) {
180 kfree(prev_nmi_count); 198 kfree(prev_nmi_count);
199 atomic_set(&nmi_active, -1);
181 err = -ENODEV; 200 err = -ENODEV;
182 goto error; 201 goto error;
183 } 202 }
@@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void)
188 kfree(prev_nmi_count); 207 kfree(prev_nmi_count);
189 return 0; 208 return 0;
190error: 209error:
191 on_each_cpu(stop_watchdog, NULL, 1); 210 on_each_cpu(stop_nmi_watchdog, NULL, 1);
192 return err; 211 return err;
193} 212}
194 213
195static void start_watchdog(void *unused) 214void start_nmi_watchdog(void *unused)
196{ 215{
216 __get_cpu_var(wd_enabled) = 1;
217 atomic_inc(&nmi_active);
218
219 pcr_ops->write(PCR_PIC_PRIV);
220 write_pic(picl_value(nmi_hz));
221
222 pcr_ops->write(pcr_enable);
223}
224
225static void nmi_adjust_hz_one(void *unused)
226{
227 if (!__get_cpu_var(wd_enabled))
228 return;
229
197 pcr_ops->write(PCR_PIC_PRIV); 230 pcr_ops->write(PCR_PIC_PRIV);
198 write_pic(picl_value(nmi_hz)); 231 write_pic(picl_value(nmi_hz));
199 232
@@ -203,13 +236,13 @@ static void start_watchdog(void *unused)
203void nmi_adjust_hz(unsigned int new_hz) 236void nmi_adjust_hz(unsigned int new_hz)
204{ 237{
205 nmi_hz = new_hz; 238 nmi_hz = new_hz;
206 on_each_cpu(start_watchdog, NULL, 1); 239 on_each_cpu(nmi_adjust_hz_one, NULL, 1);
207} 240}
208EXPORT_SYMBOL_GPL(nmi_adjust_hz); 241EXPORT_SYMBOL_GPL(nmi_adjust_hz);
209 242
210static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) 243static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p)
211{ 244{
212 on_each_cpu(stop_watchdog, NULL, 1); 245 on_each_cpu(stop_nmi_watchdog, NULL, 1);
213 return 0; 246 return 0;
214} 247}
215 248
@@ -221,18 +254,19 @@ int __init nmi_init(void)
221{ 254{
222 int err; 255 int err;
223 256
224 nmi_usable = 1; 257 on_each_cpu(start_nmi_watchdog, NULL, 1);
225
226 on_each_cpu(start_watchdog, NULL, 1);
227 258
228 err = check_nmi_watchdog(); 259 err = check_nmi_watchdog();
229 if (!err) { 260 if (!err) {
230 err = register_reboot_notifier(&nmi_reboot_notifier); 261 err = register_reboot_notifier(&nmi_reboot_notifier);
231 if (err) { 262 if (err) {
232 nmi_usable = 0; 263 on_each_cpu(stop_nmi_watchdog, NULL, 1);
233 on_each_cpu(stop_watchdog, NULL, 1); 264 atomic_set(&nmi_active, -1);
234 } 265 }
235 } 266 }
267 if (!err)
268 init_hw_perf_counters();
269
236 return err; 270 return err;
237} 271}
238 272
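
The single nmi_usable flag becomes per-cpu bookkeeping plus a tri-state counter, so one CPU with broken NMI delivery can be retired from the watchdog without disabling the rest. A condensed sketch of the accounting (names prefixed demo_; semantics per the comment block in the hunk above):

#include <linux/percpu.h>
#include <asm/atomic.h>

static DEFINE_PER_CPU(short, demo_wd_enabled);
static atomic_t demo_nmi_active = ATOMIC_INIT(0);

static void demo_start(void *unused)	/* run on each CPU */
{
	__get_cpu_var(demo_wd_enabled) = 1;
	atomic_inc(&demo_nmi_active);	/* >0: at least one CPU armed */
}

static void demo_stop(void *unused)
{
	__get_cpu_var(demo_wd_enabled) = 0;
	atomic_dec(&demo_nmi_active);	/* 0: disabled but re-armable */
}

/* atomic_set(&demo_nmi_active, -1) marks the watchdog unavailable. */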
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 90396702ea2c..4c26eb59e742 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -9,6 +9,8 @@
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/of_device.h> 10#include <linux/of_device.h>
11#include <linux/of_platform.h> 11#include <linux/of_platform.h>
12#include <asm/leon.h>
13#include <asm/leon_amba.h>
12 14
13#include "of_device_common.h" 15#include "of_device_common.h"
14 16
@@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
97 return IORESOURCE_MEM; 99 return IORESOURCE_MEM;
98} 100}
99 101
102 /*
103 * AMBAPP bus specific translator
104 */
105
106static int of_bus_ambapp_match(struct device_node *np)
107{
108 return !strcmp(np->name, "ambapp");
109}
110
111static void of_bus_ambapp_count_cells(struct device_node *child,
112 int *addrc, int *sizec)
113{
114 if (addrc)
115 *addrc = 1;
116 if (sizec)
117 *sizec = 1;
118}
119
120static int of_bus_ambapp_map(u32 *addr, const u32 *range,
121 int na, int ns, int pna)
122{
123 return of_bus_default_map(addr, range, na, ns, pna);
124}
125
126static unsigned long of_bus_ambapp_get_flags(const u32 *addr,
127 unsigned long flags)
128{
129 return IORESOURCE_MEM;
130}
100 131
101/* 132/*
102 * Array of bus specific translators 133 * Array of bus specific translators
@@ -121,6 +152,15 @@ static struct of_bus of_busses[] = {
121 .map = of_bus_default_map, 152 .map = of_bus_default_map,
122 .get_flags = of_bus_sbus_get_flags, 153 .get_flags = of_bus_sbus_get_flags,
123 }, 154 },
155 /* AMBA */
156 {
157 .name = "ambapp",
158 .addr_prop_name = "reg",
159 .match = of_bus_ambapp_match,
160 .count_cells = of_bus_ambapp_count_cells,
161 .map = of_bus_ambapp_map,
162 .get_flags = of_bus_ambapp_get_flags,
163 },
124 /* Default */ 164 /* Default */
125 { 165 {
126 .name = "default", 166 .name = "default",
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad23547..c68648662802 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
1039 pci_dev_put(ali_isa_bridge); 1039 pci_dev_put(ali_isa_bridge);
1040} 1040}
1041 1041
1042int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) 1042int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
1043{ 1043{
1044 u64 dma_addr_mask; 1044 u64 dma_addr_mask;
1045 1045
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 2485eaa23101..23c33ff9c31e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 
 static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 				  unsigned long offset, size_t sz,
-				  enum dma_data_direction direction)
+				  enum dma_data_direction direction,
+				  struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
 }
 
 static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
-			      size_t sz, enum dma_data_direction direction)
+			      size_t sz, enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
-			 int nelems, enum dma_data_direction direction)
+			 int nelems, enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	struct scatterlist *s, *outs, *segstart;
 	unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
 }
 
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
-			    int nelems, enum dma_data_direction direction)
+			    int nelems, enum dma_data_direction direction,
+			    struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-static void dma_4v_sync_single_for_cpu(struct device *dev,
-				       dma_addr_t bus_addr, size_t sz,
-				       enum dma_data_direction direction)
-{
-	/* Nothing to do... */
-}
-
-static void dma_4v_sync_sg_for_cpu(struct device *dev,
-				   struct scatterlist *sglist, int nelems,
-				   enum dma_data_direction direction)
-{
-	/* Nothing to do... */
-}
-
-static const struct dma_ops sun4v_dma_ops = {
+static struct dma_map_ops sun4v_dma_ops = {
 	.alloc_coherent = dma_4v_alloc_coherent,
 	.free_coherent = dma_4v_free_coherent,
 	.map_page = dma_4v_map_page,
 	.unmap_page = dma_4v_unmap_page,
 	.map_sg = dma_4v_map_sg,
 	.unmap_sg = dma_4v_unmap_sg,
-	.sync_single_for_cpu = dma_4v_sync_single_for_cpu,
-	.sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
 };
 
 static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
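
With sun4v on struct dma_map_ops, the new struct dma_attrs argument is threaded through by the generic wrappers (NULL for the attribute-less legacy entry points), and the empty CPU-sync hooks can simply be dropped because the common code skips callbacks that are not set. Roughly, per the 2.6.31-era asm-generic/dma-mapping-common.h pattern (a sketch, lightly simplified):

    static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
    				      unsigned long offset, size_t size,
    				      enum dma_data_direction dir)
    {
    	struct dma_map_ops *ops = get_dma_ops(dev);

    	/* attrs == NULL for drivers using the plain API */
    	return ops->map_page(dev, page, offset, size, dir, NULL);
    }

    static inline void dma_sync_single_for_cpu(struct device *dev,
    					   dma_addr_t addr, size_t size,
    					   enum dma_data_direction dir)
    {
    	struct dma_map_ops *ops = get_dma_ops(dev);

    	if (ops->sync_single_for_cpu)	/* unset for sun4v: nothing to do */
    		ops->sync_single_for_cpu(dev, addr, size, dir);
    }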
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 1ae8cdd7e703..68ff00107073 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,6 +7,8 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 
+#include <linux/perf_counter.h>
+
 #include <asm/pil.h>
 #include <asm/pcr.h>
 #include <asm/nmi.h>
@@ -34,10 +36,20 @@ unsigned int picl_shift;
  */
 void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
+	struct pt_regs *old_regs;
+
 	clear_softint(1 << PIL_DEFERRED_PCR_WORK);
+
+	old_regs = set_irq_regs(regs);
+	irq_enter();
+#ifdef CONFIG_PERF_COUNTERS
+	perf_counter_do_pending();
+#endif
+	irq_exit();
+	set_irq_regs(old_regs);
 }
 
-void schedule_deferred_pcr_work(void)
+void set_perf_counter_pending(void)
 {
 	set_softint(1 << PIL_DEFERRED_PCR_WORK);
 }
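
The deferral exists because counter overflow arrives as an NMI, where the perf core may not take locks or wake tasks. The NMI path only marks work pending; the processing then runs here at normal softint level, bracketed by irq_enter()/irq_exit(). Schematically, on the producer side (hypothetical caller name; the raise function is the one defined in this hunk):

    static void overflow_handler_tail(void)	/* hypothetical NMI-side caller */
    {
    	/* ... sample recorded from NMI context ... */
    	set_perf_counter_pending();	/* fires PIL_DEFERRED_PCR_WORK later */
    }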
diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_counter.c
new file mode 100644
index 000000000000..09de4035eaa9
--- /dev/null
+++ b/arch/sparc/kernel/perf_counter.c
@@ -0,0 +1,557 @@
1/* Performance counter support for sparc64.
2 *
3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
4 *
5 * This code is based almost entirely upon the x86 perf counter
6 * code, which is:
7 *
8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2009 Jaswinder Singh Rajput
11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
13 */
14
15#include <linux/perf_counter.h>
16#include <linux/kprobes.h>
17#include <linux/kernel.h>
18#include <linux/kdebug.h>
19#include <linux/mutex.h>
20
21#include <asm/cpudata.h>
22#include <asm/atomic.h>
23#include <asm/nmi.h>
24#include <asm/pcr.h>
25
26/* Sparc64 chips have two performance counters, 32-bits each, with
27 * overflow interrupts generated on transition from 0xffffffff to 0.
28 * The counters are accessed in one go using a 64-bit register.
29 *
30 * Both counters are controlled using a single control register. The
31 * only way to stop all sampling is to clear all of the context (user,
32 * supervisor, hypervisor) sampling enable bits. But these bits apply
33 * to both counters, thus the two counters can't be enabled/disabled
34 * individually.
35 *
36 * The control register has two event fields, one for each of the two
37 * counters. It's thus nearly impossible to have one counter going
38 * while keeping the other one stopped. Therefore it is possible to
39 * get overflow interrupts for counters not currently "in use" and
40 * that condition must be checked in the overflow interrupt handler.
41 *
42 * So we use a hack, in that we program inactive counters with the
43 * "sw_count0" and "sw_count1" events. These count how many times
44 * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an
45 * unusual way to encode a NOP and therefore will not trigger in
46 * normal code.
47 */
48
49#define MAX_HWCOUNTERS 2
50#define MAX_PERIOD ((1UL << 32) - 1)
51
52#define PIC_UPPER_INDEX 0
53#define PIC_LOWER_INDEX 1
54
55struct cpu_hw_counters {
56 struct perf_counter *counters[MAX_HWCOUNTERS];
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
59 int enabled;
60};
61DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
62
63struct perf_event_map {
64 u16 encoding;
65 u8 pic_mask;
66#define PIC_NONE 0x00
67#define PIC_UPPER 0x01
68#define PIC_LOWER 0x02
69};
70
71struct sparc_pmu {
72 const struct perf_event_map *(*event_map)(int);
73 int max_events;
74 int upper_shift;
75 int lower_shift;
76 int event_mask;
77 int hv_bit;
78 int irq_bit;
79 int upper_nop;
80 int lower_nop;
81};
82
83static const struct perf_event_map ultra3i_perfmon_event_map[] = {
84 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
85 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
86 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
88};
89
90static const struct perf_event_map *ultra3i_event_map(int event)
91{
92 return &ultra3i_perfmon_event_map[event];
93}
94
95static const struct sparc_pmu ultra3i_pmu = {
96 .event_map = ultra3i_event_map,
97 .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map),
98 .upper_shift = 11,
99 .lower_shift = 4,
100 .event_mask = 0x3f,
101 .upper_nop = 0x1c,
102 .lower_nop = 0x14,
103};
104
105static const struct perf_event_map niagara2_perfmon_event_map[] = {
106 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
107 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
108 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
109 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
110 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
111 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
112};
113
114static const struct perf_event_map *niagara2_event_map(int event)
115{
116 return &niagara2_perfmon_event_map[event];
117}
118
119static const struct sparc_pmu niagara2_pmu = {
120 .event_map = niagara2_event_map,
121 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
122 .upper_shift = 19,
123 .lower_shift = 6,
124 .event_mask = 0xfff,
125 .hv_bit = 0x8,
126 .irq_bit = 0x03,
127 .upper_nop = 0x220,
128 .lower_nop = 0x220,
129};
130
131static const struct sparc_pmu *sparc_pmu __read_mostly;
132
133static u64 event_encoding(u64 event, int idx)
134{
135 if (idx == PIC_UPPER_INDEX)
136 event <<= sparc_pmu->upper_shift;
137 else
138 event <<= sparc_pmu->lower_shift;
139 return event;
140}
141
142static u64 mask_for_index(int idx)
143{
144 return event_encoding(sparc_pmu->event_mask, idx);
145}
146
147static u64 nop_for_index(int idx)
148{
149 return event_encoding(idx == PIC_UPPER_INDEX ?
150 sparc_pmu->upper_nop :
151 sparc_pmu->lower_nop, idx);
152}
153
154static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
155 int idx)
156{
157 u64 val, mask = mask_for_index(idx);
158
159 val = pcr_ops->read();
160 pcr_ops->write((val & ~mask) | hwc->config);
161}
162
163static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
164 int idx)
165{
166 u64 mask = mask_for_index(idx);
167 u64 nop = nop_for_index(idx);
168 u64 val = pcr_ops->read();
169
170 pcr_ops->write((val & ~mask) | nop);
171}
172
173void hw_perf_enable(void)
174{
175 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
176 u64 val;
177 int i;
178
179 if (cpuc->enabled)
180 return;
181
182 cpuc->enabled = 1;
183 barrier();
184
185 val = pcr_ops->read();
186
187 for (i = 0; i < MAX_HWCOUNTERS; i++) {
188 struct perf_counter *cp = cpuc->counters[i];
189 struct hw_perf_counter *hwc;
190
191 if (!cp)
192 continue;
193 hwc = &cp->hw;
194 val |= hwc->config_base;
195 }
196
197 pcr_ops->write(val);
198}
199
200void hw_perf_disable(void)
201{
202 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
203 u64 val;
204
205 if (!cpuc->enabled)
206 return;
207
208 cpuc->enabled = 0;
209
210 val = pcr_ops->read();
211 val &= ~(PCR_UTRACE | PCR_STRACE |
212 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
213 pcr_ops->write(val);
214}
215
216static u32 read_pmc(int idx)
217{
218 u64 val;
219
220 read_pic(val);
221 if (idx == PIC_UPPER_INDEX)
222 val >>= 32;
223
224 return val & 0xffffffff;
225}
226
227static void write_pmc(int idx, u64 val)
228{
229 u64 shift, mask, pic;
230
231 shift = 0;
232 if (idx == PIC_UPPER_INDEX)
233 shift = 32;
234
235 mask = ((u64) 0xffffffff) << shift;
236 val <<= shift;
237
238 read_pic(pic);
239 pic &= ~mask;
240 pic |= val;
241 write_pic(pic);
242}
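/* Both 32-bit counters share the one 64-bit PIC register, so updating a
 * single counter is a read-modify-write of the whole register with the
 * other half masked off and preserved. */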
243
244static int sparc_perf_counter_set_period(struct perf_counter *counter,
245 struct hw_perf_counter *hwc, int idx)
246{
247 s64 left = atomic64_read(&hwc->period_left);
248 s64 period = hwc->sample_period;
249 int ret = 0;
250
251 if (unlikely(left <= -period)) {
252 left = period;
253 atomic64_set(&hwc->period_left, left);
254 hwc->last_period = period;
255 ret = 1;
256 }
257
258 if (unlikely(left <= 0)) {
259 left += period;
260 atomic64_set(&hwc->period_left, left);
261 hwc->last_period = period;
262 ret = 1;
263 }
264 if (left > MAX_PERIOD)
265 left = MAX_PERIOD;
266
267 atomic64_set(&hwc->prev_count, (u64)-left);
268
269 write_pmc(idx, (u64)(-left) & 0xffffffff);
270
271 perf_counter_update_userpage(counter);
272
273 return ret;
274}
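/* The counter is programmed with -left (mod 2^32): it then counts up to
 * the 0xffffffff -> 0 transition, and thus interrupts, after exactly
 * 'left' more events. */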
275
276static int sparc_pmu_enable(struct perf_counter *counter)
277{
278 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
279 struct hw_perf_counter *hwc = &counter->hw;
280 int idx = hwc->idx;
281
282 if (test_and_set_bit(idx, cpuc->used_mask))
283 return -EAGAIN;
284
285 sparc_pmu_disable_counter(hwc, idx);
286
287 cpuc->counters[idx] = counter;
288 set_bit(idx, cpuc->active_mask);
289
290 sparc_perf_counter_set_period(counter, hwc, idx);
291 sparc_pmu_enable_counter(hwc, idx);
292 perf_counter_update_userpage(counter);
293 return 0;
294}
295
296static u64 sparc_perf_counter_update(struct perf_counter *counter,
297 struct hw_perf_counter *hwc, int idx)
298{
299 int shift = 64 - 32;
300 u64 prev_raw_count, new_raw_count;
301 s64 delta;
302
303again:
304 prev_raw_count = atomic64_read(&hwc->prev_count);
305 new_raw_count = read_pmc(idx);
306
307 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
308 new_raw_count) != prev_raw_count)
309 goto again;
310
311 delta = (new_raw_count << shift) - (prev_raw_count << shift);
312 delta >>= shift;
313
314 atomic64_add(delta, &counter->count);
315 atomic64_sub(delta, &hwc->period_left);
316
317 return new_raw_count;
318}
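/* The cmpxchg loop makes this update safe against the NMI overflow
 * handler racing on the same counter; the paired shifts reduce the
 * difference modulo 2^32 so a hardware wrap between the two reads is
 * still accumulated correctly. */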
319
320static void sparc_pmu_disable(struct perf_counter *counter)
321{
322 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
323 struct hw_perf_counter *hwc = &counter->hw;
324 int idx = hwc->idx;
325
326 clear_bit(idx, cpuc->active_mask);
327 sparc_pmu_disable_counter(hwc, idx);
328
329 barrier();
330
331 sparc_perf_counter_update(counter, hwc, idx);
332 cpuc->counters[idx] = NULL;
333 clear_bit(idx, cpuc->used_mask);
334
335 perf_counter_update_userpage(counter);
336}
337
338static void sparc_pmu_read(struct perf_counter *counter)
339{
340 struct hw_perf_counter *hwc = &counter->hw;
341 sparc_perf_counter_update(counter, hwc, hwc->idx);
342}
343
344static void sparc_pmu_unthrottle(struct perf_counter *counter)
345{
346 struct hw_perf_counter *hwc = &counter->hw;
347 sparc_pmu_enable_counter(hwc, hwc->idx);
348}
349
350static atomic_t active_counters = ATOMIC_INIT(0);
351static DEFINE_MUTEX(pmc_grab_mutex);
352
353void perf_counter_grab_pmc(void)
354{
355 if (atomic_inc_not_zero(&active_counters))
356 return;
357
358 mutex_lock(&pmc_grab_mutex);
359 if (atomic_read(&active_counters) == 0) {
360 if (atomic_read(&nmi_active) > 0) {
361 on_each_cpu(stop_nmi_watchdog, NULL, 1);
362 BUG_ON(atomic_read(&nmi_active) != 0);
363 }
364 atomic_inc(&active_counters);
365 }
366 mutex_unlock(&pmc_grab_mutex);
367}
368
369void perf_counter_release_pmc(void)
370{
371 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
372 if (atomic_read(&nmi_active) == 0)
373 on_each_cpu(start_nmi_watchdog, NULL, 1);
374 mutex_unlock(&pmc_grab_mutex);
375 }
376}
377
378static void hw_perf_counter_destroy(struct perf_counter *counter)
379{
380 perf_counter_release_pmc();
381}
382
383static int __hw_perf_counter_init(struct perf_counter *counter)
384{
385 struct perf_counter_attr *attr = &counter->attr;
386 struct hw_perf_counter *hwc = &counter->hw;
387 const struct perf_event_map *pmap;
388 u64 enc;
389
390 if (atomic_read(&nmi_active) < 0)
391 return -ENODEV;
392
393 if (attr->type != PERF_TYPE_HARDWARE)
394 return -EOPNOTSUPP;
395
396 if (attr->config >= sparc_pmu->max_events)
397 return -EINVAL;
398
399 perf_counter_grab_pmc();
400 counter->destroy = hw_perf_counter_destroy;
401
402 /* We save the enable bits in the config_base. So to
403 * turn off sampling just write 'config', and to enable
404 * things write 'config | config_base'.
405 */
406 hwc->config_base = sparc_pmu->irq_bit;
407 if (!attr->exclude_user)
408 hwc->config_base |= PCR_UTRACE;
409 if (!attr->exclude_kernel)
410 hwc->config_base |= PCR_STRACE;
411 if (!attr->exclude_hv)
412 hwc->config_base |= sparc_pmu->hv_bit;
413
414 if (!hwc->sample_period) {
415 hwc->sample_period = MAX_PERIOD;
416 hwc->last_period = hwc->sample_period;
417 atomic64_set(&hwc->period_left, hwc->sample_period);
418 }
419
420 pmap = sparc_pmu->event_map(attr->config);
421
422 enc = pmap->encoding;
423 if (pmap->pic_mask & PIC_UPPER) {
424 hwc->idx = PIC_UPPER_INDEX;
425 enc <<= sparc_pmu->upper_shift;
426 } else {
427 hwc->idx = PIC_LOWER_INDEX;
428 enc <<= sparc_pmu->lower_shift;
429 }
430
431 hwc->config |= enc;
432 return 0;
433}
434
435static const struct pmu pmu = {
436 .enable = sparc_pmu_enable,
437 .disable = sparc_pmu_disable,
438 .read = sparc_pmu_read,
439 .unthrottle = sparc_pmu_unthrottle,
440};
441
442const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
443{
444 int err = __hw_perf_counter_init(counter);
445
446 if (err)
447 return ERR_PTR(err);
448 return &pmu;
449}
450
451void perf_counter_print_debug(void)
452{
453 unsigned long flags;
454 u64 pcr, pic;
455 int cpu;
456
457 if (!sparc_pmu)
458 return;
459
460 local_irq_save(flags);
461
462 cpu = smp_processor_id();
463
464 pcr = pcr_ops->read();
465 read_pic(pic);
466
467 pr_info("\n");
468 pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
469 cpu, pcr, pic);
470
471 local_irq_restore(flags);
472}
473
474static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
475 unsigned long cmd, void *__args)
476{
477 struct die_args *args = __args;
478 struct perf_sample_data data;
479 struct cpu_hw_counters *cpuc;
480 struct pt_regs *regs;
481 int idx;
482
483 if (!atomic_read(&active_counters))
484 return NOTIFY_DONE;
485
486 switch (cmd) {
487 case DIE_NMI:
488 break;
489
490 default:
491 return NOTIFY_DONE;
492 }
493
494 regs = args->regs;
495
496 data.regs = regs;
497 data.addr = 0;
498
499 cpuc = &__get_cpu_var(cpu_hw_counters);
500 for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
501 struct perf_counter *counter = cpuc->counters[idx];
502 struct hw_perf_counter *hwc;
503 u64 val;
504
505 if (!test_bit(idx, cpuc->active_mask))
506 continue;
507 hwc = &counter->hw;
508 val = sparc_perf_counter_update(counter, hwc, idx);
509 if (val & (1ULL << 31))
510 continue;
511
512 data.period = counter->hw.last_period;
513 if (!sparc_perf_counter_set_period(counter, hwc, idx))
514 continue;
515
516 if (perf_counter_overflow(counter, 1, &data))
517 sparc_pmu_disable_counter(hwc, idx);
518 }
519
520 return NOTIFY_STOP;
521}
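/* A counter whose updated value still has bit 31 set has not wrapped
 * past zero yet, so it cannot have raised this interrupt and is left
 * alone rather than re-armed. */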
522
523static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
524 .notifier_call = perf_counter_nmi_handler,
525};
526
527static bool __init supported_pmu(void)
528{
529 if (!strcmp(sparc_pmu_type, "ultra3i")) {
530 sparc_pmu = &ultra3i_pmu;
531 return true;
532 }
533 if (!strcmp(sparc_pmu_type, "niagara2")) {
534 sparc_pmu = &niagara2_pmu;
535 return true;
536 }
537 return false;
538}
539
540void __init init_hw_perf_counters(void)
541{
542 pr_info("Performance counters: ");
543
544 if (!supported_pmu()) {
545 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
546 return;
547 }
548
549 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
550
551 /* All sparc64 PMUs currently have 2 counters. But this simple
552 * driver only supports one active counter at a time.
553 */
554 perf_max_counters = 1;
555
556 register_die_notifier(&perf_counter_nmi_notifier);
557}
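
For context, a minimal userspace consumer of this driver under the 2.6.31-era ABI might look as follows (hedged sketch: assumes the sys_perf_counter_open wiring added in the syscall-table hunks below, and headers exposing __NR_perf_counter_open):

    #include <linux/perf_counter.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	struct perf_counter_attr attr;
    	unsigned long long count;
    	int fd;

    	memset(&attr, 0, sizeof(attr));
    	attr.size = sizeof(attr);
    	attr.type = PERF_TYPE_HARDWARE;
    	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* PIC_UPPER|PIC_LOWER above */

    	/* pid 0 = self, cpu -1 = any, no group, no flags */
    	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
    	if (fd < 0)
    		return 1;

    	/* ... workload under measurement ... */

    	if (read(fd, &count, sizeof(count)) == sizeof(count))
    		printf("cycles: %llu\n", count);
    	close(fd);
    	return 0;
    }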
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724..18d67854a1b8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-	__trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index fe43e80772db..0a37e8cfd160 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -24,6 +24,8 @@
 
 #include <asm/prom.h>
 #include <asm/oplib.h>
+#include <asm/leon.h>
+#include <asm/leon_amba.h>
 
 #include "prom.h"
 
@@ -131,6 +133,35 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 		regs->which_io, regs->phys_addr);
 }
 
+/* "name:vendor:device@irq,addrlo" */
+static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
+{
+	struct amba_prom_registers *regs;
+	unsigned int *intr;
+	unsigned int *device, *vendor;
+	struct property *prop;
+
+	prop = of_find_property(dp, "reg", NULL);
+	if (!prop)
+		return;
+	regs = prop->value;
+	prop = of_find_property(dp, "interrupts", NULL);
+	if (!prop)
+		return;
+	intr = prop->value;
+	prop = of_find_property(dp, "vendor", NULL);
+	if (!prop)
+		return;
+	vendor = prop->value;
+	prop = of_find_property(dp, "device", NULL);
+	if (!prop)
+		return;
+	device = prop->value;
+
+	sprintf(tmp_buf, "%s:%d:%d@%x,%x",
+		dp->name, *vendor, *device,
+		*intr, regs->phys_addr);
+}
+
 static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
 {
 	struct device_node *parent = dp->parent;
@@ -143,6 +174,8 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
 		return sbus_path_component(dp, tmp_buf);
 	if (!strcmp(parent->type, "ebus"))
 		return ebus_path_component(dp, tmp_buf);
+	if (!strcmp(parent->type, "ambapp"))
+		return ambapp_path_component(dp, tmp_buf);
 
 	/* "isa" is handled with platform naming */
 	}
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 0fb5789d43c8..138910c67206 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -22,9 +22,12 @@
 #include <linux/of.h>
 #include <asm/prom.h>
 #include <asm/oplib.h>
+#include <asm/leon.h>
 
 #include "prom.h"
 
+void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
+
 struct device_node *of_console_device;
 EXPORT_SYMBOL(of_console_device);
 
@@ -161,7 +164,7 @@ static struct property * __init build_one_prop(phandle node, char *prev,
 		name = prom_nextprop(node, prev, p->name);
 	}
 
-	if (strlen(name) == 0) {
+	if (!name || strlen(name) == 0) {
 		tmp = p;
 		return NULL;
 	}
@@ -242,7 +245,7 @@ static struct device_node * __init prom_create_node(phandle node,
 	return dp;
 }
 
-static char * __init build_full_name(struct device_node *dp)
+char * __init build_full_name(struct device_node *dp)
 {
 	int len, ourlen, plen;
 	char *n;
@@ -289,6 +292,9 @@ static struct device_node * __init prom_build_tree(struct device_node *parent,
 
 		dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
 
+		if (prom_build_more)
+			prom_build_more(dp, nextp);
+
 		node = prom_getsibling(node);
 	}
 
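prom_build_more gives a platform a post-order hook: it runs after a node's children have been built, so the callback can graft additional synthesized nodes into the tree through *nextp. Schematically (illustrative only; the real LEON hook lives in leon_kernel.c and these names are placeholders):

    static void __init my_build_more(struct device_node *dp,
    				 struct device_node ***nextp)
    {
    	/* inspect dp, append synthesized nodes via *nextp */
    }

    static void __init my_platform_prom_setup(void)	/* hypothetical */
    {
    	/* must be assigned before prom_build_tree() runs */
    	prom_build_more = my_build_more;
    }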
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 998cadb4e7f2..16a47ffe03c1 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -235,6 +235,8 @@ void __init setup_arch(char **cmdline_p)
 		sparc_cpu_model = sun4e;
 	if (!strcmp(&cputypval,"sun4u"))
 		sparc_cpu_model = sun4u;
+	if (!strncmp(&cputypval, "leon" , 4))
+		sparc_cpu_model = sparc_leon;
 
 	printk("ARCH: ");
 	switch(sparc_cpu_model) {
@@ -256,6 +258,9 @@ void __init setup_arch(char **cmdline_p)
 	case sun4u:
 		printk("SUN4U\n");
 		break;
+	case sparc_leon:
+		printk("LEON\n");
+		break;
 	default:
 		printk("UNKNOWN!\n");
 		break;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 181d069a2d44..7ce1a1005b1d 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index ec82d76dc6f2..647afbda7ae1 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
+
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index aed94869ad6a..e7061138c98a 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -121,7 +121,7 @@ SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
 SIGN1(sys32_umask, sys_umask, %o0)
 SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
 SIGN1(sys32_sendto, sys_sendto, %o0)
-SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
+SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
 SIGN2(sys32_connect, sys_connect, %o0, %o2)
 SIGN2(sys32_bind, sys_bind, %o0, %o2)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index d28f496f4669..ca39c606fe8e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
+#include <linux/sched.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 690901657291..04181577cb65 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
 
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 2ee7250ba7ae..91b06b7f7acf 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +158,4 @@ sys_call_table:
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 681abe0a4594..79836a7dd00c 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_SPARC32) += loadmmu.o
 obj-y += generic_$(BITS).o
 obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
+obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 26bb3919ff1f..54114ad0bdee 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -34,6 +34,7 @@
 #include <asm/pgalloc.h>	/* bug in asm-generic/tlb.h: check_pgt_cache */
 #include <asm/tlb.h>
 #include <asm/prom.h>
+#include <asm/leon.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -326,6 +327,9 @@ void __init paging_init(void)
 		sparc_unmapped_base = 0xe0000000;
 		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
 		break;
+	case sparc_leon:
+		leon_init();
+		/* fall through */
 	case sun4m:
 	case sun4d:
 		srmmu_paging_init();
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
new file mode 100644
index 000000000000..c0e01297e64e
--- /dev/null
+++ b/arch/sparc/mm/leon_mm.c
@@ -0,0 +1,260 @@
1/*
2 * linux/arch/sparc/mm/leon_mm.c
3 *
4 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
5 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
6 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
7 *
8 * do srmmu probe in software
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/mm.h>
14#include <asm/asi.h>
15#include <asm/leon.h>
16#include <asm/tlbflush.h>
17
18int leon_flush_during_switch = 1;
19int srmmu_swprobe_trace;
20
21unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
22{
23
24 unsigned int ctxtbl;
25 unsigned int pgd, pmd, ped;
26 unsigned int ptr;
27 unsigned int lvl, pte, paddrbase;
28 unsigned int ctx;
29 unsigned int paddr_calc;
30
31 paddrbase = 0;
32
33 if (srmmu_swprobe_trace)
34 printk(KERN_INFO "swprobe: trace on\n");
35
36 ctxtbl = srmmu_get_ctable_ptr();
37 if (!(ctxtbl)) {
38 if (srmmu_swprobe_trace)
39 printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
40 return 0;
41 }
42 if (!_pfn_valid(PFN(ctxtbl))) {
43 if (srmmu_swprobe_trace)
44 printk(KERN_INFO
45 "swprobe: !_pfn_valid(%x)=>0\n",
46 PFN(ctxtbl));
47 return 0;
48 }
49
50 ctx = srmmu_get_context();
51 if (srmmu_swprobe_trace)
52 printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);
53
54 pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
55
56 if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
57 if (srmmu_swprobe_trace)
58 printk(KERN_INFO "swprobe: pgd is entry level 3\n");
59 lvl = 3;
60 pte = pgd;
61 paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
62 goto ready;
63 }
64 if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
65 if (srmmu_swprobe_trace)
66 printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
67 return 0;
68 }
69
70 if (srmmu_swprobe_trace)
71 printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);
72
73 ptr = (pgd & SRMMU_PTD_PMASK) << 4;
74 ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
75 if (!_pfn_valid(PFN(ptr)))
76 return 0;
77
78 pmd = LEON_BYPASS_LOAD_PA(ptr);
79 if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
80 if (srmmu_swprobe_trace)
81 printk(KERN_INFO "swprobe: pmd is entry level 2\n");
82 lvl = 2;
83 pte = pmd;
84 paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
85 goto ready;
86 }
87 if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
88 if (srmmu_swprobe_trace)
89 printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
90 return 0;
91 }
92
93 if (srmmu_swprobe_trace)
94 printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);
95
96 ptr = (pmd & SRMMU_PTD_PMASK) << 4;
97 ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
98 if (!_pfn_valid(PFN(ptr))) {
99 if (srmmu_swprobe_trace)
100 printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
101 PFN(ptr));
102 return 0;
103 }
104
105 ped = LEON_BYPASS_LOAD_PA(ptr);
106
107 if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
108 if (srmmu_swprobe_trace)
109 printk(KERN_INFO "swprobe: ped is entry level 1\n");
110 lvl = 1;
111 pte = ped;
112 paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
113 goto ready;
114 }
115 if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
116 if (srmmu_swprobe_trace)
117 printk(KERN_INFO "swprobe: ped is invalid => 0\n");
118 return 0;
119 }
120
121 if (srmmu_swprobe_trace)
122 printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);
123
124 ptr = (ped & SRMMU_PTD_PMASK) << 4;
125 ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
126 if (!_pfn_valid(PFN(ptr)))
127 return 0;
128
129 ptr = LEON_BYPASS_LOAD_PA(ptr);
130 if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
131 if (srmmu_swprobe_trace)
132 printk(KERN_INFO "swprobe: ptr is entry level 0\n");
133 lvl = 0;
134 pte = ptr;
135 paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
136 goto ready;
137 }
138 if (srmmu_swprobe_trace)
139 printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
140 return 0;
141
142ready:
143 switch (lvl) {
144 case 0:
145 paddr_calc =
146 (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
147 break;
148 case 1:
149 paddr_calc =
150 (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
151 break;
152 case 2:
153 paddr_calc =
154 (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
155 break;
156 default:
157 case 3:
158 paddr_calc = vaddr;
159 break;
160 }
161 if (srmmu_swprobe_trace)
162		printk(KERN_INFO "swprobe: paddr %x\n", paddr_calc);
163 if (paddr)
164 *paddr = paddr_calc;
165 return paddrbase;
166}
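/* srmmu_swprobe() replays in software what the SRMMU hardware probe
 * would do: it walks context table -> pgd -> pmd -> pte via MMU-bypass
 * physical loads (LEON_BYPASS_LOAD_PA), stops at the first PTE-typed
 * entry, and reconstructs the physical address from whichever level
 * terminated the walk. */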
167
168void leon_flush_icache_all(void)
169{
170 __asm__ __volatile__(" flush "); /*iflush*/
171}
172
173void leon_flush_dcache_all(void)
174{
175 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
176 "i"(ASI_LEON_DFLUSH) : "memory");
177}
178
179void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
180{
181 if (vma->vm_flags & VM_EXEC)
182 leon_flush_icache_all();
183 leon_flush_dcache_all();
184}
185
186void leon_flush_cache_all(void)
187{
188 __asm__ __volatile__(" flush "); /*iflush*/
189 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
190 "i"(ASI_LEON_DFLUSH) : "memory");
191}
192
193void leon_flush_tlb_all(void)
194{
195 leon_flush_cache_all();
196 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
197 "i"(ASI_LEON_MMUFLUSH) : "memory");
198}
199
200/* get all cache regs */
201void leon3_getCacheRegs(struct leon3_cacheregs *regs)
202{
203 unsigned long ccr, iccr, dccr;
204
205 if (!regs)
206 return;
207 /* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
208 __asm__ __volatile__("lda [%%g0] %3, %0\n\t"
209 "mov 0x08, %%g1\n\t"
210 "lda [%%g1] %3, %1\n\t"
211 "mov 0x0c, %%g1\n\t"
212 "lda [%%g1] %3, %2\n\t"
213 : "=r"(ccr), "=r"(iccr), "=r"(dccr)
214 /* output */
215 : "i"(ASI_LEON_CACHEREGS) /* input */
216 : "g1" /* clobber list */
217 );
218 regs->ccr = ccr;
219 regs->iccr = iccr;
220 regs->dccr = dccr;
221}
222
223/* Due to virtual cache we need to check cache configuration if
224 * it is possible to skip flushing in some cases.
225 *
226 * Leon2 and Leon3 differ in their way of telling cache information
227 *
228 */
229int leon_flush_needed(void)
230{
231 int flush_needed = -1;
232 unsigned int ssize, sets;
233 char *setStr[4] =
234 { "direct mapped", "2-way associative", "3-way associative",
235 "4-way associative"
236 };
237 /* leon 3 */
238 struct leon3_cacheregs cregs;
239 leon3_getCacheRegs(&cregs);
240 sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
241 /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
242 ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
243
244 printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
245 sets > 3 ? "unknown" : setStr[sets], ssize);
246 if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
247 /* Set Size <= Page size ==>
248 flush on every context switch not needed. */
249 flush_needed = 0;
250 printk(KERN_INFO "CACHE: not flushing on every context switch\n");
251 }
252 return flush_needed;
253}
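/* Rationale: with a direct-mapped cache whose set (way) size is at most
 * PAGE_SIZE, the virtual index bits never exceed the page offset, so no
 * virtual aliases can arise and the flush on context switch is safely
 * skipped. */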
254
255void leon_switch_mm(void)
256{
257 flush_tlb_mm((void *)0);
258 if (leon_flush_during_switch)
259 leon_flush_cache_all();
260}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index 652be05acbea..82ec8f666036 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -33,6 +33,7 @@ void __init load_mmu(void)
 		break;
 	case sun4m:
 	case sun4d:
+	case sparc_leon:
 		ld_mmu_srmmu();
 		break;
 	default:
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index ade4eb373bdd..509b1ffeba66 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -46,6 +46,7 @@
 #include <asm/tsunami.h>
 #include <asm/swift.h>
 #include <asm/turbosparc.h>
+#include <asm/leon.h>
 
 #include <asm/btfixup.h>
 
@@ -569,6 +570,9 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
 	}
 
+	if (sparc_cpu_model == sparc_leon)
+		leon_switch_mm();
+
 	if (is_hypersparc)
 		hyper_flush_whole_icache();
 
@@ -1977,6 +1981,45 @@ static void __init init_viking(void)
 	poke_srmmu = poke_viking;
 }
 
+#ifdef CONFIG_SPARC_LEON
+
+void __init poke_leonsparc(void)
+{
+}
+
+void __init init_leon(void)
+{
+
+	srmmu_name = "Leon";
+
+	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
+			BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
+			BTFIXUPCALL_NORM);
+
+	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
+
+	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
+			BTFIXUPCALL_NOP);
+	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
+
+	poke_srmmu = poke_leonsparc;
+
+	srmmu_cache_pagetables = 0;
+
+	leon_flush_during_switch = leon_flush_needed();
+}
+#endif
+
 /* Probe for the srmmu chip version. */
 static void __init get_srmmu_type(void)
 {
@@ -1992,7 +2035,15 @@ static void __init get_srmmu_type(void)
 	psr_typ = (psr >> 28) & 0xf;
 	psr_vers = (psr >> 24) & 0xf;
 
-	/* First, check for HyperSparc or Cypress. */
+	/* First, check for sparc-leon. */
+	if (sparc_cpu_model == sparc_leon) {
+		psr_typ = 0xf;	/* hardcoded ids for older models/simulators */
+		psr_vers = 2;
+		init_leon();
+		return;
+	}
+
+	/* Second, check for HyperSparc or Cypress. */
 	if(mod_typ == 1) {
 		switch(mod_rev) {
 		case 7:
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index d172f86439b1..f97cb8b6ee5f 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -21,7 +21,7 @@
 static int profile_timer_exceptions_notify(struct notifier_block *self,
 					   unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
+	struct die_args *args = data;
 	int ret = NOTIFY_DONE;
 
 	switch (val) {
@@ -57,7 +57,7 @@ static void timer_stop(void)
 
 static int op_nmi_timer_init(struct oprofile_operations *ops)
 {
-	if (!nmi_usable)
+	if (atomic_read(&nmi_active) <= 0)
 		return -ENODEV;
 
 	ops->start = timer_start;