Diffstat (limited to 'arch/sparc/include')
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h     | 197
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h    | 168
-rw-r--r--  arch/sparc/include/asm/dma-mapping_32.h |  60
-rw-r--r--  arch/sparc/include/asm/dma-mapping_64.h | 154
-rw-r--r--  arch/sparc/include/asm/errno.h          |   2
-rw-r--r--  arch/sparc/include/asm/ftrace.h         |  11
-rw-r--r--  arch/sparc/include/asm/mdesc.h          |   3
-rw-r--r--  arch/sparc/include/asm/percpu_64.h      |   8
-rw-r--r--  arch/sparc/include/asm/prom.h           |   2
-rw-r--r--  arch/sparc/include/asm/trap_block.h     | 207
-rw-r--r--  arch/sparc/include/asm/unistd.h         |   3
11 files changed, 392 insertions(+), 423 deletions(-)
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a11b89ee9ef8..926397d345ff 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
 #ifndef __ASSEMBLY__
 
 #include <linux/percpu.h>
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)		per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()	__get_cpu_var(__cpu_data)
 
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses.  These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image.  As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
-	struct thread_info	*thread;
-	unsigned long		pgd_paddr;
-	unsigned long		cpu_mondo_pa;
-	unsigned long		dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
-	unsigned long		resum_mondo_pa;
-	unsigned long		resum_kernel_buf_pa;
-	unsigned long		nonresum_mondo_pa;
-	unsigned long		nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
-	struct hv_fault_status	fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
-	unsigned long		cpu_mondo_block_pa;
-	unsigned long		cpu_list_pa;
-	unsigned long		tsb_huge;
-	unsigned long		tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
-	unsigned long		irq_worklist_pa;
-	unsigned int		cpu_mondo_qmask;
-	unsigned int		dev_mondo_qmask;
-	unsigned int		resum_qmask;
-	unsigned int		nonresum_qmask;
-	void			*hdesc;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
 extern const struct seq_operations cpuinfo_op;
 
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
-	unsigned int	addr;
-	unsigned int	cheetah_safari[4];
-	unsigned int	cheetah_jbus[4];
-	unsigned int	starfire[4];
-	unsigned int	sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
-	__sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
-	unsigned int	addr;
-	unsigned int	insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
-	__sun4v_2insn_patch_end;
-
 #endif /* !(__ASSEMBLY__) */
 
-#define TRAP_PER_CPU_THREAD		0x00
-#define TRAP_PER_CPU_PGD_PADDR		0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
-#define TRAP_PER_CPU_FAULT_INFO		0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
-#define TRAP_PER_CPU_TSB_HUGE		0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
-#define TRAP_PER_CPU_RESUM_QMASK	0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT		8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG)				\
-	/* Spitfire implementation (default). */	\
-661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x1f, REG;			\
-	nop;						\
-	.section	.cpuid_patch, "ax";		\
-	/* Instruction location. */			\
-	.word		661b;				\
-	/* Cheetah Safari implementation. */		\
-	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x3ff, REG;		\
-	nop;						\
-	/* Cheetah JBUS implementation. */		\
-	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
-	srlx		REG, 17, REG;			\
-	and		REG, 0x1f, REG;			\
-	nop;						\
-	/* Starfire implementation. */			\
-	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
-	sllx		REG, 9, REG;			\
-	or		REG, 0xd0, REG;			\
-	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
-	/* sun4v implementation. */			\
-	mov		SCRATCHPAD_CPUID, REG;		\
-	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
-	nop;						\
-	nop;						\
-	.previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(trap_block), DEST;		\
-	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
-	or	DEST, %lo(trap_block), DEST;	\
-	add	DEST, TMP, DEST;		\
-
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)			\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer. */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)			\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST.  REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code.  The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base.  This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
-	lduh	[THR + TI_CPU], REG1;			\
-	sethi	%hi(__per_cpu_shift), REG3;		\
-	sethi	%hi(__per_cpu_base), REG2;		\
-	ldx	[REG3 + %lo(__per_cpu_shift)], REG3;	\
-	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
-	sllx	REG1, REG3, REG3;			\
-	add	REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
-	sethi	%hi(trap_block), DEST;		\
-	or	DEST, %lo(trap_block), DEST;	\
-
-/* Uniprocessor versions, we know the cpuid is zero. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP)			\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
-	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)		\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
-	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP)			\
-	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
-	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST. */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
 
 #endif /* _SPARC64_CPUDATA_H */
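
The hunk above only removes code: the trap_per_cpu definitions move, essentially unchanged apart from replacing the hdesc pointer with a __per_cpu_base member, into the new asm/trap_block.h added later in this diff. As a rough sketch of how C code reaches the per-cpu trap state after the move (example_pgd_paddr is a hypothetical helper, not part of the patch):

	#include <asm/trap_block.h>

	/* trap_block[] is part of the main kernel image and locked into
	 * the TLB, so indexing it is safe even on trap-handling paths. */
	static unsigned long example_pgd_paddr(int cpu)
	{
		return trap_block[cpu].pgd_paddr;
	}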
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 0f4150e26619..204e4bf64438 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -1,8 +1,166 @@
 #ifndef ___ASM_SPARC_DMA_MAPPING_H
 #define ___ASM_SPARC_DMA_MAPPING_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma-mapping_64.h>
-#else
-#include <asm/dma-mapping_32.h>
-#endif
+
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h)	(1)
+
+struct dma_ops {
+	void *(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void (*free_coherent)(struct device *dev, size_t size,
+			      void *cpu_addr, dma_addr_t dma_handle);
+	dma_addr_t (*map_page)(struct device *dev, struct page *page,
+			       unsigned long offset, size_t size,
+			       enum dma_data_direction direction);
+	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+			   size_t size,
+			   enum dma_data_direction direction);
+	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+		      enum dma_data_direction direction);
+	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+			 int nhwentries,
+			 enum dma_data_direction direction);
+	void (*sync_single_for_cpu)(struct device *dev,
+				    dma_addr_t dma_handle, size_t size,
+				    enum dma_data_direction direction);
+	void (*sync_single_for_device)(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction direction);
+	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+				int nelems,
+				enum dma_data_direction direction);
+	void (*sync_sg_for_device)(struct device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction dir);
+};
+extern const struct dma_ops *dma_ops;
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+					size_t size,
+					enum dma_data_direction direction)
+{
+	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
+				 direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+				    size_t size,
+				    enum dma_data_direction direction)
+{
+	dma_ops->unmap_page(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      unsigned long offset, size_t size,
+				      enum dma_data_direction direction)
+{
+	return dma_ops->map_page(dev, page, offset, size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	dma_ops->unmap_page(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction)
+{
+	return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction)
+{
+	dma_ops->unmap_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+					   dma_addr_t dma_handle, size_t size,
+					   enum dma_data_direction direction)
+{
+	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t dma_handle,
+					      size_t size,
+					      enum dma_data_direction direction)
+{
+	if (dma_ops->sync_single_for_device)
+		dma_ops->sync_single_for_device(dev, dma_handle, size,
+						direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+				       struct scatterlist *sg, int nelems,
+				       enum dma_data_direction direction)
+{
+	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+					  struct scatterlist *sg, int nelems,
+					  enum dma_data_direction direction)
+{
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+						 dma_addr_t dma_handle,
+						 unsigned long offset,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+						    dma_addr_t dma_handle,
+						    unsigned long offset,
+						    size_t size,
+						    enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+}
+
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return (dma_addr == DMA_ERROR_CODE);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+	/*
+	 * no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe
+	 */
+	return (1 << INTERNODE_CACHE_SHIFT);
+}
+
 #endif
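
With the 32-bit/64-bit split gone, every sparc driver now goes through the same dma_ops-backed inline wrappers above. A hypothetical driver fragment showing the intended call pattern (example_tx, its device, and its buffer are illustrative assumptions, not code from this patch):

	#include <linux/dma-mapping.h>

	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* dma_map_single() above resolves to dma_ops->map_page()
		 * via virt_to_page(buf) plus the offset within the page. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;
		/* ... hand `handle` to the hardware and wait for it ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}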
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h
deleted file mode 100644
index 8a57ea0573e6..000000000000
--- a/arch/sparc/include/asm/dma-mapping_32.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_SPARC_DMA_MAPPING_H
-#define _ASM_SPARC_DMA_MAPPING_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct page;
-
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-			       unsigned long offset, size_t size,
-			       enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-			   size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg,
-		      int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			 int nents, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-				    size_t size,
-				    enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
-				       dma_addr_t dma_handle,
-				       size_t size,
-				       enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
-					  dma_addr_t dma_handle,
-					  unsigned long offset,
-					  size_t size,
-					  enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
-					     dma_addr_t dma_handle,
-					     unsigned long offset, size_t size,
-					     enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-				int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev,
-				   struct scatterlist *sg, int nelems,
-				   enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_get_cache_alignment(void);
-
-#define dma_alloc_noncoherent	dma_alloc_coherent
-#define dma_free_noncoherent	dma_free_coherent
-
-#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
deleted file mode 100644
index bfa64f9702d5..000000000000
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _ASM_SPARC64_DMA_MAPPING_H
-#define _ASM_SPARC64_DMA_MAPPING_H
-
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
-
-struct dma_ops {
-	void *(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *cpu_addr, dma_addr_t dma_handle);
-	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
-				 size_t size,
-				 enum dma_data_direction direction);
-	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-			     size_t size,
-			     enum dma_data_direction direction);
-	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
-	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-			 int nhwentries,
-			 enum dma_data_direction direction);
-	void (*sync_single_for_cpu)(struct device *dev,
-				    dma_addr_t dma_handle, size_t size,
-				    enum dma_data_direction direction);
-	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
-				int nelems,
-				enum dma_data_direction direction);
-};
-extern const struct dma_ops *dma_ops;
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
-{
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
-{
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-					size_t size,
-					enum dma_data_direction direction)
-{
-	return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-				    size_t size,
-				    enum dma_data_direction direction)
-{
-	dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      unsigned long offset, size_t size,
-				      enum dma_data_direction direction)
-{
-	return dma_ops->map_single(dev, page_address(page) + offset,
-				   size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-				  size_t size,
-				  enum dma_data_direction direction)
-{
-	dma_ops->unmap_single(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction)
-{
-	return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction)
-{
-	dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t dma_handle, size_t size,
-					   enum dma_data_direction direction)
-{
-	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t dma_handle,
-					      size_t size,
-					      enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-						 dma_addr_t dma_handle,
-						 unsigned long offset,
-						 size_t size,
-						 enum dma_data_direction direction)
-{
-	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-						    dma_addr_t dma_handle,
-						    unsigned long offset,
-						    size_t size,
-						    enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg, int nelems,
-				       enum dma_data_direction direction)
-{
-	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg, int nelems,
-					  enum dma_data_direction direction)
-{
-	/* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-static inline int dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h)	(1)
-
-#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h
index a9ef172977de..4e2bc490d714 100644
--- a/arch/sparc/include/asm/errno.h
+++ b/arch/sparc/include/asm/errno.h
@@ -110,4 +110,6 @@
 #define	EOWNERDEAD	132	/* Owner died */
 #define	ENOTRECOVERABLE	133	/* State not recoverable */
 
+#define	ERFKILL		134	/* Operation not possible due to RF-kill */
+
 #endif
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d27716cd38c1..b0f18e9893db 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -11,4 +11,15 @@ extern void _mcount(void);
 
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* reloction of mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
 #endif /* _ASM_SPARC64_FTRACE */
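
For context: the generic dynamic-ftrace core passes each recorded mcount call-site address through ftrace_call_adjust() before patching it; on sparc no relocation is needed, so the hook is the identity function. Roughly (a paraphrase of the generic usage, with `rec` standing in for a struct dyn_ftrace record; not code from this patch):

	unsigned long ip = ftrace_call_adjust(rec->ip);	/* == rec->ip on sparc */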
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 1acc7272e537..9faa046713fb 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -71,7 +71,8 @@ struct mdesc_notifier_client {
 
 extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
-extern void mdesc_fill_in_cpu_data(cpumask_t mask);
+extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
+extern void mdesc_populate_present_mask(cpumask_t *mask);
 
 extern void sun4v_mdesc_init(void);
 
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index bee64593023e..007aafb4ae97 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5");
 
 #ifdef CONFIG_SMP
 
-extern void real_setup_per_cpu_areas(void);
+#include <asm/trap_block.h>
 
-extern unsigned long __per_cpu_base;
-extern unsigned long __per_cpu_shift;
 #define __per_cpu_offset(__cpu) \
-	(__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
+	(trap_block[(__cpu)].__per_cpu_base)
 #define per_cpu_offset(x) (__per_cpu_offset(x))
 
 #define __my_cpu_offset __local_per_cpu_offset
 
 #else /* ! SMP */
 
-#define real_setup_per_cpu_areas()		do { } while (0)
-
 #endif /* SMP */
 
 #include <asm-generic/percpu.h>
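
The per-cpu offset is no longer base + (cpu << shift) computed from two kernel globals; each CPU's base now lives in its own trap_block[] entry. An illustrative expansion under that reading (not literal kernel code):

	/* __per_cpu_offset(cpu) after this patch: */
	unsigned long off = trap_block[cpu].__per_cpu_base;
	/* asm-generic/percpu.h then turns per_cpu(var, cpu) into an
	 * access at &var + off. */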
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 900d44714f8d..be8d7aaeb60d 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp);
 #endif
 
 extern void prom_build_devicetree(void);
+extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
 
 /* Dummy ref counting routines - to be implemented later */
 static inline struct device_node *of_node_get(struct device_node *node)
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 000000000000..7e26b2db6211
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses.  These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image.  As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+	struct thread_info	*thread;
+	unsigned long		pgd_paddr;
+	unsigned long		cpu_mondo_pa;
+	unsigned long		dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+	unsigned long		resum_mondo_pa;
+	unsigned long		resum_kernel_buf_pa;
+	unsigned long		nonresum_mondo_pa;
+	unsigned long		nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+	struct hv_fault_status	fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+	unsigned long		cpu_mondo_block_pa;
+	unsigned long		cpu_list_pa;
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+	unsigned long		irq_worklist_pa;
+	unsigned int		cpu_mondo_qmask;
+	unsigned int		dev_mondo_qmask;
+	unsigned int		resum_qmask;
+	unsigned int		nonresum_qmask;
+	unsigned long		__per_cpu_base;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+	unsigned int	sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+	__sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+	__sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD		0x00
+#define TRAP_PER_CPU_PGD_PADDR		0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA	0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA	0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA	0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA	0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA	0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA	0x38
+#define TRAP_PER_CPU_FAULT_INFO		0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA	0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK	0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK	0xec
+#define TRAP_PER_CPU_RESUM_QMASK	0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK	0xf4
+#define TRAP_PER_CPU_PER_CPU_BASE	0xf8
+
+#define TRAP_BLOCK_SZ_SHIFT		8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* Cheetah Safari implementation. */		\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x3ff, REG;		\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
+	sllx		REG, 9, REG;			\
+	or		REG, 0xd0, REG;			\
+	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	/* sun4v implementation. */			\
+	mov		SCRATCHPAD_CPUID, REG;		\
+	ldxa		[REG] ASI_SCRATCHPAD, REG;	\
+	nop;						\
+	nop;						\
+	.previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	__GET_CPUID(TMP)			\
+	sethi	%hi(trap_block), DEST;		\
+	sllx	TMP, TRAP_BLOCK_SZ_SHIFT, TMP;	\
+	or	DEST, %lo(trap_block), DEST;	\
+	add	DEST, TMP, DEST;		\
+
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)			\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)			\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST.  REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code.  The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base.  This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)	\
+	lduh	[THR + TI_CPU], REG1;			\
+	sethi	%hi(trap_block), REG2;			\
+	sllx	REG1, TRAP_BLOCK_SZ_SHIFT, REG1;	\
+	or	REG2, %lo(trap_block), REG2;		\
+	add	REG2, REG1, REG2;			\
+	ldx	[REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	sethi	%hi(trap_block), DEST;		\
+	or	DEST, %lo(trap_block), DEST;	\
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP)			\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
+	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP)		\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP)			\
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)			\
+	ldx	[DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
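
The SMP LOAD_PER_CPU_BASE above now does one dependent load from trap_block[] instead of reading the old __per_cpu_base/__per_cpu_shift globals and shifting. A C-level equivalent of the assembly, assuming thread_info exposes the cpu number as a `cpu` field (a sketch, not part of the patch):

	static unsigned long example_per_cpu_base(struct thread_info *thr)
	{
		unsigned int cpu = thr->cpu;		/* lduh [THR + TI_CPU] */
		return trap_block[cpu].__per_cpu_base;	/* single load */
	}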
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b8eb71ef3163..b2c406de7d4f 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -394,8 +394,9 @@
 #define __NR_accept4		323
 #define __NR_preadv		324
 #define __NR_pwritev		325
+#define __NR_rt_tgsigqueueinfo	326
 
-#define NR_SYSCALLS		326
+#define NR_SYSCALLS		327
 
 #ifdef __32bit_syscall_numbers__
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,