author     Sam Ravnborg <sam@ravnborg.org>        2008-07-18 00:55:51 -0400
committer  David S. Miller <davem@davemloft.net>  2008-07-18 00:55:51 -0400
commit     f5e706ad886b6a5eb59637830110b09ccebf01c5 (patch)
tree       ea043a0a28e16a2ac6395c35d737f52698a165b7
parent     5e3609f60c09f0f15f71f80c6d7933b2c7be71a6 (diff)
sparc: join the remaining header files
With this commit all sparc64 header files are moved to asm-sparc.
The remaining files (71 files) were too different to be trivially
merged, so they are divided up into a _32.h and a _64.h file which
are both included from the file with no bit size.

The following script was used:

cd include
FILES=`wc -l asm-sparc64/*h | grep -v '^ 1' | cut -b 20-`

for FILE in ${FILES}; do
	echo $FILE:
	BASE=`echo $FILE | cut -d '.' -f 1`
	FN32=${BASE}_32.h
	FN64=${BASE}_64.h
	GUARD=___ASM_SPARC_`echo $BASE | tr '-' '_' | tr [:lower:] [:upper:]`_H
	git mv asm-sparc/$FILE asm-sparc/$FN32
	git mv asm-sparc64/$FILE asm-sparc/$FN64
	echo git mv done
	printf "#ifndef %s\n" $GUARD                              >  asm-sparc/$FILE
	printf "#define %s\n" $GUARD                              >> asm-sparc/$FILE
	printf "#if defined(__sparc__) && defined(__arch64__)\n"  >> asm-sparc/$FILE
	printf "#include <asm-sparc/%s>\n" $FN64                  >> asm-sparc/$FILE
	printf "#else\n"                                          >> asm-sparc/$FILE
	printf "#include <asm-sparc/%s>\n" $FN32                  >> asm-sparc/$FILE
	printf "#endif\n"                                         >> asm-sparc/$FILE
	printf "#endif\n"                                         >> asm-sparc/$FILE
	git add asm-sparc/$FILE
	echo new file done
	printf "#include <asm-sparc/%s>\n" $FILE                  >  asm-sparc64/$FILE
	git add asm-sparc64/$FILE
	echo sparc64 file done
done

The guard contains three '_' to avoid conflict with existing guards.

In addition the two Kbuild files are emptied to avoid breaking the
headers_* targets. We will reintroduce the exported header files
when the necessary kbuild changes are merged.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
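For each merged header the script leaves a small dual-include wrapper in
asm-sparc/ and reduces the old asm-sparc64/ copy to a one-line forwarder.
A sketch of the generated pair for atomic.h (the same content appears in
the hunks below; the comments are added here only for illustration):

/* include/asm-sparc/atomic.h -- generated wrapper; the guard uses three
 * leading underscores to avoid clashing with the pre-existing guards. */
#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/atomic_64.h>	/* 64-bit (sparc64) variant */
#else
#include <asm-sparc/atomic_32.h>	/* 32-bit (sparc32) variant */
#endif
#endif

/* include/asm-sparc64/atomic.h -- kept only as a forwarder */
#include <asm-sparc/atomic.h>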
-rw-r--r--include/asm-sparc/Kbuild23
-rw-r--r--include/asm-sparc/atomic.h169
-rw-r--r--include/asm-sparc/atomic_32.h165
-rw-r--r--include/asm-sparc/atomic_64.h128
-rw-r--r--include/asm-sparc/auxio.h97
-rw-r--r--include/asm-sparc/auxio_32.h89
-rw-r--r--include/asm-sparc/auxio_64.h100
-rw-r--r--include/asm-sparc/bitops.h117
-rw-r--r--include/asm-sparc/bitops_32.h111
-rw-r--r--include/asm-sparc/bitops_64.h107
-rw-r--r--include/asm-sparc/cacheflush.h93
-rw-r--r--include/asm-sparc/cacheflush_32.h85
-rw-r--r--include/asm-sparc/cacheflush_64.h76
-rw-r--r--include/asm-sparc/checksum.h249
-rw-r--r--include/asm-sparc/checksum_32.h241
-rw-r--r--include/asm-sparc/checksum_64.h167
-rw-r--r--include/asm-sparc/cpudata.h35
-rw-r--r--include/asm-sparc/cpudata_32.h27
-rw-r--r--include/asm-sparc/cpudata_64.h240
-rw-r--r--include/asm-sparc/delay.h42
-rw-r--r--include/asm-sparc/delay_32.h34
-rw-r--r--include/asm-sparc/delay_64.h17
-rw-r--r--include/asm-sparc/dma-mapping.h17
-rw-r--r--include/asm-sparc/dma-mapping_32.h11
-rw-r--r--include/asm-sparc/dma-mapping_64.h154
-rw-r--r--include/asm-sparc/dma.h290
-rw-r--r--include/asm-sparc/dma_32.h288
-rw-r--r--include/asm-sparc/dma_64.h205
-rw-r--r--include/asm-sparc/ebus.h105
-rw-r--r--include/asm-sparc/ebus_32.h99
-rw-r--r--include/asm-sparc/ebus_64.h94
-rw-r--r--include/asm-sparc/elf.h149
-rw-r--r--include/asm-sparc/elf_32.h145
-rw-r--r--include/asm-sparc/elf_64.h217
-rw-r--r--include/asm-sparc/floppy.h394
-rw-r--r--include/asm-sparc/floppy_32.h388
-rw-r--r--include/asm-sparc/floppy_64.h782
-rw-r--r--include/asm-sparc/futex.h12
-rw-r--r--include/asm-sparc/futex_32.h6
-rw-r--r--include/asm-sparc/futex_64.h110
-rw-r--r--include/asm-sparc/hardirq.h31
-rw-r--r--include/asm-sparc/hardirq_32.h23
-rw-r--r--include/asm-sparc/hardirq_64.h19
-rw-r--r--include/asm-sparc/head.h106
-rw-r--r--include/asm-sparc/head_32.h102
-rw-r--r--include/asm-sparc/head_64.h76
-rw-r--r--include/asm-sparc/ide.h103
-rw-r--r--include/asm-sparc/ide_32.h95
-rw-r--r--include/asm-sparc/ide_64.h118
-rw-r--r--include/asm-sparc/io.h331
-rw-r--r--include/asm-sparc/io_32.h325
-rw-r--r--include/asm-sparc/io_64.h511
-rw-r--r--include/asm-sparc/iommu.h129
-rw-r--r--include/asm-sparc/iommu_32.h121
-rw-r--r--include/asm-sparc/iommu_64.h62
-rw-r--r--include/asm-sparc/ipcbuf.h39
-rw-r--r--include/asm-sparc/ipcbuf_32.h31
-rw-r--r--include/asm-sparc/ipcbuf_64.h28
-rw-r--r--include/asm-sparc/irq.h21
-rw-r--r--include/asm-sparc/irq_32.h15
-rw-r--r--include/asm-sparc/irq_64.h93
-rw-r--r--include/asm-sparc/irqflags.h47
-rw-r--r--include/asm-sparc/irqflags_32.h39
-rw-r--r--include/asm-sparc/irqflags_64.h89
-rw-r--r--include/asm-sparc/kdebug.h81
-rw-r--r--include/asm-sparc/kdebug_32.h73
-rw-r--r--include/asm-sparc/kdebug_64.h19
-rw-r--r--include/asm-sparc/mc146818rtc.h35
-rw-r--r--include/asm-sparc/mc146818rtc_32.h29
-rw-r--r--include/asm-sparc/mc146818rtc_64.h34
-rw-r--r--include/asm-sparc/mmu.h13
-rw-r--r--include/asm-sparc/mmu_32.h7
-rw-r--r--include/asm-sparc/mmu_64.h127
-rw-r--r--include/asm-sparc/mmu_context.h50
-rw-r--r--include/asm-sparc/mmu_context_32.h42
-rw-r--r--include/asm-sparc/mmu_context_64.h155
-rw-r--r--include/asm-sparc/module.h15
-rw-r--r--include/asm-sparc/module_32.h7
-rw-r--r--include/asm-sparc/module_64.h7
-rw-r--r--include/asm-sparc/mostek.h173
-rw-r--r--include/asm-sparc/mostek_32.h171
-rw-r--r--include/asm-sparc/mostek_64.h143
-rw-r--r--include/asm-sparc/namei.h21
-rw-r--r--include/asm-sparc/namei_32.h13
-rw-r--r--include/asm-sparc/namei_64.h13
-rw-r--r--include/asm-sparc/of_platform.h32
-rw-r--r--include/asm-sparc/of_platform_32.h24
-rw-r--r--include/asm-sparc/of_platform_64.h25
-rw-r--r--include/asm-sparc/openprom.h263
-rw-r--r--include/asm-sparc/openprom_32.h255
-rw-r--r--include/asm-sparc/openprom_64.h280
-rw-r--r--include/asm-sparc/oplib.h278
-rw-r--r--include/asm-sparc/oplib_32.h272
-rw-r--r--include/asm-sparc/oplib_64.h322
-rw-r--r--include/asm-sparc/page.h165
-rw-r--r--include/asm-sparc/page_32.h163
-rw-r--r--include/asm-sparc/page_64.h142
-rw-r--r--include/asm-sparc/pci.h176
-rw-r--r--include/asm-sparc/pci_32.h170
-rw-r--r--include/asm-sparc/pci_64.h209
-rw-r--r--include/asm-sparc/percpu.h14
-rw-r--r--include/asm-sparc/percpu_32.h6
-rw-r--r--include/asm-sparc/percpu_64.h28
-rw-r--r--include/asm-sparc/pgalloc.h76
-rw-r--r--include/asm-sparc/pgalloc_32.h68
-rw-r--r--include/asm-sparc/pgalloc_64.h81
-rw-r--r--include/asm-sparc/pgtable.h482
-rw-r--r--include/asm-sparc/pgtable_32.h480
-rw-r--r--include/asm-sparc/pgtable_64.h781
-rw-r--r--include/asm-sparc/posix_types.h124
-rw-r--r--include/asm-sparc/posix_types_32.h118
-rw-r--r--include/asm-sparc/posix_types_64.h122
-rw-r--r--include/asm-sparc/processor.h132
-rw-r--r--include/asm-sparc/processor_32.h128
-rw-r--r--include/asm-sparc/processor_64.h237
-rw-r--r--include/asm-sparc/ptrace.h179
-rw-r--r--include/asm-sparc/ptrace_32.h175
-rw-r--r--include/asm-sparc/ptrace_64.h346
-rw-r--r--include/asm-sparc/reg.h87
-rw-r--r--include/asm-sparc/reg_32.h79
-rw-r--r--include/asm-sparc/reg_64.h56
-rw-r--r--include/asm-sparc/sbus.h161
-rw-r--r--include/asm-sparc/sbus_32.h153
-rw-r--r--include/asm-sparc/sbus_64.h190
-rw-r--r--include/asm-sparc/scatterlist.h32
-rw-r--r--include/asm-sparc/scatterlist_32.h26
-rw-r--r--include/asm-sparc/scatterlist_64.h27
-rw-r--r--include/asm-sparc/sections.h12
-rw-r--r--include/asm-sparc/sections_32.h6
-rw-r--r--include/asm-sparc/sections_64.h9
-rw-r--r--include/asm-sparc/sfp-machine.h214
-rw-r--r--include/asm-sparc/sfp-machine_32.h212
-rw-r--r--include/asm-sparc/sfp-machine_64.h93
-rw-r--r--include/asm-sparc/shmparam.h19
-rw-r--r--include/asm-sparc/shmparam_32.h11
-rw-r--r--include/asm-sparc/shmparam_64.h10
-rw-r--r--include/asm-sparc/sigcontext.h70
-rw-r--r--include/asm-sparc/sigcontext_32.h62
-rw-r--r--include/asm-sparc/sigcontext_64.h87
-rw-r--r--include/asm-sparc/siginfo.h25
-rw-r--r--include/asm-sparc/siginfo_32.h17
-rw-r--r--include/asm-sparc/siginfo_64.h32
-rw-r--r--include/asm-sparc/signal.h209
-rw-r--r--include/asm-sparc/signal_32.h207
-rw-r--r--include/asm-sparc/signal_64.h194
-rw-r--r--include/asm-sparc/smp.h177
-rw-r--r--include/asm-sparc/smp_32.h173
-rw-r--r--include/asm-sparc/smp_64.h64
-rw-r--r--include/asm-sparc/spinlock.h200
-rw-r--r--include/asm-sparc/spinlock_32.h192
-rw-r--r--include/asm-sparc/spinlock_64.h250
-rw-r--r--include/asm-sparc/stat.h82
-rw-r--r--include/asm-sparc/stat_32.h76
-rw-r--r--include/asm-sparc/stat_64.h47
-rw-r--r--include/asm-sparc/statfs.h12
-rw-r--r--include/asm-sparc/statfs_32.h6
-rw-r--r--include/asm-sparc/statfs_64.h54
-rw-r--r--include/asm-sparc/string.h213
-rw-r--r--include/asm-sparc/string_32.h205
-rw-r--r--include/asm-sparc/string_64.h83
-rw-r--r--include/asm-sparc/system.h290
-rw-r--r--include/asm-sparc/system_32.h288
-rw-r--r--include/asm-sparc/system_64.h355
-rw-r--r--include/asm-sparc/thread_info.h157
-rw-r--r--include/asm-sparc/thread_info_32.h151
-rw-r--r--include/asm-sparc/thread_info_64.h277
-rw-r--r--include/asm-sparc/timer.h111
-rw-r--r--include/asm-sparc/timer_32.h107
-rw-r--r--include/asm-sparc/timer_64.h30
-rw-r--r--include/asm-sparc/timex.h21
-rw-r--r--include/asm-sparc/timex_32.h15
-rw-r--r--include/asm-sparc/timex_64.h19
-rw-r--r--include/asm-sparc/tlb.h32
-rw-r--r--include/asm-sparc/tlb_32.h24
-rw-r--r--include/asm-sparc/tlb_64.h111
-rw-r--r--include/asm-sparc/tlbflush.h68
-rw-r--r--include/asm-sparc/tlbflush_32.h60
-rw-r--r--include/asm-sparc/tlbflush_64.h44
-rw-r--r--include/asm-sparc/topology.h14
-rw-r--r--include/asm-sparc/topology_32.h6
-rw-r--r--include/asm-sparc/topology_64.h86
-rw-r--r--include/asm-sparc/uaccess.h342
-rw-r--r--include/asm-sparc/uaccess_32.h336
-rw-r--r--include/asm-sparc/uaccess_64.h273
-rw-r--r--include/asm-sparc/unistd.h386
-rw-r--r--include/asm-sparc/unistd_32.h378
-rw-r--r--include/asm-sparc/unistd_64.h373
-rw-r--r--include/asm-sparc/xor.h277
-rw-r--r--include/asm-sparc/xor_32.h269
-rw-r--r--include/asm-sparc/xor_64.h70
-rw-r--r--include/asm-sparc64/Kbuild22
-rw-r--r--include/asm-sparc64/atomic.h129
-rw-r--r--include/asm-sparc64/auxio.h101
-rw-r--r--include/asm-sparc64/bitops.h108
-rw-r--r--include/asm-sparc64/cacheflush.h77
-rw-r--r--include/asm-sparc64/checksum.h168
-rw-r--r--include/asm-sparc64/cpudata.h241
-rw-r--r--include/asm-sparc64/delay.h18
-rw-r--r--include/asm-sparc64/dma-mapping.h155
-rw-r--r--include/asm-sparc64/dma.h206
-rw-r--r--include/asm-sparc64/ebus.h95
-rw-r--r--include/asm-sparc64/elf.h218
-rw-r--r--include/asm-sparc64/floppy.h783
-rw-r--r--include/asm-sparc64/futex.h111
-rw-r--r--include/asm-sparc64/hardirq.h20
-rw-r--r--include/asm-sparc64/head.h77
-rw-r--r--include/asm-sparc64/ide.h119
-rw-r--r--include/asm-sparc64/io.h512
-rw-r--r--include/asm-sparc64/iommu.h63
-rw-r--r--include/asm-sparc64/ipcbuf.h29
-rw-r--r--include/asm-sparc64/irq.h94
-rw-r--r--include/asm-sparc64/irqflags.h90
-rw-r--r--include/asm-sparc64/kdebug.h20
-rw-r--r--include/asm-sparc64/mc146818rtc.h35
-rw-r--r--include/asm-sparc64/mmu.h128
-rw-r--r--include/asm-sparc64/mmu_context.h156
-rw-r--r--include/asm-sparc64/module.h8
-rw-r--r--include/asm-sparc64/mostek.h144
-rw-r--r--include/asm-sparc64/namei.h14
-rw-r--r--include/asm-sparc64/of_platform.h26
-rw-r--r--include/asm-sparc64/openprom.h281
-rw-r--r--include/asm-sparc64/oplib.h323
-rw-r--r--include/asm-sparc64/page.h143
-rw-r--r--include/asm-sparc64/pci.h210
-rw-r--r--include/asm-sparc64/percpu.h29
-rw-r--r--include/asm-sparc64/pgalloc.h82
-rw-r--r--include/asm-sparc64/pgtable.h782
-rw-r--r--include/asm-sparc64/posix_types.h123
-rw-r--r--include/asm-sparc64/processor.h238
-rw-r--r--include/asm-sparc64/ptrace.h347
-rw-r--r--include/asm-sparc64/reg.h57
-rw-r--r--include/asm-sparc64/sbus.h191
-rw-r--r--include/asm-sparc64/scatterlist.h28
-rw-r--r--include/asm-sparc64/sections.h10
-rw-r--r--include/asm-sparc64/sfp-machine.h94
-rw-r--r--include/asm-sparc64/shmparam.h11
-rw-r--r--include/asm-sparc64/sigcontext.h88
-rw-r--r--include/asm-sparc64/siginfo.h33
-rw-r--r--include/asm-sparc64/signal.h195
-rw-r--r--include/asm-sparc64/smp.h65
-rw-r--r--include/asm-sparc64/spinlock.h251
-rw-r--r--include/asm-sparc64/stat.h48
-rw-r--r--include/asm-sparc64/statfs.h55
-rw-r--r--include/asm-sparc64/string.h84
-rw-r--r--include/asm-sparc64/system.h356
-rw-r--r--include/asm-sparc64/thread_info.h278
-rw-r--r--include/asm-sparc64/timer.h31
-rw-r--r--include/asm-sparc64/timex.h20
-rw-r--r--include/asm-sparc64/tlb.h112
-rw-r--r--include/asm-sparc64/tlbflush.h45
-rw-r--r--include/asm-sparc64/topology.h87
-rw-r--r--include/asm-sparc64/uaccess.h274
-rw-r--r--include/asm-sparc64/unistd.h374
-rw-r--r--include/asm-sparc64/xor.h71
254 files changed, 17513 insertions, 16987 deletions
diff --git a/include/asm-sparc/Kbuild b/include/asm-sparc/Kbuild
index c83e3c0aa30b..6cdaf9d33b38 100644
--- a/include/asm-sparc/Kbuild
+++ b/include/asm-sparc/Kbuild
@@ -1,22 +1 @@
-include include/asm-generic/Kbuild.asm
-
-header-y += apc.h
-header-y += asi.h
-header-y += bpp.h
-header-y += display7seg.h
-header-y += envctrl.h
-header-y += jsflash.h
-header-y += openprom.h
-header-y += openpromio.h
-header-y += psrcompat.h
-header-y += pstate.h
-header-y += reg.h
-header-y += traps.h
-header-y += uctx.h
-header-y += utrap.h
-header-y += vfc_ioctls.h
-header-y += watchdog.h
-
-unifdef-y += fbio.h
-unifdef-y += perfctr.h
-unifdef-y += psr.h
+# dummy file to avoid breaking make headers_install
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 5c944b5a8040..66d8166ec1d7 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -1,165 +1,8 @@
-/* atomic.h: These still suck, but the I-cache hit rate is higher.
- *
- * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
- * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
+#ifndef ___ASM_SPARC_ATOMIC_H
+#define ___ASM_SPARC_ATOMIC_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/atomic_64.h>
5 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
6 *
7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
9 */
10
11#ifndef __ARCH_SPARC_ATOMIC__
12#define __ARCH_SPARC_ATOMIC__
13
14#include <linux/types.h>
15
16typedef struct { volatile int counter; } atomic_t;
17
18#ifdef __KERNEL__
19
20#define ATOMIC_INIT(i) { (i) }
21
22extern int __atomic_add_return(int, atomic_t *);
23extern int atomic_cmpxchg(atomic_t *, int, int);
24#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
25extern int atomic_add_unless(atomic_t *, int, int);
26extern void atomic_set(atomic_t *, int);
27
28#define atomic_read(v) ((v)->counter)
29
30#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
31#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
32#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
33#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
34
35#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
36#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
37#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
38#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
39
40#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
41
42/*
43 * atomic_inc_and_test - increment and test
44 * @v: pointer of type atomic_t
45 *
46 * Atomically increments @v by 1
47 * and returns true if the result is zero, or false for all
48 * other cases.
49 */
50#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
51
52#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
53#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
54
55#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
56
57/* This is the old 24-bit implementation. It's still used internally
58 * by some sparc-specific code, notably the semaphore implementation.
59 */
60typedef struct { volatile int counter; } atomic24_t;
61
62#ifndef CONFIG_SMP
63
64#define ATOMIC24_INIT(i) { (i) }
65#define atomic24_read(v) ((v)->counter)
66#define atomic24_set(v, i) (((v)->counter) = i)
67
 #else
-/* We do the bulk of the actual work out of line in two common
- * routines in assembler, see arch/sparc/lib/atomic.S for the
+#include <asm-sparc/atomic_32.h>
+#endif
71 * "fun" details.
72 *
73 * For SMP the trick is you embed the spin lock byte within
74 * the word, use the low byte so signedness is easily retained
75 * via a quick arithmetic shift. It looks like this:
76 *
77 * ----------------------------------------
78 * | signed 24-bit counter value | lock | atomic_t
79 * ----------------------------------------
80 * 31 8 7 0
81 */
82
83#define ATOMIC24_INIT(i) { ((i) << 8) }
84
85static inline int atomic24_read(const atomic24_t *v)
86{
87 int ret = v->counter;
88
89 while(ret & 0xff)
90 ret = v->counter;
91
92 return ret >> 8;
93}
94
95#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
96#endif 8#endif
97
98static inline int __atomic24_add(int i, atomic24_t *v)
99{
100 register volatile int *ptr asm("g1");
101 register int increment asm("g2");
102 register int tmp1 asm("g3");
103 register int tmp2 asm("g4");
104 register int tmp3 asm("g7");
105
106 ptr = &v->counter;
107 increment = i;
108
109 __asm__ __volatile__(
110 "mov %%o7, %%g4\n\t"
111 "call ___atomic24_add\n\t"
112 " add %%o7, 8, %%o7\n"
113 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
114 : "0" (increment), "r" (ptr)
115 : "memory", "cc");
116
117 return increment;
118}
119
120static inline int __atomic24_sub(int i, atomic24_t *v)
121{
122 register volatile int *ptr asm("g1");
123 register int increment asm("g2");
124 register int tmp1 asm("g3");
125 register int tmp2 asm("g4");
126 register int tmp3 asm("g7");
127
128 ptr = &v->counter;
129 increment = i;
130
131 __asm__ __volatile__(
132 "mov %%o7, %%g4\n\t"
133 "call ___atomic24_sub\n\t"
134 " add %%o7, 8, %%o7\n"
135 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
136 : "0" (increment), "r" (ptr)
137 : "memory", "cc");
138
139 return increment;
140}
141
142#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
143#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
144
145#define atomic24_dec_return(v) __atomic24_sub(1, (v))
146#define atomic24_inc_return(v) __atomic24_add(1, (v))
147
148#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
149#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
150
151#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
152#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
153
154#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
155
156/* Atomic operations are already serializing */
157#define smp_mb__before_atomic_dec() barrier()
158#define smp_mb__after_atomic_dec() barrier()
159#define smp_mb__before_atomic_inc() barrier()
160#define smp_mb__after_atomic_inc() barrier()
161
162#endif /* !(__KERNEL__) */
163
164#include <asm-generic/atomic.h>
165#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc/atomic_32.h b/include/asm-sparc/atomic_32.h
new file mode 100644
index 000000000000..5c944b5a8040
--- /dev/null
+++ b/include/asm-sparc/atomic_32.h
@@ -0,0 +1,165 @@
1/* atomic.h: These still suck, but the I-cache hit rate is higher.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
5 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
6 *
7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
9 */
10
11#ifndef __ARCH_SPARC_ATOMIC__
12#define __ARCH_SPARC_ATOMIC__
13
14#include <linux/types.h>
15
16typedef struct { volatile int counter; } atomic_t;
17
18#ifdef __KERNEL__
19
20#define ATOMIC_INIT(i) { (i) }
21
22extern int __atomic_add_return(int, atomic_t *);
23extern int atomic_cmpxchg(atomic_t *, int, int);
24#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
25extern int atomic_add_unless(atomic_t *, int, int);
26extern void atomic_set(atomic_t *, int);
27
28#define atomic_read(v) ((v)->counter)
29
30#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
31#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
32#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
33#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
34
35#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
36#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
37#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
38#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
39
40#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
41
42/*
43 * atomic_inc_and_test - increment and test
44 * @v: pointer of type atomic_t
45 *
46 * Atomically increments @v by 1
47 * and returns true if the result is zero, or false for all
48 * other cases.
49 */
50#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
51
52#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
53#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
54
55#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
56
57/* This is the old 24-bit implementation. It's still used internally
58 * by some sparc-specific code, notably the semaphore implementation.
59 */
60typedef struct { volatile int counter; } atomic24_t;
61
62#ifndef CONFIG_SMP
63
64#define ATOMIC24_INIT(i) { (i) }
65#define atomic24_read(v) ((v)->counter)
66#define atomic24_set(v, i) (((v)->counter) = i)
67
68#else
69/* We do the bulk of the actual work out of line in two common
70 * routines in assembler, see arch/sparc/lib/atomic.S for the
71 * "fun" details.
72 *
73 * For SMP the trick is you embed the spin lock byte within
74 * the word, use the low byte so signedness is easily retained
75 * via a quick arithmetic shift. It looks like this:
76 *
77 * ----------------------------------------
78 * | signed 24-bit counter value | lock | atomic_t
79 * ----------------------------------------
80 * 31 8 7 0
81 */
82
83#define ATOMIC24_INIT(i) { ((i) << 8) }
84
85static inline int atomic24_read(const atomic24_t *v)
86{
87 int ret = v->counter;
88
89 while(ret & 0xff)
90 ret = v->counter;
91
92 return ret >> 8;
93}
94
95#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
96#endif
97
98static inline int __atomic24_add(int i, atomic24_t *v)
99{
100 register volatile int *ptr asm("g1");
101 register int increment asm("g2");
102 register int tmp1 asm("g3");
103 register int tmp2 asm("g4");
104 register int tmp3 asm("g7");
105
106 ptr = &v->counter;
107 increment = i;
108
109 __asm__ __volatile__(
110 "mov %%o7, %%g4\n\t"
111 "call ___atomic24_add\n\t"
112 " add %%o7, 8, %%o7\n"
113 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
114 : "0" (increment), "r" (ptr)
115 : "memory", "cc");
116
117 return increment;
118}
119
120static inline int __atomic24_sub(int i, atomic24_t *v)
121{
122 register volatile int *ptr asm("g1");
123 register int increment asm("g2");
124 register int tmp1 asm("g3");
125 register int tmp2 asm("g4");
126 register int tmp3 asm("g7");
127
128 ptr = &v->counter;
129 increment = i;
130
131 __asm__ __volatile__(
132 "mov %%o7, %%g4\n\t"
133 "call ___atomic24_sub\n\t"
134 " add %%o7, 8, %%o7\n"
135 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
136 : "0" (increment), "r" (ptr)
137 : "memory", "cc");
138
139 return increment;
140}
141
142#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
143#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
144
145#define atomic24_dec_return(v) __atomic24_sub(1, (v))
146#define atomic24_inc_return(v) __atomic24_add(1, (v))
147
148#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
149#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
150
151#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
152#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
153
154#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
155
156/* Atomic operations are already serializing */
157#define smp_mb__before_atomic_dec() barrier()
158#define smp_mb__after_atomic_dec() barrier()
159#define smp_mb__before_atomic_inc() barrier()
160#define smp_mb__after_atomic_inc() barrier()
161
162#endif /* !(__KERNEL__) */
163
164#include <asm-generic/atomic.h>
165#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc/atomic_64.h b/include/asm-sparc/atomic_64.h
new file mode 100644
index 000000000000..2c71ec4a3b18
--- /dev/null
+++ b/include/asm-sparc/atomic_64.h
@@ -0,0 +1,128 @@
1/* atomic.h: Thankfully the V9 is at least reasonable for this
2 * stuff.
3 *
4 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
7#ifndef __ARCH_SPARC64_ATOMIC__
8#define __ARCH_SPARC64_ATOMIC__
9
10#include <linux/types.h>
11#include <asm/system.h>
12
13typedef struct { volatile int counter; } atomic_t;
14typedef struct { volatile __s64 counter; } atomic64_t;
15
16#define ATOMIC_INIT(i) { (i) }
17#define ATOMIC64_INIT(i) { (i) }
18
19#define atomic_read(v) ((v)->counter)
20#define atomic64_read(v) ((v)->counter)
21
22#define atomic_set(v, i) (((v)->counter) = i)
23#define atomic64_set(v, i) (((v)->counter) = i)
24
25extern void atomic_add(int, atomic_t *);
26extern void atomic64_add(int, atomic64_t *);
27extern void atomic_sub(int, atomic_t *);
28extern void atomic64_sub(int, atomic64_t *);
29
30extern int atomic_add_ret(int, atomic_t *);
31extern int atomic64_add_ret(int, atomic64_t *);
32extern int atomic_sub_ret(int, atomic_t *);
33extern int atomic64_sub_ret(int, atomic64_t *);
34
35#define atomic_dec_return(v) atomic_sub_ret(1, v)
36#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
37
38#define atomic_inc_return(v) atomic_add_ret(1, v)
39#define atomic64_inc_return(v) atomic64_add_ret(1, v)
40
41#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
42#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
43
44#define atomic_add_return(i, v) atomic_add_ret(i, v)
45#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
46
47/*
48 * atomic_inc_and_test - increment and test
49 * @v: pointer of type atomic_t
50 *
51 * Atomically increments @v by 1
52 * and returns true if the result is zero, or false for all
53 * other cases.
54 */
55#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
56#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
57
58#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
59#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
60
61#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
62#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
63
64#define atomic_inc(v) atomic_add(1, v)
65#define atomic64_inc(v) atomic64_add(1, v)
66
67#define atomic_dec(v) atomic_sub(1, v)
68#define atomic64_dec(v) atomic64_sub(1, v)
69
70#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
71#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
72
73#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
74#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
75
76static inline int atomic_add_unless(atomic_t *v, int a, int u)
77{
78 int c, old;
79 c = atomic_read(v);
80 for (;;) {
81 if (unlikely(c == (u)))
82 break;
83 old = atomic_cmpxchg((v), c, c + (a));
84 if (likely(old == c))
85 break;
86 c = old;
87 }
88 return c != (u);
89}
90
91#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
92
93#define atomic64_cmpxchg(v, o, n) \
94 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
95#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
96
97static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
98{
99 long c, old;
100 c = atomic64_read(v);
101 for (;;) {
102 if (unlikely(c == (u)))
103 break;
104 old = atomic64_cmpxchg((v), c, c + (a));
105 if (likely(old == c))
106 break;
107 c = old;
108 }
109 return c != (u);
110}
111
112#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
113
114/* Atomic operations are already serializing */
115#ifdef CONFIG_SMP
116#define smp_mb__before_atomic_dec() membar_storeload_loadload();
117#define smp_mb__after_atomic_dec() membar_storeload_storestore();
118#define smp_mb__before_atomic_inc() membar_storeload_loadload();
119#define smp_mb__after_atomic_inc() membar_storeload_storestore();
120#else
121#define smp_mb__before_atomic_dec() barrier()
122#define smp_mb__after_atomic_dec() barrier()
123#define smp_mb__before_atomic_inc() barrier()
124#define smp_mb__after_atomic_inc() barrier()
125#endif
126
127#include <asm-generic/atomic.h>
128#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/include/asm-sparc/auxio.h b/include/asm-sparc/auxio.h
index e552b8d68450..24c6f3c0f577 100644
--- a/include/asm-sparc/auxio.h
+++ b/include/asm-sparc/auxio.h
@@ -1,89 +1,8 @@
-/*
- * auxio.h: Definitions and code for the Auxiliary I/O register.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-#ifndef _SPARC_AUXIO_H
-#define _SPARC_AUXIO_H
-
+#ifndef ___ASM_SPARC_AUXIO_H
+#define ___ASM_SPARC_AUXIO_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/auxio_64.h>
+#else
+#include <asm-sparc/auxio_32.h>
+#endif
+#endif
9#include <asm/system.h>
10#include <asm/vaddrs.h>
11
12/* This register is an unsigned char in IO space. It does two things.
13 * First, it is used to control the front panel LED light on machines
14 * that have it (good for testing entry points to trap handlers and irq's)
15 * Secondly, it controls various floppy drive parameters.
16 */
17#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
18#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
19#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
20#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
21#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
22#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
23#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
24
25/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
26#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
27
28/* Set the following to zero to eject the floppy. */
29#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
30#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
31
32#ifndef __ASSEMBLY__
33
34/*
35 * NOTE: these routines are implementation dependent--
36 * understand the hardware you are querying!
37 */
38extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
39extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
40
41/*
42 * The following routines are provided for driver-compatibility
43 * with sparc64 (primarily sunlance.c)
44 */
45
46#define AUXIO_LTE_ON 1
47#define AUXIO_LTE_OFF 0
48
49/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
50 *
51 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
52 */
53#define auxio_set_lte(on) \
54do { \
55 if(on) { \
56 set_auxio(AUXIO_LINK_TEST, 0); \
57 } else { \
58 set_auxio(0, AUXIO_LINK_TEST); \
59 } \
60} while (0)
61
62#define AUXIO_LED_ON 1
63#define AUXIO_LED_OFF 0
64
65/* auxio_set_led - Set system front panel LED
66 *
67 * on - AUXIO_LED_ON or AUXIO_LED_OFF
68 */
69#define auxio_set_led(on) \
70do { \
71 if(on) { \
72 set_auxio(AUXIO_LED, 0); \
73 } else { \
74 set_auxio(0, AUXIO_LED); \
75 } \
76} while (0)
77
78#endif /* !(__ASSEMBLY__) */
79
80
81/* AUXIO2 (Power Off Control) */
82extern __volatile__ unsigned char * auxio_power_register;
83
84#define AUXIO_POWER_DETECT_FAILURE 32
85#define AUXIO_POWER_CLEAR_FAILURE 2
86#define AUXIO_POWER_OFF 1
87
88
89#endif /* !(_SPARC_AUXIO_H) */
diff --git a/include/asm-sparc/auxio_32.h b/include/asm-sparc/auxio_32.h
new file mode 100644
index 000000000000..4db8f23db20f
--- /dev/null
+++ b/include/asm-sparc/auxio_32.h
@@ -0,0 +1,89 @@
1/*
2 * auxio.h: Definitions and code for the Auxiliary I/O register.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6#ifndef _SPARC_AUXIO_H
7#define _SPARC_AUXIO_H
8
9#include <asm/system.h>
10#include <asm/vaddrs.h>
11
12/* This register is an unsigned char in IO space. It does two things.
13 * First, it is used to control the front panel LED light on machines
14 * that have it (good for testing entry points to trap handlers and irq's)
15 * Secondly, it controls various floppy drive parameters.
16 */
17#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
18#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
19#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
20#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
21#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
22#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
23#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
24
25/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
26#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
27
28/* Set the following to zero to eject the floppy. */
29#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
30#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
31
32#ifndef __ASSEMBLY__
33
34/*
35 * NOTE: these routines are implementation dependent--
36 * understand the hardware you are querying!
37 */
38extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
39extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
40
41/*
42 * The following routines are provided for driver-compatibility
43 * with sparc64 (primarily sunlance.c)
44 */
45
46#define AUXIO_LTE_ON 1
47#define AUXIO_LTE_OFF 0
48
49/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
50 *
51 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
52 */
53#define auxio_set_lte(on) \
54do { \
55 if(on) { \
56 set_auxio(AUXIO_LINK_TEST, 0); \
57 } else { \
58 set_auxio(0, AUXIO_LINK_TEST); \
59 } \
60} while (0)
61
62#define AUXIO_LED_ON 1
63#define AUXIO_LED_OFF 0
64
65/* auxio_set_led - Set system front panel LED
66 *
67 * on - AUXIO_LED_ON or AUXIO_LED_OFF
68 */
69#define auxio_set_led(on) \
70do { \
71 if(on) { \
72 set_auxio(AUXIO_LED, 0); \
73 } else { \
74 set_auxio(0, AUXIO_LED); \
75 } \
76} while (0)
77
78#endif /* !(__ASSEMBLY__) */
79
80
81/* AUXIO2 (Power Off Control) */
82extern __volatile__ unsigned char * auxio_power_register;
83
84#define AUXIO_POWER_DETECT_FAILURE 32
85#define AUXIO_POWER_CLEAR_FAILURE 2
86#define AUXIO_POWER_OFF 1
87
88
89#endif /* !(_SPARC_AUXIO_H) */
diff --git a/include/asm-sparc/auxio_64.h b/include/asm-sparc/auxio_64.h
new file mode 100644
index 000000000000..f61cd1e3e395
--- /dev/null
+++ b/include/asm-sparc/auxio_64.h
@@ -0,0 +1,100 @@
1/*
2 * auxio.h: Definitions and code for the Auxiliary I/O registers.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 *
6 * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
7 */
8#ifndef _SPARC64_AUXIO_H
9#define _SPARC64_AUXIO_H
10
11/* AUXIO implementations:
12 * sbus-based NCR89C105 "Slavio"
13 * LED/Floppy (AUX1) register
14 * Power (AUX2) register
15 *
16 * ebus-based auxio on PCIO
17 * LED Auxio Register
18 * Power Auxio Register
19 *
20 * Register definitions from NCR _NCR89C105 Chip Specification_
21 *
22 * SLAVIO AUX1 @ 0x1900000
23 * -------------------------------------------------
24 * | (R) | (R) | D | (R) | E | M | T | L |
25 * -------------------------------------------------
26 * (R) - bit 7:6,4 are reserved and should be masked in s/w
27 * D - Floppy Density Sense (1=high density) R/O
28 * E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
29 * M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
30 * T - Terminal Count: sends TC pulse to 82077 floppy controller
31 * L - System LED on front panel (0=off, 1=on)
32 */
33#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
34#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
35#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
36#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
37#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count, */
38#define AUXIO_AUX1_LED 0x01 /* System LED */
39
40/* SLAVIO AUX2 @ 0x1910000
41 * -------------------------------------------------
42 * | (R) | (R) | D | (R) | (R) | (R) | C | F |
43 * -------------------------------------------------
44 * (R) - bits 7:6,4:2 are reserved and should be masked in s/w
45 * D - Power Failure Detect (1=power fail)
46 * C - Clear Power Failure Detect Int (1=clear)
47 * F - Power Off (1=power off)
48 */
49#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
50#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
51#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
52#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
53
54/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
55 *
56 * PCIO LED Auxio @ 0x726000
57 * -------------------------------------------------
58 * | 31:1 Unused | LED |
59 * -------------------------------------------------
60 * Bits 31:1 unused
61 * LED - System LED on front panel (0=off, 1=on)
62 */
63#define AUXIO_PCIO_LED 0x01 /* System LED */
64
65/* PCIO Power Auxio @ 0x724000
66 * -------------------------------------------------
67 * | 31:2 Unused | CPO | SPO |
68 * -------------------------------------------------
69 * Bits 31:2 unused
70 * CPO - Courtesy Power Off (1=off)
71 * SPO - System Power Off (1=off)
72 */
73#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
74#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
75
76#ifndef __ASSEMBLY__
77
78extern void __iomem *auxio_register;
79
80#define AUXIO_LTE_ON 1
81#define AUXIO_LTE_OFF 0
82
83/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
84 *
85 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
86 */
87extern void auxio_set_lte(int on);
88
89#define AUXIO_LED_ON 1
90#define AUXIO_LED_OFF 0
91
92/* auxio_set_led - Set system front panel LED
93 *
94 * on - AUXIO_LED_ON or AUXIO_LED_OFF
95 */
96extern void auxio_set_led(int on);
97
98#endif /* ifndef __ASSEMBLY__ */
99
100#endif /* !(_SPARC64_AUXIO_H) */
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 68b98a7e6454..1a2949d0193f 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -1,111 +1,8 @@
-/*
- * bitops.h: Bit string operations on the Sparc.
- *
- * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright 2001 Anton Blanchard (anton@samba.org)
- */
+#ifndef ___ASM_SPARC_BITOPS_H
+#define ___ASM_SPARC_BITOPS_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/bitops_64.h>
+#else
+#include <asm-sparc/bitops_32.h>
+#endif
8
9#ifndef _SPARC_BITOPS_H
10#define _SPARC_BITOPS_H
11
12#include <linux/compiler.h>
13#include <asm/byteorder.h>
14
15#ifdef __KERNEL__
16
17#ifndef _LINUX_BITOPS_H
18#error only <linux/bitops.h> can be included directly
 #endif
20
21extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
22extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
23extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
24
25/*
26 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
27 * is in the highest of the four bytes and bit '31' is the high bit
28 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
29 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
30 */
31static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
32{
33 unsigned long *ADDR, mask;
34
35 ADDR = ((unsigned long *) addr) + (nr >> 5);
36 mask = 1 << (nr & 31);
37
38 return ___set_bit(ADDR, mask) != 0;
39}
40
41static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
42{
43 unsigned long *ADDR, mask;
44
45 ADDR = ((unsigned long *) addr) + (nr >> 5);
46 mask = 1 << (nr & 31);
47
48 (void) ___set_bit(ADDR, mask);
49}
50
51static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
52{
53 unsigned long *ADDR, mask;
54
55 ADDR = ((unsigned long *) addr) + (nr >> 5);
56 mask = 1 << (nr & 31);
57
58 return ___clear_bit(ADDR, mask) != 0;
59}
60
61static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
62{
63 unsigned long *ADDR, mask;
64
65 ADDR = ((unsigned long *) addr) + (nr >> 5);
66 mask = 1 << (nr & 31);
67
68 (void) ___clear_bit(ADDR, mask);
69}
70
71static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
72{
73 unsigned long *ADDR, mask;
74
75 ADDR = ((unsigned long *) addr) + (nr >> 5);
76 mask = 1 << (nr & 31);
77
78 return ___change_bit(ADDR, mask) != 0;
79}
80
81static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
82{
83 unsigned long *ADDR, mask;
84
85 ADDR = ((unsigned long *) addr) + (nr >> 5);
86 mask = 1 << (nr & 31);
87
88 (void) ___change_bit(ADDR, mask);
89}
90
91#include <asm-generic/bitops/non-atomic.h>
92
93#define smp_mb__before_clear_bit() do { } while(0)
94#define smp_mb__after_clear_bit() do { } while(0)
95
96#include <asm-generic/bitops/ffz.h>
97#include <asm-generic/bitops/__ffs.h>
98#include <asm-generic/bitops/sched.h>
99#include <asm-generic/bitops/ffs.h>
100#include <asm-generic/bitops/fls.h>
101#include <asm-generic/bitops/fls64.h>
102#include <asm-generic/bitops/hweight.h>
103#include <asm-generic/bitops/lock.h>
104#include <asm-generic/bitops/find.h>
105#include <asm-generic/bitops/ext2-non-atomic.h>
106#include <asm-generic/bitops/ext2-atomic.h>
107#include <asm-generic/bitops/minix.h>
108
109#endif /* __KERNEL__ */
110
111#endif /* defined(_SPARC_BITOPS_H) */
diff --git a/include/asm-sparc/bitops_32.h b/include/asm-sparc/bitops_32.h
new file mode 100644
index 000000000000..68b98a7e6454
--- /dev/null
+++ b/include/asm-sparc/bitops_32.h
@@ -0,0 +1,111 @@
1/*
2 * bitops.h: Bit string operations on the Sparc.
3 *
4 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright 2001 Anton Blanchard (anton@samba.org)
7 */
8
9#ifndef _SPARC_BITOPS_H
10#define _SPARC_BITOPS_H
11
12#include <linux/compiler.h>
13#include <asm/byteorder.h>
14
15#ifdef __KERNEL__
16
17#ifndef _LINUX_BITOPS_H
18#error only <linux/bitops.h> can be included directly
19#endif
20
21extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
22extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
23extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
24
25/*
26 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
27 * is in the highest of the four bytes and bit '31' is the high bit
28 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
29 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
30 */
31static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
32{
33 unsigned long *ADDR, mask;
34
35 ADDR = ((unsigned long *) addr) + (nr >> 5);
36 mask = 1 << (nr & 31);
37
38 return ___set_bit(ADDR, mask) != 0;
39}
40
41static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
42{
43 unsigned long *ADDR, mask;
44
45 ADDR = ((unsigned long *) addr) + (nr >> 5);
46 mask = 1 << (nr & 31);
47
48 (void) ___set_bit(ADDR, mask);
49}
50
51static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
52{
53 unsigned long *ADDR, mask;
54
55 ADDR = ((unsigned long *) addr) + (nr >> 5);
56 mask = 1 << (nr & 31);
57
58 return ___clear_bit(ADDR, mask) != 0;
59}
60
61static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
62{
63 unsigned long *ADDR, mask;
64
65 ADDR = ((unsigned long *) addr) + (nr >> 5);
66 mask = 1 << (nr & 31);
67
68 (void) ___clear_bit(ADDR, mask);
69}
70
71static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
72{
73 unsigned long *ADDR, mask;
74
75 ADDR = ((unsigned long *) addr) + (nr >> 5);
76 mask = 1 << (nr & 31);
77
78 return ___change_bit(ADDR, mask) != 0;
79}
80
81static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
82{
83 unsigned long *ADDR, mask;
84
85 ADDR = ((unsigned long *) addr) + (nr >> 5);
86 mask = 1 << (nr & 31);
87
88 (void) ___change_bit(ADDR, mask);
89}
90
91#include <asm-generic/bitops/non-atomic.h>
92
93#define smp_mb__before_clear_bit() do { } while(0)
94#define smp_mb__after_clear_bit() do { } while(0)
95
96#include <asm-generic/bitops/ffz.h>
97#include <asm-generic/bitops/__ffs.h>
98#include <asm-generic/bitops/sched.h>
99#include <asm-generic/bitops/ffs.h>
100#include <asm-generic/bitops/fls.h>
101#include <asm-generic/bitops/fls64.h>
102#include <asm-generic/bitops/hweight.h>
103#include <asm-generic/bitops/lock.h>
104#include <asm-generic/bitops/find.h>
105#include <asm-generic/bitops/ext2-non-atomic.h>
106#include <asm-generic/bitops/ext2-atomic.h>
107#include <asm-generic/bitops/minix.h>
108
109#endif /* __KERNEL__ */
110
111#endif /* defined(_SPARC_BITOPS_H) */
diff --git a/include/asm-sparc/bitops_64.h b/include/asm-sparc/bitops_64.h
new file mode 100644
index 000000000000..bb87b8080220
--- /dev/null
+++ b/include/asm-sparc/bitops_64.h
@@ -0,0 +1,107 @@
1/*
2 * bitops.h: Bit string operations on the V9.
3 *
4 * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC64_BITOPS_H
8#define _SPARC64_BITOPS_H
9
10#ifndef _LINUX_BITOPS_H
11#error only <linux/bitops.h> can be included directly
12#endif
13
14#include <linux/compiler.h>
15#include <asm/byteorder.h>
16
17extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
18extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
19extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
20extern void set_bit(unsigned long nr, volatile unsigned long *addr);
21extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
22extern void change_bit(unsigned long nr, volatile unsigned long *addr);
23
24#include <asm-generic/bitops/non-atomic.h>
25
26#ifdef CONFIG_SMP
27#define smp_mb__before_clear_bit() membar_storeload_loadload()
28#define smp_mb__after_clear_bit() membar_storeload_storestore()
29#else
30#define smp_mb__before_clear_bit() barrier()
31#define smp_mb__after_clear_bit() barrier()
32#endif
33
34#include <asm-generic/bitops/ffz.h>
35#include <asm-generic/bitops/__ffs.h>
36#include <asm-generic/bitops/fls.h>
37#include <asm-generic/bitops/__fls.h>
38#include <asm-generic/bitops/fls64.h>
39
40#ifdef __KERNEL__
41
42#include <asm-generic/bitops/sched.h>
43#include <asm-generic/bitops/ffs.h>
44
45/*
46 * hweightN: returns the hamming weight (i.e. the number
47 * of bits set) of a N-bit word
48 */
49
50#ifdef ULTRA_HAS_POPULATION_COUNT
51
52static inline unsigned int hweight64(unsigned long w)
53{
54 unsigned int res;
55
56 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
57 return res;
58}
59
60static inline unsigned int hweight32(unsigned int w)
61{
62 unsigned int res;
63
64 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
65 return res;
66}
67
68static inline unsigned int hweight16(unsigned int w)
69{
70 unsigned int res;
71
72 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
73 return res;
74}
75
76static inline unsigned int hweight8(unsigned int w)
77{
78 unsigned int res;
79
80 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
81 return res;
82}
83
84#else
85
86#include <asm-generic/bitops/hweight.h>
87
88#endif
89#include <asm-generic/bitops/lock.h>
90#endif /* __KERNEL__ */
91
92#include <asm-generic/bitops/find.h>
93
94#ifdef __KERNEL__
95
96#include <asm-generic/bitops/ext2-non-atomic.h>
97
98#define ext2_set_bit_atomic(lock,nr,addr) \
99 test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
100#define ext2_clear_bit_atomic(lock,nr,addr) \
101 test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
102
103#include <asm-generic/bitops/minix.h>
104
105#endif /* __KERNEL__ */
106
107#endif /* defined(_SPARC64_BITOPS_H) */
diff --git a/include/asm-sparc/cacheflush.h b/include/asm-sparc/cacheflush.h
index 68ac10910271..2b6a37957c2d 100644
--- a/include/asm-sparc/cacheflush.h
+++ b/include/asm-sparc/cacheflush.h
@@ -1,85 +1,8 @@
-#ifndef _SPARC_CACHEFLUSH_H
-#define _SPARC_CACHEFLUSH_H
-
-#include <linux/mm.h>		/* Common for other includes */
-// #include <linux/kernel.h> from pgalloc.h
-// #include <linux/sched.h> from pgalloc.h
-
-// #include <asm/page.h>
+#ifndef ___ASM_SPARC_CACHEFLUSH_H
+#define ___ASM_SPARC_CACHEFLUSH_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/cacheflush_64.h>
+#else
+#include <asm-sparc/cacheflush_32.h>
+#endif
+#endif
9#include <asm/btfixup.h>
10
11/*
12 * Fine grained cache flushing.
13 */
14#ifdef CONFIG_SMP
15
16BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20
21#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25
26BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28
29#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31
32extern void smp_flush_cache_all(void);
33extern void smp_flush_cache_mm(struct mm_struct *mm);
34extern void smp_flush_cache_range(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end);
37extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38
39extern void smp_flush_page_to_ram(unsigned long page);
40extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41
42#endif /* CONFIG_SMP */
43
44BTFIXUPDEF_CALL(void, flush_cache_all, void)
45BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
54#define flush_icache_range(start, end) do { } while (0)
55#define flush_icache_page(vma, pg) do { } while (0)
56
57#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
58
59#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
60 do { \
61 flush_cache_page(vma, vaddr, page_to_pfn(page));\
62 memcpy(dst, src, len); \
63 } while (0)
64#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
65 do { \
66 flush_cache_page(vma, vaddr, page_to_pfn(page));\
67 memcpy(dst, src, len); \
68 } while (0)
69
70BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
71BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
72
73#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
74#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
75
76extern void sparc_flush_page_to_ram(struct page *page);
77
78#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
79#define flush_dcache_mmap_lock(mapping) do { } while (0)
80#define flush_dcache_mmap_unlock(mapping) do { } while (0)
81
82#define flush_cache_vmap(start, end) flush_cache_all()
83#define flush_cache_vunmap(start, end) flush_cache_all()
84
85#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/include/asm-sparc/cacheflush_32.h b/include/asm-sparc/cacheflush_32.h
new file mode 100644
index 000000000000..68ac10910271
--- /dev/null
+++ b/include/asm-sparc/cacheflush_32.h
@@ -0,0 +1,85 @@
1#ifndef _SPARC_CACHEFLUSH_H
2#define _SPARC_CACHEFLUSH_H
3
4#include <linux/mm.h> /* Common for other includes */
5// #include <linux/kernel.h> from pgalloc.h
6// #include <linux/sched.h> from pgalloc.h
7
8// #include <asm/page.h>
9#include <asm/btfixup.h>
10
11/*
12 * Fine grained cache flushing.
13 */
14#ifdef CONFIG_SMP
15
16BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20
21#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25
26BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28
29#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31
32extern void smp_flush_cache_all(void);
33extern void smp_flush_cache_mm(struct mm_struct *mm);
34extern void smp_flush_cache_range(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end);
37extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38
39extern void smp_flush_page_to_ram(unsigned long page);
40extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41
42#endif /* CONFIG_SMP */
43
44BTFIXUPDEF_CALL(void, flush_cache_all, void)
45BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
54#define flush_icache_range(start, end) do { } while (0)
55#define flush_icache_page(vma, pg) do { } while (0)
56
57#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
58
59#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
60 do { \
61 flush_cache_page(vma, vaddr, page_to_pfn(page));\
62 memcpy(dst, src, len); \
63 } while (0)
64#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
65 do { \
66 flush_cache_page(vma, vaddr, page_to_pfn(page));\
67 memcpy(dst, src, len); \
68 } while (0)
69
70BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
71BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
72
73#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
74#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
75
76extern void sparc_flush_page_to_ram(struct page *page);
77
78#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
79#define flush_dcache_mmap_lock(mapping) do { } while (0)
80#define flush_dcache_mmap_unlock(mapping) do { } while (0)
81
82#define flush_cache_vmap(start, end) flush_cache_all()
83#define flush_cache_vunmap(start, end) flush_cache_all()
84
85#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/include/asm-sparc/cacheflush_64.h b/include/asm-sparc/cacheflush_64.h
new file mode 100644
index 000000000000..c43321729b3b
--- /dev/null
+++ b/include/asm-sparc/cacheflush_64.h
@@ -0,0 +1,76 @@
1#ifndef _SPARC64_CACHEFLUSH_H
2#define _SPARC64_CACHEFLUSH_H
3
4#include <asm/page.h>
5
6#ifndef __ASSEMBLY__
7
8#include <linux/mm.h>
9
10/* Cache flush operations. */
11
12/* These are the same regardless of whether this is an SMP kernel or not. */
13#define flush_cache_mm(__mm) \
14 do { if ((__mm) == current->mm) flushw_user(); } while(0)
15#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
16#define flush_cache_range(vma, start, end) \
17 flush_cache_mm((vma)->vm_mm)
18#define flush_cache_page(vma, page, pfn) \
19 flush_cache_mm((vma)->vm_mm)
20
21/*
22 * On spitfire, the icache doesn't snoop local stores and we don't
23 * use block commit stores (which invalidate icache lines) during
24 * module load, so we need this.
25 */
26extern void flush_icache_range(unsigned long start, unsigned long end);
27extern void __flush_icache_page(unsigned long);
28
29extern void __flush_dcache_page(void *addr, int flush_icache);
30extern void flush_dcache_page_impl(struct page *page);
31#ifdef CONFIG_SMP
32extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
33extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
34#else
35#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
36#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
37#endif
38
39extern void __flush_dcache_range(unsigned long start, unsigned long end);
40extern void flush_dcache_page(struct page *page);
41
42#define flush_icache_page(vma, pg) do { } while(0)
43#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
44
45extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
46 unsigned long uaddr, void *kaddr,
47 unsigned long len, int write);
48
49#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
50 do { \
51 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
52 memcpy(dst, src, len); \
53 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
54 } while (0)
55
56#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
57 do { \
58 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
59 memcpy(dst, src, len); \
60 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
61 } while (0)
62
63#define flush_dcache_mmap_lock(mapping) do { } while (0)
64#define flush_dcache_mmap_unlock(mapping) do { } while (0)
65
66#define flush_cache_vmap(start, end) do { } while (0)
67#define flush_cache_vunmap(start, end) do { } while (0)
68
69#ifdef CONFIG_DEBUG_PAGEALLOC
70/* internal debugging function */
71void kernel_map_pages(struct page *page, int numpages, int enable);
72#endif
73
74#endif /* !__ASSEMBLY__ */
75
76#endif /* _SPARC64_CACHEFLUSH_H */
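
The spitfire comment above is why flush_icache_range() is an out-of-line function here while flush_icache_page() is a no-op. A minimal sketch of the pattern it serves (hypothetical helper; module loading and code patching do the equivalent):

/* Sketch only: after writing instructions through the D-cache, bring the
 * I-cache back in sync before the new code can be executed.
 */
static void install_code(void *dst, const void *insns, unsigned long len)
{
	memcpy(dst, insns, len);			/* new instructions land in the D-cache */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);	/* make the I-cache see them */
}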
diff --git a/include/asm-sparc/checksum.h b/include/asm-sparc/checksum.h
index d044ddb5a3cf..4e3553d4f6e1 100644
--- a/include/asm-sparc/checksum.h
+++ b/include/asm-sparc/checksum.h
@@ -1,241 +1,8 @@
1#ifndef ___ASM_SPARC_CHECKSUM_H
2#define ___ASM_SPARC_CHECKSUM_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/checksum_64.h>
5#else
6#include <asm-sparc/checksum_32.h>
7#endif
8#endif
1#ifndef __SPARC_CHECKSUM_H
2#define __SPARC_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void *buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from fs:src while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40
41extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
42
43static inline __wsum
44csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
45{
46 register unsigned int ret asm("o0") = (unsigned int)src;
47 register char *d asm("o1") = dst;
48 register int l asm("g1") = len;
49
50 __asm__ __volatile__ (
51 "call __csum_partial_copy_sparc_generic\n\t"
52 " mov %6, %%g7\n"
53 : "=&r" (ret), "=&r" (d), "=&r" (l)
54 : "0" (ret), "1" (d), "2" (l), "r" (sum)
55 : "o2", "o3", "o4", "o5", "o7",
56 "g2", "g3", "g4", "g5", "g7",
57 "memory", "cc");
58 return (__force __wsum)ret;
59}
60
61static inline __wsum
62csum_partial_copy_from_user(const void __user *src, void *dst, int len,
63 __wsum sum, int *err)
64 {
65 register unsigned long ret asm("o0") = (unsigned long)src;
66 register char *d asm("o1") = dst;
67 register int l asm("g1") = len;
68 register __wsum s asm("g7") = sum;
69
70 __asm__ __volatile__ (
71 ".section __ex_table,#alloc\n\t"
72 ".align 4\n\t"
73 ".word 1f,2\n\t"
74 ".previous\n"
75 "1:\n\t"
76 "call __csum_partial_copy_sparc_generic\n\t"
77 " st %8, [%%sp + 64]\n"
78 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
79 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
80 : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
81 "cc", "memory");
82 return (__force __wsum)ret;
83}
84
85static inline __wsum
86csum_partial_copy_to_user(const void *src, void __user *dst, int len,
87 __wsum sum, int *err)
88{
89 if (!access_ok (VERIFY_WRITE, dst, len)) {
90 *err = -EFAULT;
91 return sum;
92 } else {
93 register unsigned long ret asm("o0") = (unsigned long)src;
94 register char __user *d asm("o1") = dst;
95 register int l asm("g1") = len;
96 register __wsum s asm("g7") = sum;
97
98 __asm__ __volatile__ (
99 ".section __ex_table,#alloc\n\t"
100 ".align 4\n\t"
101 ".word 1f,1\n\t"
102 ".previous\n"
103 "1:\n\t"
104 "call __csum_partial_copy_sparc_generic\n\t"
105 " st %8, [%%sp + 64]\n"
106 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
107 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
108 : "o2", "o3", "o4", "o5", "o7",
109 "g2", "g3", "g4", "g5",
110 "cc", "memory");
111 return (__force __wsum)ret;
112 }
113}
114
115#define HAVE_CSUM_COPY_USER
116#define csum_and_copy_to_user csum_partial_copy_to_user
117
118/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
119 * the majority of the time.
120 */
121static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
122{
123 __sum16 sum;
124
125 /* Note: We must read %2 before we touch %0 for the first time,
126 * because GCC can legitimately use the same register for
127 * both operands.
128 */
129 __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
130 "ld\t[%1 + 0x00], %0\n\t"
131 "ld\t[%1 + 0x04], %%g2\n\t"
132 "ld\t[%1 + 0x08], %%g3\n\t"
133 "addcc\t%%g2, %0, %0\n\t"
134 "addxcc\t%%g3, %0, %0\n\t"
135 "ld\t[%1 + 0x0c], %%g2\n\t"
136 "ld\t[%1 + 0x10], %%g3\n\t"
137 "addxcc\t%%g2, %0, %0\n\t"
138 "addx\t%0, %%g0, %0\n"
139 "1:\taddcc\t%%g3, %0, %0\n\t"
140 "add\t%1, 4, %1\n\t"
141 "addxcc\t%0, %%g0, %0\n\t"
142 "subcc\t%%g4, 1, %%g4\n\t"
143 "be,a\t2f\n\t"
144 "sll\t%0, 16, %%g2\n\t"
145 "b\t1b\n\t"
146 "ld\t[%1 + 0x10], %%g3\n"
147 "2:\taddcc\t%0, %%g2, %%g2\n\t"
148 "srl\t%%g2, 16, %0\n\t"
149 "addx\t%0, %%g0, %0\n\t"
150 "xnor\t%%g0, %0, %0"
151 : "=r" (sum), "=&r" (iph)
152 : "r" (ihl), "1" (iph)
153 : "g2", "g3", "g4", "cc", "memory");
154 return sum;
155}
156
157/* Fold a partial checksum without adding pseudo headers. */
158static inline __sum16 csum_fold(__wsum sum)
159{
160 unsigned int tmp;
161
162 __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
163 "srl\t%1, 16, %1\n\t"
164 "addx\t%1, %%g0, %1\n\t"
165 "xnor\t%%g0, %1, %0"
166 : "=&r" (sum), "=r" (tmp)
167 : "0" (sum), "1" ((__force u32)sum<<16)
168 : "cc");
169 return (__force __sum16)sum;
170}
171
172static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
173 unsigned short len,
174 unsigned short proto,
175 __wsum sum)
176{
177 __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
178 "addxcc\t%2, %0, %0\n\t"
179 "addxcc\t%3, %0, %0\n\t"
180 "addx\t%0, %%g0, %0\n\t"
181 : "=r" (sum), "=r" (saddr)
182 : "r" (daddr), "r" (proto + len), "0" (sum),
183 "1" (saddr)
184 : "cc");
185 return sum;
186}
187
188/*
189 * computes the checksum of the TCP/UDP pseudo-header
190 * returns a 16-bit checksum, already complemented
191 */
192static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
193 unsigned short len,
194 unsigned short proto,
195 __wsum sum)
196{
197 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
198}
199
200#define _HAVE_ARCH_IPV6_CSUM
201
202static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
203 const struct in6_addr *daddr,
204 __u32 len, unsigned short proto,
205 __wsum sum)
206{
207 __asm__ __volatile__ (
208 "addcc %3, %4, %%g4\n\t"
209 "addxcc %5, %%g4, %%g4\n\t"
210 "ld [%2 + 0x0c], %%g2\n\t"
211 "ld [%2 + 0x08], %%g3\n\t"
212 "addxcc %%g2, %%g4, %%g4\n\t"
213 "ld [%2 + 0x04], %%g2\n\t"
214 "addxcc %%g3, %%g4, %%g4\n\t"
215 "ld [%2 + 0x00], %%g3\n\t"
216 "addxcc %%g2, %%g4, %%g4\n\t"
217 "ld [%1 + 0x0c], %%g2\n\t"
218 "addxcc %%g3, %%g4, %%g4\n\t"
219 "ld [%1 + 0x08], %%g3\n\t"
220 "addxcc %%g2, %%g4, %%g4\n\t"
221 "ld [%1 + 0x04], %%g2\n\t"
222 "addxcc %%g3, %%g4, %%g4\n\t"
223 "ld [%1 + 0x00], %%g3\n\t"
224 "addxcc %%g2, %%g4, %%g4\n\t"
225 "addxcc %%g3, %%g4, %0\n\t"
226 "addx 0, %0, %0\n"
227 : "=&r" (sum)
228 : "r" (saddr), "r" (daddr),
229 "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
230 : "g2", "g3", "g4", "cc");
231
232 return csum_fold(sum);
233}
234
235/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
236static inline __sum16 ip_compute_csum(const void *buff, int len)
237{
238 return csum_fold(csum_partial(buff, len, 0));
239}
240
241#endif /* !(__SPARC_CHECKSUM_H) */
diff --git a/include/asm-sparc/checksum_32.h b/include/asm-sparc/checksum_32.h
new file mode 100644
index 000000000000..bdbda1453aa9
--- /dev/null
+++ b/include/asm-sparc/checksum_32.h
@@ -0,0 +1,241 @@
1#ifndef __SPARC_CHECKSUM_H
2#define __SPARC_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void *buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from fs:src while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40
41extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
42
43static inline __wsum
44csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
45{
46 register unsigned int ret asm("o0") = (unsigned int)src;
47 register char *d asm("o1") = dst;
48 register int l asm("g1") = len;
49
50 __asm__ __volatile__ (
51 "call __csum_partial_copy_sparc_generic\n\t"
52 " mov %6, %%g7\n"
53 : "=&r" (ret), "=&r" (d), "=&r" (l)
54 : "0" (ret), "1" (d), "2" (l), "r" (sum)
55 : "o2", "o3", "o4", "o5", "o7",
56 "g2", "g3", "g4", "g5", "g7",
57 "memory", "cc");
58 return (__force __wsum)ret;
59}
60
61static inline __wsum
62csum_partial_copy_from_user(const void __user *src, void *dst, int len,
63 __wsum sum, int *err)
64 {
65 register unsigned long ret asm("o0") = (unsigned long)src;
66 register char *d asm("o1") = dst;
67 register int l asm("g1") = len;
68 register __wsum s asm("g7") = sum;
69
70 __asm__ __volatile__ (
71 ".section __ex_table,#alloc\n\t"
72 ".align 4\n\t"
73 ".word 1f,2\n\t"
74 ".previous\n"
75 "1:\n\t"
76 "call __csum_partial_copy_sparc_generic\n\t"
77 " st %8, [%%sp + 64]\n"
78 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
79 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
80 : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
81 "cc", "memory");
82 return (__force __wsum)ret;
83}
84
85static inline __wsum
86csum_partial_copy_to_user(const void *src, void __user *dst, int len,
87 __wsum sum, int *err)
88{
89 if (!access_ok (VERIFY_WRITE, dst, len)) {
90 *err = -EFAULT;
91 return sum;
92 } else {
93 register unsigned long ret asm("o0") = (unsigned long)src;
94 register char __user *d asm("o1") = dst;
95 register int l asm("g1") = len;
96 register __wsum s asm("g7") = sum;
97
98 __asm__ __volatile__ (
99 ".section __ex_table,#alloc\n\t"
100 ".align 4\n\t"
101 ".word 1f,1\n\t"
102 ".previous\n"
103 "1:\n\t"
104 "call __csum_partial_copy_sparc_generic\n\t"
105 " st %8, [%%sp + 64]\n"
106 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
107 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
108 : "o2", "o3", "o4", "o5", "o7",
109 "g2", "g3", "g4", "g5",
110 "cc", "memory");
111 return (__force __wsum)ret;
112 }
113}
114
115#define HAVE_CSUM_COPY_USER
116#define csum_and_copy_to_user csum_partial_copy_to_user
117
118/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
119 * the majority of the time.
120 */
121static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
122{
123 __sum16 sum;
124
125 /* Note: We must read %2 before we touch %0 for the first time,
126 * because GCC can legitimately use the same register for
127 * both operands.
128 */
129 __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
130 "ld\t[%1 + 0x00], %0\n\t"
131 "ld\t[%1 + 0x04], %%g2\n\t"
132 "ld\t[%1 + 0x08], %%g3\n\t"
133 "addcc\t%%g2, %0, %0\n\t"
134 "addxcc\t%%g3, %0, %0\n\t"
135 "ld\t[%1 + 0x0c], %%g2\n\t"
136 "ld\t[%1 + 0x10], %%g3\n\t"
137 "addxcc\t%%g2, %0, %0\n\t"
138 "addx\t%0, %%g0, %0\n"
139 "1:\taddcc\t%%g3, %0, %0\n\t"
140 "add\t%1, 4, %1\n\t"
141 "addxcc\t%0, %%g0, %0\n\t"
142 "subcc\t%%g4, 1, %%g4\n\t"
143 "be,a\t2f\n\t"
144 "sll\t%0, 16, %%g2\n\t"
145 "b\t1b\n\t"
146 "ld\t[%1 + 0x10], %%g3\n"
147 "2:\taddcc\t%0, %%g2, %%g2\n\t"
148 "srl\t%%g2, 16, %0\n\t"
149 "addx\t%0, %%g0, %0\n\t"
150 "xnor\t%%g0, %0, %0"
151 : "=r" (sum), "=&r" (iph)
152 : "r" (ihl), "1" (iph)
153 : "g2", "g3", "g4", "cc", "memory");
154 return sum;
155}
156
157/* Fold a partial checksum without adding pseudo headers. */
158static inline __sum16 csum_fold(__wsum sum)
159{
160 unsigned int tmp;
161
162 __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
163 "srl\t%1, 16, %1\n\t"
164 "addx\t%1, %%g0, %1\n\t"
165 "xnor\t%%g0, %1, %0"
166 : "=&r" (sum), "=r" (tmp)
167 : "0" (sum), "1" ((__force u32)sum<<16)
168 : "cc");
169 return (__force __sum16)sum;
170}
171
172static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
173 unsigned short len,
174 unsigned short proto,
175 __wsum sum)
176{
177 __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
178 "addxcc\t%2, %0, %0\n\t"
179 "addxcc\t%3, %0, %0\n\t"
180 "addx\t%0, %%g0, %0\n\t"
181 : "=r" (sum), "=r" (saddr)
182 : "r" (daddr), "r" (proto + len), "0" (sum),
183 "1" (saddr)
184 : "cc");
185 return sum;
186}
187
188/*
189 * computes the checksum of the TCP/UDP pseudo-header
190 * returns a 16-bit checksum, already complemented
191 */
192static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
193 unsigned short len,
194 unsigned short proto,
195 __wsum sum)
196{
197 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
198}
199
200#define _HAVE_ARCH_IPV6_CSUM
201
202static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
203 const struct in6_addr *daddr,
204 __u32 len, unsigned short proto,
205 __wsum sum)
206{
207 __asm__ __volatile__ (
208 "addcc %3, %4, %%g4\n\t"
209 "addxcc %5, %%g4, %%g4\n\t"
210 "ld [%2 + 0x0c], %%g2\n\t"
211 "ld [%2 + 0x08], %%g3\n\t"
212 "addxcc %%g2, %%g4, %%g4\n\t"
213 "ld [%2 + 0x04], %%g2\n\t"
214 "addxcc %%g3, %%g4, %%g4\n\t"
215 "ld [%2 + 0x00], %%g3\n\t"
216 "addxcc %%g2, %%g4, %%g4\n\t"
217 "ld [%1 + 0x0c], %%g2\n\t"
218 "addxcc %%g3, %%g4, %%g4\n\t"
219 "ld [%1 + 0x08], %%g3\n\t"
220 "addxcc %%g2, %%g4, %%g4\n\t"
221 "ld [%1 + 0x04], %%g2\n\t"
222 "addxcc %%g3, %%g4, %%g4\n\t"
223 "ld [%1 + 0x00], %%g3\n\t"
224 "addxcc %%g2, %%g4, %%g4\n\t"
225 "addxcc %%g3, %%g4, %0\n\t"
226 "addx 0, %0, %0\n"
227 : "=&r" (sum)
228 : "r" (saddr), "r" (daddr),
229 "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
230 : "g2", "g3", "g4", "cc");
231
232 return csum_fold(sum);
233}
234
235/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
236static inline __sum16 ip_compute_csum(const void *buff, int len)
237{
238 return csum_fold(csum_partial(buff, len, 0));
239}
240
241#endif /* !(__SPARC_CHECKSUM_H) */
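
The inline assembly above is dense; for reference, the arithmetic it implements is the RFC 1071 Internet checksum. A portable sketch in plain C (not part of the header) of the big-endian 16-bit one's-complement sum and of the fold performed by csum_fold(); ip_compute_csum(buf, len) is then fold16(csum_partial_ref(buf, len, 0)):

#include <stdint.h>

/* Fold a 32-bit accumulator into 16 bits with end-around carry and
 * complement it -- the portable counterpart of csum_fold() above.
 */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);	/* second pass absorbs the carry */
	return (uint16_t)~sum;
}

/* 16-bit one's-complement sum over a buffer, taking 16-bit words in
 * big-endian order as sparc does; the add-with-carry sequences in the
 * assembly above compute the same value (up to folding).
 */
static uint32_t csum_partial_ref(const uint8_t *buf, int len, uint32_t sum)
{
	uint64_t acc = sum;

	while (len > 1) {
		acc += (uint32_t)(buf[0] << 8 | buf[1]);	/* one big-endian word */
		buf += 2;
		len -= 2;
	}
	if (len)					/* odd trailing byte, zero-padded */
		acc += (uint32_t)(buf[0] << 8);
	while (acc >> 32)				/* end-around carry */
		acc = (acc & 0xffffffffULL) + (acc >> 32);
	return (uint32_t)acc;
}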
diff --git a/include/asm-sparc/checksum_64.h b/include/asm-sparc/checksum_64.h
new file mode 100644
index 000000000000..019b9615e43c
--- /dev/null
+++ b/include/asm-sparc/checksum_64.h
@@ -0,0 +1,167 @@
1#ifndef __SPARC64_CHECKSUM_H
2#define __SPARC64_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the V9.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void * buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from user space while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
41 int len, __wsum sum);
42
43extern long __csum_partial_copy_from_user(const void __user *src,
44 void *dst, int len,
45 __wsum sum);
46
47static inline __wsum
48csum_partial_copy_from_user(const void __user *src,
49 void *dst, int len,
50 __wsum sum, int *err)
51{
52 long ret = __csum_partial_copy_from_user(src, dst, len, sum);
53 if (ret < 0)
54 *err = -EFAULT;
55 return (__force __wsum) ret;
56}
57
58/*
59 * Copy and checksum to user
60 */
61#define HAVE_CSUM_COPY_USER
62extern long __csum_partial_copy_to_user(const void *src,
63 void __user *dst, int len,
64 __wsum sum);
65
66static inline __wsum
67csum_and_copy_to_user(const void *src,
68 void __user *dst, int len,
69 __wsum sum, int *err)
70{
71 long ret = __csum_partial_copy_to_user(src, dst, len, sum);
72 if (ret < 0)
73 *err = -EFAULT;
74 return (__force __wsum) ret;
75}
76
77/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
78 * the majority of the time.
79 */
80extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
81
82/* Fold a partial checksum without adding pseudo headers. */
83static inline __sum16 csum_fold(__wsum sum)
84{
85 unsigned int tmp;
86
87 __asm__ __volatile__(
88" addcc %0, %1, %1\n"
89" srl %1, 16, %1\n"
90" addc %1, %%g0, %1\n"
91" xnor %%g0, %1, %0\n"
92 : "=&r" (sum), "=r" (tmp)
93 : "0" (sum), "1" ((__force u32)sum<<16)
94 : "cc");
95 return (__force __sum16)sum;
96}
97
98static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
99 unsigned int len,
100 unsigned short proto,
101 __wsum sum)
102{
103 __asm__ __volatile__(
104" addcc %1, %0, %0\n"
105" addccc %2, %0, %0\n"
106" addccc %3, %0, %0\n"
107" addc %0, %%g0, %0\n"
108 : "=r" (sum), "=r" (saddr)
109 : "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
110 : "cc");
111 return sum;
112}
113
114/*
115 * computes the checksum of the TCP/UDP pseudo-header
116 * returns a 16-bit checksum, already complemented
117 */
118static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
119 unsigned short len,
120 unsigned short proto,
121 __wsum sum)
122{
123 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
124}
125
126#define _HAVE_ARCH_IPV6_CSUM
127
128static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
129 const struct in6_addr *daddr,
130 __u32 len, unsigned short proto,
131 __wsum sum)
132{
133 __asm__ __volatile__ (
134" addcc %3, %4, %%g7\n"
135" addccc %5, %%g7, %%g7\n"
136" lduw [%2 + 0x0c], %%g2\n"
137" lduw [%2 + 0x08], %%g3\n"
138" addccc %%g2, %%g7, %%g7\n"
139" lduw [%2 + 0x04], %%g2\n"
140" addccc %%g3, %%g7, %%g7\n"
141" lduw [%2 + 0x00], %%g3\n"
142" addccc %%g2, %%g7, %%g7\n"
143" lduw [%1 + 0x0c], %%g2\n"
144" addccc %%g3, %%g7, %%g7\n"
145" lduw [%1 + 0x08], %%g3\n"
146" addccc %%g2, %%g7, %%g7\n"
147" lduw [%1 + 0x04], %%g2\n"
148" addccc %%g3, %%g7, %%g7\n"
149" lduw [%1 + 0x00], %%g3\n"
150" addccc %%g2, %%g7, %%g7\n"
151" addccc %%g3, %%g7, %0\n"
152" addc 0, %0, %0\n"
153 : "=&r" (sum)
154 : "r" (saddr), "r" (daddr), "r"(htonl(len)),
155 "r"(htonl(proto)), "r"(sum)
156 : "g2", "g3", "g7", "cc");
157
158 return csum_fold(sum);
159}
160
161/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
162static inline __sum16 ip_compute_csum(const void *buff, int len)
163{
164 return csum_fold(csum_partial(buff, len, 0));
165}
166
167#endif /* !(__SPARC64_CHECKSUM_H) */
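
csum_tcpudp_magic() above folds the TCP/UDP pseudo-header into the running sum. A portable model for the big-endian case (sketch only, not the kernel routine; fold16() is the folding helper from the 32-bit checksum sketch earlier):

#include <stdint.h>

static uint16_t tcpudp_magic_ref(uint32_t saddr, uint32_t daddr,
				 uint16_t len, uint8_t proto, uint32_t sum)
{
	uint64_t acc = sum;

	acc += saddr;				/* source address */
	acc += daddr;				/* destination address */
	acc += (uint32_t)proto + len;		/* protocol and length share one word (big-endian) */
	while (acc >> 32)			/* end-around carry, like the addccc/addc chain */
		acc = (acc & 0xffffffffULL) + (acc >> 32);
	return fold16((uint32_t)acc);		/* fold to 16 bits and complement */
}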
diff --git a/include/asm-sparc/cpudata.h b/include/asm-sparc/cpudata.h
index a2c4d51d36c4..b76fac0c8d8f 100644
--- a/include/asm-sparc/cpudata.h
+++ b/include/asm-sparc/cpudata.h
@@ -1,27 +1,8 @@
1#ifndef ___ASM_SPARC_CPUDATA_H
2#define ___ASM_SPARC_CPUDATA_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/cpudata_64.h>
5#else
6#include <asm-sparc/cpudata_32.h>
7#endif
8#endif
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
4 *
5 * Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
6 * both (C) David S. Miller.
7 */
8
9#ifndef _SPARC_CPUDATA_H
10#define _SPARC_CPUDATA_H
11
12#include <linux/percpu.h>
13
14typedef struct {
15 unsigned long udelay_val;
16 unsigned long clock_tick;
17 unsigned int multiplier;
18 unsigned int counter;
19 int prom_node;
20 int mid;
21 int next;
22} cpuinfo_sparc;
23
24DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
25#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
26
27#endif /* _SPARC_CPUDATA_H */
diff --git a/include/asm-sparc/cpudata_32.h b/include/asm-sparc/cpudata_32.h
new file mode 100644
index 000000000000..a2c4d51d36c4
--- /dev/null
+++ b/include/asm-sparc/cpudata_32.h
@@ -0,0 +1,27 @@
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
4 *
5 * Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
6 * both (C) David S. Miller.
7 */
8
9#ifndef _SPARC_CPUDATA_H
10#define _SPARC_CPUDATA_H
11
12#include <linux/percpu.h>
13
14typedef struct {
15 unsigned long udelay_val;
16 unsigned long clock_tick;
17 unsigned int multiplier;
18 unsigned int counter;
19 int prom_node;
20 int mid;
21 int next;
22} cpuinfo_sparc;
23
24DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
25#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
26
27#endif /* _SPARC_CPUDATA_H */
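
cpu_data() yields the cpuinfo_sparc slot of a given processor. A hypothetical consumer (not part of this patch) would read it like this:

static void report_cpu(int cpu)
{
	cpuinfo_sparc *c = &cpu_data(cpu);	/* per_cpu(__cpu_data, cpu) */

	printk(KERN_INFO "CPU%d: clock_tick %lu, udelay_val %lu, prom node %d\n",
	       cpu, c->clock_tick, c->udelay_val, c->prom_node);
}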
diff --git a/include/asm-sparc/cpudata_64.h b/include/asm-sparc/cpudata_64.h
new file mode 100644
index 000000000000..532975ecfe10
--- /dev/null
+++ b/include/asm-sparc/cpudata_64.h
@@ -0,0 +1,240 @@
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_CPUDATA_H
7#define _SPARC64_CPUDATA_H
8
9#include <asm/hypervisor.h>
10#include <asm/asi.h>
11
12#ifndef __ASSEMBLY__
13
14#include <linux/percpu.h>
15#include <linux/threads.h>
16
17typedef struct {
18 /* Dcache line 1 */
19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
20 unsigned int __pad0;
21 unsigned long clock_tick; /* %tick's per second */
22 unsigned long __pad;
23 unsigned int __pad1;
24 unsigned int __pad2;
25
26 /* Dcache line 2, rarely used */
27 unsigned int dcache_size;
28 unsigned int dcache_line_size;
29 unsigned int icache_size;
30 unsigned int icache_line_size;
31 unsigned int ecache_size;
32 unsigned int ecache_line_size;
33 int core_id;
34 int proc_id;
35} cpuinfo_sparc;
36
37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
39#define local_cpu_data() __get_cpu_var(__cpu_data)
40
41/* Trap handling code needs to get at a few critical values upon
42 * trap entry and to process TSB misses. These cannot be in the
43 * per_cpu() area as we really need to lock them into the TLB and
44 * thus make them part of the main kernel image. As a result we
45 * try to make this as small as possible.
46 *
47 * This is padded out and aligned to 64-bytes to avoid false sharing
48 * on SMP.
49 */
50
51/* If you modify the size of this structure, please update
52 * TRAP_BLOCK_SZ_SHIFT below.
53 */
54struct thread_info;
55struct trap_per_cpu {
56/* D-cache line 1: Basic thread information, cpu and device mondo queues */
57 struct thread_info *thread;
58 unsigned long pgd_paddr;
59 unsigned long cpu_mondo_pa;
60 unsigned long dev_mondo_pa;
61
62/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
63 unsigned long resum_mondo_pa;
64 unsigned long resum_kernel_buf_pa;
65 unsigned long nonresum_mondo_pa;
66 unsigned long nonresum_kernel_buf_pa;
67
68/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
69 struct hv_fault_status fault_info;
70
71/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
72 unsigned long cpu_mondo_block_pa;
73 unsigned long cpu_list_pa;
74 unsigned long tsb_huge;
75 unsigned long tsb_huge_temp;
76
77/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
78 unsigned long irq_worklist_pa;
79 unsigned int cpu_mondo_qmask;
80 unsigned int dev_mondo_qmask;
81 unsigned int resum_qmask;
82 unsigned int nonresum_qmask;
83 void *hdesc;
84} __attribute__((aligned(64)));
85extern struct trap_per_cpu trap_block[NR_CPUS];
86extern void init_cur_cpu_trap(struct thread_info *);
87extern void setup_tba(void);
88extern int ncpus_probed;
89extern void __init cpu_probe(void);
90extern const struct seq_operations cpuinfo_op;
91
92extern unsigned long real_hard_smp_processor_id(void);
93
94struct cpuid_patch_entry {
95 unsigned int addr;
96 unsigned int cheetah_safari[4];
97 unsigned int cheetah_jbus[4];
98 unsigned int starfire[4];
99 unsigned int sun4v[4];
100};
101extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
102
103struct sun4v_1insn_patch_entry {
104 unsigned int addr;
105 unsigned int insn;
106};
107extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
108 __sun4v_1insn_patch_end;
109
110struct sun4v_2insn_patch_entry {
111 unsigned int addr;
112 unsigned int insns[2];
113};
114extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
115 __sun4v_2insn_patch_end;
116
117#endif /* !(__ASSEMBLY__) */
118
119#define TRAP_PER_CPU_THREAD 0x00
120#define TRAP_PER_CPU_PGD_PADDR 0x08
121#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
122#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
123#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
124#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
125#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
126#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
127#define TRAP_PER_CPU_FAULT_INFO 0x40
128#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
129#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
130#define TRAP_PER_CPU_TSB_HUGE 0xd0
131#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
132#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
133#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
134#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
135#define TRAP_PER_CPU_RESUM_QMASK 0xf0
136#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
137
138#define TRAP_BLOCK_SZ_SHIFT 8
139
140#include <asm/scratchpad.h>
141
142#define __GET_CPUID(REG) \
143 /* Spitfire implementation (default). */ \
144661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
145 srlx REG, 17, REG; \
146 and REG, 0x1f, REG; \
147 nop; \
148 .section .cpuid_patch, "ax"; \
149 /* Instruction location. */ \
150 .word 661b; \
151 /* Cheetah Safari implementation. */ \
152 ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
153 srlx REG, 17, REG; \
154 and REG, 0x3ff, REG; \
155 nop; \
156 /* Cheetah JBUS implementation. */ \
157 ldxa [%g0] ASI_JBUS_CONFIG, REG; \
158 srlx REG, 17, REG; \
159 and REG, 0x1f, REG; \
160 nop; \
161 /* Starfire implementation. */ \
162 sethi %hi(0x1fff40000d0 >> 9), REG; \
163 sllx REG, 9, REG; \
164 or REG, 0xd0, REG; \
165 lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
166 /* sun4v implementation. */ \
167 mov SCRATCHPAD_CPUID, REG; \
168 ldxa [REG] ASI_SCRATCHPAD, REG; \
169 nop; \
170 nop; \
171 .previous;
172
173#ifdef CONFIG_SMP
174
175#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
176 __GET_CPUID(TMP) \
177 sethi %hi(trap_block), DEST; \
178 sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
179 or DEST, %lo(trap_block), DEST; \
180 add DEST, TMP, DEST; \
181
182/* Clobbers TMP, current address space PGD phys address into DEST. */
183#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
184 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
185 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
186
187/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
188#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
189 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
190 add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
191
192/* Clobbers TMP, loads DEST with current thread info pointer. */
193#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
194 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
195 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
196
197/* Given the current thread info pointer in THR, load the per-cpu
198 * area base of the current processor into DEST. REG1, REG2, and REG3 are
199 * clobbered.
200 *
201 * You absolutely cannot use DEST as a temporary in this code. The
202 * reason is that traps can happen during execution, and return from
203 * trap will load the fully resolved DEST per-cpu base. This can corrupt
204 * the calculations done by the macro mid-stream.
205 */
206#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
207 lduh [THR + TI_CPU], REG1; \
208 sethi %hi(__per_cpu_shift), REG3; \
209 sethi %hi(__per_cpu_base), REG2; \
210 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
211 ldx [REG2 + %lo(__per_cpu_base)], REG2; \
212 sllx REG1, REG3, REG3; \
213 add REG3, REG2, DEST;
214
215#else
216
217#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
218 sethi %hi(trap_block), DEST; \
219 or DEST, %lo(trap_block), DEST; \
220
221/* Uniprocessor versions, we know the cpuid is zero. */
222#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
223 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
224 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
225
226/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
227#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
228 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
229 add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
230
231#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
232 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
233 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
234
235/* No per-cpu areas on uniprocessor, so no need to load DEST. */
236#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
237
238#endif /* !(CONFIG_SMP) */
239
240#endif /* _SPARC64_CPUDATA_H */
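
The TRAP_PER_CPU_* constants are consumed from assembly and must mirror the C layout of struct trap_per_cpu, and TRAP_BLOCK_SZ_SHIFT must match its size (hdesc ends at offset 0x100, i.e. 1 << 8), so that TRAP_LOAD_TRAP_BLOCK() can index trap_block[] with a shift. A hedged sketch of compile-time checks one could add, plus the C equivalent of what LOAD_PER_CPU_BASE() computes; neither is part of this patch:

#include <stddef.h>	/* offsetof(); <linux/stddef.h> in kernel code */

/* Hypothetical consistency checks: a negative array size breaks the build
 * if an offset or the total size ever drifts from the assembly constants.
 */
#define TPC_OFFSET_OK(field, off) \
	typedef char tpc_##field##_ok[(offsetof(struct trap_per_cpu, field) == (off)) ? 1 : -1]

TPC_OFFSET_OK(thread,          TRAP_PER_CPU_THREAD);
TPC_OFFSET_OK(pgd_paddr,       TRAP_PER_CPU_PGD_PADDR);
TPC_OFFSET_OK(fault_info,      TRAP_PER_CPU_FAULT_INFO);
TPC_OFFSET_OK(irq_worklist_pa, TRAP_PER_CPU_IRQ_WORKLIST_PA);
TPC_OFFSET_OK(nonresum_qmask,  TRAP_PER_CPU_NONRESUM_QMASK);

typedef char tpc_size_ok[(sizeof(struct trap_per_cpu) == (1 << TRAP_BLOCK_SZ_SHIFT)) ? 1 : -1];

/* What LOAD_PER_CPU_BASE() computes, written as C for reference. */
static inline unsigned long per_cpu_base_for(unsigned int cpu,
					     unsigned long base, unsigned long shift)
{
	return base + ((unsigned long)cpu << shift);
}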
diff --git a/include/asm-sparc/delay.h b/include/asm-sparc/delay.h
index bc9aba2bead6..6210a3ce9751 100644
--- a/include/asm-sparc/delay.h
+++ b/include/asm-sparc/delay.h
@@ -1,34 +1,8 @@
1#ifndef ___ASM_SPARC_DELAY_H
2#define ___ASM_SPARC_DELAY_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/delay_64.h>
5#else
6#include <asm-sparc/delay_32.h>
7#endif
8#endif
1/*
2 * delay.h: Linux delay routines on the Sparc.
3 *
4 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
5 */
6
7#ifndef __SPARC_DELAY_H
8#define __SPARC_DELAY_H
9
10#include <asm/cpudata.h>
11
12static inline void __delay(unsigned long loops)
13{
14 __asm__ __volatile__("cmp %0, 0\n\t"
15 "1: bne 1b\n\t"
16 "subcc %0, 1, %0\n" :
17 "=&r" (loops) :
18 "0" (loops) :
19 "cc");
20}
21
22/* This is too messy with inline asm on the Sparc. */
23extern void __udelay(unsigned long usecs, unsigned long lpj);
24extern void __ndelay(unsigned long nsecs, unsigned long lpj);
25
26#ifdef CONFIG_SMP
27#define __udelay_val cpu_data(smp_processor_id()).udelay_val
28#else /* SMP */
29#define __udelay_val loops_per_jiffy
30#endif /* SMP */
31#define udelay(__usecs) __udelay(__usecs, __udelay_val)
32#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
33
34#endif /* defined(__SPARC_DELAY_H) */
diff --git a/include/asm-sparc/delay_32.h b/include/asm-sparc/delay_32.h
new file mode 100644
index 000000000000..bc9aba2bead6
--- /dev/null
+++ b/include/asm-sparc/delay_32.h
@@ -0,0 +1,34 @@
1/*
2 * delay.h: Linux delay routines on the Sparc.
3 *
4 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
5 */
6
7#ifndef __SPARC_DELAY_H
8#define __SPARC_DELAY_H
9
10#include <asm/cpudata.h>
11
12static inline void __delay(unsigned long loops)
13{
14 __asm__ __volatile__("cmp %0, 0\n\t"
15 "1: bne 1b\n\t"
16 "subcc %0, 1, %0\n" :
17 "=&r" (loops) :
18 "0" (loops) :
19 "cc");
20}
21
22/* This is too messy with inline asm on the Sparc. */
23extern void __udelay(unsigned long usecs, unsigned long lpj);
24extern void __ndelay(unsigned long nsecs, unsigned long lpj);
25
26#ifdef CONFIG_SMP
27#define __udelay_val cpu_data(smp_processor_id()).udelay_val
28#else /* SMP */
29#define __udelay_val loops_per_jiffy
30#endif /* SMP */
31#define udelay(__usecs) __udelay(__usecs, __udelay_val)
32#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
33
34#endif /* defined(__SPARC_DELAY_H) */
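
udelay() above defers to __udelay() with the calibrated loops-per-jiffy value: __delay(loops_per_jiffy) lasts one timer tick, i.e. 1/HZ seconds. A rough model of the scaling involved (sketch only, not the real __udelay(); HZ comes from <asm/param.h>):

static inline void udelay_model(unsigned long usecs, unsigned long lpj)
{
	/* usecs microseconds need about usecs * lpj * HZ / 1000000 loop iterations */
	unsigned long long loops = (unsigned long long)usecs * lpj * HZ;

	__delay((unsigned long)(loops / 1000000ULL));
}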
diff --git a/include/asm-sparc/delay_64.h b/include/asm-sparc/delay_64.h
new file mode 100644
index 000000000000..a77aa622d762
--- /dev/null
+++ b/include/asm-sparc/delay_64.h
@@ -0,0 +1,17 @@
1/* delay.h: Linux delay routines on sparc64.
2 *
3 * Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
4 */
5
6#ifndef _SPARC64_DELAY_H
7#define _SPARC64_DELAY_H
8
9#ifndef __ASSEMBLY__
10
11extern void __delay(unsigned long loops);
12extern void udelay(unsigned long usecs);
13#define mdelay(n) udelay((n) * 1000)
14
15#endif /* !__ASSEMBLY__ */
16
17#endif /* _SPARC64_DELAY_H */
diff --git a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h
index f3a641e6b2c8..7483504259ce 100644
--- a/include/asm-sparc/dma-mapping.h
+++ b/include/asm-sparc/dma-mapping.h
@@ -1,11 +1,8 @@
1#ifndef ___ASM_SPARC_DMA_MAPPING_H
2#define ___ASM_SPARC_DMA_MAPPING_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/dma-mapping_64.h>
5#else
6#include <asm-sparc/dma-mapping_32.h>
7#endif
8#endif
1#ifndef _ASM_SPARC_DMA_MAPPING_H
2#define _ASM_SPARC_DMA_MAPPING_H
3
4
5#ifdef CONFIG_PCI
6#include <asm-generic/dma-mapping.h>
7#else
8#include <asm-generic/dma-mapping-broken.h>
9#endif /* PCI */
10
11#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/include/asm-sparc/dma-mapping_32.h b/include/asm-sparc/dma-mapping_32.h
new file mode 100644
index 000000000000..f3a641e6b2c8
--- /dev/null
+++ b/include/asm-sparc/dma-mapping_32.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_SPARC_DMA_MAPPING_H
2#define _ASM_SPARC_DMA_MAPPING_H
3
4
5#ifdef CONFIG_PCI
6#include <asm-generic/dma-mapping.h>
7#else
8#include <asm-generic/dma-mapping-broken.h>
9#endif /* PCI */
10
11#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h
new file mode 100644
index 000000000000..38cbec76a33f
--- /dev/null
+++ b/include/asm-sparc/dma-mapping_64.h
@@ -0,0 +1,154 @@
1#ifndef _ASM_SPARC64_DMA_MAPPING_H
2#define _ASM_SPARC64_DMA_MAPPING_H
3
4#include <linux/scatterlist.h>
5#include <linux/mm.h>
6
7#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
8
9struct dma_ops {
10 void *(*alloc_coherent)(struct device *dev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flag);
12 void (*free_coherent)(struct device *dev, size_t size,
13 void *cpu_addr, dma_addr_t dma_handle);
14 dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
15 size_t size,
16 enum dma_data_direction direction);
17 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
18 size_t size,
19 enum dma_data_direction direction);
20 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
21 enum dma_data_direction direction);
22 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
23 int nhwentries,
24 enum dma_data_direction direction);
25 void (*sync_single_for_cpu)(struct device *dev,
26 dma_addr_t dma_handle, size_t size,
27 enum dma_data_direction direction);
28 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
29 int nelems,
30 enum dma_data_direction direction);
31};
32extern const struct dma_ops *dma_ops;
33
34extern int dma_supported(struct device *dev, u64 mask);
35extern int dma_set_mask(struct device *dev, u64 dma_mask);
36
37static inline void *dma_alloc_coherent(struct device *dev, size_t size,
38 dma_addr_t *dma_handle, gfp_t flag)
39{
40 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
41}
42
43static inline void dma_free_coherent(struct device *dev, size_t size,
44 void *cpu_addr, dma_addr_t dma_handle)
45{
46 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
47}
48
49static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
50 size_t size,
51 enum dma_data_direction direction)
52{
53 return dma_ops->map_single(dev, cpu_addr, size, direction);
54}
55
56static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
57 size_t size,
58 enum dma_data_direction direction)
59{
60 dma_ops->unmap_single(dev, dma_addr, size, direction);
61}
62
63static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64 unsigned long offset, size_t size,
65 enum dma_data_direction direction)
66{
67 return dma_ops->map_single(dev, page_address(page) + offset,
68 size, direction);
69}
70
71static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
72 size_t size,
73 enum dma_data_direction direction)
74{
75 dma_ops->unmap_single(dev, dma_address, size, direction);
76}
77
78static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
79 int nents, enum dma_data_direction direction)
80{
81 return dma_ops->map_sg(dev, sg, nents, direction);
82}
83
84static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
85 int nents, enum dma_data_direction direction)
86{
87 dma_ops->unmap_sg(dev, sg, nents, direction);
88}
89
90static inline void dma_sync_single_for_cpu(struct device *dev,
91 dma_addr_t dma_handle, size_t size,
92 enum dma_data_direction direction)
93{
94 dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
95}
96
97static inline void dma_sync_single_for_device(struct device *dev,
98 dma_addr_t dma_handle,
99 size_t size,
100 enum dma_data_direction direction)
101{
102 /* No flushing needed to sync cpu writes to the device. */
103}
104
105static inline void dma_sync_single_range_for_cpu(struct device *dev,
106 dma_addr_t dma_handle,
107 unsigned long offset,
108 size_t size,
109 enum dma_data_direction direction)
110{
111 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
112}
113
114static inline void dma_sync_single_range_for_device(struct device *dev,
115 dma_addr_t dma_handle,
116 unsigned long offset,
117 size_t size,
118 enum dma_data_direction direction)
119{
120 /* No flushing needed to sync cpu writes to the device. */
121}
122
123
124static inline void dma_sync_sg_for_cpu(struct device *dev,
125 struct scatterlist *sg, int nelems,
126 enum dma_data_direction direction)
127{
128 dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
129}
130
131static inline void dma_sync_sg_for_device(struct device *dev,
132 struct scatterlist *sg, int nelems,
133 enum dma_data_direction direction)
134{
135 /* No flushing needed to sync cpu writes to the device. */
136}
137
138static inline int dma_mapping_error(dma_addr_t dma_addr)
139{
140 return (dma_addr == DMA_ERROR_CODE);
141}
142
143static inline int dma_get_cache_alignment(void)
144{
145 /* no easy way to get cache size on all processors, so return
146 * the maximum possible, to be safe */
147 return (1 << INTERNODE_CACHE_SHIFT);
148}
149
150#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
151#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
152#define dma_is_consistent(d, h) (1)
153
154#endif /* _ASM_SPARC64_DMA_MAPPING_H */
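
The wrappers above dispatch through dma_ops; a driver uses them like the generic streaming-DMA API. An illustrative fragment (hypothetical driver code, error handling trimmed):

static int dma_xfer_example(struct device *my_dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bus))		/* single-argument form, as defined above */
		return -ENOMEM;

	/* ... hand "bus" to the device, start the transfer, wait for completion ... */

	dma_unmap_single(my_dev, bus, len, DMA_TO_DEVICE);
	return 0;
}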
diff --git a/include/asm-sparc/dma.h b/include/asm-sparc/dma.h
index 959d6c8a71ae..8cc69bfaae2a 100644
--- a/include/asm-sparc/dma.h
+++ b/include/asm-sparc/dma.h
@@ -1,288 +1,8 @@
1#ifndef ___ASM_SPARC_DMA_H
2#define ___ASM_SPARC_DMA_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/dma_64.h>
5#else
6#include <asm-sparc/dma_32.h>
7#endif
8#endif
1/* include/asm-sparc/dma.h
2 *
3 * Copyright 1995 (C) David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _ASM_SPARC_DMA_H
7#define _ASM_SPARC_DMA_H
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11
12#include <asm/vac-ops.h> /* for invalidate's, etc. */
13#include <asm/sbus.h>
14#include <asm/delay.h>
15#include <asm/oplib.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <linux/spinlock.h>
19
20struct page;
21extern spinlock_t dma_spin_lock;
22
23static inline unsigned long claim_dma_lock(void)
24{
25 unsigned long flags;
26 spin_lock_irqsave(&dma_spin_lock, flags);
27 return flags;
28}
29
30static inline void release_dma_lock(unsigned long flags)
31{
32 spin_unlock_irqrestore(&dma_spin_lock, flags);
33}
34
35/* These are irrelevant for Sparc DMA, but we leave it in so that
36 * things can compile.
37 */
38#define MAX_DMA_CHANNELS 8
39#define MAX_DMA_ADDRESS (~0UL)
40#define DMA_MODE_READ 1
41#define DMA_MODE_WRITE 2
42
43/* Useful constants */
44#define SIZE_16MB (16*1024*1024)
45#define SIZE_64K (64*1024)
46
47/* SBUS DMA controller reg offsets */
48#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
49#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
50#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
51#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
52
53/* DVMA chip revisions */
54enum dvma_rev {
55 dvmarev0,
56 dvmaesc1,
57 dvmarev1,
58 dvmarev2,
59 dvmarev3,
60 dvmarevplus,
61 dvmahme
62};
63
64#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
65
66/* Linux DMA information structure, filled during probe. */
67struct sbus_dma {
68 struct sbus_dma *next;
69 struct sbus_dev *sdev;
70 void __iomem *regs;
71
72 /* Status, misc info */
73 int node; /* Prom node for this DMA device */
74 int running; /* Are we doing DMA now? */
75 int allocated; /* Are we "owned" by anyone yet? */
76
77 /* Transfer information. */
78 unsigned long addr; /* Start address of current transfer */
79 int nbytes; /* Size of current transfer */
80 int realbytes; /* For splitting up large transfers, etc. */
81
82 /* DMA revision */
83 enum dvma_rev revision;
84};
85
86extern struct sbus_dma *dma_chain;
87
88/* Broken hardware... */
89#ifdef CONFIG_SUN4
90/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
91 * Or is rev0 present only on sun4 boxes? -jj */
92#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
93#else
94#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
95#endif
96#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
97
98/* Main routines in dma.c */
99extern void dvma_init(struct sbus_bus *);
100
101/* Fields in the cond_reg register */
102/* First, the version identification bits */
103#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
104#define DMA_VERS0 0x00000000 /* Sunray DMA version */
105#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
106#define DMA_VERS1 0x80000000 /* DMA rev 1 */
107#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
108#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
109#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
110
111#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
112#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
113#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
114#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
115#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
116#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
117#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
118#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
119#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
120#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
121#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
122#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
123#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
124#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
125#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
126#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
127#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
128#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
129#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
130#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
131#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
132#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
133#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
134#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
135#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
136#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
137#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
138#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
139#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
140#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
141#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
142#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
143#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
144#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
145#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
146#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
147#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
148#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
149#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
150#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
151#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
152#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
153#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
154
155/* Values describing the burst-size property from the PROM */
156#define DMA_BURST1 0x01
157#define DMA_BURST2 0x02
158#define DMA_BURST4 0x04
159#define DMA_BURST8 0x08
160#define DMA_BURST16 0x10
161#define DMA_BURST32 0x20
162#define DMA_BURST64 0x40
163#define DMA_BURSTBITS 0x7f
164
165/* Determine highest possible final transfer address given a base */
166#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
167
168/* Yes, I hack a lot of elisp in my spare time... */
169#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
170#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
171#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
172#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
173#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
174#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
175#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
176#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
177#define DMA_BEGINDMA_W(regs) \
178 ((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
179#define DMA_BEGINDMA_R(regs) \
180 ((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
181
182/* For certain DMA chips, we need to disable ints upon irq entry
183 * and turn them back on when we are done. So in any ESP interrupt
184 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
185 * when leaving the handler. You have been warned...
186 */
187#define DMA_IRQ_ENTRY(dma, dregs) do { \
188 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
189 } while (0)
190
191#define DMA_IRQ_EXIT(dma, dregs) do { \
192 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
193 } while(0)
194
195#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
196/* Pause until counter runs out or BIT isn't set in the DMA condition
197 * register.
198 */
199static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
200 unsigned long bit)
201{
202 int ctr = 50000; /* Let's find some bugs ;) */
203
204 /* Busy wait until the bit is not set any more */
205 while((regs->cond_reg&bit) && (ctr>0)) {
206 ctr--;
207 __delay(5);
208 }
209
210 /* Check for bogus outcome. */
211 if(!ctr)
212 panic("DMA timeout");
213}
214
215/* Reset the friggin' thing... */
216#define DMA_RESET(dma) do { \
217 struct sparc_dma_registers *regs = dma->regs; \
218 /* Let the current FIFO drain itself */ \
219 sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
220 /* Reset the logic */ \
221 regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
222 __delay(400); /* let the bits set ;) */ \
223 regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
224 sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
225 /* Enable FAST transfers if available */ \
226 if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
227 dma->running = 0; \
228} while(0)
229#endif
230
231#define for_each_dvma(dma) \
232 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
233
234extern int get_dma_list(char *);
235extern int request_dma(unsigned int, __const__ char *);
236extern void free_dma(unsigned int);
237
238/* From PCI */
239
240#ifdef CONFIG_PCI
241extern int isa_dma_bridge_buggy;
242#else
243#define isa_dma_bridge_buggy (0)
244#endif
245
246/* Routines for data transfer buffers. */
247BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
248BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
249
250#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
251#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
252
253/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
254BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
255BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
256BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
257BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
258
259#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
260#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
261#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
262#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
263
264/*
265 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
266 *
267 * The mmu_map_dma_area establishes two mappings in one go.
268 * These mappings point to pages normally mapped at 'va' (linear address).
269 * First mapping is for CPU visible address at 'a', uncached.
270 * This is an alias, but it works because it is an uncached mapping.
271 * Second mapping is for device visible address, or "bus" address.
272 * The bus address is returned at '*pba'.
273 *
274 * These functions seem distinct, but are hard to split. On sun4c,
275 * at least for now, 'a' is equal to bus address, and returned in *pba.
276 * On sun4m, page attributes depend on the CPU type, so we have to
277 * know if we are mapping RAM or I/O, so it has to be an additional argument
278 * to a separate mapping function for CPU visible mappings.
279 */
280BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
281BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
282BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
283
284#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
285#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
286#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
287
288#endif /* !(_ASM_SPARC_DMA_H) */
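
Two usage notes on the interfaces above: the flags cookie returned by claim_dma_lock() must be handed back to release_dma_lock(), and DMA_MAXEND() gives the bytes from a base address up to the next 16 MB boundary, e.g. DMA_MAXEND(0x00ff0000) evaluates to 0x10000 (64 KB). A hypothetical driver fragment tying both together (sketch only):

static void start_one_transfer(struct sbus_dma *dma, unsigned long addr, int nbytes)
{
	unsigned long flags = claim_dma_lock();

	if ((unsigned long)nbytes > DMA_MAXEND(addr))
		nbytes = DMA_MAXEND(addr);	/* don't cross the 16 MB boundary */

	dma->addr = addr;
	dma->nbytes = nbytes;
	/* ... program DMA_ADDR/DMA_COUNT, then set DMA_ENABLE in DMA_CSR ... */
	dma->running = 1;

	release_dma_lock(flags);
}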
diff --git a/include/asm-sparc/dma_32.h b/include/asm-sparc/dma_32.h
new file mode 100644
index 000000000000..959d6c8a71ae
--- /dev/null
+++ b/include/asm-sparc/dma_32.h
@@ -0,0 +1,288 @@
1/* include/asm-sparc/dma.h
2 *
3 * Copyright 1995 (C) David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _ASM_SPARC_DMA_H
7#define _ASM_SPARC_DMA_H
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11
12#include <asm/vac-ops.h> /* for invalidate's, etc. */
13#include <asm/sbus.h>
14#include <asm/delay.h>
15#include <asm/oplib.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <linux/spinlock.h>
19
20struct page;
21extern spinlock_t dma_spin_lock;
22
23static inline unsigned long claim_dma_lock(void)
24{
25 unsigned long flags;
26 spin_lock_irqsave(&dma_spin_lock, flags);
27 return flags;
28}
29
30static inline void release_dma_lock(unsigned long flags)
31{
32 spin_unlock_irqrestore(&dma_spin_lock, flags);
33}
34
35/* These are irrelevant for Sparc DMA, but we leave it in so that
36 * things can compile.
37 */
38#define MAX_DMA_CHANNELS 8
39#define MAX_DMA_ADDRESS (~0UL)
40#define DMA_MODE_READ 1
41#define DMA_MODE_WRITE 2
42
43/* Useful constants */
44#define SIZE_16MB (16*1024*1024)
45#define SIZE_64K (64*1024)
46
47/* SBUS DMA controller reg offsets */
48#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
49#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
50#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
51#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
52
53/* DVMA chip revisions */
54enum dvma_rev {
55 dvmarev0,
56 dvmaesc1,
57 dvmarev1,
58 dvmarev2,
59 dvmarev3,
60 dvmarevplus,
61 dvmahme
62};
63
64#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
65
66/* Linux DMA information structure, filled during probe. */
67struct sbus_dma {
68 struct sbus_dma *next;
69 struct sbus_dev *sdev;
70 void __iomem *regs;
71
72 /* Status, misc info */
73 int node; /* Prom node for this DMA device */
74 int running; /* Are we doing DMA now? */
75 int allocated; /* Are we "owned" by anyone yet? */
76
77 /* Transfer information. */
78 unsigned long addr; /* Start address of current transfer */
79 int nbytes; /* Size of current transfer */
80 int realbytes; /* For splitting up large transfers, etc. */
81
82 /* DMA revision */
83 enum dvma_rev revision;
84};
85
86extern struct sbus_dma *dma_chain;
87
88/* Broken hardware... */
89#ifdef CONFIG_SUN4
90/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
91 * Or is rev0 present only on sun4 boxes? -jj */
92#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
93#else
94#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
95#endif
96#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
97
98/* Main routines in dma.c */
99extern void dvma_init(struct sbus_bus *);
100
101/* Fields in the cond_reg register */
102/* First, the version identification bits */
103#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
104#define DMA_VERS0 0x00000000 /* Sunray DMA version */
105#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
106#define DMA_VERS1 0x80000000 /* DMA rev 1 */
107#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
108#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
109#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
110
111#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
112#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
113#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
114#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
115#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
116#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
117#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
118#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
119#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
120#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
121#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
122#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
123#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
124#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
125#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
126#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
127#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
128#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
129#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
130#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
131#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
132#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
133#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
134#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
135#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
136#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
137#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
138#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
139#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
140#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
141#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
142#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
143#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
144#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
145#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
146#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
147#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
148#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
149#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
150#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
151#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
152#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
153#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
154
155/* Values describing the burst-size property from the PROM */
156#define DMA_BURST1 0x01
157#define DMA_BURST2 0x02
158#define DMA_BURST4 0x04
159#define DMA_BURST8 0x08
160#define DMA_BURST16 0x10
161#define DMA_BURST32 0x20
162#define DMA_BURST64 0x40
163#define DMA_BURSTBITS 0x7f
164
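The PROM advertises supported bursts as a bit mask that these DMA_BURST* values decode. A minimal sketch of picking a burst length from that mask; the "burst-sizes" property name and the prom_getintdefault() helper are assumptions about typical use, not taken from this patch.

static inline int example_pick_burst(int node)
{
	/* Mask the property down to the DMA_BURST* bits defined above. */
	int bsizes = prom_getintdefault(node, "burst-sizes", 0) & DMA_BURSTBITS;

	if (bsizes & DMA_BURST64)
		return 64;
	if (bsizes & DMA_BURST32)
		return 32;
	if (bsizes & DMA_BURST16)
		return 16;
	return 0;	/* nothing advertised; fall back to non-burst transfers */
}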
165/* Determine highest possible final transfer address given a base */
166#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
167
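A worked example of the arithmetic (the base address is made up): for a transfer starting at 0x00ff8000 the macro yields 0x01000000 - 0x00ff8000 = 0x8000, i.e. at most 32 KB can be moved before the 16 MB wrap.

static inline void example_dma_maxend(void)
{
	unsigned long base = 0x00ff8000UL;	/* hypothetical start address */
	unsigned long left = DMA_MAXEND(base);	/* 0x8000 bytes to the 16 MB boundary */

	(void)left;	/* a single transfer from 'base' must not exceed this */
}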
168/* Yes, I hack a lot of elisp in my spare time... */
169#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
170#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
171#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
172#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
173#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
174#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
175#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
176#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
177#define DMA_BEGINDMA_W(regs) \
178 ((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
179#define DMA_BEGINDMA_R(regs) \
180 ((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
181
182/* For certain DMA chips, we need to disable ints upon irq entry
183 * and turn them back on when we are done. So in any ESP interrupt
184 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
185 * when leaving the handler. You have been warned...
186 */
187#define DMA_IRQ_ENTRY(dma, dregs) do { \
188 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
189 } while (0)
190
191#define DMA_IRQ_EXIT(dma, dregs) do { \
192 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
193 } while(0)
194
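A minimal sketch of the contract spelled out above, assuming <linux/interrupt.h> for irqreturn_t and that struct sparc_dma_registers (referenced but not defined in this header) is visible; the container struct and handler name are illustrative, not part of this patch.

struct example_esp {
	struct sbus_dma *dma;			/* probed descriptor from dma_chain */
	struct sparc_dma_registers *dregs;	/* mapped controller registers */
};

static irqreturn_t example_esp_interrupt(int irq, void *dev_id)
{
	struct example_esp *esp = dev_id;

	DMA_IRQ_ENTRY(esp->dma, esp->dregs);	/* mask DMA interrupts on broken revisions */
	/* ... service the SCSI/DMA completion here ... */
	DMA_IRQ_EXIT(esp->dma, esp->dregs);	/* and unmask them again on the way out */
	return IRQ_HANDLED;
}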
195#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
196/* Pause until counter runs out or BIT isn't set in the DMA condition
197 * register.
198 */
199static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
200 unsigned long bit)
201{
202 int ctr = 50000; /* Let's find some bugs ;) */
203
204 /* Busy wait until the bit is not set any more */
205 while((regs->cond_reg&bit) && (ctr>0)) {
206 ctr--;
207 __delay(5);
208 }
209
210 /* Check for bogus outcome. */
211 if(!ctr)
212 panic("DMA timeout");
213}
214
215/* Reset the friggin' thing... */
216#define DMA_RESET(dma) do { \
217 struct sparc_dma_registers *regs = dma->regs; \
218 /* Let the current FIFO drain itself */ \
219 sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
220 /* Reset the logic */ \
221 regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
222 __delay(400); /* let the bits set ;) */ \
223 regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
224 sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
225 /* Enable FAST transfers if available */ \
226 if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
227 dma->running = 0; \
228} while(0)
229#endif
230
231#define for_each_dvma(dma) \
232 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
233
234extern int get_dma_list(char *);
235extern int request_dma(unsigned int, __const__ char *);
236extern void free_dma(unsigned int);
237
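For completeness, the classic claim/release pattern for the channel interface declared above; the channel number and owner string below are made up.

static inline int example_claim_channel(void)
{
	if (request_dma(1, "example-driver"))	/* non-zero means the channel is taken */
		return -EBUSY;
	/* ... drive the channel ... */
	free_dma(1);
	return 0;
}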
238/* From PCI */
239
240#ifdef CONFIG_PCI
241extern int isa_dma_bridge_buggy;
242#else
243#define isa_dma_bridge_buggy (0)
244#endif
245
246/* Routines for data transfer buffers. */
247BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
248BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
249
250#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
251#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
252
253/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
254BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
255BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
256BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
257BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
258
259#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
260#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
261#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
262#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
263
264/*
265 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
266 *
267 * The mmu_map_dma_area establishes two mappings in one go.
268 * These mappings point to pages normally mapped at 'va' (linear address).
269 * First mapping is for CPU visible address at 'a', uncached.
270 * This is an alias, but it works because it is an uncached mapping.
271 * Second mapping is for device visible address, or "bus" address.
272 * The bus address is returned at '*pba'.
273 *
274 * These functions seem distinct, but are hard to split. On sun4c,
275 * at least for now, 'a' is equal to bus address, and returned in *pba.
276 * On sun4m, page attributes depend on the CPU type, so we have to
277 * know if we are mapping RAM or I/O, so it has to be an additional argument
278 * to a separate mapping function for CPU visible mappings.
279 */
280BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
281BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
282BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
283
284#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
285#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
286#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
287
288#endif /* !(_ASM_SPARC_DMA_H) */
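To make the mmu_map_dma_area() comment above concrete, a sketch of how a caller might use the mapping pair. Only the mmu_map_dma_area()/mmu_unmap_dma_area() calls come from the interface above; the buffer handling, error value and the assumption that a non-zero return means failure are mine.

static int example_map_coherent(unsigned long va, unsigned long uncached_va, int len)
{
	dma_addr_t busa;

	/* Alias the pages behind 'va' at 'uncached_va' for the CPU and get the bus address. */
	if (mmu_map_dma_area(&busa, va, uncached_va, len))
		return -ENOMEM;

	/* ... hand 'busa' to the device and run the transfer ... */

	mmu_unmap_dma_area(busa, len);
	return 0;
}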
diff --git a/include/asm-sparc/dma_64.h b/include/asm-sparc/dma_64.h
new file mode 100644
index 000000000000..9d4c024bd3b3
--- /dev/null
+++ b/include/asm-sparc/dma_64.h
@@ -0,0 +1,205 @@
1/*
2 * include/asm-sparc64/dma.h
3 *
4 * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _ASM_SPARC64_DMA_H
8#define _ASM_SPARC64_DMA_H
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/spinlock.h>
13
14#include <asm/sbus.h>
15#include <asm/delay.h>
16#include <asm/oplib.h>
17
18/* These are irrelevant for Sparc DMA, but we leave it in so that
19 * things can compile.
20 */
21#define MAX_DMA_CHANNELS 8
22#define DMA_MODE_READ 1
23#define DMA_MODE_WRITE 2
24#define MAX_DMA_ADDRESS (~0UL)
25
26/* Useful constants */
27#define SIZE_16MB (16*1024*1024)
28#define SIZE_64K (64*1024)
29
30/* SBUS DMA controller reg offsets */
31#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
32#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
33#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
34#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
35
36/* DVMA chip revisions */
37enum dvma_rev {
38 dvmarev0,
39 dvmaesc1,
40 dvmarev1,
41 dvmarev2,
42 dvmarev3,
43 dvmarevplus,
44 dvmahme
45};
46
47#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
48
49/* Linux DMA information structure, filled during probe. */
50struct sbus_dma {
51 struct sbus_dma *next;
52 struct sbus_dev *sdev;
53 void __iomem *regs;
54
55 /* Status, misc info */
56 int node; /* Prom node for this DMA device */
57 int running; /* Are we doing DMA now? */
58 int allocated; /* Are we "owned" by anyone yet? */
59
60 /* Transfer information. */
61 u32 addr; /* Start address of current transfer */
62 int nbytes; /* Size of current transfer */
63 int realbytes; /* For splitting up large transfers, etc. */
64
65 /* DMA revision */
66 enum dvma_rev revision;
67};
68
69extern struct sbus_dma *dma_chain;
70
71/* Broken hardware... */
72#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
73#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
74
75/* Main routines in dma.c */
76extern void dvma_init(struct sbus_bus *);
77
78/* Fields in the cond_reg register */
79/* First, the version identification bits */
80#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
81#define DMA_VERS0 0x00000000 /* Sunray DMA version */
82#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
83#define DMA_VERS1 0x80000000 /* DMA rev 1 */
84#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
85#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
86#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
87
88#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
89#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
90#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
91#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
92#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
93#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
94#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
95#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
96#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
97#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
98#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
99#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
100#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
101#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
102#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
103#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
104#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
105#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
106#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
107#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
108#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
109#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
110#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
111#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
112#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
113#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
114#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
115#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
116#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
117#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
118#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
119#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
120#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
121#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
122#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
123#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
124#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
125#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
126#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
127#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
128#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
129
130/* Values describing the burst-size property from the PROM */
131#define DMA_BURST1 0x01
132#define DMA_BURST2 0x02
133#define DMA_BURST4 0x04
134#define DMA_BURST8 0x08
135#define DMA_BURST16 0x10
136#define DMA_BURST32 0x20
137#define DMA_BURST64 0x40
138#define DMA_BURSTBITS 0x7f
139
140/* Determine highest possible final transfer address given a base */
141#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
142
143/* Yes, I hack a lot of elisp in my spare time... */
144#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
145#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
146#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
147#define DMA_OFF(__regs) \
148do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
149 tmp &= ~DMA_ENABLE; \
150 sbus_writel(tmp, (__regs) + DMA_CSR); \
151} while(0)
152#define DMA_INTSOFF(__regs) \
153do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
154 tmp &= ~DMA_INT_ENAB; \
155 sbus_writel(tmp, (__regs) + DMA_CSR); \
156} while(0)
157#define DMA_INTSON(__regs) \
158do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
159 tmp |= DMA_INT_ENAB; \
160 sbus_writel(tmp, (__regs) + DMA_CSR); \
161} while(0)
162#define DMA_PUNTFIFO(__regs) \
163do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
164 tmp |= DMA_FIFO_INV; \
165 sbus_writel(tmp, (__regs) + DMA_CSR); \
166} while(0)
167#define DMA_SETSTART(__regs, __addr) \
168 sbus_writel((u32)(__addr), (__regs) + DMA_ADDR);
169#define DMA_BEGINDMA_W(__regs) \
170do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
171 tmp |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB); \
172 sbus_writel(tmp, (__regs) + DMA_CSR); \
173} while(0)
174#define DMA_BEGINDMA_R(__regs) \
175do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
176 tmp |= (DMA_ENABLE|DMA_INT_ENAB); \
177 tmp &= ~DMA_ST_WRITE; \
178 sbus_writel(tmp, (__regs) + DMA_CSR); \
179} while(0)
180
181/* For certain DMA chips, we need to disable ints upon irq entry
182 * and turn them back on when we are done. So in any ESP interrupt
183 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
184 * when leaving the handler. You have been warned...
185 */
186#define DMA_IRQ_ENTRY(dma, dregs) do { \
187 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
188 } while (0)
189
190#define DMA_IRQ_EXIT(dma, dregs) do { \
191 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
192 } while(0)
193
194#define for_each_dvma(dma) \
195 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
196
197/* From PCI */
198
199#ifdef CONFIG_PCI
200extern int isa_dma_bridge_buggy;
201#else
202#define isa_dma_bridge_buggy (0)
203#endif
204
205#endif /* !(_ASM_SPARC64_DMA_H) */
diff --git a/include/asm-sparc/ebus.h b/include/asm-sparc/ebus.h
index 491f85d662df..a5da2d00cd18 100644
--- a/include/asm-sparc/ebus.h
+++ b/include/asm-sparc/ebus.h
@@ -1,99 +1,8 @@
1#ifndef ___ASM_SPARC_EBUS_H
2#define ___ASM_SPARC_EBUS_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/ebus_64.h>
5#else
6#include <asm-sparc/ebus_32.h>
7#endif
8#endif
1/*
2 * ebus.h: PCI to Ebus pseudo driver software state.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 *
6 * Adopted for sparc by V. Roganov and G. Raiko.
7 */
8
9#ifndef __SPARC_EBUS_H
10#define __SPARC_EBUS_H
11
12#ifndef _LINUX_IOPORT_H
13#include <linux/ioport.h>
14#endif
15#include <asm/oplib.h>
16#include <asm/prom.h>
17#include <asm/of_device.h>
18
19struct linux_ebus_child {
20 struct linux_ebus_child *next;
21 struct linux_ebus_device *parent;
22 struct linux_ebus *bus;
23 struct device_node *prom_node;
24 struct resource resource[PROMREG_MAX];
25 int num_addrs;
26 unsigned int irqs[PROMINTR_MAX];
27 int num_irqs;
28};
29
30struct linux_ebus_device {
31 struct of_device ofdev;
32 struct linux_ebus_device *next;
33 struct linux_ebus_child *children;
34 struct linux_ebus *bus;
35 struct device_node *prom_node;
36 struct resource resource[PROMREG_MAX];
37 int num_addrs;
38 unsigned int irqs[PROMINTR_MAX];
39 int num_irqs;
40};
41#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
42
43struct linux_ebus {
44 struct of_device ofdev;
45 struct linux_ebus *next;
46 struct linux_ebus_device *devices;
47 struct linux_pbm_info *parent;
48 struct pci_dev *self;
49 struct device_node *prom_node;
50};
51#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
52
53struct linux_ebus_dma {
54 unsigned int dcsr;
55 unsigned int dacr;
56 unsigned int dbcr;
57};
58
59#define EBUS_DCSR_INT_PEND 0x00000001
60#define EBUS_DCSR_ERR_PEND 0x00000002
61#define EBUS_DCSR_DRAIN 0x00000004
62#define EBUS_DCSR_INT_EN 0x00000010
63#define EBUS_DCSR_RESET 0x00000080
64#define EBUS_DCSR_WRITE 0x00000100
65#define EBUS_DCSR_EN_DMA 0x00000200
66#define EBUS_DCSR_CYC_PEND 0x00000400
67#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
68#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
69#define EBUS_DCSR_EN_CNT 0x00002000
70#define EBUS_DCSR_TC 0x00004000
71#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
72#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
73#define EBUS_DCSR_BURST_SZ_1 0x00080000
74#define EBUS_DCSR_BURST_SZ_4 0x00000000
75#define EBUS_DCSR_BURST_SZ_8 0x00040000
76#define EBUS_DCSR_BURST_SZ_16 0x000c0000
77#define EBUS_DCSR_DIAG_EN 0x00100000
78#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
79#define EBUS_DCSR_TCI_DIS 0x00800000
80#define EBUS_DCSR_EN_NEXT 0x01000000
81#define EBUS_DCSR_DMA_ON 0x02000000
82#define EBUS_DCSR_A_LOADED 0x04000000
83#define EBUS_DCSR_NA_LOADED 0x08000000
84#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
85
86extern struct linux_ebus *ebus_chain;
87
88extern void ebus_init(void);
89
90#define for_each_ebus(bus) \
91 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
92
93#define for_each_ebusdev(dev, bus) \
94 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
95
96#define for_each_edevchild(dev, child) \
97 for((child) = (dev)->children; (child); (child) = (child)->next)
98
99#endif /* !(__SPARC_EBUS_H) */
diff --git a/include/asm-sparc/ebus_32.h b/include/asm-sparc/ebus_32.h
new file mode 100644
index 000000000000..29cb7dfc6b79
--- /dev/null
+++ b/include/asm-sparc/ebus_32.h
@@ -0,0 +1,99 @@
1/*
2 * ebus.h: PCI to Ebus pseudo driver software state.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 *
6 * Adopted for sparc by V. Roganov and G. Raiko.
7 */
8
9#ifndef __SPARC_EBUS_H
10#define __SPARC_EBUS_H
11
12#ifndef _LINUX_IOPORT_H
13#include <linux/ioport.h>
14#endif
15#include <asm/oplib.h>
16#include <asm/prom.h>
17#include <asm/of_device.h>
18
19struct linux_ebus_child {
20 struct linux_ebus_child *next;
21 struct linux_ebus_device *parent;
22 struct linux_ebus *bus;
23 struct device_node *prom_node;
24 struct resource resource[PROMREG_MAX];
25 int num_addrs;
26 unsigned int irqs[PROMINTR_MAX];
27 int num_irqs;
28};
29
30struct linux_ebus_device {
31 struct of_device ofdev;
32 struct linux_ebus_device *next;
33 struct linux_ebus_child *children;
34 struct linux_ebus *bus;
35 struct device_node *prom_node;
36 struct resource resource[PROMREG_MAX];
37 int num_addrs;
38 unsigned int irqs[PROMINTR_MAX];
39 int num_irqs;
40};
41#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
42
43struct linux_ebus {
44 struct of_device ofdev;
45 struct linux_ebus *next;
46 struct linux_ebus_device *devices;
47 struct linux_pbm_info *parent;
48 struct pci_dev *self;
49 struct device_node *prom_node;
50};
51#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
52
53struct linux_ebus_dma {
54 unsigned int dcsr;
55 unsigned int dacr;
56 unsigned int dbcr;
57};
58
59#define EBUS_DCSR_INT_PEND 0x00000001
60#define EBUS_DCSR_ERR_PEND 0x00000002
61#define EBUS_DCSR_DRAIN 0x00000004
62#define EBUS_DCSR_INT_EN 0x00000010
63#define EBUS_DCSR_RESET 0x00000080
64#define EBUS_DCSR_WRITE 0x00000100
65#define EBUS_DCSR_EN_DMA 0x00000200
66#define EBUS_DCSR_CYC_PEND 0x00000400
67#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
68#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
69#define EBUS_DCSR_EN_CNT 0x00002000
70#define EBUS_DCSR_TC 0x00004000
71#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
72#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
73#define EBUS_DCSR_BURST_SZ_1 0x00080000
74#define EBUS_DCSR_BURST_SZ_4 0x00000000
75#define EBUS_DCSR_BURST_SZ_8 0x00040000
76#define EBUS_DCSR_BURST_SZ_16 0x000c0000
77#define EBUS_DCSR_DIAG_EN 0x00100000
78#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
79#define EBUS_DCSR_TCI_DIS 0x00800000
80#define EBUS_DCSR_EN_NEXT 0x01000000
81#define EBUS_DCSR_DMA_ON 0x02000000
82#define EBUS_DCSR_A_LOADED 0x04000000
83#define EBUS_DCSR_NA_LOADED 0x08000000
84#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
85
86extern struct linux_ebus *ebus_chain;
87
88extern void ebus_init(void);
89
90#define for_each_ebus(bus) \
91 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
92
93#define for_each_ebusdev(dev, bus) \
94 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
95
96#define for_each_edevchild(dev, child) \
97 for((child) = (dev)->children; (child); (child) = (child)->next)
98
99#endif /* !(__SPARC_EBUS_H) */
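The iteration macros above are normally used as nested loops over the probed chain. A small sketch; the helper name and the idea of matching on the PROM node name are illustrative only.

static struct linux_ebus_device *example_find_edev(const char *name)
{
	struct linux_ebus *ebus;
	struct linux_ebus_device *edev;

	for_each_ebus(ebus) {
		for_each_ebusdev(edev, ebus) {
			if (!strcmp(edev->prom_node->name, name))
				return edev;	/* e.g. name == "su" for a serial node */
		}
	}
	return NULL;
}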
diff --git a/include/asm-sparc/ebus_64.h b/include/asm-sparc/ebus_64.h
new file mode 100644
index 000000000000..fcc62b97ced5
--- /dev/null
+++ b/include/asm-sparc/ebus_64.h
@@ -0,0 +1,94 @@
1/*
2 * ebus.h: PCI to Ebus pseudo driver software state.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8#ifndef __SPARC64_EBUS_H
9#define __SPARC64_EBUS_H
10
11#include <asm/oplib.h>
12#include <asm/prom.h>
13#include <asm/of_device.h>
14
15struct linux_ebus_child {
16 struct linux_ebus_child *next;
17 struct linux_ebus_device *parent;
18 struct linux_ebus *bus;
19 struct device_node *prom_node;
20 struct resource resource[PROMREG_MAX];
21 int num_addrs;
22 unsigned int irqs[PROMINTR_MAX];
23 int num_irqs;
24};
25
26struct linux_ebus_device {
27 struct of_device ofdev;
28 struct linux_ebus_device *next;
29 struct linux_ebus_child *children;
30 struct linux_ebus *bus;
31 struct device_node *prom_node;
32 struct resource resource[PROMREG_MAX];
33 int num_addrs;
34 unsigned int irqs[PROMINTR_MAX];
35 int num_irqs;
36};
37#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
38
39struct linux_ebus {
40 struct of_device ofdev;
41 struct linux_ebus *next;
42 struct linux_ebus_device *devices;
43 struct pci_dev *self;
44 int index;
45 int is_rio;
46 struct device_node *prom_node;
47};
48#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
49
50struct ebus_dma_info {
51 spinlock_t lock;
52 void __iomem *regs;
53
54 unsigned int flags;
55#define EBUS_DMA_FLAG_USE_EBDMA_HANDLER 0x00000001
56#define EBUS_DMA_FLAG_TCI_DISABLE 0x00000002
57
58 /* These are only valid if EBUS_DMA_FLAG_USE_EBDMA_HANDLER is
59 * set.
60 */
61 void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
62 void *client_cookie;
63 unsigned int irq;
64#define EBUS_DMA_EVENT_ERROR 1
65#define EBUS_DMA_EVENT_DMA 2
66#define EBUS_DMA_EVENT_DEVICE 4
67
68 unsigned char name[64];
69};
70
71extern int ebus_dma_register(struct ebus_dma_info *p);
72extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
73extern void ebus_dma_unregister(struct ebus_dma_info *p);
74extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
75 size_t len);
76extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
77extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
78extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
79extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
80
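A rough sketch of bringing up a channel with the helpers above. The call ordering, the meaning of the direction flag and all names here are assumptions about typical use; this patch only declares the prototypes.

static void example_ebdma_callback(struct ebus_dma_info *p, int event, void *cookie)
{
	/* 'event' is one of the EBUS_DMA_EVENT_* values above. */
}

static int example_ebdma_setup(struct ebus_dma_info *p, void __iomem *regs,
			       unsigned int irq, dma_addr_t buf, size_t len)
{
	memset(p, 0, sizeof(*p));
	spin_lock_init(&p->lock);
	p->regs = regs;
	p->irq = irq;
	p->flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;	/* let the helper run the IRQ and call us back */
	p->callback = example_ebdma_callback;
	strlcpy((char *)p->name, "example-ebdma", sizeof(p->name));

	if (ebus_dma_register(p))
		return -ENODEV;
	if (ebus_dma_irq_enable(p, 1))
		return -ENODEV;

	ebus_dma_prepare(p, 1);			/* 'write' flag as named in the prototype */
	ebus_dma_enable(p, 1);
	return ebus_dma_request(p, buf, len);	/* queue one transfer of 'len' bytes at 'buf' */
}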
81extern struct linux_ebus *ebus_chain;
82
83extern void ebus_init(void);
84
85#define for_each_ebus(bus) \
86 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
87
88#define for_each_ebusdev(dev, bus) \
89 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
90
91#define for_each_edevchild(dev, child) \
92 for((child) = (dev)->children; (child); (child) = (child)->next)
93
94#endif /* !(__SPARC64_EBUS_H) */
diff --git a/include/asm-sparc/elf.h b/include/asm-sparc/elf.h
index d043f80bc2fd..f035c45d7b5e 100644
--- a/include/asm-sparc/elf.h
+++ b/include/asm-sparc/elf.h
@@ -1,145 +1,8 @@
1#ifndef ___ASM_SPARC_ELF_H
2#define ___ASM_SPARC_ELF_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/elf_64.h>
5#else
6#include <asm-sparc/elf_32.h>
7#endif
8#endif
1#ifndef __ASMSPARC_ELF_H
2#define __ASMSPARC_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9
10/*
11 * Sparc section types
12 */
13#define STT_REGISTER 13
14
15/*
16 * Sparc ELF relocation types
17 */
18#define R_SPARC_NONE 0
19#define R_SPARC_8 1
20#define R_SPARC_16 2
21#define R_SPARC_32 3
22#define R_SPARC_DISP8 4
23#define R_SPARC_DISP16 5
24#define R_SPARC_DISP32 6
25#define R_SPARC_WDISP30 7
26#define R_SPARC_WDISP22 8
27#define R_SPARC_HI22 9
28#define R_SPARC_22 10
29#define R_SPARC_13 11
30#define R_SPARC_LO10 12
31#define R_SPARC_GOT10 13
32#define R_SPARC_GOT13 14
33#define R_SPARC_GOT22 15
34#define R_SPARC_PC10 16
35#define R_SPARC_PC22 17
36#define R_SPARC_WPLT30 18
37#define R_SPARC_COPY 19
38#define R_SPARC_GLOB_DAT 20
39#define R_SPARC_JMP_SLOT 21
40#define R_SPARC_RELATIVE 22
41#define R_SPARC_UA32 23
42#define R_SPARC_PLT32 24
43#define R_SPARC_HIPLT22 25
44#define R_SPARC_LOPLT10 26
45#define R_SPARC_PCPLT32 27
46#define R_SPARC_PCPLT22 28
47#define R_SPARC_PCPLT10 29
48#define R_SPARC_10 30
49#define R_SPARC_11 31
50#define R_SPARC_64 32
51#define R_SPARC_OLO10 33
52#define R_SPARC_WDISP16 40
53#define R_SPARC_WDISP19 41
54#define R_SPARC_7 43
55#define R_SPARC_5 44
56#define R_SPARC_6 45
57
58/* Bits present in AT_HWCAP, primarily for Sparc32. */
59
60#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
61#define HWCAP_SPARC_STBAR 2
62#define HWCAP_SPARC_SWAP 4
63#define HWCAP_SPARC_MULDIV 8
64#define HWCAP_SPARC_V9 16
65#define HWCAP_SPARC_ULTRA3 32
66
67#define CORE_DUMP_USE_REGSET
68
69/* Format is:
70 * G0 --> G7
71 * O0 --> O7
72 * L0 --> L7
73 * I0 --> I7
74 * PSR, PC, nPC, Y, WIM, TBR
75 */
76typedef unsigned long elf_greg_t;
77#define ELF_NGREG 38
78typedef elf_greg_t elf_gregset_t[ELF_NGREG];
79
80typedef struct {
81 union {
82 unsigned long pr_regs[32];
83 double pr_dregs[16];
84 } pr_fr;
85 unsigned long __unused;
86 unsigned long pr_fsr;
87 unsigned char pr_qcnt;
88 unsigned char pr_q_entrysize;
89 unsigned char pr_en;
90 unsigned int pr_q[64];
91} elf_fpregset_t;
92
93#include <asm/mbus.h>
94
95/*
96 * This is used to ensure we don't load something for the wrong architecture.
97 */
98#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
99
100/*
101 * These are used to set parameters in the core dumps.
102 */
103#define ELF_ARCH EM_SPARC
104#define ELF_CLASS ELFCLASS32
105#define ELF_DATA ELFDATA2MSB
106
107#define USE_ELF_CORE_DUMP
108#ifndef CONFIG_SUN4
109#define ELF_EXEC_PAGESIZE 4096
110#else
111#define ELF_EXEC_PAGESIZE 8192
112#endif
113
114
115/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
116 use of this is to invoke "./ld.so someprog" to test out a new version of
117 the loader. We need to make sure that it is out of the way of the program
118 that it will "exec", and that there is sufficient room for the brk. */
119
120#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
121
122/* This yields a mask that user programs can use to figure out what
123 instruction set this cpu supports. This can NOT be done in userspace
124 on Sparc. */
125
126/* Sun4c has none of the capabilities, most sun4m's have them all.
127 * XXX This is gross, set some global variable at boot time. -DaveM
128 */
129#define ELF_HWCAP ((ARCH_SUN4C_SUN4) ? 0 : \
130 (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
131 HWCAP_SPARC_SWAP | \
132 ((srmmu_modtype != Cypress && \
133 srmmu_modtype != Cypress_vE && \
134 srmmu_modtype != Cypress_vD) ? \
135 HWCAP_SPARC_MULDIV : 0)))
136
137/* This yields a string that ld.so will use to load implementation
138 specific libraries for optimization. This is more specific in
139 intent than poking at uname or /proc/cpuinfo. */
140
141#define ELF_PLATFORM (NULL)
142
143#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
144
145#endif /* !(__ASMSPARC_ELF_H) */
diff --git a/include/asm-sparc/elf_32.h b/include/asm-sparc/elf_32.h
new file mode 100644
index 000000000000..d043f80bc2fd
--- /dev/null
+++ b/include/asm-sparc/elf_32.h
@@ -0,0 +1,145 @@
1#ifndef __ASMSPARC_ELF_H
2#define __ASMSPARC_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9
10/*
11 * Sparc section types
12 */
13#define STT_REGISTER 13
14
15/*
16 * Sparc ELF relocation types
17 */
18#define R_SPARC_NONE 0
19#define R_SPARC_8 1
20#define R_SPARC_16 2
21#define R_SPARC_32 3
22#define R_SPARC_DISP8 4
23#define R_SPARC_DISP16 5
24#define R_SPARC_DISP32 6
25#define R_SPARC_WDISP30 7
26#define R_SPARC_WDISP22 8
27#define R_SPARC_HI22 9
28#define R_SPARC_22 10
29#define R_SPARC_13 11
30#define R_SPARC_LO10 12
31#define R_SPARC_GOT10 13
32#define R_SPARC_GOT13 14
33#define R_SPARC_GOT22 15
34#define R_SPARC_PC10 16
35#define R_SPARC_PC22 17
36#define R_SPARC_WPLT30 18
37#define R_SPARC_COPY 19
38#define R_SPARC_GLOB_DAT 20
39#define R_SPARC_JMP_SLOT 21
40#define R_SPARC_RELATIVE 22
41#define R_SPARC_UA32 23
42#define R_SPARC_PLT32 24
43#define R_SPARC_HIPLT22 25
44#define R_SPARC_LOPLT10 26
45#define R_SPARC_PCPLT32 27
46#define R_SPARC_PCPLT22 28
47#define R_SPARC_PCPLT10 29
48#define R_SPARC_10 30
49#define R_SPARC_11 31
50#define R_SPARC_64 32
51#define R_SPARC_OLO10 33
52#define R_SPARC_WDISP16 40
53#define R_SPARC_WDISP19 41
54#define R_SPARC_7 43
55#define R_SPARC_5 44
56#define R_SPARC_6 45
57
58/* Bits present in AT_HWCAP, primarily for Sparc32. */
59
60#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
61#define HWCAP_SPARC_STBAR 2
62#define HWCAP_SPARC_SWAP 4
63#define HWCAP_SPARC_MULDIV 8
64#define HWCAP_SPARC_V9 16
65#define HWCAP_SPARC_ULTRA3 32
66
67#define CORE_DUMP_USE_REGSET
68
69/* Format is:
70 * G0 --> G7
71 * O0 --> O7
72 * L0 --> L7
73 * I0 --> I7
74 * PSR, PC, nPC, Y, WIM, TBR
75 */
76typedef unsigned long elf_greg_t;
77#define ELF_NGREG 38
78typedef elf_greg_t elf_gregset_t[ELF_NGREG];
79
80typedef struct {
81 union {
82 unsigned long pr_regs[32];
83 double pr_dregs[16];
84 } pr_fr;
85 unsigned long __unused;
86 unsigned long pr_fsr;
87 unsigned char pr_qcnt;
88 unsigned char pr_q_entrysize;
89 unsigned char pr_en;
90 unsigned int pr_q[64];
91} elf_fpregset_t;
92
93#include <asm/mbus.h>
94
95/*
96 * This is used to ensure we don't load something for the wrong architecture.
97 */
98#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
99
100/*
101 * These are used to set parameters in the core dumps.
102 */
103#define ELF_ARCH EM_SPARC
104#define ELF_CLASS ELFCLASS32
105#define ELF_DATA ELFDATA2MSB
106
107#define USE_ELF_CORE_DUMP
108#ifndef CONFIG_SUN4
109#define ELF_EXEC_PAGESIZE 4096
110#else
111#define ELF_EXEC_PAGESIZE 8192
112#endif
113
114
115/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
116 use of this is to invoke "./ld.so someprog" to test out a new version of
117 the loader. We need to make sure that it is out of the way of the program
118 that it will "exec", and that there is sufficient room for the brk. */
119
120#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
121
122/* This yields a mask that user programs can use to figure out what
123 instruction set this cpu supports. This can NOT be done in userspace
124 on Sparc. */
125
126/* Sun4c has none of the capabilities, most sun4m's have them all.
127 * XXX This is gross, set some global variable at boot time. -DaveM
128 */
129#define ELF_HWCAP ((ARCH_SUN4C_SUN4) ? 0 : \
130 (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
131 HWCAP_SPARC_SWAP | \
132 ((srmmu_modtype != Cypress && \
133 srmmu_modtype != Cypress_vE && \
134 srmmu_modtype != Cypress_vD) ? \
135 HWCAP_SPARC_MULDIV : 0)))
136
137/* This yields a string that ld.so will use to load implementation
138 specific libraries for optimization. This is more specific in
139 intent than poking at uname or /proc/cpuinfo. */
140
141#define ELF_PLATFORM (NULL)
142
143#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
144
145#endif /* !(__ASMSPARC_ELF_H) */
diff --git a/include/asm-sparc/elf_64.h b/include/asm-sparc/elf_64.h
new file mode 100644
index 000000000000..0818a1308f4e
--- /dev/null
+++ b/include/asm-sparc/elf_64.h
@@ -0,0 +1,217 @@
1#ifndef __ASM_SPARC64_ELF_H
2#define __ASM_SPARC64_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11#include <asm/spitfire.h>
12
13/*
14 * Sparc section types
15 */
16#define STT_REGISTER 13
17
18/*
19 * Sparc ELF relocation types
20 */
21#define R_SPARC_NONE 0
22#define R_SPARC_8 1
23#define R_SPARC_16 2
24#define R_SPARC_32 3
25#define R_SPARC_DISP8 4
26#define R_SPARC_DISP16 5
27#define R_SPARC_DISP32 6
28#define R_SPARC_WDISP30 7
29#define R_SPARC_WDISP22 8
30#define R_SPARC_HI22 9
31#define R_SPARC_22 10
32#define R_SPARC_13 11
33#define R_SPARC_LO10 12
34#define R_SPARC_GOT10 13
35#define R_SPARC_GOT13 14
36#define R_SPARC_GOT22 15
37#define R_SPARC_PC10 16
38#define R_SPARC_PC22 17
39#define R_SPARC_WPLT30 18
40#define R_SPARC_COPY 19
41#define R_SPARC_GLOB_DAT 20
42#define R_SPARC_JMP_SLOT 21
43#define R_SPARC_RELATIVE 22
44#define R_SPARC_UA32 23
45#define R_SPARC_PLT32 24
46#define R_SPARC_HIPLT22 25
47#define R_SPARC_LOPLT10 26
48#define R_SPARC_PCPLT32 27
49#define R_SPARC_PCPLT22 28
50#define R_SPARC_PCPLT10 29
51#define R_SPARC_10 30
52#define R_SPARC_11 31
53#define R_SPARC_64 32
54#define R_SPARC_OLO10 33
55#define R_SPARC_WDISP16 40
56#define R_SPARC_WDISP19 41
57#define R_SPARC_7 43
58#define R_SPARC_5 44
59#define R_SPARC_6 45
60
61/* Bits present in AT_HWCAP, primarily for Sparc32. */
62
63#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
64#define HWCAP_SPARC_STBAR 2
65#define HWCAP_SPARC_SWAP 4
66#define HWCAP_SPARC_MULDIV 8
67#define HWCAP_SPARC_V9 16
68#define HWCAP_SPARC_ULTRA3 32
69#define HWCAP_SPARC_BLKINIT 64
70#define HWCAP_SPARC_N2 128
71
72#define CORE_DUMP_USE_REGSET
73
74/*
75 * These are used to set parameters in the core dumps.
76 */
77#define ELF_ARCH EM_SPARCV9
78#define ELF_CLASS ELFCLASS64
79#define ELF_DATA ELFDATA2MSB
80
81/* Format of 64-bit elf_gregset_t is:
82 * G0 --> G7
83 * O0 --> O7
84 * L0 --> L7
85 * I0 --> I7
86 * TSTATE
87 * TPC
88 * TNPC
89 * Y
90 */
91typedef unsigned long elf_greg_t;
92#define ELF_NGREG 36
93typedef elf_greg_t elf_gregset_t[ELF_NGREG];
94
95typedef struct {
96 unsigned long pr_regs[32];
97 unsigned long pr_fsr;
98 unsigned long pr_gsr;
99 unsigned long pr_fprs;
100} elf_fpregset_t;
101
102/* Format of 32-bit elf_gregset_t is:
103 * G0 --> G7
104 * O0 --> O7
105 * L0 --> L7
106 * I0 --> I7
107 * PSR, PC, nPC, Y, WIM, TBR
108 */
109typedef unsigned int compat_elf_greg_t;
110#define COMPAT_ELF_NGREG 38
111typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
112
113typedef struct {
114 union {
115 unsigned int pr_regs[32];
116 unsigned long pr_dregs[16];
117 } pr_fr;
118 unsigned int __unused;
119 unsigned int pr_fsr;
120 unsigned char pr_qcnt;
121 unsigned char pr_q_entrysize;
122 unsigned char pr_en;
123 unsigned int pr_q[64];
124} compat_elf_fpregset_t;
125
126/* UltraSparc extensions. Still unused, but will be eventually. */
127typedef struct {
128 unsigned int pr_type;
129 unsigned int pr_align;
130 union {
131 struct {
132 union {
133 unsigned int pr_regs[32];
134 unsigned long pr_dregs[16];
135 long double pr_qregs[8];
136 } pr_xfr;
137 } pr_v8p;
138 unsigned int pr_xfsr;
139 unsigned int pr_fprs;
140 unsigned int pr_xg[8];
141 unsigned int pr_xo[8];
142 unsigned long pr_tstate;
143 unsigned int pr_filler[8];
144 } pr_un;
145} elf_xregset_t;
146
147/*
148 * This is used to ensure we don't load something for the wrong architecture.
149 */
150#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
151#define compat_elf_check_arch(x) ((x)->e_machine == EM_SPARC || \
152 (x)->e_machine == EM_SPARC32PLUS)
153#define compat_start_thread start_thread32
154
155#define USE_ELF_CORE_DUMP
156#define ELF_EXEC_PAGESIZE PAGE_SIZE
157
158/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
159 use of this is to invoke "./ld.so someprog" to test out a new version of
160 the loader. We need to make sure that it is out of the way of the program
161 that it will "exec", and that there is sufficient room for the brk. */
162
163#define ELF_ET_DYN_BASE 0x0000010000000000UL
164#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
165
166
167/* This yields a mask that user programs can use to figure out what
168 instruction set this cpu supports. */
169
170/* On Ultra, we support all of the v8 capabilities. */
171static inline unsigned int sparc64_elf_hwcap(void)
172{
173 unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
174 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
175 HWCAP_SPARC_V9);
176
177 if (tlb_type == cheetah || tlb_type == cheetah_plus)
178 cap |= HWCAP_SPARC_ULTRA3;
179 else if (tlb_type == hypervisor) {
180 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
181 sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
182 cap |= HWCAP_SPARC_BLKINIT;
183 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
184 cap |= HWCAP_SPARC_N2;
185 }
186
187 return cap;
188}
189
190#define ELF_HWCAP sparc64_elf_hwcap();
191
192/* This yields a string that ld.so will use to load implementation
193 specific libraries for optimization. This is more specific in
194 intent than poking at uname or /proc/cpuinfo. */
195
196#define ELF_PLATFORM (NULL)
197
198#define SET_PERSONALITY(ex, ibcs2) \
199do { unsigned long new_flags = current_thread_info()->flags; \
200 new_flags &= _TIF_32BIT; \
201 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
202 new_flags |= _TIF_32BIT; \
203 else \
204 new_flags &= ~_TIF_32BIT; \
205 if ((current_thread_info()->flags & _TIF_32BIT) \
206 != new_flags) \
207 set_thread_flag(TIF_ABI_PENDING); \
208 else \
209 clear_thread_flag(TIF_ABI_PENDING); \
210 /* flush_thread will update pgd cache */ \
211 if (ibcs2) \
212 set_personality(PER_SVR4); \
213 else if (current->personality != PER_LINUX32) \
214 set_personality(PER_LINUX); \
215} while (0)
216
217#endif /* !(__ASM_SPARC64_ELF_H) */
diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h
index d3978e068e2b..6c628ba15a8d 100644
--- a/include/asm-sparc/floppy.h
+++ b/include/asm-sparc/floppy.h
@@ -1,388 +1,8 @@
1#ifndef ___ASM_SPARC_FLOPPY_H
2#define ___ASM_SPARC_FLOPPY_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/floppy_64.h>
5#else
6#include <asm-sparc/floppy_32.h>
7#endif
8#endif
1/* asm-sparc/floppy.h: Sparc specific parts of the Floppy driver.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef __ASM_SPARC_FLOPPY_H
7#define __ASM_SPARC_FLOPPY_H
8
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/system.h>
12#include <asm/idprom.h>
13#include <asm/machines.h>
14#include <asm/oplib.h>
15#include <asm/auxio.h>
16#include <asm/irq.h>
17
18/* We don't need no stinkin' I/O port allocation crap. */
19#undef release_region
20#undef request_region
21#define release_region(X, Y) do { } while(0)
22#define request_region(X, Y, Z) (1)
23
24/* References:
25 * 1) Netbsd Sun floppy driver.
26 * 2) NCR 82077 controller manual
27 * 3) Intel 82077 controller manual
28 */
29struct sun_flpy_controller {
30 volatile unsigned char status_82072; /* Main Status reg. */
31#define dcr_82072 status_82072 /* Digital Control reg. */
32#define status1_82077 status_82072 /* Auxiliary Status reg. 1 */
33
34 volatile unsigned char data_82072; /* Data fifo. */
35#define status2_82077 data_82072 /* Auxiliary Status reg. 2 */
36
37 volatile unsigned char dor_82077; /* Digital Output reg. */
38 volatile unsigned char tapectl_82077; /* What the? Tape control reg? */
39
40 volatile unsigned char status_82077; /* Main Status Register. */
41#define drs_82077 status_82077 /* Digital Rate Select reg. */
42
43 volatile unsigned char data_82077; /* Data fifo. */
44 volatile unsigned char ___unused;
45 volatile unsigned char dir_82077; /* Digital Input reg. */
46#define dcr_82077 dir_82077 /* Config Control reg. */
47};
48
49/* You'll only ever find one controller on a SparcStation anyways. */
50static struct sun_flpy_controller *sun_fdc = NULL;
51extern volatile unsigned char *fdc_status;
52
53struct sun_floppy_ops {
54 unsigned char (*fd_inb)(int port);
55 void (*fd_outb)(unsigned char value, int port);
56};
57
58static struct sun_floppy_ops sun_fdops;
59
60#define fd_inb(port) sun_fdops.fd_inb(port)
61#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
62#define fd_enable_dma() sun_fd_enable_dma()
63#define fd_disable_dma() sun_fd_disable_dma()
64#define fd_request_dma() (0) /* nothing... */
65#define fd_free_dma() /* nothing... */
66#define fd_clear_dma_ff() /* nothing... */
67#define fd_set_dma_mode(mode) sun_fd_set_dma_mode(mode)
68#define fd_set_dma_addr(addr) sun_fd_set_dma_addr(addr)
69#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
70#define fd_enable_irq() /* nothing... */
71#define fd_disable_irq() /* nothing... */
72#define fd_cacheflush(addr, size) /* nothing... */
73#define fd_request_irq() sun_fd_request_irq()
74#define fd_free_irq() /* nothing... */
75#if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
76#define fd_dma_mem_alloc(size) ((unsigned long) vmalloc(size))
77#define fd_dma_mem_free(addr,size) (vfree((void *)(addr)))
78#endif
79
80/* XXX This isn't really correct. XXX */
81#define get_dma_residue(x) (0)
82
83#define FLOPPY0_TYPE 4
84#define FLOPPY1_TYPE 0
85
86/* Super paranoid... */
87#undef HAVE_DISABLE_HLT
88
89/* Here is where we catch the floppy driver trying to initialize,
90 * therefore this is where we call the PROM device tree probing
91 * routine etc. on the Sparc.
92 */
93#define FDC1 sun_floppy_init()
94
95#define N_FDC 1
96#define N_DRIVE 8
97
98/* No 64k boundary crossing problems on the Sparc. */
99#define CROSS_64KB(a,s) (0)
100
101/* Routines unique to each controller type on a Sun. */
102static void sun_set_dor(unsigned char value, int fdc_82077)
103{
104 if (sparc_cpu_model == sun4c) {
105 unsigned int bits = 0;
106 if (value & 0x10)
107 bits |= AUXIO_FLPY_DSEL;
108 if ((value & 0x80) == 0)
109 bits |= AUXIO_FLPY_EJCT;
110 set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
111 }
112 if (fdc_82077) {
113 sun_fdc->dor_82077 = value;
114 }
115}
116
117static unsigned char sun_read_dir(void)
118{
119 if (sparc_cpu_model == sun4c)
120 return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0;
121 else
122 return sun_fdc->dir_82077;
123}
124
125static unsigned char sun_82072_fd_inb(int port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to read unknown port %d\n", port);
131 panic("floppy: Port bolixed.");
132 case 4: /* FD_STATUS */
133 return sun_fdc->status_82072 & ~STATUS_DMA;
134 case 5: /* FD_DATA */
135 return sun_fdc->data_82072;
136 case 7: /* FD_DIR */
137 return sun_read_dir();
138 };
139 panic("sun_82072_fd_inb: How did I get here?");
140}
141
142static void sun_82072_fd_outb(unsigned char value, int port)
143{
144 udelay(5);
145 switch(port & 7) {
146 default:
147 printk("floppy: Asked to write to unknown port %d\n", port);
148 panic("floppy: Port bolixed.");
149 case 2: /* FD_DOR */
150 sun_set_dor(value, 0);
151 break;
152 case 5: /* FD_DATA */
153 sun_fdc->data_82072 = value;
154 break;
155 case 7: /* FD_DCR */
156 sun_fdc->dcr_82072 = value;
157 break;
158 case 4: /* FD_STATUS */
159 sun_fdc->status_82072 = value;
160 break;
161 };
162 return;
163}
164
165static unsigned char sun_82077_fd_inb(int port)
166{
167 udelay(5);
168 switch(port & 7) {
169 default:
170 printk("floppy: Asked to read unknown port %d\n", port);
171 panic("floppy: Port bolixed.");
172 case 0: /* FD_STATUS_0 */
173 return sun_fdc->status1_82077;
174 case 1: /* FD_STATUS_1 */
175 return sun_fdc->status2_82077;
176 case 2: /* FD_DOR */
177 return sun_fdc->dor_82077;
178 case 3: /* FD_TDR */
179 return sun_fdc->tapectl_82077;
180 case 4: /* FD_STATUS */
181 return sun_fdc->status_82077 & ~STATUS_DMA;
182 case 5: /* FD_DATA */
183 return sun_fdc->data_82077;
184 case 7: /* FD_DIR */
185 return sun_read_dir();
186 };
187 panic("sun_82077_fd_inb: How did I get here?");
188}
189
190static void sun_82077_fd_outb(unsigned char value, int port)
191{
192 udelay(5);
193 switch(port & 7) {
194 default:
195 printk("floppy: Asked to write to unknown port %d\n", port);
196 panic("floppy: Port bolixed.");
197 case 2: /* FD_DOR */
198 sun_set_dor(value, 1);
199 break;
200 case 5: /* FD_DATA */
201 sun_fdc->data_82077 = value;
202 break;
203 case 7: /* FD_DCR */
204 sun_fdc->dcr_82077 = value;
205 break;
206 case 4: /* FD_STATUS */
207 sun_fdc->status_82077 = value;
208 break;
209 case 3: /* FD_TDR */
210 sun_fdc->tapectl_82077 = value;
211 break;
212 };
213 return;
214}
215
216/* For pseudo-dma (Sun floppy drives have no real DMA available to
217 * them so we must eat the data fifo bytes directly ourselves) we have
218 * three state variables. doing_pdma tells our inline low-level
219 * assembly floppy interrupt entry point whether it should sit and eat
220 * bytes from the fifo or just transfer control up to the higher level
221 * floppy interrupt c-code. I tried very hard but I could not get the
222 * pseudo-dma to work in c-code without getting many overruns and
223 * underruns. If non-zero, doing_pdma encodes the direction of
224 * the transfer for debugging. 1=read 2=write
225 */
226extern char *pdma_vaddr;
227extern unsigned long pdma_size;
228extern volatile int doing_pdma;
229
230/* This is software state */
231extern char *pdma_base;
232extern unsigned long pdma_areasize;
233
234/* Common routines to all controller types on the Sparc. */
235static inline void virtual_dma_init(void)
236{
237 /* nothing... */
238}
239
240static inline void sun_fd_disable_dma(void)
241{
242 doing_pdma = 0;
243 if (pdma_base) {
244 mmu_unlockarea(pdma_base, pdma_areasize);
245 pdma_base = NULL;
246 }
247}
248
249static inline void sun_fd_set_dma_mode(int mode)
250{
251 switch(mode) {
252 case DMA_MODE_READ:
253 doing_pdma = 1;
254 break;
255 case DMA_MODE_WRITE:
256 doing_pdma = 2;
257 break;
258 default:
259 printk("Unknown dma mode %d\n", mode);
260 panic("floppy: Giving up...");
261 }
262}
263
264static inline void sun_fd_set_dma_addr(char *buffer)
265{
266 pdma_vaddr = buffer;
267}
268
269static inline void sun_fd_set_dma_count(int length)
270{
271 pdma_size = length;
272}
273
274static inline void sun_fd_enable_dma(void)
275{
276 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
277 pdma_base = pdma_vaddr;
278 pdma_areasize = pdma_size;
279}
280
281/* Our low-level entry point in arch/sparc/kernel/entry.S */
282extern int sparc_floppy_request_irq(int irq, unsigned long flags,
283 irq_handler_t irq_handler);
284
285static int sun_fd_request_irq(void)
286{
287 static int once = 0;
288 int error;
289
290 if(!once) {
291 once = 1;
292 error = sparc_floppy_request_irq(FLOPPY_IRQ,
293 IRQF_DISABLED,
294 floppy_interrupt);
295 return ((error == 0) ? 0 : -1);
296 } else return 0;
297}
298
299static struct linux_prom_registers fd_regs[2];
300
301static int sun_floppy_init(void)
302{
303 char state[128];
304 int tnode, fd_node, num_regs;
305 struct resource r;
306
307 use_virtual_dma = 1;
308
309 FLOPPY_IRQ = 11;
310 /* Forget it if we aren't on a machine that could possibly
311 * ever have a floppy drive.
312 */
313 if((sparc_cpu_model != sun4c && sparc_cpu_model != sun4m) ||
314 ((idprom->id_machtype == (SM_SUN4C | SM_4C_SLC)) ||
315 (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC)))) {
316 /* We certainly don't have a floppy controller. */
317 goto no_sun_fdc;
318 }
319 /* Well, try to find one. */
320 tnode = prom_getchild(prom_root_node);
321 fd_node = prom_searchsiblings(tnode, "obio");
322 if(fd_node != 0) {
323 tnode = prom_getchild(fd_node);
324 fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
325 } else {
326 fd_node = prom_searchsiblings(tnode, "fd");
327 }
328 if(fd_node == 0) {
329 goto no_sun_fdc;
330 }
331
332 /* The sun4m lets us know if the controller is actually usable. */
333 if(sparc_cpu_model == sun4m &&
334 prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
335 if(!strcmp(state, "disabled")) {
336 goto no_sun_fdc;
337 }
338 }
339 num_regs = prom_getproperty(fd_node, "reg", (char *) fd_regs, sizeof(fd_regs));
340 num_regs = (num_regs / sizeof(fd_regs[0]));
341 prom_apply_obio_ranges(fd_regs, num_regs);
342 memset(&r, 0, sizeof(r));
343 r.flags = fd_regs[0].which_io;
344 r.start = fd_regs[0].phys_addr;
345 sun_fdc = (struct sun_flpy_controller *)
346 sbus_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
347
348 /* Last minute sanity check... */
349 if(sun_fdc->status_82072 == 0xff) {
350 sun_fdc = NULL;
351 goto no_sun_fdc;
352 }
353
354 sun_fdops.fd_inb = sun_82077_fd_inb;
355 sun_fdops.fd_outb = sun_82077_fd_outb;
356 fdc_status = &sun_fdc->status_82077;
357
358 if (sun_fdc->dor_82077 == 0x80) {
359 sun_fdc->dor_82077 = 0x02;
360 if (sun_fdc->dor_82077 == 0x80) {
361 sun_fdops.fd_inb = sun_82072_fd_inb;
362 sun_fdops.fd_outb = sun_82072_fd_outb;
363 fdc_status = &sun_fdc->status_82072;
364 }
365 }
366
367 /* Success... */
368 allowed_drive_mask = 0x01;
369 return (int) sun_fdc;
370
371no_sun_fdc:
372 return -1;
373}
374
375static int sparc_eject(void)
376{
377 set_dor(0x00, 0xff, 0x90);
378 udelay(500);
379 set_dor(0x00, 0x6f, 0x00);
380 udelay(500);
381 return 0;
382}
383
384#define fd_eject(drive) sparc_eject()
385
386#define EXTRA_FLOPPY_PARAMS
387
388#endif /* !(__ASM_SPARC_FLOPPY_H) */
diff --git a/include/asm-sparc/floppy_32.h b/include/asm-sparc/floppy_32.h
new file mode 100644
index 000000000000..acdd06eafe59
--- /dev/null
+++ b/include/asm-sparc/floppy_32.h
@@ -0,0 +1,388 @@
1/* asm-sparc/floppy.h: Sparc specific parts of the Floppy driver.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef __ASM_SPARC_FLOPPY_H
7#define __ASM_SPARC_FLOPPY_H
8
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/system.h>
12#include <asm/idprom.h>
13#include <asm/machines.h>
14#include <asm/oplib.h>
15#include <asm/auxio.h>
16#include <asm/irq.h>
17
18/* We don't need no stinkin' I/O port allocation crap. */
19#undef release_region
20#undef request_region
21#define release_region(X, Y) do { } while(0)
22#define request_region(X, Y, Z) (1)
23
24/* References:
25 * 1) Netbsd Sun floppy driver.
26 * 2) NCR 82077 controller manual
27 * 3) Intel 82077 controller manual
28 */
29struct sun_flpy_controller {
30 volatile unsigned char status_82072; /* Main Status reg. */
31#define dcr_82072 status_82072 /* Digital Control reg. */
32#define status1_82077 status_82072 /* Auxiliary Status reg. 1 */
33
34 volatile unsigned char data_82072; /* Data fifo. */
35#define status2_82077 data_82072 /* Auxiliary Status reg. 2 */
36
37 volatile unsigned char dor_82077; /* Digital Output reg. */
38 volatile unsigned char tapectl_82077; /* What the? Tape control reg? */
39
40 volatile unsigned char status_82077; /* Main Status Register. */
41#define drs_82077 status_82077 /* Digital Rate Select reg. */
42
43 volatile unsigned char data_82077; /* Data fifo. */
44 volatile unsigned char ___unused;
45 volatile unsigned char dir_82077; /* Digital Input reg. */
46#define dcr_82077 dir_82077 /* Config Control reg. */
47};
48
49/* You'll only ever find one controller on a SparcStation anyways. */
50static struct sun_flpy_controller *sun_fdc = NULL;
51extern volatile unsigned char *fdc_status;
52
53struct sun_floppy_ops {
54 unsigned char (*fd_inb)(int port);
55 void (*fd_outb)(unsigned char value, int port);
56};
57
58static struct sun_floppy_ops sun_fdops;
59
60#define fd_inb(port) sun_fdops.fd_inb(port)
61#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
62#define fd_enable_dma() sun_fd_enable_dma()
63#define fd_disable_dma() sun_fd_disable_dma()
64#define fd_request_dma() (0) /* nothing... */
65#define fd_free_dma() /* nothing... */
66#define fd_clear_dma_ff() /* nothing... */
67#define fd_set_dma_mode(mode) sun_fd_set_dma_mode(mode)
68#define fd_set_dma_addr(addr) sun_fd_set_dma_addr(addr)
69#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
70#define fd_enable_irq() /* nothing... */
71#define fd_disable_irq() /* nothing... */
72#define fd_cacheflush(addr, size) /* nothing... */
73#define fd_request_irq() sun_fd_request_irq()
74#define fd_free_irq() /* nothing... */
75#if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
76#define fd_dma_mem_alloc(size) ((unsigned long) vmalloc(size))
77#define fd_dma_mem_free(addr,size) (vfree((void *)(addr)))
78#endif
79
80/* XXX This isn't really correct. XXX */
81#define get_dma_residue(x) (0)
82
83#define FLOPPY0_TYPE 4
84#define FLOPPY1_TYPE 0
85
86/* Super paranoid... */
87#undef HAVE_DISABLE_HLT
88
89/* Here is where we catch the floppy driver trying to initialize,
90 * therefore this is where we call the PROM device tree probing
91 * routine etc. on the Sparc.
92 */
93#define FDC1 sun_floppy_init()
94
95#define N_FDC 1
96#define N_DRIVE 8
97
98/* No 64k boundary crossing problems on the Sparc. */
99#define CROSS_64KB(a,s) (0)
100
101/* Routines unique to each controller type on a Sun. */
102static void sun_set_dor(unsigned char value, int fdc_82077)
103{
104 if (sparc_cpu_model == sun4c) {
105 unsigned int bits = 0;
106 if (value & 0x10)
107 bits |= AUXIO_FLPY_DSEL;
108 if ((value & 0x80) == 0)
109 bits |= AUXIO_FLPY_EJCT;
110 set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
111 }
112 if (fdc_82077) {
113 sun_fdc->dor_82077 = value;
114 }
115}
116
117static unsigned char sun_read_dir(void)
118{
119 if (sparc_cpu_model == sun4c)
120 return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0;
121 else
122 return sun_fdc->dir_82077;
123}
124
125static unsigned char sun_82072_fd_inb(int port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to read unknown port %d\n", port);
131 panic("floppy: Port bolixed.");
132 case 4: /* FD_STATUS */
133 return sun_fdc->status_82072 & ~STATUS_DMA;
134 case 5: /* FD_DATA */
135 return sun_fdc->data_82072;
136 case 7: /* FD_DIR */
137 return sun_read_dir();
138 };
139 panic("sun_82072_fd_inb: How did I get here?");
140}
141
142static void sun_82072_fd_outb(unsigned char value, int port)
143{
144 udelay(5);
145 switch(port & 7) {
146 default:
147 printk("floppy: Asked to write to unknown port %d\n", port);
148 panic("floppy: Port bolixed.");
149 case 2: /* FD_DOR */
150 sun_set_dor(value, 0);
151 break;
152 case 5: /* FD_DATA */
153 sun_fdc->data_82072 = value;
154 break;
155 case 7: /* FD_DCR */
156 sun_fdc->dcr_82072 = value;
157 break;
158 case 4: /* FD_STATUS */
159 sun_fdc->status_82072 = value;
160 break;
161 };
162 return;
163}
164
165static unsigned char sun_82077_fd_inb(int port)
166{
167 udelay(5);
168 switch(port & 7) {
169 default:
170 printk("floppy: Asked to read unknown port %d\n", port);
171 panic("floppy: Port bolixed.");
172 case 0: /* FD_STATUS_0 */
173 return sun_fdc->status1_82077;
174 case 1: /* FD_STATUS_1 */
175 return sun_fdc->status2_82077;
176 case 2: /* FD_DOR */
177 return sun_fdc->dor_82077;
178 case 3: /* FD_TDR */
179 return sun_fdc->tapectl_82077;
180 case 4: /* FD_STATUS */
181 return sun_fdc->status_82077 & ~STATUS_DMA;
182 case 5: /* FD_DATA */
183 return sun_fdc->data_82077;
184 case 7: /* FD_DIR */
185 return sun_read_dir();
186 };
187 panic("sun_82077_fd_inb: How did I get here?");
188}
189
190static void sun_82077_fd_outb(unsigned char value, int port)
191{
192 udelay(5);
193 switch(port & 7) {
194 default:
195 printk("floppy: Asked to write to unknown port %d\n", port);
196 panic("floppy: Port bolixed.");
197 case 2: /* FD_DOR */
198 sun_set_dor(value, 1);
199 break;
200 case 5: /* FD_DATA */
201 sun_fdc->data_82077 = value;
202 break;
203 case 7: /* FD_DCR */
204 sun_fdc->dcr_82077 = value;
205 break;
206 case 4: /* FD_STATUS */
207 sun_fdc->status_82077 = value;
208 break;
209 case 3: /* FD_TDR */
210 sun_fdc->tapectl_82077 = value;
211 break;
212 };
213 return;
214}
215
216/* For pseudo-dma (Sun floppy drives have no real DMA available to
217 * them so we must eat the data fifo bytes directly ourselves) we have
218 * three state variables. doing_pdma tells our inline low-level
219 * assembly floppy interrupt entry point whether it should sit and eat
220 * bytes from the fifo or just transfer control up to the higher level
221 * floppy interrupt c-code. I tried very hard but I could not get the
222 * pseudo-dma to work in c-code without getting many overruns and
223 * underruns. If non-zero, doing_pdma encodes the direction of
224 * the transfer for debugging. 1=read 2=write
225 */
226extern char *pdma_vaddr;
227extern unsigned long pdma_size;
228extern volatile int doing_pdma;
229
230/* This is software state */
231extern char *pdma_base;
232extern unsigned long pdma_areasize;
233
234/* Common routines to all controller types on the Sparc. */
235static inline void virtual_dma_init(void)
236{
237 /* nothing... */
238}
239
240static inline void sun_fd_disable_dma(void)
241{
242 doing_pdma = 0;
243 if (pdma_base) {
244 mmu_unlockarea(pdma_base, pdma_areasize);
245 pdma_base = NULL;
246 }
247}
248
249static inline void sun_fd_set_dma_mode(int mode)
250{
251 switch(mode) {
252 case DMA_MODE_READ:
253 doing_pdma = 1;
254 break;
255 case DMA_MODE_WRITE:
256 doing_pdma = 2;
257 break;
258 default:
259 printk("Unknown dma mode %d\n", mode);
260 panic("floppy: Giving up...");
261 }
262}
263
264static inline void sun_fd_set_dma_addr(char *buffer)
265{
266 pdma_vaddr = buffer;
267}
268
269static inline void sun_fd_set_dma_count(int length)
270{
271 pdma_size = length;
272}
273
274static inline void sun_fd_enable_dma(void)
275{
276 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
277 pdma_base = pdma_vaddr;
278 pdma_areasize = pdma_size;
279}
280
281/* Our low-level entry point in arch/sparc/kernel/entry.S */
282extern int sparc_floppy_request_irq(int irq, unsigned long flags,
283 irq_handler_t irq_handler);
284
285static int sun_fd_request_irq(void)
286{
287 static int once = 0;
288 int error;
289
290 if(!once) {
291 once = 1;
292 error = sparc_floppy_request_irq(FLOPPY_IRQ,
293 IRQF_DISABLED,
294 floppy_interrupt);
295 return ((error == 0) ? 0 : -1);
296 } else return 0;
297}
298
299static struct linux_prom_registers fd_regs[2];
300
301static int sun_floppy_init(void)
302{
303 char state[128];
304 int tnode, fd_node, num_regs;
305 struct resource r;
306
307 use_virtual_dma = 1;
308
309 FLOPPY_IRQ = 11;
310 /* Forget it if we aren't on a machine that could possibly
311 * ever have a floppy drive.
312 */
313 if((sparc_cpu_model != sun4c && sparc_cpu_model != sun4m) ||
314 ((idprom->id_machtype == (SM_SUN4C | SM_4C_SLC)) ||
315 (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC)))) {
316 /* We certainly don't have a floppy controller. */
317 goto no_sun_fdc;
318 }
319 /* Well, try to find one. */
320 tnode = prom_getchild(prom_root_node);
321 fd_node = prom_searchsiblings(tnode, "obio");
322 if(fd_node != 0) {
323 tnode = prom_getchild(fd_node);
324 fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
325 } else {
326 fd_node = prom_searchsiblings(tnode, "fd");
327 }
328 if(fd_node == 0) {
329 goto no_sun_fdc;
330 }
331
332 /* The sun4m lets us know if the controller is actually usable. */
333 if(sparc_cpu_model == sun4m &&
334 prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
335 if(!strcmp(state, "disabled")) {
336 goto no_sun_fdc;
337 }
338 }
339 num_regs = prom_getproperty(fd_node, "reg", (char *) fd_regs, sizeof(fd_regs));
340 num_regs = (num_regs / sizeof(fd_regs[0]));
341 prom_apply_obio_ranges(fd_regs, num_regs);
342 memset(&r, 0, sizeof(r));
343 r.flags = fd_regs[0].which_io;
344 r.start = fd_regs[0].phys_addr;
345 sun_fdc = (struct sun_flpy_controller *)
346 sbus_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
347
348 /* Last minute sanity check... */
349 if(sun_fdc->status_82072 == 0xff) {
350 sun_fdc = NULL;
351 goto no_sun_fdc;
352 }
353
354 sun_fdops.fd_inb = sun_82077_fd_inb;
355 sun_fdops.fd_outb = sun_82077_fd_outb;
356 fdc_status = &sun_fdc->status_82077;
357
358 if (sun_fdc->dor_82077 == 0x80) {
359 sun_fdc->dor_82077 = 0x02;
360 if (sun_fdc->dor_82077 == 0x80) {
361 sun_fdops.fd_inb = sun_82072_fd_inb;
362 sun_fdops.fd_outb = sun_82072_fd_outb;
363 fdc_status = &sun_fdc->status_82072;
364 }
365 }
366
367 /* Success... */
368 allowed_drive_mask = 0x01;
369 return (int) sun_fdc;
370
371no_sun_fdc:
372 return -1;
373}
374
375static int sparc_eject(void)
376{
377 set_dor(0x00, 0xff, 0x90);
378 udelay(500);
379 set_dor(0x00, 0x6f, 0x00);
380 udelay(500);
381 return 0;
382}
383
384#define fd_eject(drive) sparc_eject()
385
386#define EXTRA_FLOPPY_PARAMS
387
388#endif /* !(__ASM_SPARC_FLOPPY_H) */
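
The 82077-vs-82072 probe near the end of sun_floppy_init() above writes 0x02 to the DOR and checks whether the value sticks: if the register keeps reading back 0x80 there is no writable DOR, and the ops table falls back to the 82072 accessors. Below is a minimal standalone sketch of that probe in plain C; the fd_ops struct and the mmio_*() helpers are illustrative stand-ins, not the kernel's types.

/*
 * Sketch of the DOR write-back probe used by sun_floppy_init() above.
 * fd_ops and the mmio_*() helpers are hypothetical stand-ins.
 */
#include <stdint.h>

struct fd_ops {
	uint8_t (*inb)(int port);
	void    (*outb)(uint8_t value, int port);
};

static uint8_t mmio_read8(volatile uint8_t *reg)             { return *reg; }
static void    mmio_write8(uint8_t v, volatile uint8_t *reg) { *reg = v; }

const struct fd_ops *probe_fdc(volatile uint8_t *dor,
			       const struct fd_ops *ops_82077,
			       const struct fd_ops *ops_82072)
{
	if (mmio_read8(dor) == 0x80) {		/* reads like a floating bus? */
		mmio_write8(0x02, dor);		/* try to latch a value */
		if (mmio_read8(dor) == 0x80)	/* still 0x80: no DOR, use 82072 */
			return ops_82072;
	}
	return ops_82077;			/* DOR is writable: 82077 */
}
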
diff --git a/include/asm-sparc/floppy_64.h b/include/asm-sparc/floppy_64.h
new file mode 100644
index 000000000000..c39db1060bc7
--- /dev/null
+++ b/include/asm-sparc/floppy_64.h
@@ -0,0 +1,782 @@
1/* floppy.h: Sparc specific parts of the Floppy driver.
2 *
3 * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 *
6 * Ultra/PCI support added: Sep 1997 Eddie C. Dost (ecd@skynet.be)
7 */
8
9#ifndef __ASM_SPARC64_FLOPPY_H
10#define __ASM_SPARC64_FLOPPY_H
11
12#include <linux/init.h>
13#include <linux/pci.h>
14
15#include <asm/page.h>
16#include <asm/pgtable.h>
17#include <asm/system.h>
18#include <asm/idprom.h>
19#include <asm/oplib.h>
20#include <asm/auxio.h>
21#include <asm/sbus.h>
22#include <asm/irq.h>
23
24
25/*
26 * Define this to enable exchanging drive 0 and 1 if only drive 1 is
27 * probed on PCI machines.
28 */
29#undef PCI_FDC_SWAP_DRIVES
30
31
32/* References:
33 * 1) Netbsd Sun floppy driver.
34 * 2) NCR 82077 controller manual
35 * 3) Intel 82077 controller manual
36 */
37struct sun_flpy_controller {
38 volatile unsigned char status1_82077; /* Auxiliary Status reg. 1 */
39 volatile unsigned char status2_82077; /* Auxiliary Status reg. 2 */
40 volatile unsigned char dor_82077; /* Digital Output reg. */
41 volatile unsigned char tapectl_82077; /* Tape Control reg */
42 volatile unsigned char status_82077; /* Main Status Register. */
43#define drs_82077 status_82077 /* Digital Rate Select reg. */
44 volatile unsigned char data_82077; /* Data fifo. */
45 volatile unsigned char ___unused;
46 volatile unsigned char dir_82077; /* Digital Input reg. */
47#define dcr_82077 dir_82077 /* Config Control reg. */
48};
49
50/* You'll only ever find one controller on an Ultra anyways. */
51static struct sun_flpy_controller *sun_fdc = (struct sun_flpy_controller *)-1;
52unsigned long fdc_status;
53static struct sbus_dev *floppy_sdev = NULL;
54
55struct sun_floppy_ops {
56 unsigned char (*fd_inb) (unsigned long port);
57 void (*fd_outb) (unsigned char value, unsigned long port);
58 void (*fd_enable_dma) (void);
59 void (*fd_disable_dma) (void);
60 void (*fd_set_dma_mode) (int);
61 void (*fd_set_dma_addr) (char *);
62 void (*fd_set_dma_count) (int);
63 unsigned int (*get_dma_residue) (void);
64 int (*fd_request_irq) (void);
65 void (*fd_free_irq) (void);
66 int (*fd_eject) (int);
67};
68
69static struct sun_floppy_ops sun_fdops;
70
71#define fd_inb(port) sun_fdops.fd_inb(port)
72#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
73#define fd_enable_dma() sun_fdops.fd_enable_dma()
74#define fd_disable_dma() sun_fdops.fd_disable_dma()
75#define fd_request_dma() (0) /* nothing... */
76#define fd_free_dma() /* nothing... */
77#define fd_clear_dma_ff() /* nothing... */
78#define fd_set_dma_mode(mode) sun_fdops.fd_set_dma_mode(mode)
79#define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr)
80#define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count)
81#define get_dma_residue(x) sun_fdops.get_dma_residue()
82#define fd_cacheflush(addr, size) /* nothing... */
83#define fd_request_irq() sun_fdops.fd_request_irq()
84#define fd_free_irq() sun_fdops.fd_free_irq()
85#define fd_eject(drive) sun_fdops.fd_eject(drive)
86
87/* Super paranoid... */
88#undef HAVE_DISABLE_HLT
89
90static int sun_floppy_types[2] = { 0, 0 };
91
92/* Here is where we catch the floppy driver trying to initialize,
93 * therefore this is where we call the PROM device tree probing
94 * routine etc. on the Sparc.
95 */
96#define FLOPPY0_TYPE sun_floppy_init()
97#define FLOPPY1_TYPE sun_floppy_types[1]
98
99#define FDC1 ((unsigned long)sun_fdc)
100
101#define N_FDC 1
102#define N_DRIVE 8
103
104/* No 64k boundary crossing problems on the Sparc. */
105#define CROSS_64KB(a,s) (0)
106
107static unsigned char sun_82077_fd_inb(unsigned long port)
108{
109 udelay(5);
110 switch(port & 7) {
111 default:
112 printk("floppy: Asked to read unknown port %lx\n", port);
113 panic("floppy: Port bolixed.");
114 case 4: /* FD_STATUS */
115 return sbus_readb(&sun_fdc->status_82077) & ~STATUS_DMA;
116 case 5: /* FD_DATA */
117 return sbus_readb(&sun_fdc->data_82077);
118 case 7: /* FD_DIR */
119 /* XXX: Is DCL on 0x80 in sun4m? */
120 return sbus_readb(&sun_fdc->dir_82077);
121 };
122 panic("sun_82072_fd_inb: How did I get here?");
123}
124
125static void sun_82077_fd_outb(unsigned char value, unsigned long port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to write to unknown port %lx\n", port);
131 panic("floppy: Port bolixed.");
132 case 2: /* FD_DOR */
133 /* Happily, the 82077 has a real DOR register. */
134 sbus_writeb(value, &sun_fdc->dor_82077);
135 break;
136 case 5: /* FD_DATA */
137 sbus_writeb(value, &sun_fdc->data_82077);
138 break;
139 case 7: /* FD_DCR */
140 sbus_writeb(value, &sun_fdc->dcr_82077);
141 break;
142 case 4: /* FD_STATUS */
143 sbus_writeb(value, &sun_fdc->status_82077);
144 break;
145 };
146 return;
147}
148
149/* For pseudo-dma (Sun floppy drives have no real DMA available to
150 * them so we must eat the data fifo bytes directly ourselves) we have
151 * three state variables. doing_pdma tells our inline low-level
152 * assembly floppy interrupt entry point whether it should sit and eat
153 * bytes from the fifo or just transfer control up to the higher level
154 * floppy interrupt c-code. I tried very hard but I could not get the
155 * pseudo-dma to work in c-code without getting many overruns and
156 * underruns. If non-zero, doing_pdma encodes the direction of
157 * the transfer for debugging. 1=read 2=write
158 */
159unsigned char *pdma_vaddr;
160unsigned long pdma_size;
161volatile int doing_pdma = 0;
162
163/* This is software state */
164char *pdma_base = NULL;
165unsigned long pdma_areasize;
166
167/* Common routines to all controller types on the Sparc. */
168static void sun_fd_disable_dma(void)
169{
170 doing_pdma = 0;
171 if (pdma_base) {
172 mmu_unlockarea(pdma_base, pdma_areasize);
173 pdma_base = NULL;
174 }
175}
176
177static void sun_fd_set_dma_mode(int mode)
178{
179 switch(mode) {
180 case DMA_MODE_READ:
181 doing_pdma = 1;
182 break;
183 case DMA_MODE_WRITE:
184 doing_pdma = 2;
185 break;
186 default:
187 printk("Unknown dma mode %d\n", mode);
188 panic("floppy: Giving up...");
189 }
190}
191
192static void sun_fd_set_dma_addr(char *buffer)
193{
194 pdma_vaddr = buffer;
195}
196
197static void sun_fd_set_dma_count(int length)
198{
199 pdma_size = length;
200}
201
202static void sun_fd_enable_dma(void)
203{
204 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
205 pdma_base = pdma_vaddr;
206 pdma_areasize = pdma_size;
207}
208
209irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie)
210{
211 if (likely(doing_pdma)) {
212 void __iomem *stat = (void __iomem *) fdc_status;
213 unsigned char *vaddr = pdma_vaddr;
214 unsigned long size = pdma_size;
215 u8 val;
216
217 while (size) {
218 val = readb(stat);
219 if (unlikely(!(val & 0x80))) {
220 pdma_vaddr = vaddr;
221 pdma_size = size;
222 return IRQ_HANDLED;
223 }
224 if (unlikely(!(val & 0x20))) {
225 pdma_vaddr = vaddr;
226 pdma_size = size;
227 doing_pdma = 0;
228 goto main_interrupt;
229 }
230 if (val & 0x40) {
231 /* read */
232 *vaddr++ = readb(stat + 1);
233 } else {
234 unsigned char data = *vaddr++;
235
236 /* write */
237 writeb(data, stat + 1);
238 }
239 size--;
240 }
241
242 pdma_vaddr = vaddr;
243 pdma_size = size;
244
245 /* Send Terminal Count pulse to floppy controller. */
246 val = readb(auxio_register);
247 val |= AUXIO_AUX1_FTCNT;
248 writeb(val, auxio_register);
249 val &= ~AUXIO_AUX1_FTCNT;
250 writeb(val, auxio_register);
251
252 doing_pdma = 0;
253 }
254
255main_interrupt:
256 return floppy_interrupt(irq, dev_cookie);
257}
258
259static int sun_fd_request_irq(void)
260{
261 static int once = 0;
262 int error;
263
264 if(!once) {
265 once = 1;
266
267 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
268 IRQF_DISABLED, "floppy", NULL);
269
270 return ((error == 0) ? 0 : -1);
271 }
272 return 0;
273}
274
275static void sun_fd_free_irq(void)
276{
277}
278
279static unsigned int sun_get_dma_residue(void)
280{
281 /* XXX This isn't really correct. XXX */
282 return 0;
283}
284
285static int sun_fd_eject(int drive)
286{
287 set_dor(0x00, 0xff, 0x90);
288 udelay(500);
289 set_dor(0x00, 0x6f, 0x00);
290 udelay(500);
291 return 0;
292}
293
294#ifdef CONFIG_PCI
295#include <asm/ebus.h>
296#include <asm/ns87303.h>
297
298static struct ebus_dma_info sun_pci_fd_ebus_dma;
299static struct pci_dev *sun_pci_ebus_dev;
300static int sun_pci_broken_drive = -1;
301
302struct sun_pci_dma_op {
303 unsigned int addr;
304 int len;
305 int direction;
306 char *buf;
307};
308static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
309static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
310
311extern irqreturn_t floppy_interrupt(int irq, void *dev_id);
312
313static unsigned char sun_pci_fd_inb(unsigned long port)
314{
315 udelay(5);
316 return inb(port);
317}
318
319static void sun_pci_fd_outb(unsigned char val, unsigned long port)
320{
321 udelay(5);
322 outb(val, port);
323}
324
325static void sun_pci_fd_broken_outb(unsigned char val, unsigned long port)
326{
327 udelay(5);
328 /*
329 * XXX: Due to SUN's broken floppy connector on AX and AXi
330 * we need to turn on MOTOR_0 also, if the floppy is
331 * jumpered to DS1 (like most PC floppies are). I hope
332 * this does not hurt correct hardware like the AXmp.
333 * (Eddie, Sep 12 1998).
334 */
335 if (port == ((unsigned long)sun_fdc) + 2) {
336 if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x20)) {
337 val |= 0x10;
338 }
339 }
340 outb(val, port);
341}
342
343#ifdef PCI_FDC_SWAP_DRIVES
344static void sun_pci_fd_lde_broken_outb(unsigned char val, unsigned long port)
345{
346 udelay(5);
347 /*
348 * XXX: Due to SUN's broken floppy connector on AX and AXi
349 * we need to turn on MOTOR_0 also, if the floppy is
350 * jumpered to DS1 (like most PC floppies are). I hope
351 * this does not hurt correct hardware like the AXmp.
352 * (Eddie, Sep 12 1998).
353 */
354 if (port == ((unsigned long)sun_fdc) + 2) {
355 if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x10)) {
356 val &= ~(0x03);
357 val |= 0x21;
358 }
359 }
360 outb(val, port);
361}
362#endif /* PCI_FDC_SWAP_DRIVES */
363
364static void sun_pci_fd_enable_dma(void)
365{
366 BUG_ON((NULL == sun_pci_dma_pending.buf) ||
367 (0 == sun_pci_dma_pending.len) ||
368 (0 == sun_pci_dma_pending.direction));
369
370 sun_pci_dma_current.buf = sun_pci_dma_pending.buf;
371 sun_pci_dma_current.len = sun_pci_dma_pending.len;
372 sun_pci_dma_current.direction = sun_pci_dma_pending.direction;
373
374 sun_pci_dma_pending.buf = NULL;
375 sun_pci_dma_pending.len = 0;
376 sun_pci_dma_pending.direction = 0;
377 sun_pci_dma_pending.addr = -1U;
378
379 sun_pci_dma_current.addr =
380 pci_map_single(sun_pci_ebus_dev,
381 sun_pci_dma_current.buf,
382 sun_pci_dma_current.len,
383 sun_pci_dma_current.direction);
384
385 ebus_dma_enable(&sun_pci_fd_ebus_dma, 1);
386
387 if (ebus_dma_request(&sun_pci_fd_ebus_dma,
388 sun_pci_dma_current.addr,
389 sun_pci_dma_current.len))
390 BUG();
391}
392
393static void sun_pci_fd_disable_dma(void)
394{
395 ebus_dma_enable(&sun_pci_fd_ebus_dma, 0);
396 if (sun_pci_dma_current.addr != -1U)
397 pci_unmap_single(sun_pci_ebus_dev,
398 sun_pci_dma_current.addr,
399 sun_pci_dma_current.len,
400 sun_pci_dma_current.direction);
401 sun_pci_dma_current.addr = -1U;
402}
403
404static void sun_pci_fd_set_dma_mode(int mode)
405{
406 if (mode == DMA_MODE_WRITE)
407 sun_pci_dma_pending.direction = PCI_DMA_TODEVICE;
408 else
409 sun_pci_dma_pending.direction = PCI_DMA_FROMDEVICE;
410
411 ebus_dma_prepare(&sun_pci_fd_ebus_dma, mode != DMA_MODE_WRITE);
412}
413
414static void sun_pci_fd_set_dma_count(int length)
415{
416 sun_pci_dma_pending.len = length;
417}
418
419static void sun_pci_fd_set_dma_addr(char *buffer)
420{
421 sun_pci_dma_pending.buf = buffer;
422}
423
424static unsigned int sun_pci_get_dma_residue(void)
425{
426 return ebus_dma_residue(&sun_pci_fd_ebus_dma);
427}
428
429static int sun_pci_fd_request_irq(void)
430{
431 return ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 1);
432}
433
434static void sun_pci_fd_free_irq(void)
435{
436 ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 0);
437}
438
439static int sun_pci_fd_eject(int drive)
440{
441 return -EINVAL;
442}
443
444void sun_pci_fd_dma_callback(struct ebus_dma_info *p, int event, void *cookie)
445{
446 floppy_interrupt(0, NULL);
447}
448
449/*
450 * Floppy probing, we'd like to use /dev/fd0 for a single Floppy on PCI,
451 * even if this is configured using DS1, thus looks like /dev/fd1 with
452 * the cabling used in Ultras.
453 */
454#define DOR (port + 2)
455#define MSR (port + 4)
456#define FIFO (port + 5)
457
458static void sun_pci_fd_out_byte(unsigned long port, unsigned char val,
459 unsigned long reg)
460{
461 unsigned char status;
462 int timeout = 1000;
463
464 while (!((status = inb(MSR)) & 0x80) && --timeout)
465 udelay(100);
466 outb(val, reg);
467}
468
469static unsigned char sun_pci_fd_sensei(unsigned long port)
470{
471 unsigned char result[2] = { 0x70, 0x00 };
472 unsigned char status;
473 int i = 0;
474
475 sun_pci_fd_out_byte(port, 0x08, FIFO);
476 do {
477 int timeout = 1000;
478
479 while (!((status = inb(MSR)) & 0x80) && --timeout)
480 udelay(100);
481
482 if (!timeout)
483 break;
484
485 if ((status & 0xf0) == 0xd0)
486 result[i++] = inb(FIFO);
487 else
488 break;
489 } while (i < 2);
490
491 return result[0];
492}
493
494static void sun_pci_fd_reset(unsigned long port)
495{
496 unsigned char mask = 0x00;
497 unsigned char status;
498 int timeout = 10000;
499
500 outb(0x80, MSR);
501 do {
502 status = sun_pci_fd_sensei(port);
503 if ((status & 0xc0) == 0xc0)
504 mask |= 1 << (status & 0x03);
505 else
506 udelay(100);
507 } while ((mask != 0x0f) && --timeout);
508}
509
510static int sun_pci_fd_test_drive(unsigned long port, int drive)
511{
512 unsigned char status, data;
513 int timeout = 1000;
514 int ready;
515
516 sun_pci_fd_reset(port);
517
518 data = (0x10 << drive) | 0x0c | drive;
519 sun_pci_fd_out_byte(port, data, DOR);
520
521 sun_pci_fd_out_byte(port, 0x07, FIFO);
522 sun_pci_fd_out_byte(port, drive & 0x03, FIFO);
523
524 do {
525 udelay(100);
526 status = sun_pci_fd_sensei(port);
527 } while (((status & 0xc0) == 0x80) && --timeout);
528
529 if (!timeout)
530 ready = 0;
531 else
532 ready = (status & 0x10) ? 0 : 1;
533
534 sun_pci_fd_reset(port);
535 return ready;
536}
537#undef FIFO
538#undef MSR
539#undef DOR
540
541#endif /* CONFIG_PCI */
542
543#ifdef CONFIG_PCI
544static int __init ebus_fdthree_p(struct linux_ebus_device *edev)
545{
546 if (!strcmp(edev->prom_node->name, "fdthree"))
547 return 1;
548 if (!strcmp(edev->prom_node->name, "floppy")) {
549 const char *compat;
550
551 compat = of_get_property(edev->prom_node,
552 "compatible", NULL);
553 if (compat && !strcmp(compat, "fdthree"))
554 return 1;
555 }
556 return 0;
557}
558#endif
559
560static unsigned long __init sun_floppy_init(void)
561{
562 char state[128];
563 struct sbus_bus *bus;
564 struct sbus_dev *sdev = NULL;
565 static int initialized = 0;
566
567 if (initialized)
568 return sun_floppy_types[0];
569 initialized = 1;
570
571 for_all_sbusdev (sdev, bus) {
572 if (!strcmp(sdev->prom_name, "SUNW,fdtwo"))
573 break;
574 }
575 if(sdev) {
576 floppy_sdev = sdev;
577 FLOPPY_IRQ = sdev->irqs[0];
578 } else {
579#ifdef CONFIG_PCI
580 struct linux_ebus *ebus;
581 struct linux_ebus_device *edev = NULL;
582 unsigned long config = 0;
583 void __iomem *auxio_reg;
584 const char *state_prop;
585
586 for_each_ebus(ebus) {
587 for_each_ebusdev(edev, ebus) {
588 if (ebus_fdthree_p(edev))
589 goto ebus_done;
590 }
591 }
592 ebus_done:
593 if (!edev)
594 return 0;
595
596 state_prop = of_get_property(edev->prom_node, "status", NULL);
597 if (state_prop && !strncmp(state_prop, "disabled", 8))
598 return 0;
599
600 FLOPPY_IRQ = edev->irqs[0];
601
602 /* Make sure the high density bit is set, some systems
603 * (most notably Ultra5/Ultra10) come up with it clear.
604 */
605 auxio_reg = (void __iomem *) edev->resource[2].start;
606 writel(readl(auxio_reg)|0x2, auxio_reg);
607
608 sun_pci_ebus_dev = ebus->self;
609
610 spin_lock_init(&sun_pci_fd_ebus_dma.lock);
611
612 /* XXX ioremap */
613 sun_pci_fd_ebus_dma.regs = (void __iomem *)
614 edev->resource[1].start;
615 if (!sun_pci_fd_ebus_dma.regs)
616 return 0;
617
618 sun_pci_fd_ebus_dma.flags = (EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
619 EBUS_DMA_FLAG_TCI_DISABLE);
620 sun_pci_fd_ebus_dma.callback = sun_pci_fd_dma_callback;
621 sun_pci_fd_ebus_dma.client_cookie = NULL;
622 sun_pci_fd_ebus_dma.irq = FLOPPY_IRQ;
623 strcpy(sun_pci_fd_ebus_dma.name, "floppy");
624 if (ebus_dma_register(&sun_pci_fd_ebus_dma))
625 return 0;
626
627 /* XXX ioremap */
628 sun_fdc = (struct sun_flpy_controller *)edev->resource[0].start;
629
630 sun_fdops.fd_inb = sun_pci_fd_inb;
631 sun_fdops.fd_outb = sun_pci_fd_outb;
632
633 can_use_virtual_dma = use_virtual_dma = 0;
634 sun_fdops.fd_enable_dma = sun_pci_fd_enable_dma;
635 sun_fdops.fd_disable_dma = sun_pci_fd_disable_dma;
636 sun_fdops.fd_set_dma_mode = sun_pci_fd_set_dma_mode;
637 sun_fdops.fd_set_dma_addr = sun_pci_fd_set_dma_addr;
638 sun_fdops.fd_set_dma_count = sun_pci_fd_set_dma_count;
639 sun_fdops.get_dma_residue = sun_pci_get_dma_residue;
640
641 sun_fdops.fd_request_irq = sun_pci_fd_request_irq;
642 sun_fdops.fd_free_irq = sun_pci_fd_free_irq;
643
644 sun_fdops.fd_eject = sun_pci_fd_eject;
645
646 fdc_status = (unsigned long) &sun_fdc->status_82077;
647
648 /*
649 * XXX: Find out on which machines this is really needed.
650 */
651 if (1) {
652 sun_pci_broken_drive = 1;
653 sun_fdops.fd_outb = sun_pci_fd_broken_outb;
654 }
655
656 allowed_drive_mask = 0;
657 if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 0))
658 sun_floppy_types[0] = 4;
659 if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 1))
660 sun_floppy_types[1] = 4;
661
662 /*
663 * Find NS87303 SuperIO config registers (through ecpp).
664 */
665 for_each_ebus(ebus) {
666 for_each_ebusdev(edev, ebus) {
667 if (!strcmp(edev->prom_node->name, "ecpp")) {
668 config = edev->resource[1].start;
669 goto config_done;
670 }
671 }
672 }
673 config_done:
674
675 /*
676 * Sanity check, is this really the NS87303?
677 */
678 switch (config & 0x3ff) {
679 case 0x02e:
680 case 0x15c:
681 case 0x26e:
682 case 0x398:
683 break;
684 default:
685 config = 0;
686 }
687
688 if (!config)
689 return sun_floppy_types[0];
690
691 /* Enable PC-AT mode. */
692 ns87303_modify(config, ASC, 0, 0xc0);
693
694#ifdef PCI_FDC_SWAP_DRIVES
695 /*
696 * If only Floppy 1 is present, swap drives.
697 */
698 if (!sun_floppy_types[0] && sun_floppy_types[1]) {
699 /*
700 * Set the drive exchange bit in FCR on NS87303,
701 * make sure other bits are sane before doing so.
702 */
703 ns87303_modify(config, FER, FER_EDM, 0);
704 ns87303_modify(config, ASC, ASC_DRV2_SEL, 0);
705 ns87303_modify(config, FCR, 0, FCR_LDE);
706
707 config = sun_floppy_types[0];
708 sun_floppy_types[0] = sun_floppy_types[1];
709 sun_floppy_types[1] = config;
710
711 if (sun_pci_broken_drive != -1) {
712 sun_pci_broken_drive = 1 - sun_pci_broken_drive;
713 sun_fdops.fd_outb = sun_pci_fd_lde_broken_outb;
714 }
715 }
716#endif /* PCI_FDC_SWAP_DRIVES */
717
718 return sun_floppy_types[0];
719#else
720 return 0;
721#endif
722 }
723 prom_getproperty(sdev->prom_node, "status", state, sizeof(state));
724 if(!strncmp(state, "disabled", 8))
725 return 0;
726
727 /*
728 * We cannot do sbus_ioremap here: it does request_region,
729 * which the generic floppy driver tries to do once again.
730 * But we must use the sdev resource values as they have
731 * had parent ranges applied.
732 */
733 sun_fdc = (struct sun_flpy_controller *)
734 (sdev->resource[0].start +
735 ((sdev->resource[0].flags & 0x1ffUL) << 32UL));
736
737 /* Last minute sanity check... */
738 if(sbus_readb(&sun_fdc->status1_82077) == 0xff) {
739 sun_fdc = (struct sun_flpy_controller *)-1;
740 return 0;
741 }
742
743 sun_fdops.fd_inb = sun_82077_fd_inb;
744 sun_fdops.fd_outb = sun_82077_fd_outb;
745
746 can_use_virtual_dma = use_virtual_dma = 1;
747 sun_fdops.fd_enable_dma = sun_fd_enable_dma;
748 sun_fdops.fd_disable_dma = sun_fd_disable_dma;
749 sun_fdops.fd_set_dma_mode = sun_fd_set_dma_mode;
750 sun_fdops.fd_set_dma_addr = sun_fd_set_dma_addr;
751 sun_fdops.fd_set_dma_count = sun_fd_set_dma_count;
752 sun_fdops.get_dma_residue = sun_get_dma_residue;
753
754 sun_fdops.fd_request_irq = sun_fd_request_irq;
755 sun_fdops.fd_free_irq = sun_fd_free_irq;
756
757 sun_fdops.fd_eject = sun_fd_eject;
758
759 fdc_status = (unsigned long) &sun_fdc->status_82077;
760
761 /* Success... */
762 allowed_drive_mask = 0x01;
763 sun_floppy_types[0] = 4;
764 sun_floppy_types[1] = 0;
765
766 return sun_floppy_types[0];
767}
768
769#define EXTRA_FLOPPY_PARAMS
770
771static DEFINE_SPINLOCK(dma_spin_lock);
772
773#define claim_dma_lock() \
774({ unsigned long flags; \
775 spin_lock_irqsave(&dma_spin_lock, flags); \
776 flags; \
777})
778
779#define release_dma_lock(__flags) \
780 spin_unlock_irqrestore(&dma_spin_lock, __flags);
781
782#endif /* !(__ASM_SPARC64_FLOPPY_H) */
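
sparc_floppy_irq() above does the pseudo-DMA transfer by polling the main status register and moving FIFO bytes by hand: bit 0x80 must be set before the FIFO is touched, a cleared 0x20 ends the transfer phase, and 0x40 gives the transfer direction. A reduced plain-C sketch of that inner loop follows; read_reg()/write_reg() stand in for readb()/writeb(), and the return value mirrors the pdma_vaddr/pdma_size bookkeeping.

/*
 * Sketch of the FIFO-draining loop in sparc_floppy_irq() above.
 * stat points at the main status register, stat + 1 at the data FIFO;
 * read_reg()/write_reg() are stand-ins for readb()/writeb().
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t read_reg(volatile uint8_t *r)             { return *r; }
static void    write_reg(uint8_t v, volatile uint8_t *r) { *r = v; }

/* Returns how many bytes are left when the loop stops; 0 means done. */
size_t pdma_drain(volatile uint8_t *stat, uint8_t *buf, size_t len)
{
	while (len) {
		uint8_t msr = read_reg(stat);

		if (!(msr & 0x80))	/* FIFO not ready: wait for the next IRQ */
			break;
		if (!(msr & 0x20))	/* execution phase over: stop eating bytes */
			break;
		if (msr & 0x40)		/* controller -> memory (read) */
			*buf++ = read_reg(stat + 1);
		else			/* memory -> controller (write) */
			write_reg(*buf++, stat + 1);
		len--;
	}
	return len;
}
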
diff --git a/include/asm-sparc/futex.h b/include/asm-sparc/futex.h
index 6a332a9f099c..c6a9f038c531 100644
--- a/include/asm-sparc/futex.h
+++ b/include/asm-sparc/futex.h
@@ -1,6 +1,8 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
+#ifndef ___ASM_SPARC_FUTEX_H
+#define ___ASM_SPARC_FUTEX_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/futex_64.h>
+#else
+#include <asm-sparc/futex_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/futex_32.h b/include/asm-sparc/futex_32.h
new file mode 100644
index 000000000000..6a332a9f099c
--- /dev/null
+++ b/include/asm-sparc/futex_32.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif
diff --git a/include/asm-sparc/futex_64.h b/include/asm-sparc/futex_64.h
new file mode 100644
index 000000000000..d8378935ae90
--- /dev/null
+++ b/include/asm-sparc/futex_64.h
@@ -0,0 +1,110 @@
1#ifndef _SPARC64_FUTEX_H
2#define _SPARC64_FUTEX_H
3
4#include <linux/futex.h>
5#include <linux/uaccess.h>
6#include <asm/errno.h>
7#include <asm/system.h>
8
9#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
10 __asm__ __volatile__( \
11 "\n1: lduwa [%3] %%asi, %2\n" \
12 " " insn "\n" \
13 "2: casa [%3] %%asi, %2, %1\n" \
14 " cmp %2, %1\n" \
15 " bne,pn %%icc, 1b\n" \
16 " mov 0, %0\n" \
17 "3:\n" \
18 " .section .fixup,#alloc,#execinstr\n" \
19 " .align 4\n" \
20 "4: sethi %%hi(3b), %0\n" \
21 " jmpl %0 + %%lo(3b), %%g0\n" \
22 " mov %5, %0\n" \
23 " .previous\n" \
24 " .section __ex_table,\"a\"\n" \
25 " .align 4\n" \
26 " .word 1b, 4b\n" \
27 " .word 2b, 4b\n" \
28 " .previous\n" \
29 : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
30 : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
31 : "memory")
32
33static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
34{
35 int op = (encoded_op >> 28) & 7;
36 int cmp = (encoded_op >> 24) & 15;
37 int oparg = (encoded_op << 8) >> 20;
38 int cmparg = (encoded_op << 20) >> 20;
39 int oldval = 0, ret, tem;
40
41 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
42 return -EFAULT;
43 if (unlikely((((unsigned long) uaddr) & 0x3UL)))
44 return -EINVAL;
45
46 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
47 oparg = 1 << oparg;
48
49 pagefault_disable();
50
51 switch (op) {
52 case FUTEX_OP_SET:
53 __futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
54 break;
55 case FUTEX_OP_ADD:
56 __futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
57 break;
58 case FUTEX_OP_OR:
59 __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
60 break;
61 case FUTEX_OP_ANDN:
62 __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
63 break;
64 case FUTEX_OP_XOR:
65 __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
66 break;
67 default:
68 ret = -ENOSYS;
69 }
70
71 pagefault_enable();
72
73 if (!ret) {
74 switch (cmp) {
75 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
76 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
77 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
78 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
79 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
80 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
81 default: ret = -ENOSYS;
82 }
83 }
84 return ret;
85}
86
87static inline int
88futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
89{
90 __asm__ __volatile__(
91 "\n1: casa [%3] %%asi, %2, %0\n"
92 "2:\n"
93 " .section .fixup,#alloc,#execinstr\n"
94 " .align 4\n"
95 "3: sethi %%hi(2b), %0\n"
96 " jmpl %0 + %%lo(2b), %%g0\n"
97 " mov %4, %0\n"
98 " .previous\n"
99 " .section __ex_table,\"a\"\n"
100 " .align 4\n"
101 " .word 1b, 3b\n"
102 " .previous\n"
103 : "=r" (newval)
104 : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
105 : "memory");
106
107 return newval;
108}
109
110#endif /* !(_SPARC64_FUTEX_H) */
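
futex_atomic_op_inuser() above unpacks encoded_op with plain shifts: the operation sits in bits 31-28, the comparison in bits 27-24, and the two arguments are sign-extended 12-bit fields in bits 23-12 and 11-0. A small self-contained decoder that mirrors those shifts (relying, as the kernel code does, on arithmetic right shifts of signed ints):

/* Decoder matching the shifts in futex_atomic_op_inuser() above. */
#include <stdio.h>

struct futex_op_fields {
	int op, cmp, oparg, cmparg;
};

static struct futex_op_fields decode_futex_op(int encoded_op)
{
	struct futex_op_fields d;

	d.op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_SET, _ADD, ... */
	d.cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_* */
	d.oparg  = (encoded_op << 8) >> 20;	/* bits 23-12, sign-extended */
	d.cmparg = (encoded_op << 20) >> 20;	/* bits 11-0, sign-extended */
	return d;
}

int main(void)
{
	/* op 1 (FUTEX_OP_ADD in the switch above) with oparg 1, cmparg 0 */
	struct futex_op_fields d = decode_futex_op((1 << 28) | (1 << 12));

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", d.op, d.cmp, d.oparg, d.cmparg);
	return 0;
}
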
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index 4f63ed8df551..156478773100 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -1,23 +1,8 @@
-/* hardirq.h: 32-bit Sparc hard IRQ support.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
- */
-
-#ifndef __SPARC_HARDIRQ_H
-#define __SPARC_HARDIRQ_H
+#ifndef ___ASM_SPARC_HARDIRQ_H
+#define ___ASM_SPARC_HARDIRQ_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/hardirq_64.h>
+#else
+#include <asm-sparc/hardirq_32.h>
+#endif
+#endif
9
10#include <linux/threads.h>
11#include <linux/spinlock.h>
12#include <linux/cache.h>
13
14/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21#define HARDIRQ_BITS 8
22
23#endif /* __SPARC_HARDIRQ_H */
diff --git a/include/asm-sparc/hardirq_32.h b/include/asm-sparc/hardirq_32.h
new file mode 100644
index 000000000000..4f63ed8df551
--- /dev/null
+++ b/include/asm-sparc/hardirq_32.h
@@ -0,0 +1,23 @@
1/* hardirq.h: 32-bit Sparc hard IRQ support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
5 */
6
7#ifndef __SPARC_HARDIRQ_H
8#define __SPARC_HARDIRQ_H
9
10#include <linux/threads.h>
11#include <linux/spinlock.h>
12#include <linux/cache.h>
13
14/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21#define HARDIRQ_BITS 8
22
23#endif /* __SPARC_HARDIRQ_H */
diff --git a/include/asm-sparc/hardirq_64.h b/include/asm-sparc/hardirq_64.h
new file mode 100644
index 000000000000..7c29fd1a87aa
--- /dev/null
+++ b/include/asm-sparc/hardirq_64.h
@@ -0,0 +1,19 @@
1/* hardirq.h: 64-bit Sparc hard IRQ support.
2 *
3 * Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef __SPARC64_HARDIRQ_H
7#define __SPARC64_HARDIRQ_H
8
9#include <asm/cpudata.h>
10
11#define __ARCH_IRQ_STAT
12#define local_softirq_pending() \
13 (local_cpu_data().__softirq_pending)
14
15void ack_bad_irq(unsigned int irq);
16
17#define HARDIRQ_BITS 8
18
19#endif /* !(__SPARC64_HARDIRQ_H) */
diff --git a/include/asm-sparc/head.h b/include/asm-sparc/head.h
index 7c35491a8b53..14652abdea31 100644
--- a/include/asm-sparc/head.h
+++ b/include/asm-sparc/head.h
@@ -1,102 +1,8 @@
-#ifndef __SPARC_HEAD_H
-#define __SPARC_HEAD_H
-
-#define KERNBASE 0xf0000000 /* First address the kernel will eventually be */
+#ifndef ___ASM_SPARC_HEAD_H
+#define ___ASM_SPARC_HEAD_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/head_64.h>
5#define LOAD_ADDR 0x4000 /* prom jumps to us here unless this is elf /boot */
6#define SUN4C_SEGSZ (1 << 18)
7#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2) /* Used in boot remapping. */
8#define INTS_ENAB 0x01 /* entry.S uses this. */
9
10#define SUN4_PROM_VECTOR 0xFFE81000 /* SUN4 PROM needs to be hardwired */
11
12#define WRITE_PAUSE nop; nop; nop; /* Have to do this after %wim/%psr chg */
13#define NOP_INSN 0x01000000 /* Used to patch sparc_save_state */
14
15/* Here are some trap goodies */
16
17/* Generic trap entry. */
18#define TRAP_ENTRY(type, label) \
19 rd %psr, %l0; b label; rd %wim, %l3; nop;
20
21/* Data/text faults. Defaults to sun4c version at boot time. */
22#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
23#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
24#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
25#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
26
27/* This is for traps we should NEVER get. */
28#define BAD_TRAP(num) \
29 rd %psr, %l0; mov num, %l7; b bad_trap_handler; rd %wim, %l3;
30
31/* This is for traps when we want just skip the instruction which caused it */
32#define SKIP_TRAP(type, name) \
33 jmpl %l2, %g0; rett %l2 + 4; nop; nop;
34
35/* Notice that for the system calls we pull a trick. We load up a
36 * different pointer to the system call vector table in %l7, but call
37 * the same generic system call low-level entry point. The trap table
38 * entry sequences are also HyperSparc pipeline friendly ;-)
39 */
40
41/* Software trap for Linux system calls. */
42#define LINUX_SYSCALL_TRAP \
43 sethi %hi(sys_call_table), %l7; \
44 or %l7, %lo(sys_call_table), %l7; \
45 b linux_sparc_syscall; \
46 rd %psr, %l0;
47
48#define BREAKPOINT_TRAP \
49 b breakpoint_trap; \
50 rd %psr,%l0; \
51 nop; \
52 nop;
53
54#ifdef CONFIG_KGDB
55#define KGDB_TRAP(num) \
56 b kgdb_trap_low; \
57 rd %psr,%l0; \
58 nop; \
59 nop;
-#else
-#define KGDB_TRAP(num) \
-	BAD_TRAP(num)
-#endif
+#else
+#include <asm-sparc/head_32.h>
+#endif
+#endif
64
65/* The Get Condition Codes software trap for userland. */
66#define GETCC_TRAP \
67 b getcc_trap_handler; mov %psr, %l0; nop; nop;
68
69/* The Set Condition Codes software trap for userland. */
70#define SETCC_TRAP \
71 b setcc_trap_handler; mov %psr, %l0; nop; nop;
72
73/* The Get PSR software trap for userland. */
74#define GETPSR_TRAP \
75 mov %psr, %i0; jmp %l2; rett %l2 + 4; nop;
76
77/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
78 * gets handled with another macro.
79 */
80#define TRAP_ENTRY_INTERRUPT(int_level) \
81 mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
82
83/* NMI's (Non Maskable Interrupts) are special, you can't keep them
84 * from coming in, and basically if you get one, the show's over. ;(
85 * On the sun4c they are usually asynchronous memory errors, on the
86 * sun4m they could be either due to mem errors or a software
87 * initiated interrupt from the prom/kern on an SMP box saying "I
88 * command you to do CPU tricks, read your mailbox for more info."
89 */
90#define NMI_TRAP \
91 rd %wim, %l3; b linux_trap_nmi_sun4c; mov %psr, %l0; nop;
92
93/* Window overflows/underflows are special and we need to try to be as
94 * efficient as possible here....
95 */
96#define WINDOW_SPILL \
97 rd %psr, %l0; rd %wim, %l3; b spill_window_entry; andcc %l0, PSR_PS, %g0;
98
99#define WINDOW_FILL \
100 rd %psr, %l0; rd %wim, %l3; b fill_window_entry; andcc %l0, PSR_PS, %g0;
101
102#endif /* __SPARC_HEAD_H */
diff --git a/include/asm-sparc/head_32.h b/include/asm-sparc/head_32.h
new file mode 100644
index 000000000000..7c35491a8b53
--- /dev/null
+++ b/include/asm-sparc/head_32.h
@@ -0,0 +1,102 @@
1#ifndef __SPARC_HEAD_H
2#define __SPARC_HEAD_H
3
4#define KERNBASE 0xf0000000 /* First address the kernel will eventually be */
5#define LOAD_ADDR 0x4000 /* prom jumps to us here unless this is elf /boot */
6#define SUN4C_SEGSZ (1 << 18)
7#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2) /* Used in boot remapping. */
8#define INTS_ENAB 0x01 /* entry.S uses this. */
9
10#define SUN4_PROM_VECTOR 0xFFE81000 /* SUN4 PROM needs to be hardwired */
11
12#define WRITE_PAUSE nop; nop; nop; /* Have to do this after %wim/%psr chg */
13#define NOP_INSN 0x01000000 /* Used to patch sparc_save_state */
14
15/* Here are some trap goodies */
16
17/* Generic trap entry. */
18#define TRAP_ENTRY(type, label) \
19 rd %psr, %l0; b label; rd %wim, %l3; nop;
20
21/* Data/text faults. Defaults to sun4c version at boot time. */
22#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
23#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
24#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
25#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
26
27/* This is for traps we should NEVER get. */
28#define BAD_TRAP(num) \
29 rd %psr, %l0; mov num, %l7; b bad_trap_handler; rd %wim, %l3;
30
31/* This is for traps when we want just skip the instruction which caused it */
32#define SKIP_TRAP(type, name) \
33 jmpl %l2, %g0; rett %l2 + 4; nop; nop;
34
35/* Notice that for the system calls we pull a trick. We load up a
36 * different pointer to the system call vector table in %l7, but call
37 * the same generic system call low-level entry point. The trap table
38 * entry sequences are also HyperSparc pipeline friendly ;-)
39 */
40
41/* Software trap for Linux system calls. */
42#define LINUX_SYSCALL_TRAP \
43 sethi %hi(sys_call_table), %l7; \
44 or %l7, %lo(sys_call_table), %l7; \
45 b linux_sparc_syscall; \
46 rd %psr, %l0;
47
48#define BREAKPOINT_TRAP \
49 b breakpoint_trap; \
50 rd %psr,%l0; \
51 nop; \
52 nop;
53
54#ifdef CONFIG_KGDB
55#define KGDB_TRAP(num) \
56 b kgdb_trap_low; \
57 rd %psr,%l0; \
58 nop; \
59 nop;
60#else
61#define KGDB_TRAP(num) \
62 BAD_TRAP(num)
63#endif
64
65/* The Get Condition Codes software trap for userland. */
66#define GETCC_TRAP \
67 b getcc_trap_handler; mov %psr, %l0; nop; nop;
68
69/* The Set Condition Codes software trap for userland. */
70#define SETCC_TRAP \
71 b setcc_trap_handler; mov %psr, %l0; nop; nop;
72
73/* The Get PSR software trap for userland. */
74#define GETPSR_TRAP \
75 mov %psr, %i0; jmp %l2; rett %l2 + 4; nop;
76
77/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
78 * gets handled with another macro.
79 */
80#define TRAP_ENTRY_INTERRUPT(int_level) \
81 mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
82
83/* NMI's (Non Maskable Interrupts) are special, you can't keep them
84 * from coming in, and basically if you get one, the show's over. ;(
85 * On the sun4c they are usually asynchronous memory errors, on the
86 * sun4m they could be either due to mem errors or a software
87 * initiated interrupt from the prom/kern on an SMP box saying "I
88 * command you to do CPU tricks, read your mailbox for more info."
89 */
90#define NMI_TRAP \
91 rd %wim, %l3; b linux_trap_nmi_sun4c; mov %psr, %l0; nop;
92
93/* Window overflows/underflows are special and we need to try to be as
94 * efficient as possible here....
95 */
96#define WINDOW_SPILL \
97 rd %psr, %l0; rd %wim, %l3; b spill_window_entry; andcc %l0, PSR_PS, %g0;
98
99#define WINDOW_FILL \
100 rd %psr, %l0; rd %wim, %l3; b fill_window_entry; andcc %l0, PSR_PS, %g0;
101
102#endif /* __SPARC_HEAD_H */
diff --git a/include/asm-sparc/head_64.h b/include/asm-sparc/head_64.h
new file mode 100644
index 000000000000..10e9dabc4c41
--- /dev/null
+++ b/include/asm-sparc/head_64.h
@@ -0,0 +1,76 @@
1#ifndef _SPARC64_HEAD_H
2#define _SPARC64_HEAD_H
3
4#include <asm/pstate.h>
5
6 /* wrpr %g0, val, %gl */
7#define SET_GL(val) \
8 .word 0xa1902000 | val
9
10 /* rdpr %gl, %gN */
11#define GET_GL_GLOBAL(N) \
12 .word 0x81540000 | (N << 25)
13
14#define KERNBASE 0x400000
15
16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
17
18#define __CHEETAH_ID 0x003e0014
19#define __JALAPENO_ID 0x003e0016
20#define __SERRANO_ID 0x003e0022
21
22#define CHEETAH_MANUF 0x003e
23#define CHEETAH_IMPL 0x0014 /* Ultra-III */
24#define CHEETAH_PLUS_IMPL 0x0015 /* Ultra-III+ */
25#define JALAPENO_IMPL 0x0016 /* Ultra-IIIi */
26#define JAGUAR_IMPL 0x0018 /* Ultra-IV */
27#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
28#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
29
30#define BRANCH_IF_SUN4V(tmp1,label) \
31 sethi %hi(is_sun4v), %tmp1; \
32 lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \
33 brnz,pn %tmp1, label; \
34 nop
35
36#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
37 rdpr %ver, %tmp1; \
38 sethi %hi(__CHEETAH_ID), %tmp2; \
39 srlx %tmp1, 32, %tmp1; \
40 or %tmp2, %lo(__CHEETAH_ID), %tmp2;\
41 cmp %tmp1, %tmp2; \
42 be,pn %icc, label; \
43 nop;
44
45#define BRANCH_IF_JALAPENO(tmp1,tmp2,label) \
46 rdpr %ver, %tmp1; \
47 sethi %hi(__JALAPENO_ID), %tmp2; \
48 srlx %tmp1, 32, %tmp1; \
49 or %tmp2, %lo(__JALAPENO_ID), %tmp2;\
50 cmp %tmp1, %tmp2; \
51 be,pn %icc, label; \
52 nop;
53
54#define BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(tmp1,tmp2,label) \
55 rdpr %ver, %tmp1; \
56 srlx %tmp1, (32 + 16), %tmp2; \
57 cmp %tmp2, CHEETAH_MANUF; \
58 bne,pt %xcc, 99f; \
59 sllx %tmp1, 16, %tmp1; \
60 srlx %tmp1, (32 + 16), %tmp2; \
61 cmp %tmp2, CHEETAH_PLUS_IMPL; \
62 bgeu,pt %xcc, label; \
6399: nop;
64
65#define BRANCH_IF_ANY_CHEETAH(tmp1,tmp2,label) \
66 rdpr %ver, %tmp1; \
67 srlx %tmp1, (32 + 16), %tmp2; \
68 cmp %tmp2, CHEETAH_MANUF; \
69 bne,pt %xcc, 99f; \
70 sllx %tmp1, 16, %tmp1; \
71 srlx %tmp1, (32 + 16), %tmp2; \
72 cmp %tmp2, CHEETAH_IMPL; \
73 bgeu,pt %xcc, label; \
7499: nop;
75
76#endif /* !(_SPARC64_HEAD_H) */
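
The BRANCH_IF_* macros above identify the CPU by slicing the 64-bit %ver register: the manufacturer is in bits 63-48 and the implementation in bits 47-32, which is what the srlx/sllx pairs extract before comparing against CHEETAH_MANUF and the *_IMPL values. The same decoding written out in plain C, for illustration only:

/* C equivalent of the %ver field extraction done by the macros above. */
#include <stdint.h>
#include <stdio.h>

#define CHEETAH_MANUF		0x003e
#define CHEETAH_PLUS_IMPL	0x0015	/* Ultra-III+ */

static int is_cheetah_plus_or_followon(uint64_t ver)
{
	unsigned int manuf = ver >> 48;			/* srlx %ver, 32 + 16 */
	unsigned int impl  = (ver << 16) >> 48;		/* sllx 16; srlx 32 + 16 */

	return manuf == CHEETAH_MANUF && impl >= CHEETAH_PLUS_IMPL;
}

int main(void)
{
	/* a made-up %ver value with manuf 0x003e and impl 0x0015 */
	uint64_t ver = ((uint64_t)CHEETAH_MANUF << 48) |
		       ((uint64_t)CHEETAH_PLUS_IMPL << 32);

	printf("%d\n", is_cheetah_plus_or_followon(ver));
	return 0;
}
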
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h
index afd1736ed480..a3c7f5ff270a 100644
--- a/include/asm-sparc/ide.h
+++ b/include/asm-sparc/ide.h
@@ -1,95 +1,8 @@
-/* ide.h: SPARC PCI specific IDE glue.
- *
- * Copyright (C) 1997 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Adaptation from sparc64 version to sparc by Pete Zaitcev.
- */
-
-#ifndef _SPARC_IDE_H
+#ifndef ___ASM_SPARC_IDE_H
+#define ___ASM_SPARC_IDE_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/ide_64.h>
+#else
+#include <asm-sparc/ide_32.h>
+#endif
+#endif
9#define _SPARC_IDE_H
10
11#ifdef __KERNEL__
12
13#include <asm/pgtable.h>
14#include <asm/io.h>
15#include <asm/psr.h>
16
17#undef MAX_HWIFS
18#define MAX_HWIFS 2
19
20#define __ide_insl(data_reg, buffer, wcount) \
21 __ide_insw(data_reg, buffer, (wcount)<<1)
22#define __ide_outsl(data_reg, buffer, wcount) \
23 __ide_outsw(data_reg, buffer, (wcount)<<1)
24
25/* On sparc, I/O ports and MMIO registers are accessed identically. */
26#define __ide_mm_insw __ide_insw
27#define __ide_mm_insl __ide_insl
28#define __ide_mm_outsw __ide_outsw
29#define __ide_mm_outsl __ide_outsl
30
31static inline void __ide_insw(unsigned long port,
32 void *dst,
33 unsigned long count)
34{
35 volatile unsigned short *data_port;
36 /* unsigned long end = (unsigned long)dst + (count << 1); */ /* P3 */
37 u16 *ps = dst;
38 u32 *pi;
39
40 data_port = (volatile unsigned short *)port;
41
42 if(((unsigned long)ps) & 0x2) {
43 *ps++ = *data_port;
44 count--;
45 }
46 pi = (u32 *)ps;
47 while(count >= 2) {
48 u32 w;
49
50 w = (*data_port) << 16;
51 w |= (*data_port);
52 *pi++ = w;
53 count -= 2;
54 }
55 ps = (u16 *)pi;
56 if(count)
57 *ps++ = *data_port;
58
59 /* __flush_dcache_range((unsigned long)dst, end); */ /* P3 see hme */
60}
61
62static inline void __ide_outsw(unsigned long port,
63 const void *src,
64 unsigned long count)
65{
66 volatile unsigned short *data_port;
67 /* unsigned long end = (unsigned long)src + (count << 1); */
68 const u16 *ps = src;
69 const u32 *pi;
70
71 data_port = (volatile unsigned short *)port;
72
73 if(((unsigned long)src) & 0x2) {
74 *data_port = *ps++;
75 count--;
76 }
77 pi = (const u32 *)ps;
78 while(count >= 2) {
79 u32 w;
80
81 w = *pi++;
82 *data_port = (w >> 16);
83 *data_port = w;
84 count -= 2;
85 }
86 ps = (const u16 *)pi;
87 if(count)
88 *data_port = *ps;
89
90 /* __flush_dcache_range((unsigned long)src, end); */ /* P3 see hme */
91}
92
93#endif /* __KERNEL__ */
94
95#endif /* _SPARC_IDE_H */
diff --git a/include/asm-sparc/ide_32.h b/include/asm-sparc/ide_32.h
new file mode 100644
index 000000000000..afd1736ed480
--- /dev/null
+++ b/include/asm-sparc/ide_32.h
@@ -0,0 +1,95 @@
1/* ide.h: SPARC PCI specific IDE glue.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
5 * Adaptation from sparc64 version to sparc by Pete Zaitcev.
6 */
7
8#ifndef _SPARC_IDE_H
9#define _SPARC_IDE_H
10
11#ifdef __KERNEL__
12
13#include <asm/pgtable.h>
14#include <asm/io.h>
15#include <asm/psr.h>
16
17#undef MAX_HWIFS
18#define MAX_HWIFS 2
19
20#define __ide_insl(data_reg, buffer, wcount) \
21 __ide_insw(data_reg, buffer, (wcount)<<1)
22#define __ide_outsl(data_reg, buffer, wcount) \
23 __ide_outsw(data_reg, buffer, (wcount)<<1)
24
25/* On sparc, I/O ports and MMIO registers are accessed identically. */
26#define __ide_mm_insw __ide_insw
27#define __ide_mm_insl __ide_insl
28#define __ide_mm_outsw __ide_outsw
29#define __ide_mm_outsl __ide_outsl
30
31static inline void __ide_insw(unsigned long port,
32 void *dst,
33 unsigned long count)
34{
35 volatile unsigned short *data_port;
36 /* unsigned long end = (unsigned long)dst + (count << 1); */ /* P3 */
37 u16 *ps = dst;
38 u32 *pi;
39
40 data_port = (volatile unsigned short *)port;
41
42 if(((unsigned long)ps) & 0x2) {
43 *ps++ = *data_port;
44 count--;
45 }
46 pi = (u32 *)ps;
47 while(count >= 2) {
48 u32 w;
49
50 w = (*data_port) << 16;
51 w |= (*data_port);
52 *pi++ = w;
53 count -= 2;
54 }
55 ps = (u16 *)pi;
56 if(count)
57 *ps++ = *data_port;
58
59 /* __flush_dcache_range((unsigned long)dst, end); */ /* P3 see hme */
60}
61
62static inline void __ide_outsw(unsigned long port,
63 const void *src,
64 unsigned long count)
65{
66 volatile unsigned short *data_port;
67 /* unsigned long end = (unsigned long)src + (count << 1); */
68 const u16 *ps = src;
69 const u32 *pi;
70
71 data_port = (volatile unsigned short *)port;
72
73 if(((unsigned long)src) & 0x2) {
74 *data_port = *ps++;
75 count--;
76 }
77 pi = (const u32 *)ps;
78 while(count >= 2) {
79 u32 w;
80
81 w = *pi++;
82 *data_port = (w >> 16);
83 *data_port = w;
84 count -= 2;
85 }
86 ps = (const u16 *)pi;
87 if(count)
88 *data_port = *ps;
89
90 /* __flush_dcache_range((unsigned long)src, end); */ /* P3 see hme */
91}
92
93#endif /* __KERNEL__ */
94
95#endif /* _SPARC_IDE_H */
diff --git a/include/asm-sparc/ide_64.h b/include/asm-sparc/ide_64.h
new file mode 100644
index 000000000000..1282676da1cd
--- /dev/null
+++ b/include/asm-sparc/ide_64.h
@@ -0,0 +1,118 @@
1/*
2 * ide.h: Ultra/PCI specific IDE glue.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 */
7
8#ifndef _SPARC64_IDE_H
9#define _SPARC64_IDE_H
10
11#ifdef __KERNEL__
12
13#include <asm/pgalloc.h>
14#include <asm/io.h>
15#include <asm/spitfire.h>
16#include <asm/cacheflush.h>
17#include <asm/page.h>
18
19#ifndef MAX_HWIFS
20# ifdef CONFIG_BLK_DEV_IDEPCI
21#define MAX_HWIFS 10
22# else
23#define MAX_HWIFS 2
24# endif
25#endif
26
27#define __ide_insl(data_reg, buffer, wcount) \
28 __ide_insw(data_reg, buffer, (wcount)<<1)
29#define __ide_outsl(data_reg, buffer, wcount) \
30 __ide_outsw(data_reg, buffer, (wcount)<<1)
31
32/* On sparc64, I/O ports and MMIO registers are accessed identically. */
33#define __ide_mm_insw __ide_insw
34#define __ide_mm_insl __ide_insl
35#define __ide_mm_outsw __ide_outsw
36#define __ide_mm_outsl __ide_outsl
37
38static inline unsigned int inw_be(void __iomem *addr)
39{
40 unsigned int ret;
41
42 __asm__ __volatile__("lduha [%1] %2, %0"
43 : "=r" (ret)
44 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
45
46 return ret;
47}
48
49static inline void __ide_insw(void __iomem *port, void *dst, u32 count)
50{
51#ifdef DCACHE_ALIASING_POSSIBLE
52 unsigned long end = (unsigned long)dst + (count << 1);
53#endif
54 u16 *ps = dst;
55 u32 *pi;
56
57 if(((u64)ps) & 0x2) {
58 *ps++ = inw_be(port);
59 count--;
60 }
61 pi = (u32 *)ps;
62 while(count >= 2) {
63 u32 w;
64
65 w = inw_be(port) << 16;
66 w |= inw_be(port);
67 *pi++ = w;
68 count -= 2;
69 }
70 ps = (u16 *)pi;
71 if(count)
72 *ps++ = inw_be(port);
73
74#ifdef DCACHE_ALIASING_POSSIBLE
75 __flush_dcache_range((unsigned long)dst, end);
76#endif
77}
78
79static inline void outw_be(unsigned short w, void __iomem *addr)
80{
81 __asm__ __volatile__("stha %0, [%1] %2"
82 : /* no outputs */
83 : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
84}
85
86static inline void __ide_outsw(void __iomem *port, void *src, u32 count)
87{
88#ifdef DCACHE_ALIASING_POSSIBLE
89 unsigned long end = (unsigned long)src + (count << 1);
90#endif
91 const u16 *ps = src;
92 const u32 *pi;
93
94 if(((u64)src) & 0x2) {
95 outw_be(*ps++, port);
96 count--;
97 }
98 pi = (const u32 *)ps;
99 while(count >= 2) {
100 u32 w;
101
102 w = *pi++;
103 outw_be((w >> 16), port);
104 outw_be(w, port);
105 count -= 2;
106 }
107 ps = (const u16 *)pi;
108 if(count)
109 outw_be(*ps, port);
110
111#ifdef DCACHE_ALIASING_POSSIBLE
112 __flush_dcache_range((unsigned long)src, end);
113#endif
114}
115
116#endif /* __KERNEL__ */
117
118#endif /* _SPARC64_IDE_H */
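
__ide_insw()/__ide_outsw() above cut the number of memory accesses by first consuming a leading unaligned 16-bit word, then packing data two halfwords at a time into single 32-bit loads and stores, and finally handling a trailing odd word. A plain-C sketch of that packing pattern, with next_word() standing in for the inw_be() port read:

/*
 * Sketch of the halfword-packing pattern in __ide_insw() above.
 * next_word() is a stand-in data source, not a real port accessor.
 */
#include <stdint.h>

static uint16_t next_word(const uint16_t **src)
{
	return *(*src)++;
}

void insw_packed(const uint16_t *port_data, void *dst, uint32_t count)
{
	uint16_t *ps = dst;
	uint32_t *pi;

	if ((uintptr_t)ps & 0x2) {		/* leading unaligned halfword */
		*ps++ = next_word(&port_data);
		count--;
	}
	pi = (uint32_t *)ps;
	while (count >= 2) {			/* two halfwords per 32-bit store */
		uint32_t w = (uint32_t)next_word(&port_data) << 16;

		w |= next_word(&port_data);
		*pi++ = w;
		count -= 2;
	}
	ps = (uint16_t *)pi;
	if (count)				/* trailing halfword, if any */
		*ps = next_word(&port_data);
}
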
diff --git a/include/asm-sparc/io.h b/include/asm-sparc/io.h
index 3a3e7bdb06b3..fc9024d3dfc3 100644
--- a/include/asm-sparc/io.h
+++ b/include/asm-sparc/io.h
@@ -1,325 +1,8 @@
-#ifndef __SPARC_IO_H
-#define __SPARC_IO_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/ioport.h>  /* struct resource */
-
+#ifndef ___ASM_SPARC_IO_H
+#define ___ASM_SPARC_IO_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/io_64.h>
+#else
+#include <asm-sparc/io_32.h>
+#endif
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10
11#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
12
13static inline u32 flip_dword (u32 l)
14{
15 return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
16}
17
18static inline u16 flip_word (u16 w)
19{
20 return ((w&0xff) << 8) | ((w>>8)&0xff);
21}
22
23#define mmiowb()
24
25/*
26 * Memory mapped I/O to PCI
27 */
28
29static inline u8 __raw_readb(const volatile void __iomem *addr)
30{
31 return *(__force volatile u8 *)addr;
32}
33
34static inline u16 __raw_readw(const volatile void __iomem *addr)
35{
36 return *(__force volatile u16 *)addr;
37}
38
39static inline u32 __raw_readl(const volatile void __iomem *addr)
40{
41 return *(__force volatile u32 *)addr;
42}
43
44static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
45{
46 *(__force volatile u8 *)addr = b;
47}
48
49static inline void __raw_writew(u16 w, volatile void __iomem *addr)
50{
51 *(__force volatile u16 *)addr = w;
52}
53
54static inline void __raw_writel(u32 l, volatile void __iomem *addr)
55{
56 *(__force volatile u32 *)addr = l;
57}
58
59static inline u8 __readb(const volatile void __iomem *addr)
60{
61 return *(__force volatile u8 *)addr;
62}
63
64static inline u16 __readw(const volatile void __iomem *addr)
65{
66 return flip_word(*(__force volatile u16 *)addr);
67}
68
69static inline u32 __readl(const volatile void __iomem *addr)
70{
71 return flip_dword(*(__force volatile u32 *)addr);
72}
73
74static inline void __writeb(u8 b, volatile void __iomem *addr)
75{
76 *(__force volatile u8 *)addr = b;
77}
78
79static inline void __writew(u16 w, volatile void __iomem *addr)
80{
81 *(__force volatile u16 *)addr = flip_word(w);
82}
83
84static inline void __writel(u32 l, volatile void __iomem *addr)
85{
86 *(__force volatile u32 *)addr = flip_dword(l);
87}
88
89#define readb(__addr) __readb(__addr)
90#define readw(__addr) __readw(__addr)
91#define readl(__addr) __readl(__addr)
92#define readb_relaxed(__addr) readb(__addr)
93#define readw_relaxed(__addr) readw(__addr)
94#define readl_relaxed(__addr) readl(__addr)
95
96#define writeb(__b, __addr) __writeb((__b),(__addr))
97#define writew(__w, __addr) __writew((__w),(__addr))
98#define writel(__l, __addr) __writel((__l),(__addr))
99
100/*
101 * I/O space operations
102 *
103 * Arrangement on a Sun is somewhat complicated.
104 *
105 * First of all, we want to use standard Linux drivers
106 * for keyboard, PC serial, etc. These drivers think
107 * they access I/O space and use inb/outb.
108 * On the other hand, EBus bridge accepts PCI *memory*
109 * cycles and converts them into ISA *I/O* cycles.
110 * Ergo, we want inb & outb to generate PCI memory cycles.
111 *
112 * If we want to issue PCI *I/O* cycles, we do this
113 * with a low 64K fixed window in PCIC. This window gets
114 * mapped somewhere into virtual kernel space and we
115 * can use inb/outb again.
116 */
117#define inb_local(__addr) __readb((void __iomem *)(unsigned long)(__addr))
118#define inb(__addr) __readb((void __iomem *)(unsigned long)(__addr))
119#define inw(__addr) __readw((void __iomem *)(unsigned long)(__addr))
120#define inl(__addr) __readl((void __iomem *)(unsigned long)(__addr))
121
122#define outb_local(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
123#define outb(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
124#define outw(__w, __addr) __writew(__w, (void __iomem *)(unsigned long)(__addr))
125#define outl(__l, __addr) __writel(__l, (void __iomem *)(unsigned long)(__addr))
126
127#define inb_p(__addr) inb(__addr)
128#define outb_p(__b, __addr) outb(__b, __addr)
129#define inw_p(__addr) inw(__addr)
130#define outw_p(__w, __addr) outw(__w, __addr)
131#define inl_p(__addr) inl(__addr)
132#define outl_p(__l, __addr) outl(__l, __addr)
133
134void outsb(unsigned long addr, const void *src, unsigned long cnt);
135void outsw(unsigned long addr, const void *src, unsigned long cnt);
136void outsl(unsigned long addr, const void *src, unsigned long cnt);
137void insb(unsigned long addr, void *dst, unsigned long count);
138void insw(unsigned long addr, void *dst, unsigned long count);
139void insl(unsigned long addr, void *dst, unsigned long count);
140
141#define IO_SPACE_LIMIT 0xffffffff
142
143/*
144 * SBus accessors.
145 *
146 * SBus has only one, memory mapped, I/O space.
147 * We do not need to flip bytes for SBus of course.
148 */
149static inline u8 _sbus_readb(const volatile void __iomem *addr)
150{
151 return *(__force volatile u8 *)addr;
152}
153
154static inline u16 _sbus_readw(const volatile void __iomem *addr)
155{
156 return *(__force volatile u16 *)addr;
157}
158
159static inline u32 _sbus_readl(const volatile void __iomem *addr)
160{
161 return *(__force volatile u32 *)addr;
162}
163
164static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
165{
166 *(__force volatile u8 *)addr = b;
167}
168
169static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
170{
171 *(__force volatile u16 *)addr = w;
172}
173
174static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
175{
176 *(__force volatile u32 *)addr = l;
177}
178
179/*
180 * The only reason for #define's is to hide casts to unsigned long.
181 */
182#define sbus_readb(__addr) _sbus_readb(__addr)
183#define sbus_readw(__addr) _sbus_readw(__addr)
184#define sbus_readl(__addr) _sbus_readl(__addr)
185#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
186#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
187#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
188
189static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
190{
191 while(n--) {
192 sbus_writeb(c, __dst);
193 __dst++;
194 }
195}
196
197static inline void
198_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
199{
200 volatile void __iomem *d = dst;
201
202 while (n--) {
203 writeb(c, d);
204 d++;
205 }
206}
207
208#define memset_io(d,c,sz) _memset_io(d,c,sz)
209
210static inline void
211_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
212{
213 char *d = dst;
214
215 while (n--) {
216 char tmp = readb(src);
217 *d++ = tmp;
218 src++;
219 }
220}
221
222#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
223
224static inline void
225_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
226{
227 const char *s = src;
228 volatile void __iomem *d = dst;
229
230 while (n--) {
231 char tmp = *s++;
232 writeb(tmp, d);
233 d++;
234 }
235}
236
237#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
238
239#ifdef __KERNEL__
240
241/*
242 * Bus number may be embedded in the higher bits of the physical address.
243 * This is why we have no bus number argument to ioremap().
244 */
245extern void __iomem *ioremap(unsigned long offset, unsigned long size);
246#define ioremap_nocache(X,Y) ioremap((X),(Y))
247extern void iounmap(volatile void __iomem *addr);
248
249#define ioread8(X) readb(X)
250#define ioread16(X) readw(X)
251#define ioread32(X) readl(X)
252#define iowrite8(val,X) writeb(val,X)
253#define iowrite16(val,X) writew(val,X)
254#define iowrite32(val,X) writel(val,X)
255
256static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
257{
258 insb((unsigned long __force)port, buf, count);
259}
260static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
261{
262 insw((unsigned long __force)port, buf, count);
263}
264
265static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
266{
267 insl((unsigned long __force)port, buf, count);
268}
269
270static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
271{
272 outsb((unsigned long __force)port, buf, count);
273}
274
275static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
276{
277 outsw((unsigned long __force)port, buf, count);
278}
279
280static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
281{
282 outsl((unsigned long __force)port, buf, count);
283}
284
285/* Create a virtual mapping cookie for an IO port range */
286extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
287extern void ioport_unmap(void __iomem *);
288
289/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
290struct pci_dev;
291extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
292extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
293
294/*
295 * Bus number may be in res->flags... somewhere.
296 */
297extern void __iomem *sbus_ioremap(struct resource *res, unsigned long offset,
298 unsigned long size, char *name);
299extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
300
301
302/*
303 * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
304 * so rtc_port is static in it. This should not change unless new
305 * hardware pops up.
306 */
307#define RTC_PORT(x) (rtc_port + (x))
308#define RTC_ALWAYS_BCD 0
309
310#endif 8#endif
311
312#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
313
314/*
315 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
316 * access
317 */
318#define xlate_dev_mem_ptr(p) __va(p)
319
320/*
321 * Convert a virtual cached pointer to an uncached pointer
322 */
323#define xlate_dev_kmem_ptr(p) p
324
325#endif /* !(__SPARC_IO_H) */
diff --git a/include/asm-sparc/io_32.h b/include/asm-sparc/io_32.h
new file mode 100644
index 000000000000..c43af722ae8c
--- /dev/null
+++ b/include/asm-sparc/io_32.h
@@ -0,0 +1,325 @@
1#ifndef __SPARC_IO_H
2#define __SPARC_IO_H
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/ioport.h> /* struct resource */
7
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10
11#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
12
13static inline u32 flip_dword (u32 l)
14{
15 return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
16}
17
18static inline u16 flip_word (u16 w)
19{
20 return ((w&0xff) << 8) | ((w>>8)&0xff);
21}
22
23#define mmiowb()
24
25/*
26 * Memory mapped I/O to PCI
27 */
28
29static inline u8 __raw_readb(const volatile void __iomem *addr)
30{
31 return *(__force volatile u8 *)addr;
32}
33
34static inline u16 __raw_readw(const volatile void __iomem *addr)
35{
36 return *(__force volatile u16 *)addr;
37}
38
39static inline u32 __raw_readl(const volatile void __iomem *addr)
40{
41 return *(__force volatile u32 *)addr;
42}
43
44static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
45{
46 *(__force volatile u8 *)addr = b;
47}
48
49static inline void __raw_writew(u16 w, volatile void __iomem *addr)
50{
51 *(__force volatile u16 *)addr = w;
52}
53
54static inline void __raw_writel(u32 l, volatile void __iomem *addr)
55{
56 *(__force volatile u32 *)addr = l;
57}
58
59static inline u8 __readb(const volatile void __iomem *addr)
60{
61 return *(__force volatile u8 *)addr;
62}
63
64static inline u16 __readw(const volatile void __iomem *addr)
65{
66 return flip_word(*(__force volatile u16 *)addr);
67}
68
69static inline u32 __readl(const volatile void __iomem *addr)
70{
71 return flip_dword(*(__force volatile u32 *)addr);
72}
73
74static inline void __writeb(u8 b, volatile void __iomem *addr)
75{
76 *(__force volatile u8 *)addr = b;
77}
78
79static inline void __writew(u16 w, volatile void __iomem *addr)
80{
81 *(__force volatile u16 *)addr = flip_word(w);
82}
83
84static inline void __writel(u32 l, volatile void __iomem *addr)
85{
86 *(__force volatile u32 *)addr = flip_dword(l);
87}
88
89#define readb(__addr) __readb(__addr)
90#define readw(__addr) __readw(__addr)
91#define readl(__addr) __readl(__addr)
92#define readb_relaxed(__addr) readb(__addr)
93#define readw_relaxed(__addr) readw(__addr)
94#define readl_relaxed(__addr) readl(__addr)
95
96#define writeb(__b, __addr) __writeb((__b),(__addr))
97#define writew(__w, __addr) __writew((__w),(__addr))
98#define writel(__l, __addr) __writel((__l),(__addr))
99
100/*
101 * I/O space operations
102 *
103 * Arrangement on a Sun is somewhat complicated.
104 *
105 * First of all, we want to use standard Linux drivers
106 * for keyboard, PC serial, etc. These drivers think
107 * they access I/O space and use inb/outb.
108 * On the other hand, EBus bridge accepts PCI *memory*
109 * cycles and converts them into ISA *I/O* cycles.
110 * Ergo, we want inb & outb to generate PCI memory cycles.
111 *
112 * If we want to issue PCI *I/O* cycles, we do this
113 * with a low 64K fixed window in PCIC. This window gets
114 * mapped somewhere into virtual kernel space and we
115 * can use inb/outb again.
116 */
117#define inb_local(__addr) __readb((void __iomem *)(unsigned long)(__addr))
118#define inb(__addr) __readb((void __iomem *)(unsigned long)(__addr))
119#define inw(__addr) __readw((void __iomem *)(unsigned long)(__addr))
120#define inl(__addr) __readl((void __iomem *)(unsigned long)(__addr))
121
122#define outb_local(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
123#define outb(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
124#define outw(__w, __addr) __writew(__w, (void __iomem *)(unsigned long)(__addr))
125#define outl(__l, __addr) __writel(__l, (void __iomem *)(unsigned long)(__addr))
126
127#define inb_p(__addr) inb(__addr)
128#define outb_p(__b, __addr) outb(__b, __addr)
129#define inw_p(__addr) inw(__addr)
130#define outw_p(__w, __addr) outw(__w, __addr)
131#define inl_p(__addr) inl(__addr)
132#define outl_p(__l, __addr) outl(__l, __addr)
133
134void outsb(unsigned long addr, const void *src, unsigned long cnt);
135void outsw(unsigned long addr, const void *src, unsigned long cnt);
136void outsl(unsigned long addr, const void *src, unsigned long cnt);
137void insb(unsigned long addr, void *dst, unsigned long count);
138void insw(unsigned long addr, void *dst, unsigned long count);
139void insl(unsigned long addr, void *dst, unsigned long count);
140
141#define IO_SPACE_LIMIT 0xffffffff
142
143/*
144 * SBus accessors.
145 *
146 * SBus has only one, memory mapped, I/O space.
147 * We do not need to flip bytes for SBus of course.
148 */
149static inline u8 _sbus_readb(const volatile void __iomem *addr)
150{
151 return *(__force volatile u8 *)addr;
152}
153
154static inline u16 _sbus_readw(const volatile void __iomem *addr)
155{
156 return *(__force volatile u16 *)addr;
157}
158
159static inline u32 _sbus_readl(const volatile void __iomem *addr)
160{
161 return *(__force volatile u32 *)addr;
162}
163
164static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
165{
166 *(__force volatile u8 *)addr = b;
167}
168
169static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
170{
171 *(__force volatile u16 *)addr = w;
172}
173
174static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
175{
176 *(__force volatile u32 *)addr = l;
177}
178
179/*
180 * The only reason for #define's is to hide casts to unsigned long.
181 */
182#define sbus_readb(__addr) _sbus_readb(__addr)
183#define sbus_readw(__addr) _sbus_readw(__addr)
184#define sbus_readl(__addr) _sbus_readl(__addr)
185#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
186#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
187#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
188
189static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
190{
191 while(n--) {
192 sbus_writeb(c, __dst);
193 __dst++;
194 }
195}
196
197static inline void
198_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
199{
200 volatile void __iomem *d = dst;
201
202 while (n--) {
203 writeb(c, d);
204 d++;
205 }
206}
207
208#define memset_io(d,c,sz) _memset_io(d,c,sz)
209
210static inline void
211_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
212{
213 char *d = dst;
214
215 while (n--) {
216 char tmp = readb(src);
217 *d++ = tmp;
218 src++;
219 }
220}
221
222#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
223
224static inline void
225_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
226{
227 const char *s = src;
228 volatile void __iomem *d = dst;
229
230 while (n--) {
231 char tmp = *s++;
232 writeb(tmp, d);
233 d++;
234 }
235}
236
237#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
238
239#ifdef __KERNEL__
240
241/*
242 * Bus number may be embedded in the higher bits of the physical address.
243 * This is why we have no bus number argument to ioremap().
244 */
245extern void __iomem *ioremap(unsigned long offset, unsigned long size);
246#define ioremap_nocache(X,Y) ioremap((X),(Y))
247extern void iounmap(volatile void __iomem *addr);
248
249#define ioread8(X) readb(X)
250#define ioread16(X) readw(X)
251#define ioread32(X) readl(X)
252#define iowrite8(val,X) writeb(val,X)
253#define iowrite16(val,X) writew(val,X)
254#define iowrite32(val,X) writel(val,X)
255
256static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
257{
258 insb((unsigned long __force)port, buf, count);
259}
260static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
261{
262 insw((unsigned long __force)port, buf, count);
263}
264
265static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
266{
267 insl((unsigned long __force)port, buf, count);
268}
269
270static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
271{
272 outsb((unsigned long __force)port, buf, count);
273}
274
275static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
276{
277 outsw((unsigned long __force)port, buf, count);
278}
279
280static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
281{
282 outsl((unsigned long __force)port, buf, count);
283}
284
285/* Create a virtual mapping cookie for an IO port range */
286extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
287extern void ioport_unmap(void __iomem *);
288
289/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
290struct pci_dev;
291extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
292extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
293
294/*
295 * Bus number may be in res->flags... somewhere.
296 */
297extern void __iomem *sbus_ioremap(struct resource *res, unsigned long offset,
298 unsigned long size, char *name);
299extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
300
301
302/*
303 * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
304 * so rtc_port is static in it. This should not change unless new
305 * hardware pops up.
306 */
307#define RTC_PORT(x) (rtc_port + (x))
308#define RTC_ALWAYS_BCD 0
309
310#endif
311
312#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
313
314/*
315 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
316 * access
317 */
318#define xlate_dev_mem_ptr(p) __va(p)
319
320/*
321 * Convert a virtual cached pointer to an uncached pointer
322 */
323#define xlate_dev_kmem_ptr(p) p
324
325#endif /* !(__SPARC_IO_H) */
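
For readers coming from other architectures, it may help to see how the sparc32 accessors above fit together: readw()/readl() byte-swap through flip_word()/flip_dword() because PC-style peripherals behind PCI/EBus are little-endian while the CPU is big-endian, and ioremap() hands back an __iomem cookie for the requested physical range. The fragment below is a minimal sketch, not part of the patch; the base address, register offsets and the enable bit are invented purely for illustration.

#include <asm/io.h>

#define DEMO_REG_BASE   0xf0200000UL	/* assumed device register block */
#define DEMO_REG_CTRL   0x00		/* assumed 32-bit control register */
#define DEMO_REG_STATUS 0x04		/* assumed 32-bit status register */
#define DEMO_CTRL_EN    0x00000001	/* assumed enable bit */

static u32 demo_read_status(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(DEMO_REG_BASE, 0x100);	/* map 256 bytes of MMIO */
	if (!regs)
		return 0;

	/* writel()/readl() go through flip_dword(), matching the
	 * little-endian register layout of PCI/EBus devices. */
	writel(DEMO_CTRL_EN, regs + DEMO_REG_CTRL);
	status = readl(regs + DEMO_REG_STATUS);

	iounmap(regs);
	return status;
}

A driver talking to an SBus device would use the sbus_readl()/sbus_writel() variants instead, since SBus is big-endian and needs no swap.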
diff --git a/include/asm-sparc/io_64.h b/include/asm-sparc/io_64.h
new file mode 100644
index 000000000000..f4907414b39d
--- /dev/null
+++ b/include/asm-sparc/io_64.h
@@ -0,0 +1,511 @@
1#ifndef __SPARC64_IO_H
2#define __SPARC64_IO_H
3
4#include <linux/kernel.h>
5#include <linux/compiler.h>
6#include <linux/types.h>
7
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10#include <asm/asi.h>
11
12/* PC crapola... */
13#define __SLOW_DOWN_IO do { } while (0)
14#define SLOW_DOWN_IO do { } while (0)
15
16/* BIO layer definitions. */
17extern unsigned long kern_base, kern_size;
18#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
19#define BIO_VMERGE_BOUNDARY 8192
20
21static inline u8 _inb(unsigned long addr)
22{
23 u8 ret;
24
25 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */"
26 : "=r" (ret)
27 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
28 : "memory");
29
30 return ret;
31}
32
33static inline u16 _inw(unsigned long addr)
34{
35 u16 ret;
36
37 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */"
38 : "=r" (ret)
39 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
40 : "memory");
41
42 return ret;
43}
44
45static inline u32 _inl(unsigned long addr)
46{
47 u32 ret;
48
49 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */"
50 : "=r" (ret)
51 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
52 : "memory");
53
54 return ret;
55}
56
57static inline void _outb(u8 b, unsigned long addr)
58{
59 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */"
60 : /* no outputs */
61 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
62 : "memory");
63}
64
65static inline void _outw(u16 w, unsigned long addr)
66{
67 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */"
68 : /* no outputs */
69 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
70 : "memory");
71}
72
73static inline void _outl(u32 l, unsigned long addr)
74{
75 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */"
76 : /* no outputs */
77 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
78 : "memory");
79}
80
81#define inb(__addr) (_inb((unsigned long)(__addr)))
82#define inw(__addr) (_inw((unsigned long)(__addr)))
83#define inl(__addr) (_inl((unsigned long)(__addr)))
84#define outb(__b, __addr) (_outb((u8)(__b), (unsigned long)(__addr)))
85#define outw(__w, __addr) (_outw((u16)(__w), (unsigned long)(__addr)))
86#define outl(__l, __addr) (_outl((u32)(__l), (unsigned long)(__addr)))
87
88#define inb_p(__addr) inb(__addr)
89#define outb_p(__b, __addr) outb(__b, __addr)
90#define inw_p(__addr) inw(__addr)
91#define outw_p(__w, __addr) outw(__w, __addr)
92#define inl_p(__addr) inl(__addr)
93#define outl_p(__l, __addr) outl(__l, __addr)
94
95extern void outsb(unsigned long, const void *, unsigned long);
96extern void outsw(unsigned long, const void *, unsigned long);
97extern void outsl(unsigned long, const void *, unsigned long);
98extern void insb(unsigned long, void *, unsigned long);
99extern void insw(unsigned long, void *, unsigned long);
100extern void insl(unsigned long, void *, unsigned long);
101
102static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
103{
104 insb((unsigned long __force)port, buf, count);
105}
106static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
107{
108 insw((unsigned long __force)port, buf, count);
109}
110
111static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
112{
113 insl((unsigned long __force)port, buf, count);
114}
115
116static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
117{
118 outsb((unsigned long __force)port, buf, count);
119}
120
121static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
122{
123 outsw((unsigned long __force)port, buf, count);
124}
125
126static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
127{
128 outsl((unsigned long __force)port, buf, count);
129}
130
131/* Memory functions, same as I/O accesses on Ultra. */
132static inline u8 _readb(const volatile void __iomem *addr)
133{ u8 ret;
134
135 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
136 : "=r" (ret)
137 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
138 : "memory");
139 return ret;
140}
141
142static inline u16 _readw(const volatile void __iomem *addr)
143{ u16 ret;
144
145 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
146 : "=r" (ret)
147 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
148 : "memory");
149
150 return ret;
151}
152
153static inline u32 _readl(const volatile void __iomem *addr)
154{ u32 ret;
155
156 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
157 : "=r" (ret)
158 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
159 : "memory");
160
161 return ret;
162}
163
164static inline u64 _readq(const volatile void __iomem *addr)
165{ u64 ret;
166
167 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
168 : "=r" (ret)
169 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
170 : "memory");
171
172 return ret;
173}
174
175static inline void _writeb(u8 b, volatile void __iomem *addr)
176{
177 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
178 : /* no outputs */
179 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
180 : "memory");
181}
182
183static inline void _writew(u16 w, volatile void __iomem *addr)
184{
185 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
186 : /* no outputs */
187 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
188 : "memory");
189}
190
191static inline void _writel(u32 l, volatile void __iomem *addr)
192{
193 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
194 : /* no outputs */
195 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
196 : "memory");
197}
198
199static inline void _writeq(u64 q, volatile void __iomem *addr)
200{
201 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
202 : /* no outputs */
203 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
204 : "memory");
205}
206
207#define readb(__addr) _readb(__addr)
208#define readw(__addr) _readw(__addr)
209#define readl(__addr) _readl(__addr)
210#define readq(__addr) _readq(__addr)
211#define readb_relaxed(__addr) _readb(__addr)
212#define readw_relaxed(__addr) _readw(__addr)
213#define readl_relaxed(__addr) _readl(__addr)
214#define readq_relaxed(__addr) _readq(__addr)
215#define writeb(__b, __addr) _writeb(__b, __addr)
216#define writew(__w, __addr) _writew(__w, __addr)
217#define writel(__l, __addr) _writel(__l, __addr)
218#define writeq(__q, __addr) _writeq(__q, __addr)
219
220/* Now versions without byte-swapping. */
221static inline u8 _raw_readb(unsigned long addr)
222{
223 u8 ret;
224
225 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
226 : "=r" (ret)
227 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
228
229 return ret;
230}
231
232static inline u16 _raw_readw(unsigned long addr)
233{
234 u16 ret;
235
236 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
237 : "=r" (ret)
238 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
239
240 return ret;
241}
242
243static inline u32 _raw_readl(unsigned long addr)
244{
245 u32 ret;
246
247 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
248 : "=r" (ret)
249 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
250
251 return ret;
252}
253
254static inline u64 _raw_readq(unsigned long addr)
255{
256 u64 ret;
257
258 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
259 : "=r" (ret)
260 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
261
262 return ret;
263}
264
265static inline void _raw_writeb(u8 b, unsigned long addr)
266{
267 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
268 : /* no outputs */
269 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
270}
271
272static inline void _raw_writew(u16 w, unsigned long addr)
273{
274 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
275 : /* no outputs */
276 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
277}
278
279static inline void _raw_writel(u32 l, unsigned long addr)
280{
281 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
282 : /* no outputs */
283 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
284}
285
286static inline void _raw_writeq(u64 q, unsigned long addr)
287{
288 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
289 : /* no outputs */
290 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
291}
292
293#define __raw_readb(__addr) (_raw_readb((unsigned long)(__addr)))
294#define __raw_readw(__addr) (_raw_readw((unsigned long)(__addr)))
295#define __raw_readl(__addr) (_raw_readl((unsigned long)(__addr)))
296#define __raw_readq(__addr) (_raw_readq((unsigned long)(__addr)))
297#define __raw_writeb(__b, __addr) (_raw_writeb((u8)(__b), (unsigned long)(__addr)))
298#define __raw_writew(__w, __addr) (_raw_writew((u16)(__w), (unsigned long)(__addr)))
299#define __raw_writel(__l, __addr) (_raw_writel((u32)(__l), (unsigned long)(__addr)))
300#define __raw_writeq(__q, __addr) (_raw_writeq((u64)(__q), (unsigned long)(__addr)))
301
302/* Valid I/O Space regions are anywhere, because each PCI bus supported
303 * can live in an arbitrary area of the physical address range.
304 */
305#define IO_SPACE_LIMIT 0xffffffffffffffffUL
306
307/* Now, SBUS variants, only difference from PCI is that we do
308 * not use little-endian ASIs.
309 */
310static inline u8 _sbus_readb(const volatile void __iomem *addr)
311{
312 u8 ret;
313
314 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */"
315 : "=r" (ret)
316 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
317 : "memory");
318
319 return ret;
320}
321
322static inline u16 _sbus_readw(const volatile void __iomem *addr)
323{
324 u16 ret;
325
326 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */"
327 : "=r" (ret)
328 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
329 : "memory");
330
331 return ret;
332}
333
334static inline u32 _sbus_readl(const volatile void __iomem *addr)
335{
336 u32 ret;
337
338 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */"
339 : "=r" (ret)
340 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
341 : "memory");
342
343 return ret;
344}
345
346static inline u64 _sbus_readq(const volatile void __iomem *addr)
347{
348 u64 ret;
349
350 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */"
351 : "=r" (ret)
352 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
353 : "memory");
354
355 return ret;
356}
357
358static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
359{
360 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
361 : /* no outputs */
362 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
363 : "memory");
364}
365
366static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
367{
368 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
369 : /* no outputs */
370 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
371 : "memory");
372}
373
374static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
375{
376 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
377 : /* no outputs */
378 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
379 : "memory");
380}
381
382static inline void _sbus_writeq(u64 l, volatile void __iomem *addr)
383{
384 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
385 : /* no outputs */
386 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
387 : "memory");
388}
389
390#define sbus_readb(__addr) _sbus_readb(__addr)
391#define sbus_readw(__addr) _sbus_readw(__addr)
392#define sbus_readl(__addr) _sbus_readl(__addr)
393#define sbus_readq(__addr) _sbus_readq(__addr)
394#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
395#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
396#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
397#define sbus_writeq(__l, __addr) _sbus_writeq(__l, __addr)
398
399static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
400{
401 while(n--) {
402 sbus_writeb(c, dst);
403 dst++;
404 }
405}
406
407#define sbus_memset_io(d,c,sz) _sbus_memset_io(d,c,sz)
408
409static inline void
410_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
411{
412 volatile void __iomem *d = dst;
413
414 while (n--) {
415 writeb(c, d);
416 d++;
417 }
418}
419
420#define memset_io(d,c,sz) _memset_io(d,c,sz)
421
422static inline void
423_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
424{
425 char *d = dst;
426
427 while (n--) {
428 char tmp = readb(src);
429 *d++ = tmp;
430 src++;
431 }
432}
433
434#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
435
436static inline void
437_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
438{
439 const char *s = src;
440 volatile void __iomem *d = dst;
441
442 while (n--) {
443 char tmp = *s++;
444 writeb(tmp, d);
445 d++;
446 }
447}
448
449#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
450
451#define mmiowb()
452
453#ifdef __KERNEL__
454
455/* On sparc64 we have the whole physical IO address space accessible
456 * using physically addressed loads and stores, so this does nothing.
457 */
458static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
459{
460 return (void __iomem *)offset;
461}
462
463#define ioremap_nocache(X,Y) ioremap((X),(Y))
464
465static inline void iounmap(volatile void __iomem *addr)
466{
467}
468
469#define ioread8(X) readb(X)
470#define ioread16(X) readw(X)
471#define ioread32(X) readl(X)
472#define iowrite8(val,X) writeb(val,X)
473#define iowrite16(val,X) writew(val,X)
474#define iowrite32(val,X) writel(val,X)
475
476/* Create a virtual mapping cookie for an IO port range */
477extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
478extern void ioport_unmap(void __iomem *);
479
480/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
481struct pci_dev;
482extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
483extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
484
485/* Similarly for SBUS. */
486#define sbus_ioremap(__res, __offset, __size, __name) \
487({ unsigned long __ret; \
488 __ret = (__res)->start + (((__res)->flags & 0x1ffUL) << 32UL); \
489 __ret += (unsigned long) (__offset); \
490 if (! request_region((__ret), (__size), (__name))) \
491 __ret = 0UL; \
492 (void __iomem *) __ret; \
493})
494
495#define sbus_iounmap(__addr, __size) \
496 release_region((unsigned long)(__addr), (__size))
497
498/*
499 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
500 * access
501 */
502#define xlate_dev_mem_ptr(p) __va(p)
503
504/*
505 * Convert a virtual cached pointer to an uncached pointer
506 */
507#define xlate_dev_kmem_ptr(p) p
508
509#endif
510
511#endif /* !(__SPARC64_IO_H) */
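
The 64-bit header takes a different route: every accessor is a physically-addressed load or store through an alternate space, with readl() and friends using the little-endian ASI (ASI_PHYS_BYPASS_EC_E_L) and the sbus_* variants the big-endian one, which is why ioremap() can be an identity cast. A hedged sketch of the difference, assuming an already-mapped cookie and a made-up register offset:

#include <linux/kernel.h>
#include <asm/io.h>

static void demo_dump_reg(void __iomem *regs)
{
	/* Little-endian ASI: the view a PCI/EBus device expects. */
	u32 pci_view  = readl(regs + 0x10);

	/* Big-endian ASI: the same location as an SBus device would
	 * present it, i.e. without byte swapping. */
	u32 sbus_view = sbus_readl(regs + 0x10);

	printk(KERN_INFO "demo: pci view %08x, sbus view %08x\n",
	       pci_view, sbus_view);
}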
diff --git a/include/asm-sparc/iommu.h b/include/asm-sparc/iommu.h
index 70c589c05a10..91b072b0d7a0 100644
--- a/include/asm-sparc/iommu.h
+++ b/include/asm-sparc/iommu.h
@@ -1,121 +1,8 @@
1/* iommu.h: Definitions for the sun4m IOMMU. 1#ifndef ___ASM_SPARC_IOMMU_H
2 * 2#define ___ASM_SPARC_IOMMU_H
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/iommu_64.h>
5#ifndef _SPARC_IOMMU_H 5#else
6#define _SPARC_IOMMU_H 6#include <asm-sparc/iommu_32.h>
7 7#endif
8#include <asm/page.h> 8#endif
9#include <asm/bitext.h>
10
11/* The iommu handles all virtual to physical address translations
12 * that occur between the SBUS and physical memory. Accesses by
13 * the cpu to IO registers and the like go over the mbus and so are
14 * translated by the on-chip SRMMU. The iommu and the srmmu do
15 * not need to have the same translations at all; in fact, most
16 * of the time the translations they handle are a disjoint set.
17 * Basically the iommu handles all dvma sbus activity.
18 */
19
20/* The IOMMU registers occupy three pages in IO space. */
21struct iommu_regs {
22 /* First page */
23 volatile unsigned long control; /* IOMMU control */
24 volatile unsigned long base; /* Physical base of iopte page table */
25 volatile unsigned long _unused1[3];
26 volatile unsigned long tlbflush; /* write only */
27 volatile unsigned long pageflush; /* write only */
28 volatile unsigned long _unused2[1017];
29 /* Second page */
30 volatile unsigned long afsr; /* Async-fault status register */
31 volatile unsigned long afar; /* Async-fault physical address */
32 volatile unsigned long _unused3[2];
33 volatile unsigned long sbuscfg0; /* SBUS configuration registers, per-slot */
34 volatile unsigned long sbuscfg1;
35 volatile unsigned long sbuscfg2;
36 volatile unsigned long sbuscfg3;
37 volatile unsigned long mfsr; /* Memory-fault status register */
38 volatile unsigned long mfar; /* Memory-fault physical address */
39 volatile unsigned long _unused4[1014];
40 /* Third page */
41 volatile unsigned long mid; /* IOMMU module-id */
42};
43
44#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */
45#define IOMMU_CTRL_VERS 0x0f000000 /* Version */
46#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */
47#define IOMMU_RNGE_16MB 0x00000000 /* 0xff000000 -> 0xffffffff */
48#define IOMMU_RNGE_32MB 0x00000004 /* 0xfe000000 -> 0xffffffff */
49#define IOMMU_RNGE_64MB 0x00000008 /* 0xfc000000 -> 0xffffffff */
50#define IOMMU_RNGE_128MB 0x0000000c /* 0xf8000000 -> 0xffffffff */
51#define IOMMU_RNGE_256MB 0x00000010 /* 0xf0000000 -> 0xffffffff */
52#define IOMMU_RNGE_512MB 0x00000014 /* 0xe0000000 -> 0xffffffff */
53#define IOMMU_RNGE_1GB 0x00000018 /* 0xc0000000 -> 0xffffffff */
54#define IOMMU_RNGE_2GB 0x0000001c /* 0x80000000 -> 0xffffffff */
55#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */
56
57#define IOMMU_AFSR_ERR 0x80000000 /* LE, TO, or BE asserted */
58#define IOMMU_AFSR_LE 0x40000000 /* SBUS reports error after transaction */
59#define IOMMU_AFSR_TO 0x20000000 /* Write access took more than 12.8 us. */
60#define IOMMU_AFSR_BE 0x10000000 /* Write access received error acknowledge */
61#define IOMMU_AFSR_SIZE 0x0e000000 /* Size of transaction causing error */
62#define IOMMU_AFSR_S 0x01000000 /* Sparc was in supervisor mode */
63#define IOMMU_AFSR_RESV 0x00f00000 /* Reserved, forced to 0x8 by hardware */
64#define IOMMU_AFSR_ME 0x00080000 /* Multiple errors occurred */
65#define IOMMU_AFSR_RD 0x00040000 /* A read operation was in progress */
66#define IOMMU_AFSR_FAV 0x00020000 /* IOMMU afar has valid contents */
67
68#define IOMMU_SBCFG_SAB30 0x00010000 /* Phys-address bit 30 when bypass enabled */
69#define IOMMU_SBCFG_BA16 0x00000004 /* Slave supports 16 byte bursts */
70#define IOMMU_SBCFG_BA8 0x00000002 /* Slave supports 8 byte bursts */
71#define IOMMU_SBCFG_BYPASS 0x00000001 /* Bypass IOMMU, treat all addresses
72 produced by this device as pure
73 physical. */
74
75#define IOMMU_MFSR_ERR 0x80000000 /* One or more of PERR1 or PERR0 */
76#define IOMMU_MFSR_S 0x01000000 /* Sparc was in supervisor mode */
77#define IOMMU_MFSR_CPU 0x00800000 /* CPU transaction caused parity error */
78#define IOMMU_MFSR_ME 0x00080000 /* Multiple parity errors occurred */
79#define IOMMU_MFSR_PERR 0x00006000 /* high bit indicates parity error occurred
80 on the even word of the access, low bit
81 indicates the odd word caused the parity error */
82#define IOMMU_MFSR_BM 0x00001000 /* Error occurred while in boot mode */
83#define IOMMU_MFSR_C 0x00000800 /* Address causing error was marked cacheable */
84#define IOMMU_MFSR_RTYP 0x000000f0 /* Memory request transaction type */
85
86#define IOMMU_MID_SBAE 0x001f0000 /* SBus arbitration enable */
87#define IOMMU_MID_SE 0x00100000 /* Enables SCSI/ETHERNET arbitration */
88#define IOMMU_MID_SB3 0x00080000 /* Enable SBUS device 3 arbitration */
89#define IOMMU_MID_SB2 0x00040000 /* Enable SBUS device 2 arbitration */
90#define IOMMU_MID_SB1 0x00020000 /* Enable SBUS device 1 arbitration */
91#define IOMMU_MID_SB0 0x00010000 /* Enable SBUS device 0 arbitration */
92#define IOMMU_MID_MID 0x0000000f /* Module-id, hardcoded to 0x8 */
93
94/* The format of an iopte in the page tables */
95#define IOPTE_PAGE 0x07ffff00 /* Physical page number (PA[30:12]) */
96#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */
97#define IOPTE_WRITE 0x00000004 /* Writeable */
98#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
99#define IOPTE_WAZ 0x00000001 /* Write as zeros */
100
101struct iommu_struct {
102 struct iommu_regs *regs;
103 iopte_t *page_table;
104 /* For convenience */
105 unsigned long start; /* First managed virtual address */
106 unsigned long end; /* Last managed virtual address */
107
108 struct bit_map usemap;
109};
110
111static inline void iommu_invalidate(struct iommu_regs *regs)
112{
113 regs->tlbflush = 0;
114}
115
116static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
117{
118 regs->pageflush = (ba & PAGE_MASK);
119}
120
121#endif /* !(_SPARC_IOMMU_H) */
diff --git a/include/asm-sparc/iommu_32.h b/include/asm-sparc/iommu_32.h
new file mode 100644
index 000000000000..70c589c05a10
--- /dev/null
+++ b/include/asm-sparc/iommu_32.h
@@ -0,0 +1,121 @@
1/* iommu.h: Definitions for the sun4m IOMMU.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5#ifndef _SPARC_IOMMU_H
6#define _SPARC_IOMMU_H
7
8#include <asm/page.h>
9#include <asm/bitext.h>
10
11/* The iommu handles all virtual to physical address translations
12 * that occur between the SBUS and physical memory. Accesses by
13 * the cpu to IO registers and the like go over the mbus and so are
14 * translated by the on-chip SRMMU. The iommu and the srmmu do
15 * not need to have the same translations at all; in fact, most
16 * of the time the translations they handle are a disjoint set.
17 * Basically the iommu handles all dvma sbus activity.
18 */
19
20/* The IOMMU registers occupy three pages in IO space. */
21struct iommu_regs {
22 /* First page */
23 volatile unsigned long control; /* IOMMU control */
24 volatile unsigned long base; /* Physical base of iopte page table */
25 volatile unsigned long _unused1[3];
26 volatile unsigned long tlbflush; /* write only */
27 volatile unsigned long pageflush; /* write only */
28 volatile unsigned long _unused2[1017];
29 /* Second page */
30 volatile unsigned long afsr; /* Async-fault status register */
31 volatile unsigned long afar; /* Async-fault physical address */
32 volatile unsigned long _unused3[2];
33 volatile unsigned long sbuscfg0; /* SBUS configuration registers, per-slot */
34 volatile unsigned long sbuscfg1;
35 volatile unsigned long sbuscfg2;
36 volatile unsigned long sbuscfg3;
37 volatile unsigned long mfsr; /* Memory-fault status register */
38 volatile unsigned long mfar; /* Memory-fault physical address */
39 volatile unsigned long _unused4[1014];
40 /* Third page */
41 volatile unsigned long mid; /* IOMMU module-id */
42};
43
44#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */
45#define IOMMU_CTRL_VERS 0x0f000000 /* Version */
46#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */
47#define IOMMU_RNGE_16MB 0x00000000 /* 0xff000000 -> 0xffffffff */
48#define IOMMU_RNGE_32MB 0x00000004 /* 0xfe000000 -> 0xffffffff */
49#define IOMMU_RNGE_64MB 0x00000008 /* 0xfc000000 -> 0xffffffff */
50#define IOMMU_RNGE_128MB 0x0000000c /* 0xf8000000 -> 0xffffffff */
51#define IOMMU_RNGE_256MB 0x00000010 /* 0xf0000000 -> 0xffffffff */
52#define IOMMU_RNGE_512MB 0x00000014 /* 0xe0000000 -> 0xffffffff */
53#define IOMMU_RNGE_1GB 0x00000018 /* 0xc0000000 -> 0xffffffff */
54#define IOMMU_RNGE_2GB 0x0000001c /* 0x80000000 -> 0xffffffff */
55#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */
56
57#define IOMMU_AFSR_ERR 0x80000000 /* LE, TO, or BE asserted */
58#define IOMMU_AFSR_LE 0x40000000 /* SBUS reports error after transaction */
59#define IOMMU_AFSR_TO 0x20000000 /* Write access took more than 12.8 us. */
60#define IOMMU_AFSR_BE 0x10000000 /* Write access received error acknowledge */
61#define IOMMU_AFSR_SIZE 0x0e000000 /* Size of transaction causing error */
62#define IOMMU_AFSR_S 0x01000000 /* Sparc was in supervisor mode */
63#define IOMMU_AFSR_RESV 0x00f00000 /* Reserved, forced to 0x8 by hardware */
64#define IOMMU_AFSR_ME 0x00080000 /* Multiple errors occurred */
65#define IOMMU_AFSR_RD 0x00040000 /* A read operation was in progress */
66#define IOMMU_AFSR_FAV 0x00020000 /* IOMMU afar has valid contents */
67
68#define IOMMU_SBCFG_SAB30 0x00010000 /* Phys-address bit 30 when bypass enabled */
69#define IOMMU_SBCFG_BA16 0x00000004 /* Slave supports 16 byte bursts */
70#define IOMMU_SBCFG_BA8 0x00000002 /* Slave supports 8 byte bursts */
71#define IOMMU_SBCFG_BYPASS 0x00000001 /* Bypass IOMMU, treat all addresses
72 produced by this device as pure
73 physical. */
74
75#define IOMMU_MFSR_ERR 0x80000000 /* One or more of PERR1 or PERR0 */
76#define IOMMU_MFSR_S 0x01000000 /* Sparc was in supervisor mode */
77#define IOMMU_MFSR_CPU 0x00800000 /* CPU transaction caused parity error */
78#define IOMMU_MFSR_ME 0x00080000 /* Multiple parity errors occurred */
79#define IOMMU_MFSR_PERR 0x00006000 /* high bit indicates parity error occurred
80 on the even word of the access, low bit
81 indicates the odd word caused the parity error */
82#define IOMMU_MFSR_BM 0x00001000 /* Error occurred while in boot mode */
83#define IOMMU_MFSR_C 0x00000800 /* Address causing error was marked cacheable */
84#define IOMMU_MFSR_RTYP 0x000000f0 /* Memory request transaction type */
85
86#define IOMMU_MID_SBAE 0x001f0000 /* SBus arbitration enable */
87#define IOMMU_MID_SE 0x00100000 /* Enables SCSI/ETHERNET arbitration */
88#define IOMMU_MID_SB3 0x00080000 /* Enable SBUS device 3 arbitration */
89#define IOMMU_MID_SB2 0x00040000 /* Enable SBUS device 2 arbitration */
90#define IOMMU_MID_SB1 0x00020000 /* Enable SBUS device 1 arbitration */
91#define IOMMU_MID_SB0 0x00010000 /* Enable SBUS device 0 arbitration */
92#define IOMMU_MID_MID 0x0000000f /* Module-id, hardcoded to 0x8 */
93
94/* The format of an iopte in the page tables */
95#define IOPTE_PAGE 0x07ffff00 /* Physical page number (PA[30:12]) */
96#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */
97#define IOPTE_WRITE 0x00000004 /* Writeable */
98#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
99#define IOPTE_WAZ 0x00000001 /* Write as zeros */
100
101struct iommu_struct {
102 struct iommu_regs *regs;
103 iopte_t *page_table;
104 /* For convenience */
105 unsigned long start; /* First managed virtual address */
106 unsigned long end; /* Last managed virtual address */
107
108 struct bit_map usemap;
109};
110
111static inline void iommu_invalidate(struct iommu_regs *regs)
112{
113 regs->tlbflush = 0;
114}
115
116static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
117{
118 regs->pageflush = (ba & PAGE_MASK);
119}
120
121#endif /* !(_SPARC_IOMMU_H) */
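
As the comment at the top of this header says, the sun4m IOMMU only translates SBUS DVMA traffic, and its page table is an array of 32-bit ioptes indexed by bus address. The sketch below shows how one mapping could be installed with the definitions above; it assumes iopte_val() from <asm/page.h>, and the iommu pointer, DVMA address and physical address are supplied by a hypothetical caller. The ">> 4" places PA[30:12] into the IOPTE_PAGE field, matching the layout described in the header.

#include <asm/page.h>
#include <asm/iommu.h>

static void demo_map_one(struct iommu_struct *iommu,
			 unsigned long dvma, unsigned long paddr)
{
	iopte_t *iopte = iommu->page_table +
			 ((dvma - iommu->start) >> PAGE_SHIFT);

	/* PA[30:12] lands in IOPTE_PAGE; mark the entry writable and valid. */
	iopte_val(*iopte) = ((paddr >> 4) & IOPTE_PAGE) |
			    IOPTE_WRITE | IOPTE_VALID;

	/* Throw away any cached translation for this bus address. */
	iommu_invalidate_page(iommu->regs, dvma);
}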
diff --git a/include/asm-sparc/iommu_64.h b/include/asm-sparc/iommu_64.h
new file mode 100644
index 000000000000..d7b9afcba08b
--- /dev/null
+++ b/include/asm-sparc/iommu_64.h
@@ -0,0 +1,62 @@
1/* iommu.h: Definitions for the sun5 IOMMU.
2 *
3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
4 */
5#ifndef _SPARC64_IOMMU_H
6#define _SPARC64_IOMMU_H
7
8/* The format of an iopte in the page tables. */
9#define IOPTE_VALID 0x8000000000000000UL
10#define IOPTE_64K 0x2000000000000000UL
11#define IOPTE_STBUF 0x1000000000000000UL
12#define IOPTE_INTRA 0x0800000000000000UL
13#define IOPTE_CONTEXT 0x07ff800000000000UL
14#define IOPTE_PAGE 0x00007fffffffe000UL
15#define IOPTE_CACHE 0x0000000000000010UL
16#define IOPTE_WRITE 0x0000000000000002UL
17
18#define IOMMU_NUM_CTXS 4096
19
20struct iommu_arena {
21 unsigned long *map;
22 unsigned int hint;
23 unsigned int limit;
24};
25
26struct iommu {
27 spinlock_t lock;
28 struct iommu_arena arena;
29 void (*flush_all)(struct iommu *);
30 iopte_t *page_table;
31 u32 page_table_map_base;
32 unsigned long iommu_control;
33 unsigned long iommu_tsbbase;
34 unsigned long iommu_flush;
35 unsigned long iommu_flushinv;
36 unsigned long iommu_tags;
37 unsigned long iommu_ctxflush;
38 unsigned long write_complete_reg;
39 unsigned long dummy_page;
40 unsigned long dummy_page_pa;
41 unsigned long ctx_lowest_free;
42 DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
43 u32 dma_addr_mask;
44};
45
46struct strbuf {
47 int strbuf_enabled;
48 unsigned long strbuf_control;
49 unsigned long strbuf_pflush;
50 unsigned long strbuf_fsync;
51 unsigned long strbuf_ctxflush;
52 unsigned long strbuf_ctxmatch_base;
53 unsigned long strbuf_flushflag_pa;
54 volatile unsigned long *strbuf_flushflag;
55 volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
56};
57
58extern int iommu_table_init(struct iommu *iommu, int tsbsize,
59 u32 dma_offset, u32 dma_addr_mask,
60 int numa_node);
61
62#endif /* !(_SPARC64_IOMMU_H) */
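
On sun4u/sun4v the iopte is a 64-bit word and the masks above describe its layout directly, so a page-aligned physical address drops straight into IOPTE_PAGE. A small illustrative helper, assuming <linux/spinlock.h> and <asm/page.h> are already pulled in (this header relies on them for spinlock_t and iopte_t):

#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/iommu.h>

static inline u64 demo_make_iopte(unsigned long paddr)
{
	/* Keep the page-frame bits and mark the entry valid, cacheable
	 * and writable; contexts and the streaming buffer are left off. */
	return IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
	       ((u64)paddr & IOPTE_PAGE);
}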
diff --git a/include/asm-sparc/ipcbuf.h b/include/asm-sparc/ipcbuf.h
index 9bef02d04e4b..037605d986e2 100644
--- a/include/asm-sparc/ipcbuf.h
+++ b/include/asm-sparc/ipcbuf.h
@@ -1,31 +1,8 @@
1#ifndef _SPARC_IPCBUF_H 1#ifndef ___ASM_SPARC_IPCBUF_H
2#define _SPARC_IPCBUF_H 2#define ___ASM_SPARC_IPCBUF_H
3 3#if defined(__sparc__) && defined(__arch64__)
4/* 4#include <asm-sparc/ipcbuf_64.h>
5 * The ipc64_perm structure for sparc architecture. 5#else
6 * Note extra padding because this structure is passed back and forth 6#include <asm-sparc/ipcbuf_32.h>
7 * between kernel and user space. 7#endif
8 * 8#endif
9 * Pad space is left for:
10 * - 32-bit mode
11 * - 32-bit seq
12 * - 2 miscellaneous 64-bit values (so that this structure matches
13 * sparc64 ipc64_perm)
14 */
15
16struct ipc64_perm
17{
18 __kernel_key_t key;
19 __kernel_uid32_t uid;
20 __kernel_gid32_t gid;
21 __kernel_uid32_t cuid;
22 __kernel_gid32_t cgid;
23 unsigned short __pad1;
24 __kernel_mode_t mode;
25 unsigned short __pad2;
26 unsigned short seq;
27 unsigned long long __unused1;
28 unsigned long long __unused2;
29};
30
31#endif /* _SPARC_IPCBUF_H */
diff --git a/include/asm-sparc/ipcbuf_32.h b/include/asm-sparc/ipcbuf_32.h
new file mode 100644
index 000000000000..6387209518f2
--- /dev/null
+++ b/include/asm-sparc/ipcbuf_32.h
@@ -0,0 +1,31 @@
1#ifndef _SPARC_IPCBUF_H
2#define _SPARC_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for sparc architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode
11 * - 32-bit seq
12 * - 2 miscellaneous 64-bit values (so that this structure matches
13 * sparc64 ipc64_perm)
14 */
15
16struct ipc64_perm
17{
18 __kernel_key_t key;
19 __kernel_uid32_t uid;
20 __kernel_gid32_t gid;
21 __kernel_uid32_t cuid;
22 __kernel_gid32_t cgid;
23 unsigned short __pad1;
24 __kernel_mode_t mode;
25 unsigned short __pad2;
26 unsigned short seq;
27 unsigned long long __unused1;
28 unsigned long long __unused2;
29};
30
31#endif /* _SPARC_IPCBUF_H */
diff --git a/include/asm-sparc/ipcbuf_64.h b/include/asm-sparc/ipcbuf_64.h
new file mode 100644
index 000000000000..a44b855b98db
--- /dev/null
+++ b/include/asm-sparc/ipcbuf_64.h
@@ -0,0 +1,28 @@
1#ifndef _SPARC64_IPCBUF_H
2#define _SPARC64_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for sparc64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit seq
11 * - 2 miscellaneous 64-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid_t uid;
18 __kernel_gid_t gid;
19 __kernel_uid_t cuid;
20 __kernel_gid_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* _SPARC64_IPCBUF_H */
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h
index fe205cc444b8..7af6bb4aa09c 100644
--- a/include/asm-sparc/irq.h
+++ b/include/asm-sparc/irq.h
@@ -1,15 +1,8 @@
1/* irq.h: IRQ registers on the Sparc. 1#ifndef ___ASM_SPARC_IRQ_H
2 * 2#define ___ASM_SPARC_IRQ_H
3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/irq_64.h>
5 5#else
6#ifndef _SPARC_IRQ_H 6#include <asm-sparc/irq_32.h>
7#define _SPARC_IRQ_H 7#endif
8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16
12
13#define irq_canonicalize(irq) (irq)
14
15#endif 8#endif
diff --git a/include/asm-sparc/irq_32.h b/include/asm-sparc/irq_32.h
new file mode 100644
index 000000000000..fe205cc444b8
--- /dev/null
+++ b/include/asm-sparc/irq_32.h
@@ -0,0 +1,15 @@
1/* irq.h: IRQ registers on the Sparc.
2 *
3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC_IRQ_H
7#define _SPARC_IRQ_H
8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16
12
13#define irq_canonicalize(irq) (irq)
14
15#endif
diff --git a/include/asm-sparc/irq_64.h b/include/asm-sparc/irq_64.h
new file mode 100644
index 000000000000..0bb9bf531745
--- /dev/null
+++ b/include/asm-sparc/irq_64.h
@@ -0,0 +1,93 @@
1/* irq.h: IRQ registers on the 64-bit Sparc.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
5 */
6
7#ifndef _SPARC64_IRQ_H
8#define _SPARC64_IRQ_H
9
10#include <linux/linkage.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/interrupt.h>
14#include <asm/pil.h>
15#include <asm/ptrace.h>
16
17/* IMAP/ICLR register defines */
18#define IMAP_VALID 0x80000000UL /* IRQ Enabled */
19#define IMAP_TID_UPA 0x7c000000UL /* UPA TargetID */
20#define IMAP_TID_JBUS 0x7c000000UL /* JBUS TargetID */
21#define IMAP_TID_SHIFT 26
22#define IMAP_AID_SAFARI 0x7c000000UL /* Safari AgentID */
23#define IMAP_AID_SHIFT 26
24#define IMAP_NID_SAFARI 0x03e00000UL /* Safari NodeID */
25#define IMAP_NID_SHIFT 21
26#define IMAP_IGN 0x000007c0UL /* IRQ Group Number */
27#define IMAP_INO 0x0000003fUL /* IRQ Number */
28#define IMAP_INR 0x000007ffUL /* Full interrupt number*/
29
30#define ICLR_IDLE 0x00000000UL /* Idle state */
31#define ICLR_TRANSMIT 0x00000001UL /* Transmit state */
32#define ICLR_PENDING 0x00000003UL /* Pending state */
33
34/* The largest number of unique interrupt sources we support.
35 * If this ever needs to be larger than 255, you need to change
36 * the type of ino_bucket->virt_irq as appropriate.
37 *
38 * ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
39 */
40#define NR_IRQS 255
41
42extern void irq_install_pre_handler(int virt_irq,
43 void (*func)(unsigned int, void *, void *),
44 void *arg1, void *arg2);
45#define irq_canonicalize(irq) (irq)
46extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
47extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
48extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
49extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
50 unsigned int msi_devino_start,
51 unsigned int msi_devino_end);
52extern void sun4v_destroy_msi(unsigned int virt_irq);
53extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
54 unsigned int msi_devino_start,
55 unsigned int msi_devino_end,
56 unsigned long imap_base,
57 unsigned long iclr_base);
58extern void sun4u_destroy_msi(unsigned int virt_irq);
59extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
60
61extern unsigned char virt_irq_alloc(unsigned int dev_handle,
62 unsigned int dev_ino);
63#ifdef CONFIG_PCI_MSI
64extern void virt_irq_free(unsigned int virt_irq);
65#endif
66
67extern void __init init_IRQ(void);
68extern void fixup_irqs(void);
69
70static inline void set_softint(unsigned long bits)
71{
72 __asm__ __volatile__("wr %0, 0x0, %%set_softint"
73 : /* No outputs */
74 : "r" (bits));
75}
76
77static inline void clear_softint(unsigned long bits)
78{
79 __asm__ __volatile__("wr %0, 0x0, %%clear_softint"
80 : /* No outputs */
81 : "r" (bits));
82}
83
84static inline unsigned long get_softint(void)
85{
86 unsigned long retval;
87
88 __asm__ __volatile__("rd %%softint, %0"
89 : "=r" (retval));
90 return retval;
91}
92
93#endif
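
set_softint(), clear_softint() and get_softint() are thin wrappers around the %set_softint, %clear_softint and %softint ancillary state registers. The pattern below is only a sketch of how they are typically used; the bit value is an assumption of the example (real callers derive it from a PIL level).

#include <asm/irq.h>

#define DEMO_SOFTINT_BIT (1UL << 1)	/* assumed free softint bit */

static void demo_kick(void)
{
	/* Latch the bit; the CPU will take the matching soft interrupt. */
	set_softint(DEMO_SOFTINT_BIT);
}

static void demo_ack(void)
{
	/* In the handler path: check and then clear the latched bit. */
	if (get_softint() & DEMO_SOFTINT_BIT)
		clear_softint(DEMO_SOFTINT_BIT);
}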
diff --git a/include/asm-sparc/irqflags.h b/include/asm-sparc/irqflags.h
index db398fb32826..c6402b187e23 100644
--- a/include/asm-sparc/irqflags.h
+++ b/include/asm-sparc/irqflags.h
@@ -1,39 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_IRQFLAGS_H
2 * include/asm-sparc/irqflags.h 2#define ___ASM_SPARC_IRQFLAGS_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * IRQ flags handling 4#include <asm-sparc/irqflags_64.h>
5 * 5#else
6 * This file gets included from lowlevel asm headers too, to provide 6#include <asm-sparc/irqflags_32.h>
7 * wrapped versions of the local_irq_*() APIs, based on the 7#endif
8 * raw_local_irq_*() functions from the lowlevel headers. 8#endif
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15extern void raw_local_irq_restore(unsigned long);
16extern unsigned long __raw_local_irq_save(void);
17extern void raw_local_irq_enable(void);
18
19static inline unsigned long getipl(void)
20{
21 unsigned long retval;
22
23 __asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
24 return retval;
25}
26
27#define raw_local_save_flags(flags) ((flags) = getipl())
28#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save())
29#define raw_local_irq_disable() ((void) __raw_local_irq_save())
30#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0)
31
32static inline int raw_irqs_disabled_flags(unsigned long flags)
33{
34 return ((flags & PSR_PIL) != 0);
35}
36
37#endif /* (__ASSEMBLY__) */
38
39#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/include/asm-sparc/irqflags_32.h b/include/asm-sparc/irqflags_32.h
new file mode 100644
index 000000000000..db398fb32826
--- /dev/null
+++ b/include/asm-sparc/irqflags_32.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-sparc/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15extern void raw_local_irq_restore(unsigned long);
16extern unsigned long __raw_local_irq_save(void);
17extern void raw_local_irq_enable(void);
18
19static inline unsigned long getipl(void)
20{
21 unsigned long retval;
22
23 __asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
24 return retval;
25}
26
27#define raw_local_save_flags(flags) ((flags) = getipl())
28#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save())
29#define raw_local_irq_disable() ((void) __raw_local_irq_save())
30#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0)
31
32static inline int raw_irqs_disabled_flags(unsigned long flags)
33{
34 return ((flags & PSR_PIL) != 0);
35}
36
37#endif /* (__ASSEMBLY__) */
38
39#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/include/asm-sparc/irqflags_64.h b/include/asm-sparc/irqflags_64.h
new file mode 100644
index 000000000000..024fc54d0682
--- /dev/null
+++ b/include/asm-sparc/irqflags_64.h
@@ -0,0 +1,89 @@
1/*
2 * include/asm-sparc64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15static inline unsigned long __raw_local_save_flags(void)
16{
17 unsigned long flags;
18
19 __asm__ __volatile__(
20 "rdpr %%pil, %0"
21 : "=r" (flags)
22 );
23
24 return flags;
25}
26
27#define raw_local_save_flags(flags) \
28 do { (flags) = __raw_local_save_flags(); } while (0)
29
30static inline void raw_local_irq_restore(unsigned long flags)
31{
32 __asm__ __volatile__(
33 "wrpr %0, %%pil"
34 : /* no output */
35 : "r" (flags)
36 : "memory"
37 );
38}
39
40static inline void raw_local_irq_disable(void)
41{
42 __asm__ __volatile__(
43 "wrpr 15, %%pil"
44 : /* no outputs */
45 : /* no inputs */
46 : "memory"
47 );
48}
49
50static inline void raw_local_irq_enable(void)
51{
52 __asm__ __volatile__(
53 "wrpr 0, %%pil"
54 : /* no outputs */
55 : /* no inputs */
56 : "memory"
57 );
58}
59
60static inline int raw_irqs_disabled_flags(unsigned long flags)
61{
62 return (flags > 0);
63}
64
65static inline int raw_irqs_disabled(void)
66{
67 unsigned long flags = __raw_local_save_flags();
68
69 return raw_irqs_disabled_flags(flags);
70}
71
72/*
73 * For spinlocks, etc:
74 */
75static inline unsigned long __raw_local_irq_save(void)
76{
77 unsigned long flags = __raw_local_save_flags();
78
79 raw_local_irq_disable();
80
81 return flags;
82}
83
84#define raw_local_irq_save(flags) \
85 do { (flags) = __raw_local_irq_save(); } while (0)
86
87#endif /* (__ASSEMBLY__) */
88
89#endif /* !(_ASM_IRQFLAGS_H) */
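
These primitives implement interrupt masking purely through the %pil register: __raw_local_irq_save() reads the current level and then writes 15, and raw_irqs_disabled_flags() treats any non-zero %pil as "disabled". Ordinary code reaches them through the generic local_irq_save()/local_irq_restore() wrappers; the direct use below is only a sketch of the bracket they form.

#include <asm/irqflags.h>

static void demo_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save %pil, then raise it to 15 */

	/* ... work that must not be interrupted on this CPU ... */

	raw_local_irq_restore(flags);	/* put the saved %pil back */
}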
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h
index f69fe7d84b3c..fe07d00d0534 100644
--- a/include/asm-sparc/kdebug.h
+++ b/include/asm-sparc/kdebug.h
@@ -1,73 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_KDEBUG_H
2 * kdebug.h: Defines and definitions for debugging the Linux kernel 2#define ___ASM_SPARC_KDEBUG_H
3 * under various kernel debuggers. 3#if defined(__sparc__) && defined(__arch64__)
4 * 4#include <asm-sparc/kdebug_64.h>
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 5#else
6 */ 6#include <asm-sparc/kdebug_32.h>
7#ifndef _SPARC_KDEBUG_H 7#endif
8#define _SPARC_KDEBUG_H 8#endif
9
10#include <asm/openprom.h>
11#include <asm/vaddrs.h>
12
13/* Breakpoints are entered through trap table entry 126. So in sparc assembly
14 * if you want to drop into the debugger you do:
15 *
16 * t DEBUG_BP_TRAP
17 */
18
19#define DEBUG_BP_TRAP 126
20
21#ifndef __ASSEMBLY__
22/* The debug vector is passed in %o1 at boot time. It is a pointer to
23 * a structure in the debugger's address space. Here is its format.
24 */
25
26typedef unsigned int (*debugger_funct)(void);
27
28struct kernel_debug {
29 /* First the entry point into the debugger. You jump here
30 * to give control over to the debugger.
31 */
32 unsigned long kdebug_entry;
33 unsigned long kdebug_trapme; /* Figure out later... */
34 /* The following is the number of pages that the debugger has
35 * taken from the total pool.
36 */
37 unsigned long *kdebug_stolen_pages;
38 /* Ok, after you remap yourself and/or change the trap table
39 * from what you were left with at boot time you have to call
40 * this synchronization function so the debugger can check out
41 * what you have done.
42 */
43 debugger_funct teach_debugger;
44}; /* I think that is it... */
45
46extern struct kernel_debug *linux_dbvec;
47
48/* Use this macro in C-code to enter the debugger. */
49static inline void sp_enter_debugger(void)
50{
51 __asm__ __volatile__("jmpl %0, %%o7\n\t"
52 "nop\n\t" : :
53 "r" (linux_dbvec) : "o7", "memory");
54}
55
56#define SP_ENTER_DEBUGGER do { \
57 if((linux_dbvec!=0) && ((*(short *)linux_dbvec)!=-1)) \
58 sp_enter_debugger(); \
59 } while(0)
60
61enum die_val {
62 DIE_UNUSED,
63};
64
65#endif /* !(__ASSEMBLY__) */
66
67/* Some nice offset defines for assembler code. */
68#define KDEBUG_ENTRY_OFF 0x0
69#define KDEBUG_DUNNO_OFF 0x4
70#define KDEBUG_DUNNO2_OFF 0x8
71#define KDEBUG_TEACH_OFF 0xc
72
73#endif /* !(_SPARC_KDEBUG_H) */
diff --git a/include/asm-sparc/kdebug_32.h b/include/asm-sparc/kdebug_32.h
new file mode 100644
index 000000000000..f69fe7d84b3c
--- /dev/null
+++ b/include/asm-sparc/kdebug_32.h
@@ -0,0 +1,73 @@
1/*
2 * kdebug.h: Defines and definitions for debugging the Linux kernel
3 * under various kernel debuggers.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7#ifndef _SPARC_KDEBUG_H
8#define _SPARC_KDEBUG_H
9
10#include <asm/openprom.h>
11#include <asm/vaddrs.h>
12
13/* Breakpoints are enter through trap table entry 126. So in sparc assembly
14 * if you want to drop into the debugger you do:
15 *
16 * t DEBUG_BP_TRAP
17 */
18
19#define DEBUG_BP_TRAP 126
20
21#ifndef __ASSEMBLY__
22/* The debug vector is passed in %o1 at boot time. It is a pointer to
23 * a structure in the debuggers address space. Here is its format.
24 */
25
26typedef unsigned int (*debugger_funct)(void);
27
28struct kernel_debug {
29 /* First the entry point into the debugger. You jump here
30 * to give control over to the debugger.
31 */
32 unsigned long kdebug_entry;
33 unsigned long kdebug_trapme; /* Figure out later... */
34 /* The following is the number of pages that the debugger has
35 * taken from to total pool.
36 */
37 unsigned long *kdebug_stolen_pages;
38 /* Ok, after you remap yourself and/or change the trap table
39 * from what you were left with at boot time you have to call
40 * this synchronization function so the debugger can check out
41 * what you have done.
42 */
43 debugger_funct teach_debugger;
44}; /* I think that is it... */
45
46extern struct kernel_debug *linux_dbvec;
47
48/* Use this macro in C-code to enter the debugger. */
49static inline void sp_enter_debugger(void)
50{
51 __asm__ __volatile__("jmpl %0, %%o7\n\t"
52 "nop\n\t" : :
53 "r" (linux_dbvec) : "o7", "memory");
54}
55
56#define SP_ENTER_DEBUGGER do { \
57 if((linux_dbvec!=0) && ((*(short *)linux_dbvec)!=-1)) \
58 sp_enter_debugger(); \
59 } while(0)
60
61enum die_val {
62 DIE_UNUSED,
63};
64
65#endif /* !(__ASSEMBLY__) */
66
67/* Some nice offset defines for assembler code. */
68#define KDEBUG_ENTRY_OFF 0x0
69#define KDEBUG_DUNNO_OFF 0x4
70#define KDEBUG_DUNNO2_OFF 0x8
71#define KDEBUG_TEACH_OFF 0xc
72
73#endif /* !(_SPARC_KDEBUG_H) */
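
kdebug_32.h above keeps the old hooks for handing control to a resident boot debugger. Purely as a hypothetical fragment (not part of this diff), kernel C code would use the guarded macro, while assembly would use the trap slot documented in the header comment:

	/* From C: safe even when no debug vector was passed in %o1 at boot. */
	SP_ENTER_DEBUGGER;

	/* From assembly, per the comment above: t DEBUG_BP_TRAP */
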
diff --git a/include/asm-sparc/kdebug_64.h b/include/asm-sparc/kdebug_64.h
new file mode 100644
index 000000000000..f905b773235a
--- /dev/null
+++ b/include/asm-sparc/kdebug_64.h
@@ -0,0 +1,19 @@
1#ifndef _SPARC64_KDEBUG_H
2#define _SPARC64_KDEBUG_H
3
4struct pt_regs;
5
6extern void bad_trap(struct pt_regs *, long);
7
8/* Grossly misnamed. */
9enum die_val {
10 DIE_OOPS = 1,
11 DIE_DEBUG, /* ta 0x70 */
12 DIE_DEBUG_2, /* ta 0x71 */
13 DIE_DIE,
14 DIE_TRAP,
15 DIE_TRAP_TL1,
16 DIE_CALL,
17};
18
19#endif
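
The sparc64 die_val codes above are delivered through the generic die notifier chain. As a hedged sketch (the handler and its names are hypothetical; register_die_notifier() and NOTIFY_DONE are standard kernel notifier API, not something this patch adds), a debugging module could watch for oopses like this:

	#include <linux/kdebug.h>
	#include <linux/notifier.h>

	static int example_die_handler(struct notifier_block *nb,
				       unsigned long val, void *data)
	{
		if (val == DIE_OOPS)
			printk(KERN_ERR "sparc64: oops reported on die chain\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block example_die_nb = {
		.notifier_call = example_die_handler,
	};

	/* register_die_notifier(&example_die_nb) hooks it into the chain. */
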
diff --git a/include/asm-sparc/mc146818rtc.h b/include/asm-sparc/mc146818rtc.h
index fa7eac926582..9ab65c21e9e4 100644
--- a/include/asm-sparc/mc146818rtc.h
+++ b/include/asm-sparc/mc146818rtc.h
@@ -1,29 +1,8 @@
1#ifndef ___ASM_SPARC_MC146818RTC_H
2#define ___ASM_SPARC_MC146818RTC_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/mc146818rtc_64.h>
5#else
6#include <asm-sparc/mc146818rtc_32.h>
7#endif
8#endif
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_SPARC_MC146818RTC_H
5#define __ASM_SPARC_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
12#endif
13
14/*
15 * The yet supported machines all access the RTC index register via
16 * an ISA port access but the way to access the date register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#define RTC_IRQ 8
28
29#endif /* __ASM_SPARC_MC146818RTC_H */
diff --git a/include/asm-sparc/mc146818rtc_32.h b/include/asm-sparc/mc146818rtc_32.h
new file mode 100644
index 000000000000..fa7eac926582
--- /dev/null
+++ b/include/asm-sparc/mc146818rtc_32.h
@@ -0,0 +1,29 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_SPARC_MC146818RTC_H
5#define __ASM_SPARC_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
12#endif
13
14/*
15 * The yet supported machines all access the RTC index register via
16 * an ISA port access but the way to access the date register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#define RTC_IRQ 8
28
29#endif /* __ASM_SPARC_MC146818RTC_H */
diff --git a/include/asm-sparc/mc146818rtc_64.h b/include/asm-sparc/mc146818rtc_64.h
new file mode 100644
index 000000000000..e9c0fcc25c6f
--- /dev/null
+++ b/include/asm-sparc/mc146818rtc_64.h
@@ -0,0 +1,34 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_SPARC64_MC146818RTC_H
5#define __ASM_SPARC64_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#ifdef CONFIG_PCI
11extern unsigned long ds1287_regs;
12#else
13#define ds1287_regs (0UL)
14#endif
15#define RTC_PORT(x) (ds1287_regs + (x))
16#define RTC_ALWAYS_BCD 0
17#endif
18
19/*
20 * The yet supported machines all access the RTC index register via
21 * an ISA port access but the way to access the date register differs ...
22 */
23#define CMOS_READ(addr) ({ \
24outb_p((addr),RTC_PORT(0)); \
25inb_p(RTC_PORT(1)); \
26})
27#define CMOS_WRITE(val, addr) ({ \
28outb_p((addr),RTC_PORT(0)); \
29outb_p((val),RTC_PORT(1)); \
30})
31
32#define RTC_IRQ 8
33
34#endif /* __ASM_SPARC64_MC146818RTC_H */
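
Both RTC variants expose the same CMOS_READ()/CMOS_WRITE() statement-expression macros; only RTC_PORT() differs (fixed ISA ports 0x70/0x71 on sparc32 versus the probed ds1287_regs base on sparc64). A minimal usage sketch, assuming the standard MC146818 layout in which index 0 is the seconds register (that index is not defined in this diff):

	unsigned char sec;

	sec = CMOS_READ(0x00);		/* latch index 0, read the data port */
	CMOS_WRITE(sec, 0x00);		/* write back through the same pair */
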
diff --git a/include/asm-sparc/mmu.h b/include/asm-sparc/mmu.h
index ccd36d26615a..ee66bf6dcbd6 100644
--- a/include/asm-sparc/mmu.h
+++ b/include/asm-sparc/mmu.h
@@ -1,7 +1,8 @@
1#ifndef ___ASM_SPARC_MMU_H
2#define ___ASM_SPARC_MMU_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/mmu_64.h>
5#else
6#include <asm-sparc/mmu_32.h>
7#endif
8#endif
1#ifndef __MMU_H
2#define __MMU_H
3
4/* Default "unsigned long" context */
5typedef unsigned long mm_context_t;
6
7#endif
diff --git a/include/asm-sparc/mmu_32.h b/include/asm-sparc/mmu_32.h
new file mode 100644
index 000000000000..ccd36d26615a
--- /dev/null
+++ b/include/asm-sparc/mmu_32.h
@@ -0,0 +1,7 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4/* Default "unsigned long" context */
5typedef unsigned long mm_context_t;
6
7#endif
diff --git a/include/asm-sparc/mmu_64.h b/include/asm-sparc/mmu_64.h
new file mode 100644
index 000000000000..8abc58f0f9d7
--- /dev/null
+++ b/include/asm-sparc/mmu_64.h
@@ -0,0 +1,127 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4#include <linux/const.h>
5#include <asm/page.h>
6#include <asm/hypervisor.h>
7
8#define CTX_NR_BITS 13
9
10#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
11
12/* UltraSPARC-III+ and later have a feature whereby you can
13 * select what page size the various Data-TLB instances in the
14 * chip. In order to gracefully support this, we put the version
15 * field in a spot outside of the areas of the context register
16 * where this parameter is specified.
17 */
18#define CTX_VERSION_SHIFT 22
19#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
20
21#define CTX_PGSZ_8KB _AC(0x0,UL)
22#define CTX_PGSZ_64KB _AC(0x1,UL)
23#define CTX_PGSZ_512KB _AC(0x2,UL)
24#define CTX_PGSZ_4MB _AC(0x3,UL)
25#define CTX_PGSZ_BITS _AC(0x7,UL)
26#define CTX_PGSZ0_NUC_SHIFT 61
27#define CTX_PGSZ1_NUC_SHIFT 58
28#define CTX_PGSZ0_SHIFT 16
29#define CTX_PGSZ1_SHIFT 19
30#define CTX_PGSZ_MASK ((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \
31 (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT))
32
33#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
34#define CTX_PGSZ_BASE CTX_PGSZ_8KB
35#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
36#define CTX_PGSZ_BASE CTX_PGSZ_64KB
37#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
38#define CTX_PGSZ_BASE CTX_PGSZ_512KB
39#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
40#define CTX_PGSZ_BASE CTX_PGSZ_4MB
41#else
42#error No page size specified in kernel configuration
43#endif
44
45#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
46#define CTX_PGSZ_HUGE CTX_PGSZ_4MB
47#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
48#define CTX_PGSZ_HUGE CTX_PGSZ_512KB
49#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
50#define CTX_PGSZ_HUGE CTX_PGSZ_64KB
51#endif
52
53#define CTX_PGSZ_KERN CTX_PGSZ_4MB
54
55/* Thus, when running on UltraSPARC-III+ and later, we use the following
56 * PRIMARY_CONTEXT register values for the kernel context.
57 */
58#define CTX_CHEETAH_PLUS_NUC \
59 ((CTX_PGSZ_KERN << CTX_PGSZ0_NUC_SHIFT) | \
60 (CTX_PGSZ_BASE << CTX_PGSZ1_NUC_SHIFT))
61
62#define CTX_CHEETAH_PLUS_CTX0 \
63 ((CTX_PGSZ_KERN << CTX_PGSZ0_SHIFT) | \
64 (CTX_PGSZ_BASE << CTX_PGSZ1_SHIFT))
65
66/* If you want "the TLB context number" use CTX_NR_MASK. If you
67 * want "the bits I program into the context registers" use
68 * CTX_HW_MASK.
69 */
70#define CTX_NR_MASK TAG_CONTEXT_BITS
71#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
72
73#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
74#define CTX_VALID(__ctx) \
75 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
76#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
77#define CTX_NRBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_NR_MASK)
78
79#ifndef __ASSEMBLY__
80
81#define TSB_ENTRY_ALIGNMENT 16
82
83struct tsb {
84 unsigned long tag;
85 unsigned long pte;
86} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
87
88extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
89extern void tsb_flush(unsigned long ent, unsigned long tag);
90extern void tsb_init(struct tsb *tsb, unsigned long size);
91
92struct tsb_config {
93 struct tsb *tsb;
94 unsigned long tsb_rss_limit;
95 unsigned long tsb_nentries;
96 unsigned long tsb_reg_val;
97 unsigned long tsb_map_vaddr;
98 unsigned long tsb_map_pte;
99};
100
101#define MM_TSB_BASE 0
102
103#ifdef CONFIG_HUGETLB_PAGE
104#define MM_TSB_HUGE 1
105#define MM_NUM_TSBS 2
106#else
107#define MM_NUM_TSBS 1
108#endif
109
110typedef struct {
111 spinlock_t lock;
112 unsigned long sparc64_ctx_val;
113 unsigned long huge_pte_count;
114 struct tsb_config tsb_block[MM_NUM_TSBS];
115 struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
116} mm_context_t;
117
118#endif /* !__ASSEMBLY__ */
119
120#define TSB_CONFIG_TSB 0x00
121#define TSB_CONFIG_RSS_LIMIT 0x08
122#define TSB_CONFIG_NENTRIES 0x10
123#define TSB_CONFIG_REG_VAL 0x18
124#define TSB_CONFIG_MAP_VADDR 0x20
125#define TSB_CONFIG_MAP_PTE 0x28
126
127#endif /* __MMU_H */
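
The layout above splits the 13-bit context number from the page-size fields, which is why two masks exist: CTX_NRBITS() answers "which TLB context is this" while CTX_HWBITS() gives "what gets programmed into the context register". A hedged illustration (the surrounding function, its mm pointer and the printk are hypothetical; the macros are the ones defined above):

	unsigned long nr = CTX_NRBITS(mm->context);	/* context number only */
	unsigned long hw = CTX_HWBITS(mm->context);	/* number plus CTX_PGSZ_* fields */

	printk(KERN_DEBUG "ctx nr %lu, hw bits %016lx\n", nr, hw);
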
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index 671a997b9e69..e14efb9532ff 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -1,42 +1,8 @@
1#ifndef ___ASM_SPARC_MMU_CONTEXT_H
2#define ___ASM_SPARC_MMU_CONTEXT_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/mmu_context_64.h>
5#else
6#include <asm-sparc/mmu_context_32.h>
7#endif
8#endif
1#ifndef __SPARC_MMU_CONTEXT_H
2#define __SPARC_MMU_CONTEXT_H
3
4#include <asm/btfixup.h>
5
6#ifndef __ASSEMBLY__
7
8#include <asm-generic/mm_hooks.h>
9
10static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11{
12}
13
14/*
15 * Initialize a new mmu context. This is invoked when a new
16 * address space instance (unique or shared) is instantiated.
17 */
18#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
19
20/*
21 * Destroy a dead context. This occurs when mmput drops the
22 * mm_users count to zero, the mmaps have been released, and
23 * all the page tables have been flushed. Our job is to destroy
24 * any remaining processor-specific state.
25 */
26BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
27
28#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
29
30/* Switch the current MM context. */
31BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
32
33#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
34
35#define deactivate_mm(tsk,mm) do { } while (0)
36
37/* Activate a new MM instance for the current task. */
38#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
39
40#endif /* !(__ASSEMBLY__) */
41
42#endif /* !(__SPARC_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc/mmu_context_32.h b/include/asm-sparc/mmu_context_32.h
new file mode 100644
index 000000000000..671a997b9e69
--- /dev/null
+++ b/include/asm-sparc/mmu_context_32.h
@@ -0,0 +1,42 @@
1#ifndef __SPARC_MMU_CONTEXT_H
2#define __SPARC_MMU_CONTEXT_H
3
4#include <asm/btfixup.h>
5
6#ifndef __ASSEMBLY__
7
8#include <asm-generic/mm_hooks.h>
9
10static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11{
12}
13
14/*
15 * Initialize a new mmu context. This is invoked when a new
16 * address space instance (unique or shared) is instantiated.
17 */
18#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
19
20/*
21 * Destroy a dead context. This occurs when mmput drops the
22 * mm_users count to zero, the mmaps have been released, and
23 * all the page tables have been flushed. Our job is to destroy
24 * any remaining processor-specific state.
25 */
26BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
27
28#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
29
30/* Switch the current MM context. */
31BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
32
33#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
34
35#define deactivate_mm(tsk,mm) do { } while (0)
36
37/* Activate a new MM instance for the current task. */
38#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
39
40#endif /* !(__ASSEMBLY__) */
41
42#endif /* !(__SPARC_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc/mmu_context_64.h b/include/asm-sparc/mmu_context_64.h
new file mode 100644
index 000000000000..5693ab482606
--- /dev/null
+++ b/include/asm-sparc/mmu_context_64.h
@@ -0,0 +1,155 @@
1#ifndef __SPARC64_MMU_CONTEXT_H
2#define __SPARC64_MMU_CONTEXT_H
3
4/* Derived heavily from Linus's Alpha/AXP ASN code... */
5
6#ifndef __ASSEMBLY__
7
8#include <linux/spinlock.h>
9#include <asm/system.h>
10#include <asm/spitfire.h>
11#include <asm-generic/mm_hooks.h>
12
13static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14{
15}
16
17extern spinlock_t ctx_alloc_lock;
18extern unsigned long tlb_context_cache;
19extern unsigned long mmu_context_bmap[];
20
21extern void get_new_mmu_context(struct mm_struct *mm);
22#ifdef CONFIG_SMP
23extern void smp_new_mmu_context_version(void);
24#else
25#define smp_new_mmu_context_version() do { } while (0)
26#endif
27
28extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
29extern void destroy_context(struct mm_struct *mm);
30
31extern void __tsb_context_switch(unsigned long pgd_pa,
32 struct tsb_config *tsb_base,
33 struct tsb_config *tsb_huge,
34 unsigned long tsb_descr_pa);
35
36static inline void tsb_context_switch(struct mm_struct *mm)
37{
38 __tsb_context_switch(__pa(mm->pgd),
39 &mm->context.tsb_block[0],
40#ifdef CONFIG_HUGETLB_PAGE
41 (mm->context.tsb_block[1].tsb ?
42 &mm->context.tsb_block[1] :
43 NULL)
44#else
45 NULL
46#endif
47 , __pa(&mm->context.tsb_descr[0]));
48}
49
50extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
51#ifdef CONFIG_SMP
52extern void smp_tsb_sync(struct mm_struct *mm);
53#else
54#define smp_tsb_sync(__mm) do { } while (0)
55#endif
56
57/* Set MMU context in the actual hardware. */
58#define load_secondary_context(__mm) \
59 __asm__ __volatile__( \
60 "\n661: stxa %0, [%1] %2\n" \
61 " .section .sun4v_1insn_patch, \"ax\"\n" \
62 " .word 661b\n" \
63 " stxa %0, [%1] %3\n" \
64 " .previous\n" \
65 " flush %%g6\n" \
66 : /* No outputs */ \
67 : "r" (CTX_HWBITS((__mm)->context)), \
68 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
69
70extern void __flush_tlb_mm(unsigned long, unsigned long);
71
72/* Switch the current MM context. Interrupts are disabled. */
73static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
74{
75 unsigned long ctx_valid, flags;
76 int cpu;
77
78 if (unlikely(mm == &init_mm))
79 return;
80
81 spin_lock_irqsave(&mm->context.lock, flags);
82 ctx_valid = CTX_VALID(mm->context);
83 if (!ctx_valid)
84 get_new_mmu_context(mm);
85
86 /* We have to be extremely careful here or else we will miss
87 * a TSB grow if we switch back and forth between a kernel
88 * thread and an address space which has it's TSB size increased
89 * on another processor.
90 *
91 * It is possible to play some games in order to optimize the
92 * switch, but the safest thing to do is to unconditionally
93 * perform the secondary context load and the TSB context switch.
94 *
95 * For reference the bad case is, for address space "A":
96 *
97 * CPU 0 CPU 1
98 * run address space A
99 * set cpu0's bits in cpu_vm_mask
100 * switch to kernel thread, borrow
101 * address space A via entry_lazy_tlb
102 * run address space A
103 * set cpu1's bit in cpu_vm_mask
104 * flush_tlb_pending()
105 * reset cpu_vm_mask to just cpu1
106 * TSB grow
107 * run address space A
108 * context was valid, so skip
109 * TSB context switch
110 *
111 * At that point cpu0 continues to use a stale TSB, the one from
112 * before the TSB grow performed on cpu1. cpu1 did not cross-call
113 * cpu0 to update it's TSB because at that point the cpu_vm_mask
114 * only had cpu1 set in it.
115 */
116 load_secondary_context(mm);
117 tsb_context_switch(mm);
118
119 /* Any time a processor runs a context on an address space
120 * for the first time, we must flush that context out of the
121 * local TLB.
122 */
123 cpu = smp_processor_id();
124 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
125 cpu_set(cpu, mm->cpu_vm_mask);
126 __flush_tlb_mm(CTX_HWBITS(mm->context),
127 SECONDARY_CONTEXT);
128 }
129 spin_unlock_irqrestore(&mm->context.lock, flags);
130}
131
132#define deactivate_mm(tsk,mm) do { } while (0)
133
134/* Activate a new MM instance for the current task. */
135static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
136{
137 unsigned long flags;
138 int cpu;
139
140 spin_lock_irqsave(&mm->context.lock, flags);
141 if (!CTX_VALID(mm->context))
142 get_new_mmu_context(mm);
143 cpu = smp_processor_id();
144 if (!cpu_isset(cpu, mm->cpu_vm_mask))
145 cpu_set(cpu, mm->cpu_vm_mask);
146
147 load_secondary_context(mm);
148 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
149 tsb_context_switch(mm);
150 spin_unlock_irqrestore(&mm->context.lock, flags);
151}
152
153#endif /* !(__ASSEMBLY__) */
154
155#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc/module.h b/include/asm-sparc/module.h
index cbd9e67b0c0b..516138fe681a 100644
--- a/include/asm-sparc/module.h
+++ b/include/asm-sparc/module.h
@@ -1,7 +1,8 @@
1#ifndef ___ASM_SPARC_MODULE_H
2#define ___ASM_SPARC_MODULE_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/module_64.h>
5#else
6#include <asm-sparc/module_32.h>
7#endif
8#endif
1#ifndef _ASM_SPARC_MODULE_H
2#define _ASM_SPARC_MODULE_H
3struct mod_arch_specific { };
4#define Elf_Shdr Elf32_Shdr
5#define Elf_Sym Elf32_Sym
6#define Elf_Ehdr Elf32_Ehdr
7#endif /* _ASM_SPARC_MODULE_H */
diff --git a/include/asm-sparc/module_32.h b/include/asm-sparc/module_32.h
new file mode 100644
index 000000000000..cbd9e67b0c0b
--- /dev/null
+++ b/include/asm-sparc/module_32.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_SPARC_MODULE_H
2#define _ASM_SPARC_MODULE_H
3struct mod_arch_specific { };
4#define Elf_Shdr Elf32_Shdr
5#define Elf_Sym Elf32_Sym
6#define Elf_Ehdr Elf32_Ehdr
7#endif /* _ASM_SPARC_MODULE_H */
diff --git a/include/asm-sparc/module_64.h b/include/asm-sparc/module_64.h
new file mode 100644
index 000000000000..3d77ba465783
--- /dev/null
+++ b/include/asm-sparc/module_64.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_SPARC64_MODULE_H
2#define _ASM_SPARC64_MODULE_H
3struct mod_arch_specific { };
4#define Elf_Shdr Elf64_Shdr
5#define Elf_Sym Elf64_Sym
6#define Elf_Ehdr Elf64_Ehdr
7#endif /* _ASM_SPARC64_MODULE_H */
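
module_32.h and module_64.h differ only in which ELF class the generic Elf_* names resolve to, so arch-independent module-loader code can be written once against Elf_Ehdr, Elf_Shdr and Elf_Sym. A small sketch of that pattern (the helper below is illustrative, not taken from the kernel's loader):

	/* Builds as ELF32 on sparc32 and as ELF64 on sparc64 via the defines above. */
	static unsigned int count_sections(const Elf_Ehdr *hdr)
	{
		return hdr->e_shnum;	/* field name is identical in both ELF classes */
	}
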
diff --git a/include/asm-sparc/mostek.h b/include/asm-sparc/mostek.h
index c35c916162fc..5b9f7fec7ee7 100644
--- a/include/asm-sparc/mostek.h
+++ b/include/asm-sparc/mostek.h
@@ -1,171 +1,8 @@
1#ifndef ___ASM_SPARC_MOSTEK_H
2#define ___ASM_SPARC_MOSTEK_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/mostek_64.h>
5#else
6#include <asm-sparc/mostek_32.h>
7#endif
8#endif
1/*
2 * mostek.h: Describes the various Mostek time of day clock registers.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
6 * Added intersil code 05/25/98 Chris Davis (cdavis@cois.on.ca)
7 */
8
9#ifndef _SPARC_MOSTEK_H
10#define _SPARC_MOSTEK_H
11
12#include <asm/idprom.h>
13#include <asm/io.h>
14
15/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
16 *
17 * Data
18 * Address Function
19 * Bit 7 Bit 6 Bit 5 Bit 4Bit 3 Bit 2 Bit 1 Bit 0
20 * 7ff - - - - - - - - Year 00-99
21 * 7fe 0 0 0 - - - - - Month 01-12
22 * 7fd 0 0 - - - - - - Date 01-31
23 * 7fc 0 FT 0 0 0 - - - Day 01-07
24 * 7fb KS 0 - - - - - - Hours 00-23
25 * 7fa 0 - - - - - - - Minutes 00-59
26 * 7f9 ST - - - - - - - Seconds 00-59
27 * 7f8 W R S - - - - - Control
28 *
29 * * ST is STOP BIT
30 * * W is WRITE BIT
31 * * R is READ BIT
32 * * S is SIGN BIT
33 * * FT is FREQ TEST BIT
34 * * KS is KICK START BIT
35 */
36
37/* The Mostek 48t02 real time clock and NVRAM chip. The registers
38 * other than the control register are in binary coded decimal. Some
39 * control bits also live outside the control register.
40 */
41#define mostek_read(_addr) readb(_addr)
42#define mostek_write(_addr,_val) writeb(_val, _addr)
43#define MOSTEK_EEPROM 0x0000UL
44#define MOSTEK_IDPROM 0x07d8UL
45#define MOSTEK_CREG 0x07f8UL
46#define MOSTEK_SEC 0x07f9UL
47#define MOSTEK_MIN 0x07faUL
48#define MOSTEK_HOUR 0x07fbUL
49#define MOSTEK_DOW 0x07fcUL
50#define MOSTEK_DOM 0x07fdUL
51#define MOSTEK_MONTH 0x07feUL
52#define MOSTEK_YEAR 0x07ffUL
53
54struct mostek48t02 {
55 volatile char eeprom[2008]; /* This is the eeprom, don't touch! */
56 struct idprom idprom; /* The idprom lives here. */
57 volatile unsigned char creg; /* Control register */
58 volatile unsigned char sec; /* Seconds (0-59) */
59 volatile unsigned char min; /* Minutes (0-59) */
60 volatile unsigned char hour; /* Hour (0-23) */
61 volatile unsigned char dow; /* Day of the week (1-7) */
62 volatile unsigned char dom; /* Day of the month (1-31) */
63 volatile unsigned char month; /* Month of year (1-12) */
64 volatile unsigned char year; /* Year (0-99) */
65};
66
67extern spinlock_t mostek_lock;
68extern void __iomem *mstk48t02_regs;
69
70/* Control register values. */
71#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
72#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
73#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
74
75/* Control bits that live in the other registers. */
76#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
77#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
78#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
79
80#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
81#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
82
83/* Masks that define how much space each value takes up. */
84#define MSTK_SEC_MASK 0x7f
85#define MSTK_MIN_MASK 0x7f
86#define MSTK_HOUR_MASK 0x3f
87#define MSTK_DOW_MASK 0x07
88#define MSTK_DOM_MASK 0x3f
89#define MSTK_MONTH_MASK 0x1f
90#define MSTK_YEAR_MASK 0xffU
91
92/* Binary coded decimal conversion macros. */
93#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
94#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
95
96/* Generic register set and get macros for internal use. */
97#define MSTK_GET(regs,var,mask) (MSTK_REGVAL_TO_DECIMAL(((struct mostek48t02 *)regs)->var & MSTK_ ## mask ## _MASK))
98#define MSTK_SET(regs,var,value,mask) do { ((struct mostek48t02 *)regs)->var &= ~(MSTK_ ## mask ## _MASK); ((struct mostek48t02 *)regs)->var |= MSTK_DECIMAL_TO_REGVAL(value) & (MSTK_ ## mask ## _MASK); } while (0)
99
100/* Macros to make register access easier on our fingers. These give you
101 * the decimal value of the register requested if applicable. You pass
102 * the a pointer to a 'struct mostek48t02'.
103 */
104#define MSTK_REG_CREG(regs) (((struct mostek48t02 *)regs)->creg)
105#define MSTK_REG_SEC(regs) MSTK_GET(regs,sec,SEC)
106#define MSTK_REG_MIN(regs) MSTK_GET(regs,min,MIN)
107#define MSTK_REG_HOUR(regs) MSTK_GET(regs,hour,HOUR)
108#define MSTK_REG_DOW(regs) MSTK_GET(regs,dow,DOW)
109#define MSTK_REG_DOM(regs) MSTK_GET(regs,dom,DOM)
110#define MSTK_REG_MONTH(regs) MSTK_GET(regs,month,MONTH)
111#define MSTK_REG_YEAR(regs) MSTK_GET(regs,year,YEAR)
112
113#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,sec,value,SEC)
114#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,min,value,MIN)
115#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,hour,value,HOUR)
116#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,dow,value,DOW)
117#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,dom,value,DOM)
118#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,month,value,MONTH)
119#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,year,value,YEAR)
120
121
122/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has the
123 * same (basically) layout of the 48t02 chip except for the extra
124 * NVRAM on board (8 KB against the 48t02's 2 KB).
125 */
126struct mostek48t08 {
127 char offset[6*1024]; /* Magic things may be here, who knows? */
128 struct mostek48t02 regs; /* Here is what we are interested in. */
129};
130
131#ifdef CONFIG_SUN4
132enum sparc_clock_type { MSTK48T02, MSTK48T08, \
133INTERSIL, MSTK_INVALID };
134#else
135enum sparc_clock_type { MSTK48T02, MSTK48T08, \
136MSTK_INVALID };
137#endif
138
139#ifdef CONFIG_SUN4
140/* intersil on a sun 4/260 code data from harris doc */
141struct intersil_dt {
142 volatile unsigned char int_csec;
143 volatile unsigned char int_hour;
144 volatile unsigned char int_min;
145 volatile unsigned char int_sec;
146 volatile unsigned char int_month;
147 volatile unsigned char int_day;
148 volatile unsigned char int_year;
149 volatile unsigned char int_dow;
150};
151
152struct intersil {
153 struct intersil_dt clk;
154 struct intersil_dt cmp;
155 volatile unsigned char int_intr_reg;
156 volatile unsigned char int_cmd_reg;
157};
158
159#define INTERSIL_STOP 0x0
160#define INTERSIL_START 0x8
161#define INTERSIL_INTR_DISABLE 0x0
162#define INTERSIL_INTR_ENABLE 0x10
163#define INTERSIL_32K 0x0
164#define INTERSIL_NORMAL 0x0
165#define INTERSIL_24H 0x4
166#define INTERSIL_INT_100HZ 0x2
167
168/* end of intersil info */
169#endif
170
171#endif /* !(_SPARC_MOSTEK_H) */
diff --git a/include/asm-sparc/mostek_32.h b/include/asm-sparc/mostek_32.h
new file mode 100644
index 000000000000..a99590c4c507
--- /dev/null
+++ b/include/asm-sparc/mostek_32.h
@@ -0,0 +1,171 @@
1/*
2 * mostek.h: Describes the various Mostek time of day clock registers.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
6 * Added intersil code 05/25/98 Chris Davis (cdavis@cois.on.ca)
7 */
8
9#ifndef _SPARC_MOSTEK_H
10#define _SPARC_MOSTEK_H
11
12#include <asm/idprom.h>
13#include <asm/io.h>
14
15/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
16 *
17 * Data
18 * Address Function
19 * Bit 7 Bit 6 Bit 5 Bit 4Bit 3 Bit 2 Bit 1 Bit 0
20 * 7ff - - - - - - - - Year 00-99
21 * 7fe 0 0 0 - - - - - Month 01-12
22 * 7fd 0 0 - - - - - - Date 01-31
23 * 7fc 0 FT 0 0 0 - - - Day 01-07
24 * 7fb KS 0 - - - - - - Hours 00-23
25 * 7fa 0 - - - - - - - Minutes 00-59
26 * 7f9 ST - - - - - - - Seconds 00-59
27 * 7f8 W R S - - - - - Control
28 *
29 * * ST is STOP BIT
30 * * W is WRITE BIT
31 * * R is READ BIT
32 * * S is SIGN BIT
33 * * FT is FREQ TEST BIT
34 * * KS is KICK START BIT
35 */
36
37/* The Mostek 48t02 real time clock and NVRAM chip. The registers
38 * other than the control register are in binary coded decimal. Some
39 * control bits also live outside the control register.
40 */
41#define mostek_read(_addr) readb(_addr)
42#define mostek_write(_addr,_val) writeb(_val, _addr)
43#define MOSTEK_EEPROM 0x0000UL
44#define MOSTEK_IDPROM 0x07d8UL
45#define MOSTEK_CREG 0x07f8UL
46#define MOSTEK_SEC 0x07f9UL
47#define MOSTEK_MIN 0x07faUL
48#define MOSTEK_HOUR 0x07fbUL
49#define MOSTEK_DOW 0x07fcUL
50#define MOSTEK_DOM 0x07fdUL
51#define MOSTEK_MONTH 0x07feUL
52#define MOSTEK_YEAR 0x07ffUL
53
54struct mostek48t02 {
55 volatile char eeprom[2008]; /* This is the eeprom, don't touch! */
56 struct idprom idprom; /* The idprom lives here. */
57 volatile unsigned char creg; /* Control register */
58 volatile unsigned char sec; /* Seconds (0-59) */
59 volatile unsigned char min; /* Minutes (0-59) */
60 volatile unsigned char hour; /* Hour (0-23) */
61 volatile unsigned char dow; /* Day of the week (1-7) */
62 volatile unsigned char dom; /* Day of the month (1-31) */
63 volatile unsigned char month; /* Month of year (1-12) */
64 volatile unsigned char year; /* Year (0-99) */
65};
66
67extern spinlock_t mostek_lock;
68extern void __iomem *mstk48t02_regs;
69
70/* Control register values. */
71#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
72#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
73#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
74
75/* Control bits that live in the other registers. */
76#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
77#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
78#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
79
80#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
81#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
82
83/* Masks that define how much space each value takes up. */
84#define MSTK_SEC_MASK 0x7f
85#define MSTK_MIN_MASK 0x7f
86#define MSTK_HOUR_MASK 0x3f
87#define MSTK_DOW_MASK 0x07
88#define MSTK_DOM_MASK 0x3f
89#define MSTK_MONTH_MASK 0x1f
90#define MSTK_YEAR_MASK 0xffU
91
92/* Binary coded decimal conversion macros. */
93#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
94#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
95
96/* Generic register set and get macros for internal use. */
97#define MSTK_GET(regs,var,mask) (MSTK_REGVAL_TO_DECIMAL(((struct mostek48t02 *)regs)->var & MSTK_ ## mask ## _MASK))
98#define MSTK_SET(regs,var,value,mask) do { ((struct mostek48t02 *)regs)->var &= ~(MSTK_ ## mask ## _MASK); ((struct mostek48t02 *)regs)->var |= MSTK_DECIMAL_TO_REGVAL(value) & (MSTK_ ## mask ## _MASK); } while (0)
99
100/* Macros to make register access easier on our fingers. These give you
101 * the decimal value of the register requested if applicable. You pass
102 * the a pointer to a 'struct mostek48t02'.
103 */
104#define MSTK_REG_CREG(regs) (((struct mostek48t02 *)regs)->creg)
105#define MSTK_REG_SEC(regs) MSTK_GET(regs,sec,SEC)
106#define MSTK_REG_MIN(regs) MSTK_GET(regs,min,MIN)
107#define MSTK_REG_HOUR(regs) MSTK_GET(regs,hour,HOUR)
108#define MSTK_REG_DOW(regs) MSTK_GET(regs,dow,DOW)
109#define MSTK_REG_DOM(regs) MSTK_GET(regs,dom,DOM)
110#define MSTK_REG_MONTH(regs) MSTK_GET(regs,month,MONTH)
111#define MSTK_REG_YEAR(regs) MSTK_GET(regs,year,YEAR)
112
113#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,sec,value,SEC)
114#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,min,value,MIN)
115#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,hour,value,HOUR)
116#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,dow,value,DOW)
117#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,dom,value,DOM)
118#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,month,value,MONTH)
119#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,year,value,YEAR)
120
121
122/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has the
123 * same (basically) layout of the 48t02 chip except for the extra
124 * NVRAM on board (8 KB against the 48t02's 2 KB).
125 */
126struct mostek48t08 {
127 char offset[6*1024]; /* Magic things may be here, who knows? */
128 struct mostek48t02 regs; /* Here is what we are interested in. */
129};
130
131#ifdef CONFIG_SUN4
132enum sparc_clock_type { MSTK48T02, MSTK48T08, \
133INTERSIL, MSTK_INVALID };
134#else
135enum sparc_clock_type { MSTK48T02, MSTK48T08, \
136MSTK_INVALID };
137#endif
138
139#ifdef CONFIG_SUN4
140/* intersil on a sun 4/260 code data from harris doc */
141struct intersil_dt {
142 volatile unsigned char int_csec;
143 volatile unsigned char int_hour;
144 volatile unsigned char int_min;
145 volatile unsigned char int_sec;
146 volatile unsigned char int_month;
147 volatile unsigned char int_day;
148 volatile unsigned char int_year;
149 volatile unsigned char int_dow;
150};
151
152struct intersil {
153 struct intersil_dt clk;
154 struct intersil_dt cmp;
155 volatile unsigned char int_intr_reg;
156 volatile unsigned char int_cmd_reg;
157};
158
159#define INTERSIL_STOP 0x0
160#define INTERSIL_START 0x8
161#define INTERSIL_INTR_DISABLE 0x0
162#define INTERSIL_INTR_ENABLE 0x10
163#define INTERSIL_32K 0x0
164#define INTERSIL_NORMAL 0x0
165#define INTERSIL_24H 0x4
166#define INTERSIL_INT_100HZ 0x2
167
168/* end of intersil info */
169#endif
170
171#endif /* !(_SPARC_MOSTEK_H) */
diff --git a/include/asm-sparc/mostek_64.h b/include/asm-sparc/mostek_64.h
new file mode 100644
index 000000000000..c5652de2ace2
--- /dev/null
+++ b/include/asm-sparc/mostek_64.h
@@ -0,0 +1,143 @@
1/* mostek.h: Describes the various Mostek time of day clock registers.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
5 */
6
7#ifndef _SPARC64_MOSTEK_H
8#define _SPARC64_MOSTEK_H
9
10#include <asm/idprom.h>
11
12/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
13 *
14 * Data
15 * Address Function
16 * Bit 7 Bit 6 Bit 5 Bit 4Bit 3 Bit 2 Bit 1 Bit 0
17 * 7ff - - - - - - - - Year 00-99
18 * 7fe 0 0 0 - - - - - Month 01-12
19 * 7fd 0 0 - - - - - - Date 01-31
20 * 7fc 0 FT 0 0 0 - - - Day 01-07
21 * 7fb KS 0 - - - - - - Hours 00-23
22 * 7fa 0 - - - - - - - Minutes 00-59
23 * 7f9 ST - - - - - - - Seconds 00-59
24 * 7f8 W R S - - - - - Control
25 *
26 * * ST is STOP BIT
27 * * W is WRITE BIT
28 * * R is READ BIT
29 * * S is SIGN BIT
30 * * FT is FREQ TEST BIT
31 * * KS is KICK START BIT
32 */
33
34/* The Mostek 48t02 real time clock and NVRAM chip. The registers
35 * other than the control register are in binary coded decimal. Some
36 * control bits also live outside the control register.
37 *
38 * We now deal with physical addresses for I/O to the chip. -DaveM
39 */
40static inline u8 mostek_read(void __iomem *addr)
41{
42 u8 ret;
43
44 __asm__ __volatile__("lduba [%1] %2, %0"
45 : "=r" (ret)
46 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
47 return ret;
48}
49
50static inline void mostek_write(void __iomem *addr, u8 val)
51{
52 __asm__ __volatile__("stba %0, [%1] %2"
53 : /* no outputs */
54 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
55}
56
57#define MOSTEK_EEPROM 0x0000UL
58#define MOSTEK_IDPROM 0x07d8UL
59#define MOSTEK_CREG 0x07f8UL
60#define MOSTEK_SEC 0x07f9UL
61#define MOSTEK_MIN 0x07faUL
62#define MOSTEK_HOUR 0x07fbUL
63#define MOSTEK_DOW 0x07fcUL
64#define MOSTEK_DOM 0x07fdUL
65#define MOSTEK_MONTH 0x07feUL
66#define MOSTEK_YEAR 0x07ffUL
67
68extern spinlock_t mostek_lock;
69extern void __iomem *mstk48t02_regs;
70
71/* Control register values. */
72#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
73#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
74#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
75
76/* Control bits that live in the other registers. */
77#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
78#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
79#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
80
81#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
82#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
83
84/* Masks that define how much space each value takes up. */
85#define MSTK_SEC_MASK 0x7f
86#define MSTK_MIN_MASK 0x7f
87#define MSTK_HOUR_MASK 0x3f
88#define MSTK_DOW_MASK 0x07
89#define MSTK_DOM_MASK 0x3f
90#define MSTK_MONTH_MASK 0x1f
91#define MSTK_YEAR_MASK 0xffU
92
93/* Binary coded decimal conversion macros. */
94#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
95#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
96
97/* Generic register set and get macros for internal use. */
98#define MSTK_GET(regs,name) \
99 (MSTK_REGVAL_TO_DECIMAL(mostek_read(regs + MOSTEK_ ## name) & MSTK_ ## name ## _MASK))
100#define MSTK_SET(regs,name,value) \
101do { u8 __val = mostek_read(regs + MOSTEK_ ## name); \
102 __val &= ~(MSTK_ ## name ## _MASK); \
103 __val |= (MSTK_DECIMAL_TO_REGVAL(value) & \
104 (MSTK_ ## name ## _MASK)); \
105 mostek_write(regs + MOSTEK_ ## name, __val); \
106} while(0)
107
108/* Macros to make register access easier on our fingers. These give you
109 * the decimal value of the register requested if applicable. You pass
110 * the a pointer to a 'struct mostek48t02'.
111 */
112#define MSTK_REG_CREG(regs) (mostek_read((regs) + MOSTEK_CREG))
113#define MSTK_REG_SEC(regs) MSTK_GET(regs,SEC)
114#define MSTK_REG_MIN(regs) MSTK_GET(regs,MIN)
115#define MSTK_REG_HOUR(regs) MSTK_GET(regs,HOUR)
116#define MSTK_REG_DOW(regs) MSTK_GET(regs,DOW)
117#define MSTK_REG_DOM(regs) MSTK_GET(regs,DOM)
118#define MSTK_REG_MONTH(regs) MSTK_GET(regs,MONTH)
119#define MSTK_REG_YEAR(regs) MSTK_GET(regs,YEAR)
120
121#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,SEC,value)
122#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,MIN,value)
123#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,HOUR,value)
124#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,DOW,value)
125#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,DOM,value)
126#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,MONTH,value)
127#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,YEAR,value)
128
129
130/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has the
131 * same (basically) layout of the 48t02 chip except for the extra
132 * NVRAM on board (8 KB against the 48t02's 2 KB).
133 */
134#define MOSTEK_48T08_OFFSET 0x0000UL /* Lower NVRAM portions */
135#define MOSTEK_48T08_48T02 0x1800UL /* Offset to 48T02 chip */
136
137/* SUN5 systems usually have 48t59 model clock chipsets. But we keep the older
138 * clock chip definitions around just in case.
139 */
140#define MOSTEK_48T59_OFFSET 0x0000UL /* Lower NVRAM portions */
141#define MOSTEK_48T59_48T02 0x1800UL /* Offset to 48T02 chip */
142
143#endif /* !(_SPARC64_MOSTEK_H) */
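
MSTK_REGVAL_TO_DECIMAL() and MSTK_DECIMAL_TO_REGVAL() convert between the chip's BCD encoding and plain integers, e.g. register value 0x59 becomes 59 and 30 becomes 0x30. A usage sketch under mostek_lock, assuming mstk48t02_regs has already been mapped (both symbols are declared above; a real update would also set MSTK_CREG_WRITE in the control register first):

	unsigned long flags;
	u8 sec;

	spin_lock_irqsave(&mostek_lock, flags);
	sec = MSTK_REG_SEC(mstk48t02_regs);	/* BCD 0x59 reads back as 59 */
	MSTK_SET_REG_SEC(mstk48t02_regs, 30);	/* 30 is stored as BCD 0x30 */
	spin_unlock_irqrestore(&mostek_lock, flags);
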
diff --git a/include/asm-sparc/namei.h b/include/asm-sparc/namei.h
index 0646102fb020..eff944b8e321 100644
--- a/include/asm-sparc/namei.h
+++ b/include/asm-sparc/namei.h
@@ -1,13 +1,8 @@
1#ifndef ___ASM_SPARC_NAMEI_H
2#define ___ASM_SPARC_NAMEI_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/namei_64.h>
5#else
6#include <asm-sparc/namei_32.h>
7#endif
8#endif
1/*
2 * linux/include/asm-sparc/namei.h
3 *
4 * Routines to handle famous /usr/gnemul/s*.
5 * Included from linux/fs/namei.c
6 */
7
8#ifndef __SPARC_NAMEI_H
9#define __SPARC_NAMEI_H
10
11#define __emul_prefix() NULL
12
13#endif /* __SPARC_NAMEI_H */
diff --git a/include/asm-sparc/namei_32.h b/include/asm-sparc/namei_32.h
new file mode 100644
index 000000000000..0646102fb020
--- /dev/null
+++ b/include/asm-sparc/namei_32.h
@@ -0,0 +1,13 @@
1/*
2 * linux/include/asm-sparc/namei.h
3 *
4 * Routines to handle famous /usr/gnemul/s*.
5 * Included from linux/fs/namei.c
6 */
7
8#ifndef __SPARC_NAMEI_H
9#define __SPARC_NAMEI_H
10
11#define __emul_prefix() NULL
12
13#endif /* __SPARC_NAMEI_H */
diff --git a/include/asm-sparc/namei_64.h b/include/asm-sparc/namei_64.h
new file mode 100644
index 000000000000..cbc1b4c06891
--- /dev/null
+++ b/include/asm-sparc/namei_64.h
@@ -0,0 +1,13 @@
1/*
2 * linux/include/asm-sparc64/namei.h
3 *
4 * Routines to handle famous /usr/gnemul/s*.
5 * Included from linux/fs/namei.c
6 */
7
8#ifndef __SPARC64_NAMEI_H
9#define __SPARC64_NAMEI_H
10
11#define __emul_prefix() NULL
12
13#endif /* __SPARC64_NAMEI_H */
diff --git a/include/asm-sparc/of_platform.h b/include/asm-sparc/of_platform.h
index 38334351c36b..851eb84d737e 100644
--- a/include/asm-sparc/of_platform.h
+++ b/include/asm-sparc/of_platform.h
@@ -1,24 +1,8 @@
1#ifndef ___ASM_SPARC_OF_PLATFORM_H
2#define ___ASM_SPARC_OF_PLATFORM_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/of_platform_64.h>
5#else
6#include <asm-sparc/of_platform_32.h>
7#endif
8#endif
1#ifndef _ASM_SPARC_OF_PLATFORM_H
2#define _ASM_SPARC_OF_PLATFORM_H
3/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 * Modified for Sparc by merging parts of asm-sparc/of_device.h
7 * by Stephen Rothwell
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16/* This is just here during the transition */
17#include <linux/of_platform.h>
18
19extern struct bus_type ebus_bus_type;
20extern struct bus_type sbus_bus_type;
21
22#define of_bus_type of_platform_bus_type /* for compatibility */
23
24#endif /* _ASM_SPARC_OF_PLATFORM_H */
diff --git a/include/asm-sparc/of_platform_32.h b/include/asm-sparc/of_platform_32.h
new file mode 100644
index 000000000000..38334351c36b
--- /dev/null
+++ b/include/asm-sparc/of_platform_32.h
@@ -0,0 +1,24 @@
1#ifndef _ASM_SPARC_OF_PLATFORM_H
2#define _ASM_SPARC_OF_PLATFORM_H
3/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 * Modified for Sparc by merging parts of asm-sparc/of_device.h
7 * by Stephen Rothwell
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16/* This is just here during the transition */
17#include <linux/of_platform.h>
18
19extern struct bus_type ebus_bus_type;
20extern struct bus_type sbus_bus_type;
21
22#define of_bus_type of_platform_bus_type /* for compatibility */
23
24#endif /* _ASM_SPARC_OF_PLATFORM_H */
diff --git a/include/asm-sparc/of_platform_64.h b/include/asm-sparc/of_platform_64.h
new file mode 100644
index 000000000000..78aa032b674c
--- /dev/null
+++ b/include/asm-sparc/of_platform_64.h
@@ -0,0 +1,25 @@
1#ifndef _ASM_SPARC64_OF_PLATFORM_H
2#define _ASM_SPARC64_OF_PLATFORM_H
3/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 * Modified for Sparc by merging parts of asm-sparc/of_device.h
7 * by Stephen Rothwell
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16/* This is just here during the transition */
17#include <linux/of_platform.h>
18
19extern struct bus_type isa_bus_type;
20extern struct bus_type ebus_bus_type;
21extern struct bus_type sbus_bus_type;
22
23#define of_bus_type of_platform_bus_type /* for compatibility */
24
25#endif /* _ASM_SPARC64_OF_PLATFORM_H */
diff --git a/include/asm-sparc/openprom.h b/include/asm-sparc/openprom.h
index e812cf3b0de7..8c349f061994 100644
--- a/include/asm-sparc/openprom.h
+++ b/include/asm-sparc/openprom.h
@@ -1,255 +1,8 @@
1#ifndef ___ASM_SPARC_OPENPROM_H
2#define ___ASM_SPARC_OPENPROM_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/openprom_64.h>
5#else
6#include <asm-sparc/openprom_32.h>
7#endif
8#endif
1#ifndef __SPARC_OPENPROM_H
2#define __SPARC_OPENPROM_H
3
4/* openprom.h: Prom structures and defines for access to the OPENBOOT
5 * prom routines and data areas.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10/* Empirical constants... */
11#define LINUX_OPPROM_MAGIC 0x10010407
12
13#ifndef __ASSEMBLY__
14/* V0 prom device operations. */
15struct linux_dev_v0_funcs {
16 int (*v0_devopen)(char *device_str);
17 int (*v0_devclose)(int dev_desc);
18 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
19 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
20 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
21 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
22 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
23 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
24 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
25};
26
27/* V2 and later prom device operations. */
28struct linux_dev_v2_funcs {
29 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
30 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
31 void (*v2_dumb_mem_free)(char *va, unsigned sz);
32
33 /* To map devices into virtual I/O space. */
34 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
35 void (*v2_dumb_munmap)(char *virta, unsigned size);
36
37 int (*v2_dev_open)(char *devpath);
38 void (*v2_dev_close)(int d);
39 int (*v2_dev_read)(int d, char *buf, int nbytes);
40 int (*v2_dev_write)(int d, char *buf, int nbytes);
41 int (*v2_dev_seek)(int d, int hi, int lo);
42
43 /* Never issued (multistage load support) */
44 void (*v2_wheee2)(void);
45 void (*v2_wheee3)(void);
46};
47
48struct linux_mlist_v0 {
49 struct linux_mlist_v0 *theres_more;
50 char *start_adr;
51 unsigned num_bytes;
52};
53
54struct linux_mem_v0 {
55 struct linux_mlist_v0 **v0_totphys;
56 struct linux_mlist_v0 **v0_prommap;
57 struct linux_mlist_v0 **v0_available; /* What we can use */
58};
59
60/* Arguments sent to the kernel from the boot prompt. */
61struct linux_arguments_v0 {
62 char *argv[8];
63 char args[100];
64 char boot_dev[2];
65 int boot_dev_ctrl;
66 int boot_dev_unit;
67 int dev_partition;
68 char *kernel_file_name;
69 void *aieee1; /* XXX */
70};
71
72/* V2 and up boot things. */
73struct linux_bootargs_v2 {
74 char **bootpath;
75 char **bootargs;
76 int *fd_stdin;
77 int *fd_stdout;
78};
79
80/* The top level PROM vector. */
81struct linux_romvec {
82 /* Version numbers. */
83 unsigned int pv_magic_cookie;
84 unsigned int pv_romvers;
85 unsigned int pv_plugin_revision;
86 unsigned int pv_printrev;
87
88 /* Version 0 memory descriptors. */
89 struct linux_mem_v0 pv_v0mem;
90
91 /* Node operations. */
92 struct linux_nodeops *pv_nodeops;
93
94 char **pv_bootstr;
95 struct linux_dev_v0_funcs pv_v0devops;
96
97 char *pv_stdin;
98 char *pv_stdout;
99#define PROMDEV_KBD 0 /* input from keyboard */
100#define PROMDEV_SCREEN 0 /* output to screen */
101#define PROMDEV_TTYA 1 /* in/out to ttya */
102#define PROMDEV_TTYB 2 /* in/out to ttyb */
103
104 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
105 int (*pv_getchar)(void);
106 void (*pv_putchar)(int ch);
107
108 /* Non-blocking variants. */
109 int (*pv_nbgetchar)(void);
110 int (*pv_nbputchar)(int ch);
111
112 void (*pv_putstr)(char *str, int len);
113
114 /* Miscellany. */
115 void (*pv_reboot)(char *bootstr);
116 void (*pv_printf)(__const__ char *fmt, ...);
117 void (*pv_abort)(void);
118 __volatile__ int *pv_ticks;
119 void (*pv_halt)(void);
120 void (**pv_synchook)(void);
121
122 /* Evaluate a forth string, not different proto for V0 and V2->up. */
123 union {
124 void (*v0_eval)(int len, char *str);
125 void (*v2_eval)(char *str);
126 } pv_fortheval;
127
128 struct linux_arguments_v0 **pv_v0bootargs;
129
130 /* Get ether address. */
131 unsigned int (*pv_enaddr)(int d, char *enaddr);
132
133 struct linux_bootargs_v2 pv_v2bootargs;
134 struct linux_dev_v2_funcs pv_v2devops;
135
136 int filler[15];
137
138 /* This one is sun4c/sun4 only. */
139 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
140
141 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
142 * No joke. Calling these when there is only one cpu probably
143 * crashes the machine, have to test this. :-)
144 */
145
146 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
147 * 'thiscontext' executing at address 'prog_counter'
148 */
149 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
150 int thiscontext, char *prog_counter);
151
152 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
153 * until a resume cpu call is made.
154 */
155 int (*v3_cpustop)(unsigned int whichcpu);
156
157 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
158 * resume cpu call is made.
159 */
160 int (*v3_cpuidle)(unsigned int whichcpu);
161
162 /* v3_cpuresume() will resume processor 'whichcpu' executing
163 * starting with whatever 'pc' and 'npc' were left at the
164 * last 'idle' or 'stop' call.
165 */
166 int (*v3_cpuresume)(unsigned int whichcpu);
167};
168
169/* Routines for traversing the prom device tree. */
170struct linux_nodeops {
171 int (*no_nextnode)(int node);
172 int (*no_child)(int node);
173 int (*no_proplen)(int node, char *name);
174 int (*no_getprop)(int node, char *name, char *val);
175 int (*no_setprop)(int node, char *name, char *val, int len);
176 char * (*no_nextprop)(int node, char *name);
177};
178
179/* More fun PROM structures for device probing. */
180#define PROMREG_MAX 16
181#define PROMVADDR_MAX 16
182#define PROMINTR_MAX 15
183
184struct linux_prom_registers {
185 unsigned int which_io; /* is this in OBIO space? */
186 unsigned int phys_addr; /* The physical address of this register */
187 unsigned int reg_size; /* How many bytes does this register take up? */
188};
189
190struct linux_prom_irqs {
191 int pri; /* IRQ priority */
192 int vector; /* This is foobar, what does it do? */
193};
194
195/* Element of the "ranges" vector */
196struct linux_prom_ranges {
197 unsigned int ot_child_space;
198 unsigned int ot_child_base; /* Bus feels this */
199 unsigned int ot_parent_space;
200 unsigned int ot_parent_base; /* CPU looks from here */
201 unsigned int or_size;
202};
203
204/* Ranges and reg properties are a bit different for PCI. */
205struct linux_prom_pci_registers {
206 /*
207 * We don't know what information this field contain.
208 * We guess, PCI device function is in bits 15:8
209 * So, ...
210 */
211 unsigned int which_io; /* Let it be which_io */
212
213 unsigned int phys_hi;
214 unsigned int phys_lo;
215
216 unsigned int size_hi;
217 unsigned int size_lo;
218};
219
220struct linux_prom_pci_ranges {
221 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
222 unsigned int child_phys_mid;
223 unsigned int child_phys_lo;
224
225 unsigned int parent_phys_hi;
226 unsigned int parent_phys_lo;
227
228 unsigned int size_hi;
229 unsigned int size_lo;
230};
231
232struct linux_prom_pci_assigned_addresses {
233 unsigned int which_io;
234
235 unsigned int phys_hi;
236 unsigned int phys_lo;
237
238 unsigned int size_hi;
239 unsigned int size_lo;
240};
241
242struct linux_prom_ebus_ranges {
243 unsigned int child_phys_hi;
244 unsigned int child_phys_lo;
245
246 unsigned int parent_phys_hi;
247 unsigned int parent_phys_mid;
248 unsigned int parent_phys_lo;
249
250 unsigned int size;
251};
252
253#endif /* !(__ASSEMBLY__) */
254
255#endif /* !(__SPARC_OPENPROM_H) */
diff --git a/include/asm-sparc/openprom_32.h b/include/asm-sparc/openprom_32.h
new file mode 100644
index 000000000000..8b1649f29ed9
--- /dev/null
+++ b/include/asm-sparc/openprom_32.h
@@ -0,0 +1,255 @@
1#ifndef __SPARC_OPENPROM_H
2#define __SPARC_OPENPROM_H
3
4/* openprom.h: Prom structures and defines for access to the OPENBOOT
5 * prom routines and data areas.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10/* Empirical constants... */
11#define LINUX_OPPROM_MAGIC 0x10010407
12
13#ifndef __ASSEMBLY__
14/* V0 prom device operations. */
15struct linux_dev_v0_funcs {
16 int (*v0_devopen)(char *device_str);
17 int (*v0_devclose)(int dev_desc);
18 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
19 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
20 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
21 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
22 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
23 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
24 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
25};
26
27/* V2 and later prom device operations. */
28struct linux_dev_v2_funcs {
29 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
30 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
31 void (*v2_dumb_mem_free)(char *va, unsigned sz);
32
33 /* To map devices into virtual I/O space. */
34 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
35 void (*v2_dumb_munmap)(char *virta, unsigned size);
36
37 int (*v2_dev_open)(char *devpath);
38 void (*v2_dev_close)(int d);
39 int (*v2_dev_read)(int d, char *buf, int nbytes);
40 int (*v2_dev_write)(int d, char *buf, int nbytes);
41 int (*v2_dev_seek)(int d, int hi, int lo);
42
43 /* Never issued (multistage load support) */
44 void (*v2_wheee2)(void);
45 void (*v2_wheee3)(void);
46};
47
48struct linux_mlist_v0 {
49 struct linux_mlist_v0 *theres_more;
50 char *start_adr;
51 unsigned num_bytes;
52};
53
54struct linux_mem_v0 {
55 struct linux_mlist_v0 **v0_totphys;
56 struct linux_mlist_v0 **v0_prommap;
57 struct linux_mlist_v0 **v0_available; /* What we can use */
58};
59
60/* Arguments sent to the kernel from the boot prompt. */
61struct linux_arguments_v0 {
62 char *argv[8];
63 char args[100];
64 char boot_dev[2];
65 int boot_dev_ctrl;
66 int boot_dev_unit;
67 int dev_partition;
68 char *kernel_file_name;
69 void *aieee1; /* XXX */
70};
71
72/* V2 and up boot things. */
73struct linux_bootargs_v2 {
74 char **bootpath;
75 char **bootargs;
76 int *fd_stdin;
77 int *fd_stdout;
78};
79
80/* The top level PROM vector. */
81struct linux_romvec {
82 /* Version numbers. */
83 unsigned int pv_magic_cookie;
84 unsigned int pv_romvers;
85 unsigned int pv_plugin_revision;
86 unsigned int pv_printrev;
87
88 /* Version 0 memory descriptors. */
89 struct linux_mem_v0 pv_v0mem;
90
91 /* Node operations. */
92 struct linux_nodeops *pv_nodeops;
93
94 char **pv_bootstr;
95 struct linux_dev_v0_funcs pv_v0devops;
96
97 char *pv_stdin;
98 char *pv_stdout;
99#define PROMDEV_KBD 0 /* input from keyboard */
100#define PROMDEV_SCREEN 0 /* output to screen */
101#define PROMDEV_TTYA 1 /* in/out to ttya */
102#define PROMDEV_TTYB 2 /* in/out to ttyb */
103
104 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
105 int (*pv_getchar)(void);
106 void (*pv_putchar)(int ch);
107
108 /* Non-blocking variants. */
109 int (*pv_nbgetchar)(void);
110 int (*pv_nbputchar)(int ch);
111
112 void (*pv_putstr)(char *str, int len);
113
114 /* Miscellany. */
115 void (*pv_reboot)(char *bootstr);
116 void (*pv_printf)(__const__ char *fmt, ...);
117 void (*pv_abort)(void);
118 __volatile__ int *pv_ticks;
119 void (*pv_halt)(void);
120 void (**pv_synchook)(void);
121
122 /* Evaluate a forth string, not different proto for V0 and V2->up. */
123 union {
124 void (*v0_eval)(int len, char *str);
125 void (*v2_eval)(char *str);
126 } pv_fortheval;
127
128 struct linux_arguments_v0 **pv_v0bootargs;
129
130 /* Get ether address. */
131 unsigned int (*pv_enaddr)(int d, char *enaddr);
132
133 struct linux_bootargs_v2 pv_v2bootargs;
134 struct linux_dev_v2_funcs pv_v2devops;
135
136 int filler[15];
137
138 /* This one is sun4c/sun4 only. */
139 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
140
141 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
142 * No joke. Calling these when there is only one cpu probably
143 * crashes the machine, have to test this. :-)
144 */
145
146 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
147 * 'thiscontext' executing at address 'prog_counter'
148 */
149 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
150 int thiscontext, char *prog_counter);
151
152 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
153 * until a resume cpu call is made.
154 */
155 int (*v3_cpustop)(unsigned int whichcpu);
156
157 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
158 * resume cpu call is made.
159 */
160 int (*v3_cpuidle)(unsigned int whichcpu);
161
162 /* v3_cpuresume() will resume processor 'whichcpu' executing
163 * starting with whatever 'pc' and 'npc' were left at the
164 * last 'idle' or 'stop' call.
165 */
166 int (*v3_cpuresume)(unsigned int whichcpu);
167};
168
169/* Routines for traversing the prom device tree. */
170struct linux_nodeops {
171 int (*no_nextnode)(int node);
172 int (*no_child)(int node);
173 int (*no_proplen)(int node, char *name);
174 int (*no_getprop)(int node, char *name, char *val);
175 int (*no_setprop)(int node, char *name, char *val, int len);
176 char * (*no_nextprop)(int node, char *name);
177};
178
179/* More fun PROM structures for device probing. */
180#define PROMREG_MAX 16
181#define PROMVADDR_MAX 16
182#define PROMINTR_MAX 15
183
184struct linux_prom_registers {
185 unsigned int which_io; /* is this in OBIO space? */
186 unsigned int phys_addr; /* The physical address of this register */
187 unsigned int reg_size; /* How many bytes does this register take up? */
188};
189
190struct linux_prom_irqs {
191 int pri; /* IRQ priority */
192 int vector; /* This is foobar, what does it do? */
193};
194
195/* Element of the "ranges" vector */
196struct linux_prom_ranges {
197 unsigned int ot_child_space;
198 unsigned int ot_child_base; /* Bus feels this */
199 unsigned int ot_parent_space;
200 unsigned int ot_parent_base; /* CPU looks from here */
201 unsigned int or_size;
202};
203
204/* Ranges and reg properties are a bit different for PCI. */
205struct linux_prom_pci_registers {
206 /*
207 * We don't know what information this field contain.
208 * We guess, PCI device function is in bits 15:8
209 * So, ...
210 */
211 unsigned int which_io; /* Let it be which_io */
212
213 unsigned int phys_hi;
214 unsigned int phys_lo;
215
216 unsigned int size_hi;
217 unsigned int size_lo;
218};
219
220struct linux_prom_pci_ranges {
221 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
222 unsigned int child_phys_mid;
223 unsigned int child_phys_lo;
224
225 unsigned int parent_phys_hi;
226 unsigned int parent_phys_lo;
227
228 unsigned int size_hi;
229 unsigned int size_lo;
230};
231
232struct linux_prom_pci_assigned_addresses {
233 unsigned int which_io;
234
235 unsigned int phys_hi;
236 unsigned int phys_lo;
237
238 unsigned int size_hi;
239 unsigned int size_lo;
240};
241
242struct linux_prom_ebus_ranges {
243 unsigned int child_phys_hi;
244 unsigned int child_phys_lo;
245
246 unsigned int parent_phys_hi;
247 unsigned int parent_phys_mid;
248 unsigned int parent_phys_lo;
249
250 unsigned int size;
251};
252
253#endif /* !(__ASSEMBLY__) */
254
255#endif /* !(__SPARC_OPENPROM_H) */
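
For orientation, a minimal sketch (not part of the patch) of how early sparc32 boot code might drive the struct linux_romvec declared above; the helper name example_prom_puts is invented here, and the romvec pointer is assumed to have been handed over by the boot PROM.

/* Illustrative only: write a string to the PROM console through the
 * romvec.  pv_putchar() blocks and is not reentrant, as noted above.
 */
static void example_prom_puts(struct linux_romvec *rv, const char *s)
{
	while (*s)
		rv->pv_putchar(*s++);
}
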
diff --git a/include/asm-sparc/openprom_64.h b/include/asm-sparc/openprom_64.h
new file mode 100644
index 000000000000..b69e4a8c9170
--- /dev/null
+++ b/include/asm-sparc/openprom_64.h
@@ -0,0 +1,280 @@
1#ifndef __SPARC64_OPENPROM_H
2#define __SPARC64_OPENPROM_H
3
4/* openprom.h: Prom structures and defines for access to the OPENBOOT
5 * prom routines and data areas.
6 *
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10#ifndef __ASSEMBLY__
11/* V0 prom device operations. */
12struct linux_dev_v0_funcs {
13 int (*v0_devopen)(char *device_str);
14 int (*v0_devclose)(int dev_desc);
15 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
16 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
17 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
18 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
19 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
20 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
21 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
22};
23
24/* V2 and later prom device operations. */
25struct linux_dev_v2_funcs {
26 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
27 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
28 void (*v2_dumb_mem_free)(char *va, unsigned sz);
29
30 /* To map devices into virtual I/O space. */
31 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
32 void (*v2_dumb_munmap)(char *virta, unsigned size);
33
34 int (*v2_dev_open)(char *devpath);
35 void (*v2_dev_close)(int d);
36 int (*v2_dev_read)(int d, char *buf, int nbytes);
37 int (*v2_dev_write)(int d, char *buf, int nbytes);
38 int (*v2_dev_seek)(int d, int hi, int lo);
39
40 /* Never issued (multistage load support) */
41 void (*v2_wheee2)(void);
42 void (*v2_wheee3)(void);
43};
44
45struct linux_mlist_v0 {
46 struct linux_mlist_v0 *theres_more;
47 unsigned start_adr;
48 unsigned num_bytes;
49};
50
51struct linux_mem_v0 {
52 struct linux_mlist_v0 **v0_totphys;
53 struct linux_mlist_v0 **v0_prommap;
54 struct linux_mlist_v0 **v0_available; /* What we can use */
55};
56
57/* Arguments sent to the kernel from the boot prompt. */
58struct linux_arguments_v0 {
59 char *argv[8];
60 char args[100];
61 char boot_dev[2];
62 int boot_dev_ctrl;
63 int boot_dev_unit;
64 int dev_partition;
65 char *kernel_file_name;
66 void *aieee1; /* XXX */
67};
68
69/* V2 and up boot things. */
70struct linux_bootargs_v2 {
71 char **bootpath;
72 char **bootargs;
73 int *fd_stdin;
74 int *fd_stdout;
75};
76
77/* The top level PROM vector. */
78struct linux_romvec {
79 /* Version numbers. */
80 unsigned int pv_magic_cookie;
81 unsigned int pv_romvers;
82 unsigned int pv_plugin_revision;
83 unsigned int pv_printrev;
84
85 /* Version 0 memory descriptors. */
86 struct linux_mem_v0 pv_v0mem;
87
88 /* Node operations. */
89 struct linux_nodeops *pv_nodeops;
90
91 char **pv_bootstr;
92 struct linux_dev_v0_funcs pv_v0devops;
93
94 char *pv_stdin;
95 char *pv_stdout;
96#define PROMDEV_KBD 0 /* input from keyboard */
97#define PROMDEV_SCREEN 0 /* output to screen */
98#define PROMDEV_TTYA 1 /* in/out to ttya */
99#define PROMDEV_TTYB 2 /* in/out to ttyb */
100
101 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
102 int (*pv_getchar)(void);
103 void (*pv_putchar)(int ch);
104
105 /* Non-blocking variants. */
106 int (*pv_nbgetchar)(void);
107 int (*pv_nbputchar)(int ch);
108
109 void (*pv_putstr)(char *str, int len);
110
111 /* Miscellany. */
112 void (*pv_reboot)(char *bootstr);
113 void (*pv_printf)(__const__ char *fmt, ...);
114 void (*pv_abort)(void);
115 __volatile__ int *pv_ticks;
116 void (*pv_halt)(void);
117 void (**pv_synchook)(void);
118
119 /* Evaluate a forth string, not different proto for V0 and V2->up. */
120 union {
121 void (*v0_eval)(int len, char *str);
122 void (*v2_eval)(char *str);
123 } pv_fortheval;
124
125 struct linux_arguments_v0 **pv_v0bootargs;
126
127 /* Get ether address. */
128 unsigned int (*pv_enaddr)(int d, char *enaddr);
129
130 struct linux_bootargs_v2 pv_v2bootargs;
131 struct linux_dev_v2_funcs pv_v2devops;
132
133 int filler[15];
134
135 /* This one is sun4c/sun4 only. */
136 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
137
138 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
139 * No joke. Calling these when there is only one cpu probably
140 * crashes the machine, have to test this. :-)
141 */
142
143 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
144 * 'thiscontext' executing at address 'prog_counter'
145 */
146 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
147 int thiscontext, char *prog_counter);
148
149 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
150 * until a resume cpu call is made.
151 */
152 int (*v3_cpustop)(unsigned int whichcpu);
153
154 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
155 * resume cpu call is made.
156 */
157 int (*v3_cpuidle)(unsigned int whichcpu);
158
159 /* v3_cpuresume() will resume processor 'whichcpu' executing
160 * starting with whatever 'pc' and 'npc' were left at the
161 * last 'idle' or 'stop' call.
162 */
163 int (*v3_cpuresume)(unsigned int whichcpu);
164};
165
166/* Routines for traversing the prom device tree. */
167struct linux_nodeops {
168 int (*no_nextnode)(int node);
169 int (*no_child)(int node);
170 int (*no_proplen)(int node, char *name);
171 int (*no_getprop)(int node, char *name, char *val);
172 int (*no_setprop)(int node, char *name, char *val, int len);
173 char * (*no_nextprop)(int node, char *name);
174};
175
176/* More fun PROM structures for device probing. */
177#define PROMREG_MAX 24
178#define PROMVADDR_MAX 16
179#define PROMINTR_MAX 32
180
181struct linux_prom_registers {
182 unsigned which_io; /* hi part of physical address */
183 unsigned phys_addr; /* The physical address of this register */
184 int reg_size; /* How many bytes does this register take up? */
185};
186
187struct linux_prom64_registers {
188 unsigned long phys_addr;
189 unsigned long reg_size;
190};
191
192struct linux_prom_irqs {
193 int pri; /* IRQ priority */
194 int vector; /* This is foobar, what does it do? */
195};
196
197/* Element of the "ranges" vector */
198struct linux_prom_ranges {
199 unsigned int ot_child_space;
200 unsigned int ot_child_base; /* Bus feels this */
201 unsigned int ot_parent_space;
202 unsigned int ot_parent_base; /* CPU looks from here */
203 unsigned int or_size;
204};
205
206struct linux_prom64_ranges {
207 unsigned long ot_child_base; /* Bus feels this */
208 unsigned long ot_parent_base; /* CPU looks from here */
209 unsigned long or_size;
210};
211
212/* Ranges and reg properties are a bit different for PCI. */
213struct linux_prom_pci_registers {
214 unsigned int phys_hi;
215 unsigned int phys_mid;
216 unsigned int phys_lo;
217
218 unsigned int size_hi;
219 unsigned int size_lo;
220};
221
222struct linux_prom_pci_ranges {
223 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
224 unsigned int child_phys_mid;
225 unsigned int child_phys_lo;
226
227 unsigned int parent_phys_hi;
228 unsigned int parent_phys_lo;
229
230 unsigned int size_hi;
231 unsigned int size_lo;
232};
233
234struct linux_prom_pci_intmap {
235 unsigned int phys_hi;
236 unsigned int phys_mid;
237 unsigned int phys_lo;
238
239 unsigned int interrupt;
240
241 int cnode;
242 unsigned int cinterrupt;
243};
244
245struct linux_prom_pci_intmask {
246 unsigned int phys_hi;
247 unsigned int phys_mid;
248 unsigned int phys_lo;
249 unsigned int interrupt;
250};
251
252struct linux_prom_ebus_ranges {
253 unsigned int child_phys_hi;
254 unsigned int child_phys_lo;
255
256 unsigned int parent_phys_hi;
257 unsigned int parent_phys_mid;
258 unsigned int parent_phys_lo;
259
260 unsigned int size;
261};
262
263struct linux_prom_ebus_intmap {
264 unsigned int phys_hi;
265 unsigned int phys_lo;
266
267 unsigned int interrupt;
268
269 int cnode;
270 unsigned int cinterrupt;
271};
272
273struct linux_prom_ebus_intmask {
274 unsigned int phys_hi;
275 unsigned int phys_lo;
276 unsigned int interrupt;
277};
278#endif /* !(__ASSEMBLY__) */
279
280#endif /* !(__SPARC64_OPENPROM_H) */
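
As a hedged sketch (not in the patch), this is roughly how a sparc64 caller might walk an array of the 64-bit register cells declared above; the helper name is hypothetical and prom_printf() is taken from the oplib_64.h declarations later in this diff.

/* Illustrative only: dump a decoded "reg" property.  nregs would normally
 * be the property length divided by sizeof(struct linux_prom64_registers).
 */
static void example_dump_reg64(const struct linux_prom64_registers *regs,
			       int nregs)
{
	int i;

	for (i = 0; i < nregs && i < PROMREG_MAX; i++)
		prom_printf("reg %d: base %016lx size %016lx\n",
			    i, regs[i].phys_addr, regs[i].reg_size);
}
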
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index 00f487e74d92..e88d7c04a292 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -1,272 +1,8 @@
1#ifndef ___ASM_SPARC_OPLIB_H
2#define ___ASM_SPARC_OPLIB_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/oplib_64.h>
5#else
6#include <asm-sparc/oplib_32.h>
7#endif
8#endif
1/*
2 * oplib.h: Describes the interface and available routines in the
3 * Linux Prom library.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef __SPARC_OPLIB_H
9#define __SPARC_OPLIB_H
10
11#include <asm/openprom.h>
12#include <linux/spinlock.h>
13#include <linux/compiler.h>
14
15/* The master romvec pointer... */
16extern struct linux_romvec *romvec;
17
18/* Enumeration to describe the prom major version we have detected. */
19enum prom_major_version {
20 PROM_V0, /* Original sun4c V0 prom */
21 PROM_V2, /* sun4c and early sun4m V2 prom */
22 PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
23 PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
24 PROM_SUN4, /* Old sun4 proms are totally different, but we'll shoehorn it to make it fit */
25};
26
27extern enum prom_major_version prom_vers;
28/* Revision, and firmware revision. */
29extern unsigned int prom_rev, prom_prev;
30
31/* Root node of the prom device tree, this stays constant after
32 * initialization is complete.
33 */
34extern int prom_root_node;
35
36/* Pointer to prom structure containing the device tree traversal
37 * and usage utility functions. Only prom-lib should use these,
38 * users use the interface defined by the library only!
39 */
40extern struct linux_nodeops *prom_nodeops;
41
42/* The functions... */
43
44/* You must call prom_init() before using any of the library services,
45 * preferably as early as possible. Pass it the romvec pointer.
46 */
47extern void prom_init(struct linux_romvec *rom_ptr);
48
49/* Boot argument acquisition, returns the boot command line string. */
50extern char *prom_getbootargs(void);
51
52/* Device utilities. */
53
54/* Map and unmap devices in IO space at virtual addresses. Note that the
55 * virtual address you pass is a request and the prom may put your mappings
56 * somewhere else, so check your return value as that is where your new
57 * mappings really are!
58 *
59 * Another note, these are only available on V2 or higher proms!
60 */
61extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
62extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
63
64/* Device operations. */
65
66/* Open the device described by the passed string. Note, that the format
67 * of the string is different on V0 vs. V2->higher proms. The caller must
68 * know what he/she is doing! Returns the device descriptor, an int.
69 */
70extern int prom_devopen(char *device_string);
71
72/* Close a previously opened device described by the passed integer
73 * descriptor.
74 */
75extern int prom_devclose(int device_handle);
76
77/* Do a seek operation on the device described by the passed integer
78 * descriptor.
79 */
80extern void prom_seek(int device_handle, unsigned int seek_hival,
81 unsigned int seek_lowval);
82
83/* Miscellaneous routines, don't really fit in any category per se. */
84
85/* Reboot the machine with the command line passed. */
86extern void prom_reboot(char *boot_command);
87
88/* Evaluate the forth string passed. */
89extern void prom_feval(char *forth_string);
90
91/* Enter the prom, with possibility of continuation with the 'go'
92 * command in newer proms.
93 */
94extern void prom_cmdline(void);
95
96/* Enter the prom, with no chance of continuation for the stand-alone
97 * which calls this.
98 */
99extern void prom_halt(void) __attribute__ ((noreturn));
100
101/* Set the PROM 'sync' callback function to the passed function pointer.
102 * When the user gives the 'sync' command at the prom prompt while the
103 * kernel is still active, the prom will call this routine.
104 *
105 * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
106 */
107typedef void (*sync_func_t)(void);
108extern void prom_setsync(sync_func_t func_ptr);
109
110/* Acquire the IDPROM of the root node in the prom device tree. This
111 * gets passed a buffer where you would like it stuffed. The return value
112 * is the format type of this idprom or 0xff on error.
113 */
114extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
115
116/* Get the prom major version. */
117extern int prom_version(void);
118
119/* Get the prom plugin revision. */
120extern int prom_getrev(void);
121
122/* Get the prom firmware revision. */
123extern int prom_getprev(void);
124
125/* Character operations to/from the console.... */
126
127/* Non-blocking get character from console. */
128extern int prom_nbgetchar(void);
129
130/* Non-blocking put character to console. */
131extern int prom_nbputchar(char character);
132
133/* Blocking get character from console. */
134extern char prom_getchar(void);
135
136/* Blocking put character to console. */
137extern void prom_putchar(char character);
138
139/* Prom's internal routines, don't use in kernel/boot code. */
140extern void prom_printf(char *fmt, ...);
141extern void prom_write(const char *buf, unsigned int len);
142
143/* Multiprocessor operations... */
144
145/* Start the CPU with the given device tree node, context table, and context
146 * at the passed program counter.
147 */
148extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
149 int context, char *program_counter);
150
151/* Stop the CPU with the passed device tree node. */
152extern int prom_stopcpu(int cpunode);
153
154/* Idle the CPU with the passed device tree node. */
155extern int prom_idlecpu(int cpunode);
156
157/* Re-Start the CPU with the passed device tree node. */
158extern int prom_restartcpu(int cpunode);
159
160/* PROM memory allocation facilities... */
161
162/* Allocated at possibly the given virtual address a chunk of the
163 * indicated size.
164 */
165extern char *prom_alloc(char *virt_hint, unsigned int size);
166
167/* Free a previously allocated chunk. */
168extern void prom_free(char *virt_addr, unsigned int size);
169
170/* Sun4/sun4c specific memory-management startup hook. */
171
172/* Map the passed segment in the given context at the passed
173 * virtual address.
174 */
175extern void prom_putsegment(int context, unsigned long virt_addr,
176 int physical_segment);
177
178
179/* PROM device tree traversal functions... */
180
181#ifdef PROMLIB_INTERNAL
182
183/* Internal version of prom_getchild. */
184extern int __prom_getchild(int parent_node);
185
186/* Internal version of prom_getsibling. */
187extern int __prom_getsibling(int node);
188
189#endif
190
191
192/* Get the child node of the given node, or zero if no child exists. */
193extern int prom_getchild(int parent_node);
194
195/* Get the next sibling node of the given node, or zero if no further
196 * siblings exist.
197 */
198extern int prom_getsibling(int node);
199
200/* Get the length, at the passed node, of the given property type.
201 * Returns -1 on error (ie. no such property at this node).
202 */
203extern int prom_getproplen(int thisnode, char *property);
204
205/* Fetch the requested property using the given buffer. Returns
206 * the number of bytes the prom put into your buffer or -1 on error.
207 */
208extern int __must_check prom_getproperty(int thisnode, char *property,
209 char *prop_buffer, int propbuf_size);
210
211/* Acquire an integer property. */
212extern int prom_getint(int node, char *property);
213
214/* Acquire an integer property, with a default value. */
215extern int prom_getintdefault(int node, char *property, int defval);
216
217/* Acquire a boolean property, 0=FALSE 1=TRUE. */
218extern int prom_getbool(int node, char *prop);
219
220/* Acquire a string property, null string on error. */
221extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
222
223/* Does the passed node have the given "name"? YES=1 NO=0 */
224extern int prom_nodematch(int thisnode, char *name);
225
226/* Search all siblings starting at the passed node for "name" matching
227 * the given string. Returns the node on success, zero on failure.
228 */
229extern int prom_searchsiblings(int node_start, char *name);
230
231/* Return the first property type, as a string, for the given node.
232 * Returns a null string on error.
233 */
234extern char *prom_firstprop(int node, char *buffer);
235
236/* Returns the next property after the passed property for the given
237 * node. Returns null string on failure.
238 */
239extern char *prom_nextprop(int node, char *prev_property, char *buffer);
240
241/* Returns phandle of the path specified */
242extern int prom_finddevice(char *name);
243
244/* Returns 1 if the specified node has given property. */
245extern int prom_node_has_property(int node, char *property);
246
247/* Set the indicated property at the given node with the passed value.
248 * Returns the number of bytes of your value that the prom took.
249 */
250extern int prom_setprop(int node, char *prop_name, char *prop_value,
251 int value_size);
252
253extern int prom_pathtoinode(char *path);
254extern int prom_inst2pkg(int);
255
256/* Dorking with Bus ranges... */
257
258/* Apply promlib probes OBIO ranges to registers. */
259extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
260
261/* Apply ranges of any prom node (and optionally parent node as well) to registers. */
262extern void prom_apply_generic_ranges(int node, int parent,
263 struct linux_prom_registers *sbusregs, int nregs);
264
265/* CPU probing helpers. */
266int cpu_find_by_instance(int instance, int *prom_node, int *mid);
267int cpu_find_by_mid(int mid, int *prom_node);
268int cpu_get_hwmid(int prom_node);
269
270extern spinlock_t prom_lock;
271
272#endif /* !(__SPARC_OPLIB_H) */
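
A small sketch (hypothetical hook, not part of the patch) of the prom_setsync() interface declared above, which survives unchanged in oplib_32.h below.

/* Illustrative only: install a 'sync' callback so OBP can call back into
 * the kernel when the user types 'sync' at the PROM prompt.
 */
static void example_sync_hook(void)
{
	/* a real hook would sync filesystems here */
}

static void example_install_sync_hook(void)
{
	prom_setsync(example_sync_hook);
}
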
diff --git a/include/asm-sparc/oplib_32.h b/include/asm-sparc/oplib_32.h
new file mode 100644
index 000000000000..b2631da259e0
--- /dev/null
+++ b/include/asm-sparc/oplib_32.h
@@ -0,0 +1,272 @@
1/*
2 * oplib.h: Describes the interface and available routines in the
3 * Linux Prom library.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef __SPARC_OPLIB_H
9#define __SPARC_OPLIB_H
10
11#include <asm/openprom.h>
12#include <linux/spinlock.h>
13#include <linux/compiler.h>
14
15/* The master romvec pointer... */
16extern struct linux_romvec *romvec;
17
18/* Enumeration to describe the prom major version we have detected. */
19enum prom_major_version {
20 PROM_V0, /* Original sun4c V0 prom */
21 PROM_V2, /* sun4c and early sun4m V2 prom */
22 PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
23 PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
24 PROM_SUN4, /* Old sun4 proms are totally different, but we'll shoehorn it to make it fit */
25};
26
27extern enum prom_major_version prom_vers;
28/* Revision, and firmware revision. */
29extern unsigned int prom_rev, prom_prev;
30
31/* Root node of the prom device tree, this stays constant after
32 * initialization is complete.
33 */
34extern int prom_root_node;
35
36/* Pointer to prom structure containing the device tree traversal
37 * and usage utility functions. Only prom-lib should use these,
38 * users use the interface defined by the library only!
39 */
40extern struct linux_nodeops *prom_nodeops;
41
42/* The functions... */
43
44/* You must call prom_init() before using any of the library services,
45 * preferably as early as possible. Pass it the romvec pointer.
46 */
47extern void prom_init(struct linux_romvec *rom_ptr);
48
49/* Boot argument acquisition, returns the boot command line string. */
50extern char *prom_getbootargs(void);
51
52/* Device utilities. */
53
54/* Map and unmap devices in IO space at virtual addresses. Note that the
55 * virtual address you pass is a request and the prom may put your mappings
56 * somewhere else, so check your return value as that is where your new
57 * mappings really are!
58 *
59 * Another note, these are only available on V2 or higher proms!
60 */
61extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
62extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
63
64/* Device operations. */
65
66/* Open the device described by the passed string. Note, that the format
67 * of the string is different on V0 vs. V2->higher proms. The caller must
68 * know what he/she is doing! Returns the device descriptor, an int.
69 */
70extern int prom_devopen(char *device_string);
71
72/* Close a previously opened device described by the passed integer
73 * descriptor.
74 */
75extern int prom_devclose(int device_handle);
76
77/* Do a seek operation on the device described by the passed integer
78 * descriptor.
79 */
80extern void prom_seek(int device_handle, unsigned int seek_hival,
81 unsigned int seek_lowval);
82
83/* Miscellaneous routines, don't really fit in any category per se. */
84
85/* Reboot the machine with the command line passed. */
86extern void prom_reboot(char *boot_command);
87
88/* Evaluate the forth string passed. */
89extern void prom_feval(char *forth_string);
90
91/* Enter the prom, with possibility of continuation with the 'go'
92 * command in newer proms.
93 */
94extern void prom_cmdline(void);
95
96/* Enter the prom, with no chance of continuation for the stand-alone
97 * which calls this.
98 */
99extern void prom_halt(void) __attribute__ ((noreturn));
100
101/* Set the PROM 'sync' callback function to the passed function pointer.
102 * When the user gives the 'sync' command at the prom prompt while the
103 * kernel is still active, the prom will call this routine.
104 *
105 * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
106 */
107typedef void (*sync_func_t)(void);
108extern void prom_setsync(sync_func_t func_ptr);
109
110/* Acquire the IDPROM of the root node in the prom device tree. This
111 * gets passed a buffer where you would like it stuffed. The return value
112 * is the format type of this idprom or 0xff on error.
113 */
114extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
115
116/* Get the prom major version. */
117extern int prom_version(void);
118
119/* Get the prom plugin revision. */
120extern int prom_getrev(void);
121
122/* Get the prom firmware revision. */
123extern int prom_getprev(void);
124
125/* Character operations to/from the console.... */
126
127/* Non-blocking get character from console. */
128extern int prom_nbgetchar(void);
129
130/* Non-blocking put character to console. */
131extern int prom_nbputchar(char character);
132
133/* Blocking get character from console. */
134extern char prom_getchar(void);
135
136/* Blocking put character to console. */
137extern void prom_putchar(char character);
138
139/* Prom's internal routines, don't use in kernel/boot code. */
140extern void prom_printf(char *fmt, ...);
141extern void prom_write(const char *buf, unsigned int len);
142
143/* Multiprocessor operations... */
144
145/* Start the CPU with the given device tree node, context table, and context
146 * at the passed program counter.
147 */
148extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
149 int context, char *program_counter);
150
151/* Stop the CPU with the passed device tree node. */
152extern int prom_stopcpu(int cpunode);
153
154/* Idle the CPU with the passed device tree node. */
155extern int prom_idlecpu(int cpunode);
156
157/* Re-Start the CPU with the passed device tree node. */
158extern int prom_restartcpu(int cpunode);
159
160/* PROM memory allocation facilities... */
161
162/* Allocated at possibly the given virtual address a chunk of the
163 * indicated size.
164 */
165extern char *prom_alloc(char *virt_hint, unsigned int size);
166
167/* Free a previously allocated chunk. */
168extern void prom_free(char *virt_addr, unsigned int size);
169
170/* Sun4/sun4c specific memory-management startup hook. */
171
172/* Map the passed segment in the given context at the passed
173 * virtual address.
174 */
175extern void prom_putsegment(int context, unsigned long virt_addr,
176 int physical_segment);
177
178
179/* PROM device tree traversal functions... */
180
181#ifdef PROMLIB_INTERNAL
182
183/* Internal version of prom_getchild. */
184extern int __prom_getchild(int parent_node);
185
186/* Internal version of prom_getsibling. */
187extern int __prom_getsibling(int node);
188
189#endif
190
191
192/* Get the child node of the given node, or zero if no child exists. */
193extern int prom_getchild(int parent_node);
194
195/* Get the next sibling node of the given node, or zero if no further
196 * siblings exist.
197 */
198extern int prom_getsibling(int node);
199
200/* Get the length, at the passed node, of the given property type.
201 * Returns -1 on error (ie. no such property at this node).
202 */
203extern int prom_getproplen(int thisnode, char *property);
204
205/* Fetch the requested property using the given buffer. Returns
206 * the number of bytes the prom put into your buffer or -1 on error.
207 */
208extern int __must_check prom_getproperty(int thisnode, char *property,
209 char *prop_buffer, int propbuf_size);
210
211/* Acquire an integer property. */
212extern int prom_getint(int node, char *property);
213
214/* Acquire an integer property, with a default value. */
215extern int prom_getintdefault(int node, char *property, int defval);
216
217/* Acquire a boolean property, 0=FALSE 1=TRUE. */
218extern int prom_getbool(int node, char *prop);
219
220/* Acquire a string property, null string on error. */
221extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
222
223/* Does the passed node have the given "name"? YES=1 NO=0 */
224extern int prom_nodematch(int thisnode, char *name);
225
226/* Search all siblings starting at the passed node for "name" matching
227 * the given string. Returns the node on success, zero on failure.
228 */
229extern int prom_searchsiblings(int node_start, char *name);
230
231/* Return the first property type, as a string, for the given node.
232 * Returns a null string on error.
233 */
234extern char *prom_firstprop(int node, char *buffer);
235
236/* Returns the next property after the passed property for the given
237 * node. Returns null string on failure.
238 */
239extern char *prom_nextprop(int node, char *prev_property, char *buffer);
240
241/* Returns phandle of the path specified */
242extern int prom_finddevice(char *name);
243
244/* Returns 1 if the specified node has given property. */
245extern int prom_node_has_property(int node, char *property);
246
247/* Set the indicated property at the given node with the passed value.
248 * Returns the number of bytes of your value that the prom took.
249 */
250extern int prom_setprop(int node, char *prop_name, char *prop_value,
251 int value_size);
252
253extern int prom_pathtoinode(char *path);
254extern int prom_inst2pkg(int);
255
256/* Dorking with Bus ranges... */
257
258/* Apply promlib probes OBIO ranges to registers. */
259extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
260
261/* Apply ranges of any prom node (and optionally parent node as well) to registers. */
262extern void prom_apply_generic_ranges(int node, int parent,
263 struct linux_prom_registers *sbusregs, int nregs);
264
265/* CPU probing helpers. */
266int cpu_find_by_instance(int instance, int *prom_node, int *mid);
267int cpu_find_by_mid(int mid, int *prom_node);
268int cpu_get_hwmid(int prom_node);
269
270extern spinlock_t prom_lock;
271
272#endif /* !(__SPARC_OPLIB_H) */
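
A hedged sketch of the 32-bit device-tree traversal helpers declared above; the function name is invented, the "clock-frequency" property is only an example, and error handling is minimal.

/* Illustrative only: find a child of the root node by name and read an
 * integer property from it, falling back to a default.
 */
static int example_node_clock(char *nodename)
{
	int node = prom_getchild(prom_root_node);

	node = prom_searchsiblings(node, nodename);
	if (node == 0)
		return -1;
	return prom_getintdefault(node, "clock-frequency", 0);
}
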
diff --git a/include/asm-sparc/oplib_64.h b/include/asm-sparc/oplib_64.h
new file mode 100644
index 000000000000..6d2c2ca98039
--- /dev/null
+++ b/include/asm-sparc/oplib_64.h
@@ -0,0 +1,322 @@
1/* oplib.h: Describes the interface and available routines in the
2 * Linux Prom library.
3 *
4 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#ifndef __SPARC64_OPLIB_H
9#define __SPARC64_OPLIB_H
10
11#include <asm/openprom.h>
12
13/* OBP version string. */
14extern char prom_version[];
15
16/* Root node of the prom device tree, this stays constant after
17 * initialization is complete.
18 */
19extern int prom_root_node;
20
21/* PROM stdin and stdout */
22extern int prom_stdin, prom_stdout;
23
24/* /chosen node of the prom device tree, this stays constant after
25 * initialization is complete.
26 */
27extern int prom_chosen_node;
28
29/* Helper values and strings in arch/sparc64/kernel/head.S */
30extern const char prom_peer_name[];
31extern const char prom_compatible_name[];
32extern const char prom_root_compatible[];
33extern const char prom_cpu_compatible[];
34extern const char prom_finddev_name[];
35extern const char prom_chosen_path[];
36extern const char prom_cpu_path[];
37extern const char prom_getprop_name[];
38extern const char prom_mmu_name[];
39extern const char prom_callmethod_name[];
40extern const char prom_translate_name[];
41extern const char prom_map_name[];
42extern const char prom_unmap_name[];
43extern int prom_mmu_ihandle_cache;
44extern unsigned int prom_boot_mapped_pc;
45extern unsigned int prom_boot_mapping_mode;
46extern unsigned long prom_boot_mapping_phys_high, prom_boot_mapping_phys_low;
47
48struct linux_mlist_p1275 {
49 struct linux_mlist_p1275 *theres_more;
50 unsigned long start_adr;
51 unsigned long num_bytes;
52};
53
54struct linux_mem_p1275 {
55 struct linux_mlist_p1275 **p1275_totphys;
56 struct linux_mlist_p1275 **p1275_prommap;
57 struct linux_mlist_p1275 **p1275_available; /* What we can use */
58};
59
60/* The functions... */
61
62/* You must call prom_init() before using any of the library services,
63 * preferably as early as possible. Pass it the romvec pointer.
64 */
65extern void prom_init(void *cif_handler, void *cif_stack);
66
67/* Boot argument acquisition, returns the boot command line string. */
68extern char *prom_getbootargs(void);
69
70/* Device utilities. */
71
72/* Device operations. */
73
74/* Open the device described by the passed string. Note, that the format
75 * of the string is different on V0 vs. V2->higher proms. The caller must
76 * know what he/she is doing! Returns the device descriptor, an int.
77 */
78extern int prom_devopen(const char *device_string);
79
80/* Close a previously opened device described by the passed integer
81 * descriptor.
82 */
83extern int prom_devclose(int device_handle);
84
85/* Do a seek operation on the device described by the passed integer
86 * descriptor.
87 */
88extern void prom_seek(int device_handle, unsigned int seek_hival,
89 unsigned int seek_lowval);
90
91/* Miscellaneous routines, don't really fit in any category per se. */
92
93/* Reboot the machine with the command line passed. */
94extern void prom_reboot(const char *boot_command);
95
96/* Evaluate the forth string passed. */
97extern void prom_feval(const char *forth_string);
98
99/* Enter the prom, with possibility of continuation with the 'go'
100 * command in newer proms.
101 */
102extern void prom_cmdline(void);
103
104/* Enter the prom, with no chance of continuation for the stand-alone
105 * which calls this.
106 */
107extern void prom_halt(void) __attribute__ ((noreturn));
108
109/* Halt and power-off the machine. */
110extern void prom_halt_power_off(void) __attribute__ ((noreturn));
111
112/* Set the PROM 'sync' callback function to the passed function pointer.
113 * When the user gives the 'sync' command at the prom prompt while the
114 * kernel is still active, the prom will call this routine.
115 *
116 */
117typedef int (*callback_func_t)(long *cmd);
118extern void prom_setcallback(callback_func_t func_ptr);
119
120/* Acquire the IDPROM of the root node in the prom device tree. This
121 * gets passed a buffer where you would like it stuffed. The return value
122 * is the format type of this idprom or 0xff on error.
123 */
124extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
125
126/* Character operations to/from the console.... */
127
128/* Non-blocking get character from console. */
129extern int prom_nbgetchar(void);
130
131/* Non-blocking put character to console. */
132extern int prom_nbputchar(char character);
133
134/* Blocking get character from console. */
135extern char prom_getchar(void);
136
137/* Blocking put character to console. */
138extern void prom_putchar(char character);
139
140/* Prom's internal routines, don't use in kernel/boot code. */
141extern void prom_printf(const char *fmt, ...);
142extern void prom_write(const char *buf, unsigned int len);
143
144/* Multiprocessor operations... */
145#ifdef CONFIG_SMP
146/* Start the CPU with the given device tree node at the passed program
147 * counter with the given arg passed in via register %o0.
148 */
149extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
150
151/* Start the CPU with the given cpu ID at the passed program
152 * counter with the given arg passed in via register %o0.
153 */
154extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
155
156/* Stop the CPU with the given cpu ID. */
157extern void prom_stopcpu_cpuid(int cpuid);
158
159/* Stop the current CPU. */
160extern void prom_stopself(void);
161
162/* Idle the current CPU. */
163extern void prom_idleself(void);
164
165/* Resume the CPU with the passed device tree node. */
166extern void prom_resumecpu(int cpunode);
167#endif
168
169/* Power management interfaces. */
170
171/* Put the current CPU to sleep. */
172extern void prom_sleepself(void);
173
174/* Put the entire system to sleep. */
175extern int prom_sleepsystem(void);
176
177/* Initiate a wakeup event. */
178extern int prom_wakeupsystem(void);
179
180/* MMU and memory related OBP interfaces. */
181
182/* Get unique string identifying SIMM at given physical address. */
183extern int prom_getunumber(int syndrome_code,
184 unsigned long phys_addr,
185 char *buf, int buflen);
186
187/* Retain physical memory to the caller across soft resets. */
188extern unsigned long prom_retain(const char *name,
189 unsigned long pa_low, unsigned long pa_high,
190 long size, long align);
191
192/* Load explicit I/D TLB entries into the calling processor. */
193extern long prom_itlb_load(unsigned long index,
194 unsigned long tte_data,
195 unsigned long vaddr);
196
197extern long prom_dtlb_load(unsigned long index,
198 unsigned long tte_data,
199 unsigned long vaddr);
200
201/* Map/Unmap client program address ranges. First the format of
202 * the mapping mode argument.
203 */
204#define PROM_MAP_WRITE 0x0001 /* Writable */
205#define PROM_MAP_READ 0x0002 /* Readable - sw */
206#define PROM_MAP_EXEC 0x0004 /* Executable - sw */
207#define PROM_MAP_LOCKED 0x0010 /* Locked, use i/dtlb load calls for this instead */
208#define PROM_MAP_CACHED 0x0020 /* Cacheable in both L1 and L2 caches */
209#define PROM_MAP_SE 0x0040 /* Side-Effects */
210#define PROM_MAP_GLOB 0x0080 /* Global */
211#define PROM_MAP_IE 0x0100 /* Invert-Endianness */
212#define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
213
214extern int prom_map(int mode, unsigned long size,
215 unsigned long vaddr, unsigned long paddr);
216extern void prom_unmap(unsigned long size, unsigned long vaddr);
217
218
219/* PROM device tree traversal functions... */
220
221#ifdef PROMLIB_INTERNAL
222
223/* Internal version of prom_getchild. */
224extern int __prom_getchild(int parent_node);
225
226/* Internal version of prom_getsibling. */
227extern int __prom_getsibling(int node);
228
229#endif
230
231/* Get the child node of the given node, or zero if no child exists. */
232extern int prom_getchild(int parent_node);
233
234/* Get the next sibling node of the given node, or zero if no further
235 * siblings exist.
236 */
237extern int prom_getsibling(int node);
238
239/* Get the length, at the passed node, of the given property type.
240 * Returns -1 on error (ie. no such property at this node).
241 */
242extern int prom_getproplen(int thisnode, const char *property);
243
244/* Fetch the requested property using the given buffer. Returns
245 * the number of bytes the prom put into your buffer or -1 on error.
246 */
247extern int prom_getproperty(int thisnode, const char *property,
248 char *prop_buffer, int propbuf_size);
249
250/* Acquire an integer property. */
251extern int prom_getint(int node, const char *property);
252
253/* Acquire an integer property, with a default value. */
254extern int prom_getintdefault(int node, const char *property, int defval);
255
256/* Acquire a boolean property, 0=FALSE 1=TRUE. */
257extern int prom_getbool(int node, const char *prop);
258
259/* Acquire a string property, null string on error. */
260extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
261
262/* Does the passed node have the given "name"? YES=1 NO=0 */
263extern int prom_nodematch(int thisnode, const char *name);
264
265/* Search all siblings starting at the passed node for "name" matching
266 * the given string. Returns the node on success, zero on failure.
267 */
268extern int prom_searchsiblings(int node_start, const char *name);
269
270/* Return the first property type, as a string, for the given node.
271 * Returns a null string on error. Buffer should be at least 32B long.
272 */
273extern char *prom_firstprop(int node, char *buffer);
274
275/* Returns the next property after the passed property for the given
276 * node. Returns null string on failure. Buffer should be at least 32B long.
277 */
278extern char *prom_nextprop(int node, const char *prev_property, char *buffer);
279
280/* Returns 1 if the specified node has given property. */
281extern int prom_node_has_property(int node, const char *property);
282
283/* Returns phandle of the path specified */
284extern int prom_finddevice(const char *name);
285
286/* Set the indicated property at the given node with the passed value.
287 * Returns the number of bytes of your value that the prom took.
288 */
289extern int prom_setprop(int node, const char *prop_name, char *prop_value,
290 int value_size);
291
292extern int prom_pathtoinode(const char *path);
293extern int prom_inst2pkg(int);
294extern int prom_service_exists(const char *service_name);
295extern void prom_sun4v_guest_soft_state(void);
296
297extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
298
299/* Client interface level routines. */
300extern long p1275_cmd(const char *, long, ...);
301
302#if 0
303#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
304#else
305#define P1275_SIZE(x) x
306#endif
307
308/* We support at most 16 input and 1 output argument */
309#define P1275_ARG_NUMBER 0
310#define P1275_ARG_IN_STRING 1
311#define P1275_ARG_OUT_BUF 2
312#define P1275_ARG_OUT_32B 3
313#define P1275_ARG_IN_FUNCTION 4
314#define P1275_ARG_IN_BUF 5
315#define P1275_ARG_IN_64B 6
316
317#define P1275_IN(x) ((x) & 0xf)
318#define P1275_OUT(x) (((x) << 4) & 0xf0)
319#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
320#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
321
322#endif /* !(__SPARC64_OPLIB_H) */
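
A worked illustration (not part of the patch) of the P1275 call-format macros above: P1275_INOUT(4,1) puts four input cells in the low nibble and one output cell in bits 4-7, while each P1275_ARG(n,type) drops a type code at bit n*3+8. The constant below evaluates to 0x8814; whether any given OBP service is actually invoked with this exact format word is an assumption.

/* Illustrative only: a format word saying "4 inputs, 1 output, argument 1
 * is an input string, argument 2 is an output buffer".
 */
static const long example_fmt =
	P1275_ARG(1, P1275_ARG_IN_STRING) |	/* 1 << 11 = 0x0800 */
	P1275_ARG(2, P1275_ARG_OUT_BUF)   |	/* 2 << 14 = 0x8000 */
	P1275_INOUT(4, 1);			/* 0x14             */
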
diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h
index 14de518cc38f..f32f49fcf75c 100644
--- a/include/asm-sparc/page.h
+++ b/include/asm-sparc/page.h
@@ -1,163 +1,8 @@
1#ifndef ___ASM_SPARC_PAGE_H
2#define ___ASM_SPARC_PAGE_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/page_64.h>
5#else
6#include <asm-sparc/page_32.h>
7#endif
8#endif
1/*
2 * page.h: Various defines and such for MMU operations on the Sparc for
3 * the Linux kernel.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef _SPARC_PAGE_H
9#define _SPARC_PAGE_H
10
11#ifdef CONFIG_SUN4
12#define PAGE_SHIFT 13
13#else
14#define PAGE_SHIFT 12
15#endif
16#ifndef __ASSEMBLY__
17/* I have my suspicions... -DaveM */
18#define PAGE_SIZE (1UL << PAGE_SHIFT)
19#else
20#define PAGE_SIZE (1 << PAGE_SHIFT)
21#endif
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24#include <asm/btfixup.h>
25
26#ifndef __ASSEMBLY__
27
28#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
29#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
30#define clear_user_page(addr, vaddr, page) \
31 do { clear_page(addr); \
32 sparc_flush_page_to_ram(page); \
33 } while (0)
34#define copy_user_page(to, from, vaddr, page) \
35 do { copy_page(to, from); \
36 sparc_flush_page_to_ram(page); \
37 } while (0)
38
39/* The following structure is used to hold the physical
40 * memory configuration of the machine. This is filled in
41 * prom_meminit() and is later used by mem_init() to set up
42 * mem_map[]. We statically allocate SPARC_PHYS_BANKS+1 of
43 * these structs, this is arbitrary. The entry after the
44 * last valid one has num_bytes==0.
45 */
46struct sparc_phys_banks {
47 unsigned long base_addr;
48 unsigned long num_bytes;
49};
50
51#define SPARC_PHYS_BANKS 32
52
53extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
54
55/* Cache alias structure. Entry is valid if context != -1. */
56struct cache_palias {
57 unsigned long vaddr;
58 int context;
59};
60
61/* passing structs on the Sparc slow us down tremendously... */
62
63/* #define STRICT_MM_TYPECHECKS */
64
65#ifdef STRICT_MM_TYPECHECKS
66/*
67 * These are used to make use of C type-checking..
68 */
69typedef struct { unsigned long pte; } pte_t;
70typedef struct { unsigned long iopte; } iopte_t;
71typedef struct { unsigned long pmdv[16]; } pmd_t;
72typedef struct { unsigned long pgd; } pgd_t;
73typedef struct { unsigned long ctxd; } ctxd_t;
74typedef struct { unsigned long pgprot; } pgprot_t;
75typedef struct { unsigned long iopgprot; } iopgprot_t;
76
77#define pte_val(x) ((x).pte)
78#define iopte_val(x) ((x).iopte)
79#define pmd_val(x) ((x).pmdv[0])
80#define pgd_val(x) ((x).pgd)
81#define ctxd_val(x) ((x).ctxd)
82#define pgprot_val(x) ((x).pgprot)
83#define iopgprot_val(x) ((x).iopgprot)
84
85#define __pte(x) ((pte_t) { (x) } )
86#define __iopte(x) ((iopte_t) { (x) } )
87/* #define __pmd(x) ((pmd_t) { (x) } ) */ /* XXX procedure with loop */
88#define __pgd(x) ((pgd_t) { (x) } )
89#define __ctxd(x) ((ctxd_t) { (x) } )
90#define __pgprot(x) ((pgprot_t) { (x) } )
91#define __iopgprot(x) ((iopgprot_t) { (x) } )
92
93#else
94/*
95 * .. while these make it easier on the compiler
96 */
97typedef unsigned long pte_t;
98typedef unsigned long iopte_t;
99typedef struct { unsigned long pmdv[16]; } pmd_t;
100typedef unsigned long pgd_t;
101typedef unsigned long ctxd_t;
102typedef unsigned long pgprot_t;
103typedef unsigned long iopgprot_t;
104
105#define pte_val(x) (x)
106#define iopte_val(x) (x)
107#define pmd_val(x) ((x).pmdv[0])
108#define pgd_val(x) (x)
109#define ctxd_val(x) (x)
110#define pgprot_val(x) (x)
111#define iopgprot_val(x) (x)
112
113#define __pte(x) (x)
114#define __iopte(x) (x)
115/* #define __pmd(x) (x) */ /* XXX later */
116#define __pgd(x) (x)
117#define __ctxd(x) (x)
118#define __pgprot(x) (x)
119#define __iopgprot(x) (x)
120
121#endif
122
123typedef struct page *pgtable_t;
124
125extern unsigned long sparc_unmapped_base;
126
127BTFIXUPDEF_SETHI(sparc_unmapped_base)
128
129#define TASK_UNMAPPED_BASE BTFIXUP_SETHI(sparc_unmapped_base)
130
131#else /* !(__ASSEMBLY__) */
132
133#define __pgprot(x) (x)
134
135#endif /* !(__ASSEMBLY__) */
136
137/* to align the pointer to the (next) page boundary */
138#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
139
140#define PAGE_OFFSET 0xf0000000
141#ifndef __ASSEMBLY__
142extern unsigned long phys_base;
143extern unsigned long pfn_base;
144#endif
145#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + phys_base)
146#define __va(x) ((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET))
147
148#define virt_to_phys __pa
149#define phys_to_virt __va
150
151#define ARCH_PFN_OFFSET (pfn_base)
152#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
153
154#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
155#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
156
157#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
158 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
159
160#include <asm-generic/memory_model.h>
161#include <asm-generic/page.h>
162
163#endif /* _SPARC_PAGE_H */
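
A tiny numeric sketch (values assumed, not from the patch) of the sparc32 __pa()/__va() translation defined above, where kernel virtual addresses sit at PAGE_OFFSET 0xf0000000 and physical memory starts at a machine-dependent phys_base.

/* Illustrative only: mirrors __pa(x) with an explicit base argument.
 * With base = 0x00400000, example_pa(0xf0001000, base) == 0x00401000.
 */
static unsigned long example_pa(unsigned long vaddr, unsigned long base)
{
	return vaddr - 0xf0000000UL + base;
}
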
diff --git a/include/asm-sparc/page_32.h b/include/asm-sparc/page_32.h
new file mode 100644
index 000000000000..14de518cc38f
--- /dev/null
+++ b/include/asm-sparc/page_32.h
@@ -0,0 +1,163 @@
1/*
2 * page.h: Various defines and such for MMU operations on the Sparc for
3 * the Linux kernel.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef _SPARC_PAGE_H
9#define _SPARC_PAGE_H
10
11#ifdef CONFIG_SUN4
12#define PAGE_SHIFT 13
13#else
14#define PAGE_SHIFT 12
15#endif
16#ifndef __ASSEMBLY__
17/* I have my suspicions... -DaveM */
18#define PAGE_SIZE (1UL << PAGE_SHIFT)
19#else
20#define PAGE_SIZE (1 << PAGE_SHIFT)
21#endif
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24#include <asm/btfixup.h>
25
26#ifndef __ASSEMBLY__
27
28#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
29#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
30#define clear_user_page(addr, vaddr, page) \
31 do { clear_page(addr); \
32 sparc_flush_page_to_ram(page); \
33 } while (0)
34#define copy_user_page(to, from, vaddr, page) \
35 do { copy_page(to, from); \
36 sparc_flush_page_to_ram(page); \
37 } while (0)
38
39/* The following structure is used to hold the physical
40 * memory configuration of the machine. This is filled in
41 * prom_meminit() and is later used by mem_init() to set up
42 * mem_map[]. We statically allocate SPARC_PHYS_BANKS+1 of
43 * these structs, this is arbitrary. The entry after the
44 * last valid one has num_bytes==0.
45 */
46struct sparc_phys_banks {
47 unsigned long base_addr;
48 unsigned long num_bytes;
49};
50
51#define SPARC_PHYS_BANKS 32
52
53extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
54
55/* Cache alias structure. Entry is valid if context != -1. */
56struct cache_palias {
57 unsigned long vaddr;
58 int context;
59};
60
61/* passing structs on the Sparc slow us down tremendously... */
62
63/* #define STRICT_MM_TYPECHECKS */
64
65#ifdef STRICT_MM_TYPECHECKS
66/*
67 * These are used to make use of C type-checking..
68 */
69typedef struct { unsigned long pte; } pte_t;
70typedef struct { unsigned long iopte; } iopte_t;
71typedef struct { unsigned long pmdv[16]; } pmd_t;
72typedef struct { unsigned long pgd; } pgd_t;
73typedef struct { unsigned long ctxd; } ctxd_t;
74typedef struct { unsigned long pgprot; } pgprot_t;
75typedef struct { unsigned long iopgprot; } iopgprot_t;
76
77#define pte_val(x) ((x).pte)
78#define iopte_val(x) ((x).iopte)
79#define pmd_val(x) ((x).pmdv[0])
80#define pgd_val(x) ((x).pgd)
81#define ctxd_val(x) ((x).ctxd)
82#define pgprot_val(x) ((x).pgprot)
83#define iopgprot_val(x) ((x).iopgprot)
84
85#define __pte(x) ((pte_t) { (x) } )
86#define __iopte(x) ((iopte_t) { (x) } )
87/* #define __pmd(x) ((pmd_t) { (x) } ) */ /* XXX procedure with loop */
88#define __pgd(x) ((pgd_t) { (x) } )
89#define __ctxd(x) ((ctxd_t) { (x) } )
90#define __pgprot(x) ((pgprot_t) { (x) } )
91#define __iopgprot(x) ((iopgprot_t) { (x) } )
92
93#else
94/*
95 * .. while these make it easier on the compiler
96 */
97typedef unsigned long pte_t;
98typedef unsigned long iopte_t;
99typedef struct { unsigned long pmdv[16]; } pmd_t;
100typedef unsigned long pgd_t;
101typedef unsigned long ctxd_t;
102typedef unsigned long pgprot_t;
103typedef unsigned long iopgprot_t;
104
105#define pte_val(x) (x)
106#define iopte_val(x) (x)
107#define pmd_val(x) ((x).pmdv[0])
108#define pgd_val(x) (x)
109#define ctxd_val(x) (x)
110#define pgprot_val(x) (x)
111#define iopgprot_val(x) (x)
112
113#define __pte(x) (x)
114#define __iopte(x) (x)
115/* #define __pmd(x) (x) */ /* XXX later */
116#define __pgd(x) (x)
117#define __ctxd(x) (x)
118#define __pgprot(x) (x)
119#define __iopgprot(x) (x)
120
121#endif
122
123typedef struct page *pgtable_t;
124
125extern unsigned long sparc_unmapped_base;
126
127BTFIXUPDEF_SETHI(sparc_unmapped_base)
128
129#define TASK_UNMAPPED_BASE BTFIXUP_SETHI(sparc_unmapped_base)
130
131#else /* !(__ASSEMBLY__) */
132
133#define __pgprot(x) (x)
134
135#endif /* !(__ASSEMBLY__) */
136
137/* to align the pointer to the (next) page boundary */
138#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
139
140#define PAGE_OFFSET 0xf0000000
141#ifndef __ASSEMBLY__
142extern unsigned long phys_base;
143extern unsigned long pfn_base;
144#endif
145#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + phys_base)
146#define __va(x) ((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET))
147
148#define virt_to_phys __pa
149#define phys_to_virt __va
150
151#define ARCH_PFN_OFFSET (pfn_base)
152#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
153
154#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
155#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
156
157#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
158 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
159
160#include <asm-generic/memory_model.h>
161#include <asm-generic/page.h>
162
163#endif /* _SPARC_PAGE_H */
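
A short sketch (not part of the patch) of what the STRICT_MM_TYPECHECKS switch above changes in practice: with the struct-wrapped types, handing a pgprot_t to code expecting a pte_t fails to compile, while the pte_val()/__pte() accessors keep the same source working in both configurations.

/* Illustrative only: compiles whether or not STRICT_MM_TYPECHECKS is set,
 * because all access goes through pte_val().
 */
static inline int example_pte_none(pte_t pte)
{
	return pte_val(pte) == 0;
}
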
diff --git a/include/asm-sparc/page_64.h b/include/asm-sparc/page_64.h
new file mode 100644
index 000000000000..93f0881b766e
--- /dev/null
+++ b/include/asm-sparc/page_64.h
@@ -0,0 +1,142 @@
1#ifndef _SPARC64_PAGE_H
2#define _SPARC64_PAGE_H
3
4#include <linux/const.h>
5
6#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
7#define PAGE_SHIFT 13
8#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
9#define PAGE_SHIFT 16
10#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
11#define PAGE_SHIFT 19
12#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
13#define PAGE_SHIFT 22
14#else
15#error No page size specified in kernel configuration
16#endif
17
18#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
19#define PAGE_MASK (~(PAGE_SIZE-1))
20
21/* Flushing for D-cache alias handling is only needed if
22 * the page size is smaller than 16K.
23 */
24#if PAGE_SHIFT < 14
25#define DCACHE_ALIASING_POSSIBLE
26#endif
27
28#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
29#define HPAGE_SHIFT 22
30#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
31#define HPAGE_SHIFT 19
32#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
33#define HPAGE_SHIFT 16
34#endif
35
36#ifdef CONFIG_HUGETLB_PAGE
37#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
38#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
39#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
40#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
41#endif
42
43#ifndef __ASSEMBLY__
44
45extern void _clear_page(void *page);
46#define clear_page(X) _clear_page((void *)(X))
47struct page;
48extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
49#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
50extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
51
52/* Unlike sparc32, sparc64's parameter passing API is more
53 * sane in that structures which as small enough are passed
54 * in registers instead of on the stack. Thus, setting
55 * STRICT_MM_TYPECHECKS does not generate worse code so
56 * let's enable it to get the type checking.
57 */
58
59#define STRICT_MM_TYPECHECKS
60
61#ifdef STRICT_MM_TYPECHECKS
62/* These are used to make use of C type-checking.. */
63typedef struct { unsigned long pte; } pte_t;
64typedef struct { unsigned long iopte; } iopte_t;
65typedef struct { unsigned int pmd; } pmd_t;
66typedef struct { unsigned int pgd; } pgd_t;
67typedef struct { unsigned long pgprot; } pgprot_t;
68
69#define pte_val(x) ((x).pte)
70#define iopte_val(x) ((x).iopte)
71#define pmd_val(x) ((x).pmd)
72#define pgd_val(x) ((x).pgd)
73#define pgprot_val(x) ((x).pgprot)
74
75#define __pte(x) ((pte_t) { (x) } )
76#define __iopte(x) ((iopte_t) { (x) } )
77#define __pmd(x) ((pmd_t) { (x) } )
78#define __pgd(x) ((pgd_t) { (x) } )
79#define __pgprot(x) ((pgprot_t) { (x) } )
80
81#else
82/* .. while these make it easier on the compiler */
83typedef unsigned long pte_t;
84typedef unsigned long iopte_t;
85typedef unsigned int pmd_t;
86typedef unsigned int pgd_t;
87typedef unsigned long pgprot_t;
88
89#define pte_val(x) (x)
90#define iopte_val(x) (x)
91#define pmd_val(x) (x)
92#define pgd_val(x) (x)
93#define pgprot_val(x) (x)
94
95#define __pte(x) (x)
96#define __iopte(x) (x)
97#define __pmd(x) (x)
98#define __pgd(x) (x)
99#define __pgprot(x) (x)
100
101#endif /* (STRICT_MM_TYPECHECKS) */
102
103typedef struct page *pgtable_t;
104
105#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
106 (_AC(0x0000000070000000,UL)) : \
107 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
108
109#include <asm-generic/memory_model.h>
110
111#endif /* !(__ASSEMBLY__) */
112
113/* to align the pointer to the (next) page boundary */
114#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
115
116/* We used to stick this into a hard-coded global register (%g4)
117 * but that does not make sense anymore.
118 */
119#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL)
120
121#ifndef __ASSEMBLY__
122
123#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
124#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
125
126#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
127
128#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
129
130#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
131
132#define virt_to_phys __pa
133#define phys_to_virt __va
134
135#endif /* !(__ASSEMBLY__) */
136
137#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
138 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
139
140#include <asm-generic/page.h>
141
142#endif /* _SPARC64_PAGE_H */
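
The STRICT_MM_TYPECHECKS block above is worth a small illustration. A minimal sketch, not taken from the patch, of why the one-member struct wrappers are useful: the compiler refuses to mix a wrapped page-table entry with a plain integer, so type errors surface at build time while the generated code stays the same.

typedef struct { unsigned long pte; } example_pte_t;	/* mirrors pte_t above */
#define example_pte_val(x)	((x).pte)
#define example__pte(x)		((example_pte_t) { (x) })

static unsigned long example_pte_to_pfn(example_pte_t pte, unsigned int page_shift)
{
	/* Must unwrap explicitly; a bare "pte >> page_shift" would not compile,
	 * which is exactly the check STRICT_MM_TYPECHECKS buys.
	 */
	return example_pte_val(pte) >> page_shift;
}
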
diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h
index b93b6c79e08f..b807d52a4809 100644
--- a/include/asm-sparc/pci.h
+++ b/include/asm-sparc/pci.h
@@ -1,170 +1,8 @@
1#ifndef __SPARC_PCI_H 1#ifndef ___ASM_SPARC_PCI_H
2#define __SPARC_PCI_H 2#define ___ASM_SPARC_PCI_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#ifdef __KERNEL__ 4#include <asm-sparc/pci_64.h>
5 5#else
6/* Can be used to override the logic in pci_scan_bus for skipping 6#include <asm-sparc/pci_32.h>
7 * already-configured bus numbers - to be used for buggy BIOSes 7#endif
8 * or architectures with incomplete PCI setup by the loader.
9 */
10#define pcibios_assign_all_busses() 0
11#define pcibios_scan_all_fns(a, b) 0
12
13#define PCIBIOS_MIN_IO 0UL
14#define PCIBIOS_MIN_MEM 0UL
15
16#define PCI_IRQ_NONE 0xffffffff
17
18static inline void pcibios_set_master(struct pci_dev *dev)
19{
20 /* No special bus mastering setup handling */
21}
22
23static inline void pcibios_penalize_isa_irq(int irq, int active)
24{
25 /* We don't do dynamic PCI IRQ allocation */
26}
27
28/* Dynamic DMA mapping stuff.
29 */
30#define PCI_DMA_BUS_IS_PHYS (0)
31
32#include <asm/scatterlist.h>
33
34struct pci_dev;
35
36/* Allocate and map kernel buffer using consistent mode DMA for a device.
37 * hwdev should be valid struct pci_dev pointer for PCI devices.
38 */
39extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
40
41/* Free and unmap a consistent DMA buffer.
42 * cpu_addr is what was returned from pci_alloc_consistent,
43 * size must be the same as what was passed into pci_alloc_consistent,
44 * and likewise dma_addr must be the same as what *dma_addrp was set to.
45 *
46 * References to the memory and mappings associated with cpu_addr/dma_addr
47 * past this call are illegal.
48 */
49extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
50
51/* Map a single buffer of the indicated size for DMA in streaming mode.
52 * The 32-bit bus address to use is returned.
53 *
54 * Once the device is given the dma address, the device owns this memory
55 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
56 */
57extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
58
59/* Unmap a single streaming mode DMA translation. The dma_addr and size
60 * must match what was provided for in a previous pci_map_single call. All
61 * other usages are undefined.
62 *
63 * After this call, reads by the cpu to the buffer are guaranteed to see
64 * whatever the device wrote there.
65 */
66extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
67
68/* pci_unmap_{single,page} is not a nop, thus... */
69#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
70 dma_addr_t ADDR_NAME;
71#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
72 __u32 LEN_NAME;
73#define pci_unmap_addr(PTR, ADDR_NAME) \
74 ((PTR)->ADDR_NAME)
75#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
76 (((PTR)->ADDR_NAME) = (VAL))
77#define pci_unmap_len(PTR, LEN_NAME) \
78 ((PTR)->LEN_NAME)
79#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
80 (((PTR)->LEN_NAME) = (VAL))
81
82/*
83 * Same as above, only with pages instead of mapped addresses.
84 */
85extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
86 unsigned long offset, size_t size, int direction);
87extern void pci_unmap_page(struct pci_dev *hwdev,
88 dma_addr_t dma_address, size_t size, int direction);
89
90/* Map a set of buffers described by scatterlist in streaming
91 * mode for DMA. This is the scatter-gather version of the
92 * above pci_map_single interface. Here the scatter gather list
93 * elements are each tagged with the appropriate dma address
94 * and length. They are obtained via sg_dma_{address,length}(SG).
95 *
96 * NOTE: An implementation may be able to use a smaller number of
97 * DMA address/length pairs than there are SG table elements.
98 * (for example via virtual mapping capabilities)
99 * The routine returns the number of addr/length pairs actually
100 * used, at most nents.
101 *
102 * Device ownership issues as mentioned above for pci_map_single are
103 * the same here.
104 */
105extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
106
107/* Unmap a set of streaming mode DMA translations.
108 * Again, cpu read rules concerning calls here are the same as for
109 * pci_unmap_single() above.
110 */
111extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
112
113/* Make physical memory consistent for a single
114 * streaming mode DMA translation after a transfer.
115 *
116 * If you perform a pci_map_single() but wish to interrogate the
117 * buffer using the cpu, yet do not wish to teardown the PCI dma
118 * mapping, you must call this function before doing so. At the
119 * next point you give the PCI dma address back to the card, you
120 * must first perform a pci_dma_sync_single_for_device, and then the device
121 * again owns the buffer.
122 */
123extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
124extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
125
126/* Make physical memory consistent for a set of streaming
127 * mode DMA translations after a transfer.
128 *
129 * The same as pci_dma_sync_single_* but for a scatter-gather list,
130 * same rules and usage.
131 */
132extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
133extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
134
135/* Return whether the given PCI device DMA address mask can
136 * be supported properly. For example, if your device can
137 * only drive the low 24-bits during PCI bus mastering, then
138 * you would pass 0x00ffffff as the mask to this function.
139 */
140static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
141{
142 return 1;
143}
144
145#ifdef CONFIG_PCI
146static inline void pci_dma_burst_advice(struct pci_dev *pdev,
147 enum pci_dma_burst_strategy *strat,
148 unsigned long *strategy_parameter)
149{
150 *strat = PCI_DMA_BURST_INFINITY;
151 *strategy_parameter = ~0UL;
152}
153#endif 8#endif
154
155#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
156
157static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
158{
159 return (dma_addr == PCI_DMA_ERROR_CODE);
160}
161
162struct device_node;
163extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
164
165#endif /* __KERNEL__ */
166
167/* generic pci stuff */
168#include <asm-generic/pci.h>
169
170#endif /* __SPARC_PCI_H */
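
The pci_map_sg()/pci_unmap_sg() comments above describe an ownership protocol that is easier to follow in code. A hedged usage sketch with a hypothetical driver function, the device-programming step elided, and only the calls declared above plus the standard PCI_DMA_* direction constants:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int example_send_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents)
{
	int mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);

	if (mapped == 0)
		return -EIO;

	/* Program the device using sg_dma_address()/sg_dma_length() of the
	 * first "mapped" entries; as noted above, this may be fewer than nents.
	 */

	/* Unmapping takes the original nents, not the value pci_map_sg() returned. */
	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
	return 0;
}
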
diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h
new file mode 100644
index 000000000000..b93b6c79e08f
--- /dev/null
+++ b/include/asm-sparc/pci_32.h
@@ -0,0 +1,170 @@
1#ifndef __SPARC_PCI_H
2#define __SPARC_PCI_H
3
4#ifdef __KERNEL__
5
6/* Can be used to override the logic in pci_scan_bus for skipping
7 * already-configured bus numbers - to be used for buggy BIOSes
8 * or architectures with incomplete PCI setup by the loader.
9 */
10#define pcibios_assign_all_busses() 0
11#define pcibios_scan_all_fns(a, b) 0
12
13#define PCIBIOS_MIN_IO 0UL
14#define PCIBIOS_MIN_MEM 0UL
15
16#define PCI_IRQ_NONE 0xffffffff
17
18static inline void pcibios_set_master(struct pci_dev *dev)
19{
20 /* No special bus mastering setup handling */
21}
22
23static inline void pcibios_penalize_isa_irq(int irq, int active)
24{
25 /* We don't do dynamic PCI IRQ allocation */
26}
27
28/* Dynamic DMA mapping stuff.
29 */
30#define PCI_DMA_BUS_IS_PHYS (0)
31
32#include <asm/scatterlist.h>
33
34struct pci_dev;
35
36/* Allocate and map kernel buffer using consistent mode DMA for a device.
37 * hwdev should be valid struct pci_dev pointer for PCI devices.
38 */
39extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
40
41/* Free and unmap a consistent DMA buffer.
42 * cpu_addr is what was returned from pci_alloc_consistent,
43 * size must be the same as what was passed into pci_alloc_consistent,
44 * and likewise dma_addr must be the same as what *dma_addrp was set to.
45 *
46 * References to the memory and mappings associated with cpu_addr/dma_addr
47 * past this call are illegal.
48 */
49extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
50
51/* Map a single buffer of the indicated size for DMA in streaming mode.
52 * The 32-bit bus address to use is returned.
53 *
54 * Once the device is given the dma address, the device owns this memory
55 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
56 */
57extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
58
59/* Unmap a single streaming mode DMA translation. The dma_addr and size
60 * must match what was provided for in a previous pci_map_single call. All
61 * other usages are undefined.
62 *
63 * After this call, reads by the cpu to the buffer are guaranteed to see
64 * whatever the device wrote there.
65 */
66extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
67
68/* pci_unmap_{single,page} is not a nop, thus... */
69#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
70 dma_addr_t ADDR_NAME;
71#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
72 __u32 LEN_NAME;
73#define pci_unmap_addr(PTR, ADDR_NAME) \
74 ((PTR)->ADDR_NAME)
75#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
76 (((PTR)->ADDR_NAME) = (VAL))
77#define pci_unmap_len(PTR, LEN_NAME) \
78 ((PTR)->LEN_NAME)
79#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
80 (((PTR)->LEN_NAME) = (VAL))
81
82/*
83 * Same as above, only with pages instead of mapped addresses.
84 */
85extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
86 unsigned long offset, size_t size, int direction);
87extern void pci_unmap_page(struct pci_dev *hwdev,
88 dma_addr_t dma_address, size_t size, int direction);
89
90/* Map a set of buffers described by scatterlist in streaming
91 * mode for DMA. This is the scatter-gather version of the
92 * above pci_map_single interface. Here the scatter gather list
93 * elements are each tagged with the appropriate dma address
94 * and length. They are obtained via sg_dma_{address,length}(SG).
95 *
96 * NOTE: An implementation may be able to use a smaller number of
97 * DMA address/length pairs than there are SG table elements.
98 * (for example via virtual mapping capabilities)
99 * The routine returns the number of addr/length pairs actually
100 * used, at most nents.
101 *
102 * Device ownership issues as mentioned above for pci_map_single are
103 * the same here.
104 */
105extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
106
107/* Unmap a set of streaming mode DMA translations.
108 * Again, cpu read rules concerning calls here are the same as for
109 * pci_unmap_single() above.
110 */
111extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
112
113/* Make physical memory consistent for a single
114 * streaming mode DMA translation after a transfer.
115 *
116 * If you perform a pci_map_single() but wish to interrogate the
117 * buffer using the cpu, yet do not wish to teardown the PCI dma
118 * mapping, you must call this function before doing so. At the
119 * next point you give the PCI dma address back to the card, you
120 * must first perform a pci_dma_sync_single_for_device, and then the device
121 * again owns the buffer.
122 */
123extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
124extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
125
126/* Make physical memory consistent for a set of streaming
127 * mode DMA translations after a transfer.
128 *
129 * The same as pci_dma_sync_single_* but for a scatter-gather list,
130 * same rules and usage.
131 */
132extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
133extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
134
135/* Return whether the given PCI device DMA address mask can
136 * be supported properly. For example, if your device can
137 * only drive the low 24-bits during PCI bus mastering, then
138 * you would pass 0x00ffffff as the mask to this function.
139 */
140static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
141{
142 return 1;
143}
144
145#ifdef CONFIG_PCI
146static inline void pci_dma_burst_advice(struct pci_dev *pdev,
147 enum pci_dma_burst_strategy *strat,
148 unsigned long *strategy_parameter)
149{
150 *strat = PCI_DMA_BURST_INFINITY;
151 *strategy_parameter = ~0UL;
152}
153#endif
154
155#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
156
157static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
158{
159 return (dma_addr == PCI_DMA_ERROR_CODE);
160}
161
162struct device_node;
163extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
164
165#endif /* __KERNEL__ */
166
167/* generic pci stuff */
168#include <asm-generic/pci.h>
169
170#endif /* __SPARC_PCI_H */
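
To make the ownership rules spelled out in the comments above concrete, here is a hedged sketch of the single-buffer streaming pattern; the driver function, buffer and direction are hypothetical, and it uses only the calls declared above together with the standard PCI_DMA_* direction constants:

#include <linux/errno.h>
#include <linux/pci.h>

static int example_receive(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(handle))
		return -ENOMEM;

	/* ... hand "handle" to the device and wait for the transfer ... */

	/* Give ownership back to the CPU before looking at the buffer. */
	pci_dma_sync_single_for_cpu(pdev, handle, len, PCI_DMA_FROMDEVICE);

	/* ... inspect buf, or re-sync for the device to reuse the mapping ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
	return 0;
}
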
diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h
new file mode 100644
index 000000000000..f59f2571295b
--- /dev/null
+++ b/include/asm-sparc/pci_64.h
@@ -0,0 +1,209 @@
1#ifndef __SPARC64_PCI_H
2#define __SPARC64_PCI_H
3
4#ifdef __KERNEL__
5
6#include <linux/dma-mapping.h>
7
8/* Can be used to override the logic in pci_scan_bus for skipping
9 * already-configured bus numbers - to be used for buggy BIOSes
10 * or architectures with incomplete PCI setup by the loader.
11 */
12#define pcibios_assign_all_busses() 0
13#define pcibios_scan_all_fns(a, b) 0
14
15#define PCIBIOS_MIN_IO 0UL
16#define PCIBIOS_MIN_MEM 0UL
17
18#define PCI_IRQ_NONE 0xffffffff
19
20#define PCI_CACHE_LINE_BYTES 64
21
22static inline void pcibios_set_master(struct pci_dev *dev)
23{
24 /* No special bus mastering setup handling */
25}
26
27static inline void pcibios_penalize_isa_irq(int irq, int active)
28{
29 /* We don't do dynamic PCI IRQ allocation */
30}
31
32/* The PCI address space does not equal the physical memory
33 * address space. The networking and block device layers use
34 * this boolean for bounce buffer decisions.
35 */
36#define PCI_DMA_BUS_IS_PHYS (0)
37
38static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
39 dma_addr_t *dma_handle)
40{
41 return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
42}
43
44static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
45 void *vaddr, dma_addr_t dma_handle)
46{
47 return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
48}
49
50static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
51 size_t size, int direction)
52{
53 return dma_map_single(&pdev->dev, ptr, size,
54 (enum dma_data_direction) direction);
55}
56
57static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
58 size_t size, int direction)
59{
60 dma_unmap_single(&pdev->dev, dma_addr, size,
61 (enum dma_data_direction) direction);
62}
63
64#define pci_map_page(dev, page, off, size, dir) \
65 pci_map_single(dev, (page_address(page) + (off)), size, dir)
66#define pci_unmap_page(dev,addr,sz,dir) \
67 pci_unmap_single(dev,addr,sz,dir)
68
69/* pci_unmap_{single,page} is not a nop, thus... */
70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
71 dma_addr_t ADDR_NAME;
72#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
73 __u32 LEN_NAME;
74#define pci_unmap_addr(PTR, ADDR_NAME) \
75 ((PTR)->ADDR_NAME)
76#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
77 (((PTR)->ADDR_NAME) = (VAL))
78#define pci_unmap_len(PTR, LEN_NAME) \
79 ((PTR)->LEN_NAME)
80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
81 (((PTR)->LEN_NAME) = (VAL))
82
83static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
84 int nents, int direction)
85{
86 return dma_map_sg(&pdev->dev, sg, nents,
87 (enum dma_data_direction) direction);
88}
89
90static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
91 int nents, int direction)
92{
93 dma_unmap_sg(&pdev->dev, sg, nents,
94 (enum dma_data_direction) direction);
95}
96
97static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
98 dma_addr_t dma_handle,
99 size_t size, int direction)
100{
101 dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
102 (enum dma_data_direction) direction);
103}
104
105static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
106 dma_addr_t dma_handle,
107 size_t size, int direction)
108{
109 /* No flushing needed to sync cpu writes to the device. */
110}
111
112static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
113 struct scatterlist *sg,
114 int nents, int direction)
115{
116 dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
117 (enum dma_data_direction) direction);
118}
119
120static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
121 struct scatterlist *sg,
122 int nelems, int direction)
123{
124 /* No flushing needed to sync cpu writes to the device. */
125}
126
127/* Return whether the given PCI device DMA address mask can
128 * be supported properly. For example, if your device can
129 * only drive the low 24-bits during PCI bus mastering, then
130 * you would pass 0x00ffffff as the mask to this function.
131 */
132extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
133
134/* PCI IOMMU mapping bypass support. */
135
136/* PCI 64-bit addressing works for all slots on all controller
137 * types on sparc64. However, it requires that the device
138 * can drive enough of the 64 bits.
139 */
140#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
141#define PCI64_ADDR_BASE 0xfffc000000000000UL
142
143static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
144{
145 return dma_mapping_error(dma_addr);
146}
147
148#ifdef CONFIG_PCI
149static inline void pci_dma_burst_advice(struct pci_dev *pdev,
150 enum pci_dma_burst_strategy *strat,
151 unsigned long *strategy_parameter)
152{
153 unsigned long cacheline_size;
154 u8 byte;
155
156 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
157 if (byte == 0)
158 cacheline_size = 1024;
159 else
160 cacheline_size = (int) byte * 4;
161
162 *strat = PCI_DMA_BURST_BOUNDARY;
163 *strategy_parameter = cacheline_size;
164}
165#endif
166
167/* Return the index of the PCI controller for device PDEV. */
168
169extern int pci_domain_nr(struct pci_bus *bus);
170static inline int pci_proc_domain(struct pci_bus *bus)
171{
172 return 1;
173}
174
175/* Platform support for /proc/bus/pci/X/Y mmap()s. */
176
177#define HAVE_PCI_MMAP
178#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
179#define get_pci_unmapped_area get_fb_unmapped_area
180
181extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
182 enum pci_mmap_state mmap_state,
183 int write_combine);
184
185extern void
186pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
187 struct resource *res);
188
189extern void
190pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
191 struct pci_bus_region *region);
192
193extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);
194
195static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
196{
197 return PCI_IRQ_NONE;
198}
199
200struct device_node;
201extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
202
203#define HAVE_ARCH_PCI_RESOURCE_TO_USER
204extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
205 const struct resource *rsrc,
206 resource_size_t *start, resource_size_t *end);
207#endif /* __KERNEL__ */
208
209#endif /* __SPARC64_PCI_H */
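
A small aside on pci_dma_burst_advice() above: the PCI_CACHE_LINE_SIZE config byte counts 32-bit words, so the helper scales it by four to get bytes and treats a zero register as unknown, falling back to a 1024-byte boundary. An illustrative standalone version of just that conversion (the names are made up):

#include <linux/types.h>

static unsigned long example_cacheline_bytes(u8 cfg_byte)
{
	/* e.g. 0x10 words -> 64 bytes; 0 means "not set", use the fallback. */
	return cfg_byte ? (unsigned long)cfg_byte * 4 : 1024;
}
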
diff --git a/include/asm-sparc/percpu.h b/include/asm-sparc/percpu.h
index 06066a7aaec3..d98ed6cf2e36 100644
--- a/include/asm-sparc/percpu.h
+++ b/include/asm-sparc/percpu.h
@@ -1,6 +1,8 @@
1#ifndef __ARCH_SPARC_PERCPU__ 1#ifndef ___ASM_SPARC_PERCPU_H
2#define __ARCH_SPARC_PERCPU__ 2#define ___ASM_SPARC_PERCPU_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-generic/percpu.h> 4#include <asm-sparc/percpu_64.h>
5 5#else
6#endif /* __ARCH_SPARC_PERCPU__ */ 6#include <asm-sparc/percpu_32.h>
7#endif
8#endif
diff --git a/include/asm-sparc/percpu_32.h b/include/asm-sparc/percpu_32.h
new file mode 100644
index 000000000000..06066a7aaec3
--- /dev/null
+++ b/include/asm-sparc/percpu_32.h
@@ -0,0 +1,6 @@
1#ifndef __ARCH_SPARC_PERCPU__
2#define __ARCH_SPARC_PERCPU__
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ARCH_SPARC_PERCPU__ */
diff --git a/include/asm-sparc/percpu_64.h b/include/asm-sparc/percpu_64.h
new file mode 100644
index 000000000000..bee64593023e
--- /dev/null
+++ b/include/asm-sparc/percpu_64.h
@@ -0,0 +1,28 @@
1#ifndef __ARCH_SPARC64_PERCPU__
2#define __ARCH_SPARC64_PERCPU__
3
4#include <linux/compiler.h>
5
6register unsigned long __local_per_cpu_offset asm("g5");
7
8#ifdef CONFIG_SMP
9
10extern void real_setup_per_cpu_areas(void);
11
12extern unsigned long __per_cpu_base;
13extern unsigned long __per_cpu_shift;
14#define __per_cpu_offset(__cpu) \
15 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
16#define per_cpu_offset(x) (__per_cpu_offset(x))
17
18#define __my_cpu_offset __local_per_cpu_offset
19
20#else /* ! SMP */
21
22#define real_setup_per_cpu_areas() do { } while (0)
23
24#endif /* SMP */
25
26#include <asm-generic/percpu.h>
27
28#endif /* __ARCH_SPARC64_PERCPU__ */
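
The SMP offset macros above boil down to simple address arithmetic: each CPU's copy of the per-cpu area is (1UL << __per_cpu_shift) bytes wide, and per_cpu_offset(cpu) is added to a per-cpu variable's link-time address to reach that CPU's copy (how __per_cpu_base gets its value is up to real_setup_per_cpu_areas() and is not shown here). A hedged sketch of that arithmetic with stand-in parameter names:

static unsigned long example_per_cpu_addr(unsigned long var_link_addr,
					  unsigned long per_cpu_base,
					  unsigned long per_cpu_shift,
					  unsigned int cpu)
{
	/* per_cpu_offset(cpu) above, added to the variable's link-time address. */
	return var_link_addr + per_cpu_base +
	       ((unsigned long)cpu << per_cpu_shift);
}
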
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index 681582d26969..7fa02b53d392 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -1,68 +1,8 @@
1#ifndef _SPARC_PGALLOC_H 1#ifndef ___ASM_SPARC_PGALLOC_H
2#define _SPARC_PGALLOC_H 2#define ___ASM_SPARC_PGALLOC_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/kernel.h> 4#include <asm-sparc/pgalloc_64.h>
5#include <linux/sched.h> 5#else
6 6#include <asm-sparc/pgalloc_32.h>
7#include <asm/page.h> 7#endif
8#include <asm/btfixup.h> 8#endif
9
10struct page;
11
12extern struct pgtable_cache_struct {
13 unsigned long *pgd_cache;
14 unsigned long *pte_cache;
15 unsigned long pgtable_cache_sz;
16 unsigned long pgd_cache_sz;
17} pgt_quicklists;
18#define pgd_quicklist (pgt_quicklists.pgd_cache)
19#define pmd_quicklist ((unsigned long *)0)
20#define pte_quicklist (pgt_quicklists.pte_cache)
21#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
22#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
23
24extern void check_pgt_cache(void);
25BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int)
26#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
27
28BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
29#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
30
31BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
32#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
33
34#define pgd_free(mm, pgd) free_pgd_fast(pgd)
35#define pgd_alloc(mm) get_pgd_fast()
36
37BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
38#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
39#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
40
41BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
42#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
43
44BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
45#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
46
47#define pmd_free(mm, pmd) free_pmd_fast(pmd)
48#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
49
50BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
51#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
52#define pmd_pgtable(pmd) pmd_page(pmd)
53BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
54#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
55
56BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
57#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
58BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
59#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
60
61BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
62#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
63
64BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
65#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
66#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
67
68#endif /* _SPARC_PGALLOC_H */
diff --git a/include/asm-sparc/pgalloc_32.h b/include/asm-sparc/pgalloc_32.h
new file mode 100644
index 000000000000..681582d26969
--- /dev/null
+++ b/include/asm-sparc/pgalloc_32.h
@@ -0,0 +1,68 @@
1#ifndef _SPARC_PGALLOC_H
2#define _SPARC_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6
7#include <asm/page.h>
8#include <asm/btfixup.h>
9
10struct page;
11
12extern struct pgtable_cache_struct {
13 unsigned long *pgd_cache;
14 unsigned long *pte_cache;
15 unsigned long pgtable_cache_sz;
16 unsigned long pgd_cache_sz;
17} pgt_quicklists;
18#define pgd_quicklist (pgt_quicklists.pgd_cache)
19#define pmd_quicklist ((unsigned long *)0)
20#define pte_quicklist (pgt_quicklists.pte_cache)
21#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
22#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
23
24extern void check_pgt_cache(void);
25BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int)
26#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
27
28BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
29#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
30
31BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
32#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
33
34#define pgd_free(mm, pgd) free_pgd_fast(pgd)
35#define pgd_alloc(mm) get_pgd_fast()
36
37BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
38#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
39#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
40
41BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
42#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
43
44BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
45#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
46
47#define pmd_free(mm, pmd) free_pmd_fast(pmd)
48#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
49
50BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
51#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
52#define pmd_pgtable(pmd) pmd_page(pmd)
53BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
54#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
55
56BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
57#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
58BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
59#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
60
61BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
62#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
63
64BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
65#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
66#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
67
68#endif /* _SPARC_PGALLOC_H */
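
The BTFIXUPDEF_CALL/BTFIXUP_CALL pairs that dominate this header are sparc32's boot-time fixup mechanism: one kernel image supports several MMUs, the right implementation is picked while booting, and the call sites are patched so the selection has no per-call cost. As a rough analogy only (this is not how BTFIXUP is implemented), a function pointer chosen once at startup models the same late binding:

struct mm_struct;
typedef struct { unsigned long pgd; } example_pgd_t;

/* Filled in once by a hypothetical MMU probe at boot. */
static example_pgd_t *(*example_get_pgd_fast)(void);

static example_pgd_t *example_pgd_alloc(struct mm_struct *mm)
{
	/* BTFIXUP instead rewrites the call instruction itself. */
	return example_get_pgd_fast();
}
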
diff --git a/include/asm-sparc/pgalloc_64.h b/include/asm-sparc/pgalloc_64.h
new file mode 100644
index 000000000000..5bdfa2c6e400
--- /dev/null
+++ b/include/asm-sparc/pgalloc_64.h
@@ -0,0 +1,81 @@
1#ifndef _SPARC64_PGALLOC_H
2#define _SPARC64_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/mm.h>
7#include <linux/slab.h>
8#include <linux/quicklist.h>
9
10#include <asm/spitfire.h>
11#include <asm/cpudata.h>
12#include <asm/cacheflush.h>
13#include <asm/page.h>
14
15/* Page table allocation/freeing. */
16
17static inline pgd_t *pgd_alloc(struct mm_struct *mm)
18{
19 return quicklist_alloc(0, GFP_KERNEL, NULL);
20}
21
22static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23{
24 quicklist_free(0, NULL, pgd);
25}
26
27#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
28
29static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
30{
31 return quicklist_alloc(0, GFP_KERNEL, NULL);
32}
33
34static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
35{
36 quicklist_free(0, NULL, pmd);
37}
38
39static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
40 unsigned long address)
41{
42 return quicklist_alloc(0, GFP_KERNEL, NULL);
43}
44
45static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
46 unsigned long address)
47{
48 struct page *page;
49 void *pg;
50
51 pg = quicklist_alloc(0, GFP_KERNEL, NULL);
52 if (!pg)
53 return NULL;
54 page = virt_to_page(pg);
55 pgtable_page_ctor(page);
56 return page;
57}
58
59static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
60{
61 quicklist_free(0, NULL, pte);
62}
63
64static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
65{
66 pgtable_page_dtor(ptepage);
67 quicklist_free_page(0, NULL, ptepage);
68}
69
70
71#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
72#define pmd_populate(MM,PMD,PTE_PAGE) \
73 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
74#define pmd_pgtable(pmd) pmd_page(pmd)
75
76static inline void check_pgt_cache(void)
77{
78 quicklist_trim(0, NULL, 25, 16);
79}
80
81#endif /* _SPARC64_PGALLOC_H */
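
For context, a hedged sketch of how the allocator pair above is typically consumed when a user page table is needed; the function name is hypothetical, and the locking plus the recheck for an already-populated pmd that the real mm code performs are omitted:

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>

static int example_install_user_pte(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;
	/* pmd_populate() above stores page_address(new) into the pmd entry. */
	pmd_populate(mm, pmd, new);
	return 0;
}
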
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index b63ac6b4119f..63cdef53bc52 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -1,480 +1,8 @@
1#ifndef _SPARC_PGTABLE_H 1#ifndef ___ASM_SPARC_PGTABLE_H
2#define _SPARC_PGTABLE_H 2#define ___ASM_SPARC_PGTABLE_H
3 3#if defined(__sparc__) && defined(__arch64__)
4/* asm-sparc/pgtable.h: Defines and functions used to work 4#include <asm-sparc/pgtable_64.h>
5 * with Sparc page tables.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#ifndef __ASSEMBLY__
12#include <asm-generic/4level-fixup.h>
13
14#include <linux/spinlock.h>
15#include <linux/swap.h>
16#include <asm/types.h>
17#ifdef CONFIG_SUN4
18#include <asm/pgtsun4.h>
19#else 5#else
20#include <asm/pgtsun4c.h> 6#include <asm-sparc/pgtable_32.h>
21#endif 7#endif
22#include <asm/pgtsrmmu.h>
23#include <asm/vac-ops.h>
24#include <asm/oplib.h>
25#include <asm/btfixup.h>
26#include <asm/system.h>
27
28
29struct vm_area_struct;
30struct page;
31
32extern void load_mmu(void);
33extern unsigned long calc_highpages(void);
34
35BTFIXUPDEF_SIMM13(pgdir_shift)
36BTFIXUPDEF_SETHI(pgdir_size)
37BTFIXUPDEF_SETHI(pgdir_mask)
38
39BTFIXUPDEF_SIMM13(ptrs_per_pmd)
40BTFIXUPDEF_SIMM13(ptrs_per_pgd)
41BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
42
43#define pte_ERROR(e) __builtin_trap()
44#define pmd_ERROR(e) __builtin_trap()
45#define pgd_ERROR(e) __builtin_trap()
46
47BTFIXUPDEF_INT(page_none)
48BTFIXUPDEF_INT(page_copy)
49BTFIXUPDEF_INT(page_readonly)
50BTFIXUPDEF_INT(page_kernel)
51
52#define PMD_SHIFT SUN4C_PMD_SHIFT
53#define PMD_SIZE (1UL << PMD_SHIFT)
54#define PMD_MASK (~(PMD_SIZE-1))
55#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
56#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
57#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
58#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
59#define PTRS_PER_PTE 1024
60#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
61#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
62#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
63#define FIRST_USER_ADDRESS 0
64#define PTE_SIZE (PTRS_PER_PTE*4)
65
66#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
67extern pgprot_t PAGE_SHARED;
68#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
69#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
70
71extern unsigned long page_kernel;
72
73#ifdef MODULE
74#define PAGE_KERNEL page_kernel
75#else
76#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
77#endif 8#endif
78
79/* Top-level page directory */
80extern pgd_t swapper_pg_dir[1024];
81
82extern void paging_init(void);
83
84/* Page table for 0-4MB for everybody, on the Sparc this
85 * holds the same as on the i386.
86 */
87extern pte_t pg0[1024];
88extern pte_t pg1[1024];
89extern pte_t pg2[1024];
90extern pte_t pg3[1024];
91
92extern unsigned long ptr_in_current_pgd;
93
94/* Here is a trick, since mmap.c needs the initializer elements for
95 * protection_map[] to be constant at compile time, I set the following
96 * to all zeros. I set it to the real values after I link in the
97 * appropriate MMU page table routines at boot time.
98 */
99#define __P000 __pgprot(0)
100#define __P001 __pgprot(0)
101#define __P010 __pgprot(0)
102#define __P011 __pgprot(0)
103#define __P100 __pgprot(0)
104#define __P101 __pgprot(0)
105#define __P110 __pgprot(0)
106#define __P111 __pgprot(0)
107
108#define __S000 __pgprot(0)
109#define __S001 __pgprot(0)
110#define __S010 __pgprot(0)
111#define __S011 __pgprot(0)
112#define __S100 __pgprot(0)
113#define __S101 __pgprot(0)
114#define __S110 __pgprot(0)
115#define __S111 __pgprot(0)
116
117extern int num_contexts;
118
119/* First physical page can be anywhere, the following is needed so that
120 * va-->pa and vice versa conversions work properly without a performance
121 * hit for all __pa()/__va() operations.
122 */
123extern unsigned long phys_base;
124extern unsigned long pfn_base;
125
126/*
127 * BAD_PAGETABLE is used when we need a bogus page-table, while
128 * BAD_PAGE is used for a bogus page.
129 *
130 * ZERO_PAGE is a global shared page that is always zero: used
131 * for zero-mapped memory areas etc..
132 */
133extern pte_t * __bad_pagetable(void);
134extern pte_t __bad_page(void);
135extern unsigned long empty_zero_page;
136
137#define BAD_PAGETABLE __bad_pagetable()
138#define BAD_PAGE __bad_page()
139#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
140
141/*
142 */
143BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
144BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
145
146#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
147#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
148
149BTFIXUPDEF_SETHI(none_mask)
150BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
151BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
152
153static inline int pte_none(pte_t pte)
154{
155 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
156}
157
158#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
159#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
160
161BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
162BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
163BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
164
165static inline int pmd_none(pmd_t pmd)
166{
167 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
168}
169
170#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
171#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
172#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
173
174BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
175BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
176BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
177BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
178
179#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
180#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
181#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
182#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
183
184/*
185 * The following only work if pte_present() is true.
186 * Undefined behaviour if not..
187 */
188BTFIXUPDEF_HALF(pte_writei)
189BTFIXUPDEF_HALF(pte_dirtyi)
190BTFIXUPDEF_HALF(pte_youngi)
191
192static int pte_write(pte_t pte) __attribute_const__;
193static inline int pte_write(pte_t pte)
194{
195 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
196}
197
198static int pte_dirty(pte_t pte) __attribute_const__;
199static inline int pte_dirty(pte_t pte)
200{
201 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
202}
203
204static int pte_young(pte_t pte) __attribute_const__;
205static inline int pte_young(pte_t pte)
206{
207 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
208}
209
210/*
211 * The following only work if pte_present() is not true.
212 */
213BTFIXUPDEF_HALF(pte_filei)
214
215static int pte_file(pte_t pte) __attribute_const__;
216static inline int pte_file(pte_t pte)
217{
218 return pte_val(pte) & BTFIXUP_HALF(pte_filei);
219}
220
221static inline int pte_special(pte_t pte)
222{
223 return 0;
224}
225
226/*
227 */
228BTFIXUPDEF_HALF(pte_wrprotecti)
229BTFIXUPDEF_HALF(pte_mkcleani)
230BTFIXUPDEF_HALF(pte_mkoldi)
231
232static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
233static inline pte_t pte_wrprotect(pte_t pte)
234{
235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
236}
237
238static pte_t pte_mkclean(pte_t pte) __attribute_const__;
239static inline pte_t pte_mkclean(pte_t pte)
240{
241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
242}
243
244static pte_t pte_mkold(pte_t pte) __attribute_const__;
245static inline pte_t pte_mkold(pte_t pte)
246{
247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
248}
249
250BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
251BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
252BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
253
254#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
255#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
256#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
257
258#define pte_mkspecial(pte) (pte)
259
260#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
261
262BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
263#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
264#define pte_page(pte) pfn_to_page(pte_pfn(pte))
265
266/*
267 * Conversion functions: convert a page and protection to a page entry,
268 * and a page entry and page directory to the page they refer to.
269 */
270BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
271
272BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
273BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
274BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
275
276#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
277#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
278#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
279
280#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
281
282BTFIXUPDEF_INT(pte_modify_mask)
283
284static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
285static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
286{
287 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
288 pgprot_val(newprot));
289}
290
291#define pgd_index(address) ((address) >> PGDIR_SHIFT)
292
293/* to find an entry in a page-table-directory */
294#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
295
296/* to find an entry in a kernel page-table-directory */
297#define pgd_offset_k(address) pgd_offset(&init_mm, address)
298
299/* Find an entry in the second-level page table.. */
300BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
301#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
302
303/* Find an entry in the third-level page table.. */
304BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
305#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
306
307/*
308 * This shortcut works on sun4m (and sun4d) because the nocache area is static,
309 * and sun4c is guaranteed to have no highmem anyway.
310 */
311#define pte_offset_map(d, a) pte_offset_kernel(d,a)
312#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
313
314#define pte_unmap(pte) do{}while(0)
315#define pte_unmap_nested(pte) do{}while(0)
316
317/* Certain architectures need to do special things when pte's
318 * within a page table are directly modified. Thus, the following
319 * hook is made available.
320 */
321
322BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
323
324#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
325#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
326
327struct seq_file;
328BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
329
330#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
331
332/* Fault handler stuff... */
333#define FAULT_CODE_PROT 0x1
334#define FAULT_CODE_WRITE 0x2
335#define FAULT_CODE_USER 0x4
336
337BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
338
339#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
340
341BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
342 unsigned long, unsigned int)
343BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
344#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
345#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
346
347extern int invalid_segment;
348
349/* Encode and de-code a swap entry */
350BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
351BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
352BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
353
354#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
355#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
356#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
357
358#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
359#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
360
361/* file-offset-in-pte helpers */
362BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
363BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
364
365#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
366#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
367
368/*
369 * This is made a constant because mm/fremap.c required a constant.
370 * Note that layout of these bits is different between sun4c.c and srmmu.c.
371 */
372#define PTE_FILE_MAX_BITS 24
373
374/*
375 */
376struct ctx_list {
377 struct ctx_list *next;
378 struct ctx_list *prev;
379 unsigned int ctx_number;
380 struct mm_struct *ctx_mm;
381};
382
383extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
384extern struct ctx_list ctx_free; /* Head of free list */
385extern struct ctx_list ctx_used; /* Head of used contexts list */
386
387#define NO_CONTEXT -1
388
389static inline void remove_from_ctx_list(struct ctx_list *entry)
390{
391 entry->next->prev = entry->prev;
392 entry->prev->next = entry->next;
393}
394
395static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
396{
397 entry->next = head;
398 (entry->prev = head->prev)->next = entry;
399 head->prev = entry;
400}
401#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
402#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
403
404static inline unsigned long
405__get_phys (unsigned long addr)
406{
407 switch (sparc_cpu_model){
408 case sun4:
409 case sun4c:
410 return sun4c_get_pte (addr) << PAGE_SHIFT;
411 case sun4m:
412 case sun4d:
413 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
414 default:
415 return 0;
416 }
417}
418
419static inline int
420__get_iospace (unsigned long addr)
421{
422 switch (sparc_cpu_model){
423 case sun4:
424 case sun4c:
425 return -1; /* Don't check iospace on sun4c */
426 case sun4m:
427 case sun4d:
428 return (srmmu_get_pte (addr) >> 28);
429 default:
430 return -1;
431 }
432}
433
434extern unsigned long *sparc_valid_addr_bitmap;
435
436/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
437#define kern_addr_valid(addr) \
438 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
439
440extern int io_remap_pfn_range(struct vm_area_struct *vma,
441 unsigned long from, unsigned long pfn,
442 unsigned long size, pgprot_t prot);
443
444/*
445 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
446 * its high 4 bits. These macros/functions put it there or get it from there.
447 */
448#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
449#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
450#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
451
452#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
453#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
454({ \
455 int __changed = !pte_same(*(__ptep), __entry); \
456 if (__changed) { \
457 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
458 flush_tlb_page(__vma, __address); \
459 } \
460 (sparc_cpu_model == sun4c) || __changed; \
461})
462
463#include <asm-generic/pgtable.h>
464
465#endif /* !(__ASSEMBLY__) */
466
467#define VMALLOC_START 0xfe600000
468/* XXX Alter this when I get around to fixing sun4c - Anton */
469#define VMALLOC_END 0xffc00000
470
471
472/* We provide our own get_unmapped_area to cope with VA holes for userland */
473#define HAVE_ARCH_UNMAPPED_AREA
474
475/*
476 * No page table caches to initialise
477 */
478#define pgtable_cache_init() do { } while (0)
479
480#endif /* !(_SPARC_PGTABLE_H) */
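
The MK_IOSPACE_PFN/GET_IOSPACE/GET_PFN macros above pack the I/O space number into the top four bits of the pfn handed to io_remap_pfn_range(). A worked, standalone illustration with stand-in names (BITS_PER_LONG taken as 32, as on sparc32):

#define EX_BITS_PER_LONG		32
#define EX_MK_IOSPACE_PFN(space, pfn)	((pfn) | ((space) << (EX_BITS_PER_LONG - 4)))
#define EX_GET_IOSPACE(pfn)		((pfn) >> (EX_BITS_PER_LONG - 4))
#define EX_GET_PFN(pfn)			((pfn) & 0x0fffffffUL)

/* EX_MK_IOSPACE_PFN(2UL, 0x1234UL) == 0x20001234UL
 * EX_GET_IOSPACE(0x20001234UL)     == 2
 * EX_GET_PFN(0x20001234UL)         == 0x1234UL
 */
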
diff --git a/include/asm-sparc/pgtable_32.h b/include/asm-sparc/pgtable_32.h
new file mode 100644
index 000000000000..781bd4694a1c
--- /dev/null
+++ b/include/asm-sparc/pgtable_32.h
@@ -0,0 +1,480 @@
1#ifndef _SPARC_PGTABLE_H
2#define _SPARC_PGTABLE_H
3
4/* asm-sparc/pgtable.h: Defines and functions used to work
5 * with Sparc page tables.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#ifndef __ASSEMBLY__
12#include <asm-generic/4level-fixup.h>
13
14#include <linux/spinlock.h>
15#include <linux/swap.h>
16#include <asm/types.h>
17#ifdef CONFIG_SUN4
18#include <asm/pgtsun4.h>
19#else
20#include <asm/pgtsun4c.h>
21#endif
22#include <asm/pgtsrmmu.h>
23#include <asm/vac-ops.h>
24#include <asm/oplib.h>
25#include <asm/btfixup.h>
26#include <asm/system.h>
27
28
29struct vm_area_struct;
30struct page;
31
32extern void load_mmu(void);
33extern unsigned long calc_highpages(void);
34
35BTFIXUPDEF_SIMM13(pgdir_shift)
36BTFIXUPDEF_SETHI(pgdir_size)
37BTFIXUPDEF_SETHI(pgdir_mask)
38
39BTFIXUPDEF_SIMM13(ptrs_per_pmd)
40BTFIXUPDEF_SIMM13(ptrs_per_pgd)
41BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
42
43#define pte_ERROR(e) __builtin_trap()
44#define pmd_ERROR(e) __builtin_trap()
45#define pgd_ERROR(e) __builtin_trap()
46
47BTFIXUPDEF_INT(page_none)
48BTFIXUPDEF_INT(page_copy)
49BTFIXUPDEF_INT(page_readonly)
50BTFIXUPDEF_INT(page_kernel)
51
52#define PMD_SHIFT SUN4C_PMD_SHIFT
53#define PMD_SIZE (1UL << PMD_SHIFT)
54#define PMD_MASK (~(PMD_SIZE-1))
55#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
56#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
57#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
58#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
59#define PTRS_PER_PTE 1024
60#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
61#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
62#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
63#define FIRST_USER_ADDRESS 0
64#define PTE_SIZE (PTRS_PER_PTE*4)
65
66#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
67extern pgprot_t PAGE_SHARED;
68#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
69#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
70
71extern unsigned long page_kernel;
72
73#ifdef MODULE
74#define PAGE_KERNEL page_kernel
75#else
76#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
77#endif
78
79/* Top-level page directory */
80extern pgd_t swapper_pg_dir[1024];
81
82extern void paging_init(void);
83
84/* Page table for 0-4MB for everybody, on the Sparc this
85 * holds the same as on the i386.
86 */
87extern pte_t pg0[1024];
88extern pte_t pg1[1024];
89extern pte_t pg2[1024];
90extern pte_t pg3[1024];
91
92extern unsigned long ptr_in_current_pgd;
93
94/* Here is a trick, since mmap.c needs the initializer elements for
95 * protection_map[] to be constant at compile time, I set the following
96 * to all zeros. I set it to the real values after I link in the
97 * appropriate MMU page table routines at boot time.
98 */
99#define __P000 __pgprot(0)
100#define __P001 __pgprot(0)
101#define __P010 __pgprot(0)
102#define __P011 __pgprot(0)
103#define __P100 __pgprot(0)
104#define __P101 __pgprot(0)
105#define __P110 __pgprot(0)
106#define __P111 __pgprot(0)
107
108#define __S000 __pgprot(0)
109#define __S001 __pgprot(0)
110#define __S010 __pgprot(0)
111#define __S011 __pgprot(0)
112#define __S100 __pgprot(0)
113#define __S101 __pgprot(0)
114#define __S110 __pgprot(0)
115#define __S111 __pgprot(0)
116
117extern int num_contexts;
118
119/* First physical page can be anywhere, the following is needed so that
120 * va-->pa and vice versa conversions work properly without a performance
121 * hit for all __pa()/__va() operations.
122 */
123extern unsigned long phys_base;
124extern unsigned long pfn_base;
125
126/*
127 * BAD_PAGETABLE is used when we need a bogus page-table, while
128 * BAD_PAGE is used for a bogus page.
129 *
130 * ZERO_PAGE is a global shared page that is always zero: used
131 * for zero-mapped memory areas etc..
132 */
133extern pte_t * __bad_pagetable(void);
134extern pte_t __bad_page(void);
135extern unsigned long empty_zero_page;
136
137#define BAD_PAGETABLE __bad_pagetable()
138#define BAD_PAGE __bad_page()
139#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
140
141/*
142 */
143BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
144BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
145
146#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
147#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
148
149BTFIXUPDEF_SETHI(none_mask)
150BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
151BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
152
153static inline int pte_none(pte_t pte)
154{
155 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
156}
157
158#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
159#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
160
161BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
162BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
163BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
164
165static inline int pmd_none(pmd_t pmd)
166{
167 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
168}
169
170#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
171#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
172#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
173
174BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
175BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
176BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
177BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
178
179#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
180#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
181#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
182#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
183
184/*
185 * The following only work if pte_present() is true.
186 * Undefined behaviour if not..
187 */
188BTFIXUPDEF_HALF(pte_writei)
189BTFIXUPDEF_HALF(pte_dirtyi)
190BTFIXUPDEF_HALF(pte_youngi)
191
192static int pte_write(pte_t pte) __attribute_const__;
193static inline int pte_write(pte_t pte)
194{
195 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
196}
197
198static int pte_dirty(pte_t pte) __attribute_const__;
199static inline int pte_dirty(pte_t pte)
200{
201 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
202}
203
204static int pte_young(pte_t pte) __attribute_const__;
205static inline int pte_young(pte_t pte)
206{
207 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
208}
209
210/*
211 * The following only work if pte_present() is not true.
212 */
213BTFIXUPDEF_HALF(pte_filei)
214
215static int pte_file(pte_t pte) __attribute_const__;
216static inline int pte_file(pte_t pte)
217{
218 return pte_val(pte) & BTFIXUP_HALF(pte_filei);
219}
220
221static inline int pte_special(pte_t pte)
222{
223 return 0;
224}
225
226/*
227 */
228BTFIXUPDEF_HALF(pte_wrprotecti)
229BTFIXUPDEF_HALF(pte_mkcleani)
230BTFIXUPDEF_HALF(pte_mkoldi)
231
232static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
233static inline pte_t pte_wrprotect(pte_t pte)
234{
235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
236}
237
238static pte_t pte_mkclean(pte_t pte) __attribute_const__;
239static inline pte_t pte_mkclean(pte_t pte)
240{
241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
242}
243
244static pte_t pte_mkold(pte_t pte) __attribute_const__;
245static inline pte_t pte_mkold(pte_t pte)
246{
247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
248}
249
250BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
251BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
252BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
253
254#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
255#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
256#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
257
258#define pte_mkspecial(pte) (pte)
259
260#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
261
262BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
263#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
264#define pte_page(pte) pfn_to_page(pte_pfn(pte))
265
266/*
267 * Conversion functions: convert a page and protection to a page entry,
268 * and a page entry and page directory to the page they refer to.
269 */
270BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
271
272BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
273BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
274BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
275
276#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
277#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
278#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
279
280#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
281
282BTFIXUPDEF_INT(pte_modify_mask)
283
284static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
285static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
286{
287 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
288 pgprot_val(newprot));
289}
290
291#define pgd_index(address) ((address) >> PGDIR_SHIFT)
292
293/* to find an entry in a page-table-directory */
294#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
295
296/* to find an entry in a kernel page-table-directory */
297#define pgd_offset_k(address) pgd_offset(&init_mm, address)
298
299/* Find an entry in the second-level page table.. */
300BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
301#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
302
303/* Find an entry in the third-level page table.. */
304BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
305#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
306
307/*
308 * This shortcut works on sun4m (and sun4d) because the nocache area is static,
309 * and sun4c is guaranteed to have no highmem anyway.
310 */
311#define pte_offset_map(d, a) pte_offset_kernel(d,a)
312#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
313
314#define pte_unmap(pte) do{}while(0)
315#define pte_unmap_nested(pte) do{}while(0)
316
317/* Certain architectures need to do special things when pte's
318 * within a page table are directly modified. Thus, the following
319 * hook is made available.
320 */
321
322BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
323
324#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
325#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
326
327struct seq_file;
328BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
329
330#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
331
332/* Fault handler stuff... */
333#define FAULT_CODE_PROT 0x1
334#define FAULT_CODE_WRITE 0x2
335#define FAULT_CODE_USER 0x4
336
337BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
338
339#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
340
341BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
342 unsigned long, unsigned int)
343BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
344#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
345#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
346
347extern int invalid_segment;
348
349/* Encode and de-code a swap entry */
350BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
351BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
352BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
353
354#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
355#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
356#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
357
358#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
359#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
360
361/* file-offset-in-pte helpers */
362BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
363BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
364
365#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
366#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
367
368/*
369 * This is made a constant because mm/fremap.c required a constant.
370 * Note that layout of these bits is different between sun4c.c and srmmu.c.
371 */
372#define PTE_FILE_MAX_BITS 24
373
374/*
375 */
376struct ctx_list {
377 struct ctx_list *next;
378 struct ctx_list *prev;
379 unsigned int ctx_number;
380 struct mm_struct *ctx_mm;
381};
382
383extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
384extern struct ctx_list ctx_free; /* Head of free list */
385extern struct ctx_list ctx_used; /* Head of used contexts list */
386
387#define NO_CONTEXT -1
388
389static inline void remove_from_ctx_list(struct ctx_list *entry)
390{
391 entry->next->prev = entry->prev;
392 entry->prev->next = entry->next;
393}
394
395static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
396{
397 entry->next = head;
398 (entry->prev = head->prev)->next = entry;
399 head->prev = entry;
400}
401#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
402#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
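
The two helpers above implement a plain circular doubly-linked list, with the ctx_free and ctx_used structs acting as list heads. A minimal standalone sketch of the round trip, assuming the head has been initialized to point at itself (the kernel does that during MMU setup, outside this header); the ctx_mm member is dropped here to keep the snippet self-contained:

#include <assert.h>
#include <stddef.h>

struct ctx_list {
        struct ctx_list *next, *prev;
        unsigned int ctx_number;
};

static void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
        entry->next = head;
        (entry->prev = head->prev)->next = entry;       /* tail insert */
        head->prev = entry;
}

static void remove_from_ctx_list(struct ctx_list *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

int main(void)
{
        struct ctx_list free_head = { &free_head, &free_head, 0 };
        struct ctx_list ctx = { NULL, NULL, 42 };

        add_to_ctx_list(&free_head, &ctx);
        assert(free_head.prev == &ctx && ctx.next == &free_head);
        remove_from_ctx_list(&ctx);
        assert(free_head.next == &free_head && free_head.prev == &free_head);
        return 0;
}
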
403
404static inline unsigned long
405__get_phys (unsigned long addr)
406{
407 switch (sparc_cpu_model){
408 case sun4:
409 case sun4c:
410 return sun4c_get_pte (addr) << PAGE_SHIFT;
411 case sun4m:
412 case sun4d:
413 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
414 default:
415 return 0;
416 }
417}
418
419static inline int
420__get_iospace (unsigned long addr)
421{
422 switch (sparc_cpu_model){
423 case sun4:
424 case sun4c:
425 return -1; /* Don't check iospace on sun4c */
426 case sun4m:
427 case sun4d:
428 return (srmmu_get_pte (addr) >> 28);
429 default:
430 return -1;
431 }
432}
433
434extern unsigned long *sparc_valid_addr_bitmap;
435
436/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
437#define kern_addr_valid(addr) \
438 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
439
440extern int io_remap_pfn_range(struct vm_area_struct *vma,
441 unsigned long from, unsigned long pfn,
442 unsigned long size, pgprot_t prot);
443
444/*
445 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
446 * its high 4 bits. These macros/functions put it there or get it from there.
447 */
448#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
449#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
450#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
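
On sparc32 BITS_PER_LONG is 32, so the space id occupies pfn bits 31:28 and the real page frame number the low 28 bits. A standalone sketch of the packing round trip under that assumption; the macros are re-declared locally so the snippet compiles on its own:

#include <assert.h>

#define BITS_PER_LONG 32
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)

int main(void)
{
        unsigned long pfn = 0x00012345UL;       /* plain page frame number */
        unsigned long space = 0x2UL;            /* 4-bit I/O space id */
        unsigned long packed = MK_IOSPACE_PFN(space, pfn);

        assert(GET_IOSPACE(packed) == space);   /* high 4 bits */
        assert(GET_PFN(packed) == pfn);         /* low 28 bits */
        return 0;
}
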
451
452#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
453#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
454({ \
455 int __changed = !pte_same(*(__ptep), __entry); \
456 if (__changed) { \
457 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
458 flush_tlb_page(__vma, __address); \
459 } \
460 (sparc_cpu_model == sun4c) || __changed; \
461})
462
463#include <asm-generic/pgtable.h>
464
465#endif /* !(__ASSEMBLY__) */
466
467#define VMALLOC_START 0xfe600000
468/* XXX Alter this when I get around to fixing sun4c - Anton */
469#define VMALLOC_END 0xffc00000
470
471
472/* We provide our own get_unmapped_area to cope with VA holes for userland */
473#define HAVE_ARCH_UNMAPPED_AREA
474
475/*
476 * No page table caches to initialise
477 */
478#define pgtable_cache_init() do { } while (0)
479
480#endif /* !(_SPARC_PGTABLE_H) */
diff --git a/include/asm-sparc/pgtable_64.h b/include/asm-sparc/pgtable_64.h
new file mode 100644
index 000000000000..78d5594964a3
--- /dev/null
+++ b/include/asm-sparc/pgtable_64.h
@@ -0,0 +1,781 @@
1/*
2 * pgtable.h: SpitFire page table operations.
3 *
4 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#ifndef _SPARC64_PGTABLE_H
9#define _SPARC64_PGTABLE_H
10
11/* This file contains the functions and defines necessary to modify and use
12 * the SpitFire page tables.
13 */
14
15#include <asm-generic/pgtable-nopud.h>
16
17#include <linux/compiler.h>
18#include <linux/const.h>
19#include <asm/types.h>
20#include <asm/spitfire.h>
21#include <asm/asi.h>
22#include <asm/system.h>
23#include <asm/page.h>
24#include <asm/processor.h>
25
26/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
27 * The page copy blockops can use 0x6000000 to 0x8000000.
28 * The TSB is mapped in the 0x8000000 to 0xa000000 range.
29 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
30 * The vmalloc area spans 0x100000000 to 0x200000000.
31 * Since modules need to be in the lowest 32-bits of the address space,
32 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
33 * There is a single static kernel PMD which maps from 0x0 to address
34 * 0x400000000.
35 */
36#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
37#define TSBMAP_BASE _AC(0x0000000008000000,UL)
38#define MODULES_VADDR _AC(0x0000000010000000,UL)
39#define MODULES_LEN _AC(0x00000000e0000000,UL)
40#define MODULES_END _AC(0x00000000f0000000,UL)
41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
43#define VMALLOC_START _AC(0x0000000100000000,UL)
44#define VMALLOC_END _AC(0x0000000200000000,UL)
45#define VMEMMAP_BASE _AC(0x0000000200000000,UL)
46
47#define vmemmap ((struct page *)VMEMMAP_BASE)
48
49/* XXX All of this needs to be rethought so we can take advantage
50 * XXX cheetah's full 64-bit virtual address space, ie. no more hole
51 * XXX in the middle like on spitfire. -DaveM
52 */
53/*
54 * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
55 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
56 * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
57 * table is a single page long). The next higher PMD_BITS determine pmd#
58 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
59 * since the pmd entries are 4 bytes, and each pmd page is a single page
60 * long). Finally, the higher few bits determine pgde#.
61 */
62
63/* PMD_SHIFT determines the size of the area a second-level page
64 * table can map
65 */
66#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
67#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
68#define PMD_MASK (~(PMD_SIZE-1))
69#define PMD_BITS (PAGE_SHIFT - 2)
70
71/* PGDIR_SHIFT determines what a third-level page table entry can map */
72#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
73#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
74#define PGDIR_MASK (~(PGDIR_SIZE-1))
75#define PGDIR_BITS (PAGE_SHIFT - 2)
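
Working the formulas for the usual 8K base page (PAGE_SHIFT == 13, the first branch of the PAGE_SHIFT test further down): PMD_SHIFT is 23, so one pmd entry maps 8MB, and PGDIR_SHIFT is 34, so one pgd entry maps 16GB. The standalone sketch below re-evaluates the same arithmetic and splits a virtual address into pgd/pmd/pte indices plus the page offset:

#include <assert.h>

#define PAGE_SHIFT      13
#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT - 3))                 /* 23 */
#define PMD_BITS        (PAGE_SHIFT - 2)                                /* 11 */
#define PGDIR_SHIFT     (PAGE_SHIFT + (PAGE_SHIFT - 3) + PMD_BITS)      /* 34 */
#define PGDIR_BITS      (PAGE_SHIFT - 2)                                /* 11 */
#define PTRS_PER_PTE    (1UL << (PAGE_SHIFT - 3))                       /* 1024 */
#define PTRS_PER_PMD    (1UL << PMD_BITS)                               /* 2048 */
#define PTRS_PER_PGD    (1UL << PGDIR_BITS)                             /* 2048 */

int main(void)
{
        unsigned long va  = 0x123456789aUL;     /* sample user address */
        unsigned long off = va & ((1UL << PAGE_SHIFT) - 1);
        unsigned long pte = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        unsigned long pmd = (va >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
        unsigned long pgd = (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);

        /* Reassembling the pieces must give back the original address. */
        assert(((pgd << PGDIR_SHIFT) | (pmd << PMD_SHIFT) |
                (pte << PAGE_SHIFT) | off) == va);
        assert(PMD_SHIFT == 23 && PGDIR_SHIFT == 34);
        return 0;
}
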
76
77#ifndef __ASSEMBLY__
78
79#include <linux/sched.h>
80
81/* Entries per page directory level. */
82#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
83#define PTRS_PER_PMD (1UL << PMD_BITS)
84#define PTRS_PER_PGD (1UL << PGDIR_BITS)
85
86/* Kernel has a separate 44bit address space. */
87#define FIRST_USER_ADDRESS 0
88
89#define pte_ERROR(e) __builtin_trap()
90#define pmd_ERROR(e) __builtin_trap()
91#define pgd_ERROR(e) __builtin_trap()
92
93#endif /* !(__ASSEMBLY__) */
94
95/* PTE bits which are the same in SUN4U and SUN4V format. */
96#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
97#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
98
99/* SUN4U pte bits... */
100#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
101#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
102#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
103#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
104#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
105#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
106#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
107#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
108#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
109#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
110#define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */
111#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
112#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
113#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
114#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
115#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
116#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
117#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
118#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
119#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
120#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
121#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
122#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
123#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
124#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
125#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
126#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
127#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */
128
129/* SUN4V pte bits... */
130#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
131#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
132#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
133#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
134#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
135#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
136#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
137#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
138#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
139#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
140#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
141#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
142#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
143#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
144#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
145#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
146#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
147#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
148#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
149#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
150#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
151#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
152#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
153#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
154#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
155#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
156#define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */
157
158#if PAGE_SHIFT == 13
159#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
160#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
161#elif PAGE_SHIFT == 16
162#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
163#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
164#elif PAGE_SHIFT == 19
165#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
166#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
167#elif PAGE_SHIFT == 22
168#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
169#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
170#else
171#error Wrong PAGE_SHIFT specified
172#endif
173
174#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
175#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
176#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
177#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
178#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
179#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
180#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
181#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
182#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
183#endif
184
185/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
186#define __P000 __pgprot(0)
187#define __P001 __pgprot(0)
188#define __P010 __pgprot(0)
189#define __P011 __pgprot(0)
190#define __P100 __pgprot(0)
191#define __P101 __pgprot(0)
192#define __P110 __pgprot(0)
193#define __P111 __pgprot(0)
194
195#define __S000 __pgprot(0)
196#define __S001 __pgprot(0)
197#define __S010 __pgprot(0)
198#define __S011 __pgprot(0)
199#define __S100 __pgprot(0)
200#define __S101 __pgprot(0)
201#define __S110 __pgprot(0)
202#define __S111 __pgprot(0)
203
204#ifndef __ASSEMBLY__
205
206extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
207
208extern unsigned long pte_sz_bits(unsigned long size);
209
210extern pgprot_t PAGE_KERNEL;
211extern pgprot_t PAGE_KERNEL_LOCKED;
212extern pgprot_t PAGE_COPY;
213extern pgprot_t PAGE_SHARED;
214
215/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
216extern unsigned long _PAGE_IE;
217extern unsigned long _PAGE_E;
218extern unsigned long _PAGE_CACHE;
219
220extern unsigned long pg_iobits;
221extern unsigned long _PAGE_ALL_SZ_BITS;
222extern unsigned long _PAGE_SZBITS;
223
224extern struct page *mem_map_zero;
225#define ZERO_PAGE(vaddr) (mem_map_zero)
226
227/* PFNs are real physical page numbers. However, mem_map only begins to record
228 * per-page information starting at pfn_base. This is to handle systems where
229 * the first physical page in the machine is at some huge physical address,
230 * such as 4GB. This is common on a partitioned E10000, for example.
231 */
232static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
233{
234 unsigned long paddr = pfn << PAGE_SHIFT;
235 unsigned long sz_bits;
236
237 sz_bits = 0UL;
238 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
239 __asm__ __volatile__(
240 "\n661: sethi %%uhi(%1), %0\n"
241 " sllx %0, 32, %0\n"
242 " .section .sun4v_2insn_patch, \"ax\"\n"
243 " .word 661b\n"
244 " mov %2, %0\n"
245 " nop\n"
246 " .previous\n"
247 : "=r" (sz_bits)
248 : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
249 }
250 return __pte(paddr | sz_bits | pgprot_val(prot));
251}
252#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
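
The inline asm is a boot-time patch site: the two instructions at label 661 build the _PAGE_SZBITS_4U constant, and on sun4v the entry in .sun4v_2insn_patch replaces them with a mov of _PAGE_SZBITS_4V. Stripped of that machinery, the computation reduces to roughly the sketch below; is_sun4v() is a hypothetical stand-in for the patching, not a real kernel helper:

/* Conceptual equivalent of pfn_pte() above; is_sun4v() is a
 * hypothetical placeholder for the boot-time instruction patching.
 */
static inline pte_t pfn_pte_sketch(unsigned long pfn, pgprot_t prot)
{
        unsigned long sz_bits = is_sun4v() ? _PAGE_SZBITS_4V
                                           : _PAGE_SZBITS_4U;

        return __pte((pfn << PAGE_SHIFT) | sz_bits | pgprot_val(prot));
}
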
253
254/* This one can be done with two shifts. */
255static inline unsigned long pte_pfn(pte_t pte)
256{
257 unsigned long ret;
258
259 __asm__ __volatile__(
260 "\n661: sllx %1, %2, %0\n"
261 " srlx %0, %3, %0\n"
262 " .section .sun4v_2insn_patch, \"ax\"\n"
263 " .word 661b\n"
264 " sllx %1, %4, %0\n"
265 " srlx %0, %5, %0\n"
266 " .previous\n"
267 : "=r" (ret)
268 : "r" (pte_val(pte)),
269 "i" (21), "i" (21 + PAGE_SHIFT),
270 "i" (8), "i" (8 + PAGE_SHIFT));
271
272 return ret;
273}
274#define pte_page(x) pfn_to_page(pte_pfn(x))
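
The "two shifts" trick avoids a 64-bit mask constant: on sun4u, sllx by 21 pushes everything above the pa[42:13] field off the top of the register, and the srlx by 21 + PAGE_SHIFT then drops the page offset, leaving the pfn; sun4v uses 8 and 8 + PAGE_SHIFT because its physical-address field extends to bit 55. The sun4u path written as plain C, a sketch assuming 8K pages:

/* Plain-C restatement of the sun4u branch of pte_pfn() above;
 * assumes PAGE_SHIFT == 13 and the pa[42:13] layout of sun4u ptes.
 */
static unsigned long pte_pfn_sun4u_sketch(unsigned long pteval)
{
        return (pteval << 21) >> (21 + 13);     /* 13 == PAGE_SHIFT */
}
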
275
276static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
277{
278 unsigned long mask, tmp;
279
280 /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
281 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
282 *
283 * Even if we use negation tricks the result is still a 6
284 * instruction sequence, so don't try to play fancy and just
285 * do the most straightforward implementation.
286 *
287 * Note: We encode this into 3 sun4v 2-insn patch sequences.
288 */
289
290 __asm__ __volatile__(
291 "\n661: sethi %%uhi(%2), %1\n"
292 " sethi %%hi(%2), %0\n"
293 "\n662: or %1, %%ulo(%2), %1\n"
294 " or %0, %%lo(%2), %0\n"
295 "\n663: sllx %1, 32, %1\n"
296 " or %0, %1, %0\n"
297 " .section .sun4v_2insn_patch, \"ax\"\n"
298 " .word 661b\n"
299 " sethi %%uhi(%3), %1\n"
300 " sethi %%hi(%3), %0\n"
301 " .word 662b\n"
302 " or %1, %%ulo(%3), %1\n"
303 " or %0, %%lo(%3), %0\n"
304 " .word 663b\n"
305 " sllx %1, 32, %1\n"
306 " or %0, %1, %0\n"
307 " .previous\n"
308 : "=r" (mask), "=r" (tmp)
309 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
310 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
311 _PAGE_SZBITS_4U),
312 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
313 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
314 _PAGE_SZBITS_4V));
315
316 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
317}
318
319static inline pte_t pgoff_to_pte(unsigned long off)
320{
321 off <<= PAGE_SHIFT;
322
323 __asm__ __volatile__(
324 "\n661: or %0, %2, %0\n"
325 " .section .sun4v_1insn_patch, \"ax\"\n"
326 " .word 661b\n"
327 " or %0, %3, %0\n"
328 " .previous\n"
329 : "=r" (off)
330 : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
331
332 return __pte(off);
333}
334
335static inline pgprot_t pgprot_noncached(pgprot_t prot)
336{
337 unsigned long val = pgprot_val(prot);
338
339 __asm__ __volatile__(
340 "\n661: andn %0, %2, %0\n"
341 " or %0, %3, %0\n"
342 " .section .sun4v_2insn_patch, \"ax\"\n"
343 " .word 661b\n"
344 " andn %0, %4, %0\n"
345 " or %0, %5, %0\n"
346 " .previous\n"
347 : "=r" (val)
348 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
349 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
350
351 return __pgprot(val);
352}
353/* Various pieces of code check for platform support by ifdef testing
354 * on "pgprot_noncached". That's broken and should be fixed, but for
355 * now...
356 */
357#define pgprot_noncached pgprot_noncached
358
359#ifdef CONFIG_HUGETLB_PAGE
360static inline pte_t pte_mkhuge(pte_t pte)
361{
362 unsigned long mask;
363
364 __asm__ __volatile__(
365 "\n661: sethi %%uhi(%1), %0\n"
366 " sllx %0, 32, %0\n"
367 " .section .sun4v_2insn_patch, \"ax\"\n"
368 " .word 661b\n"
369 " mov %2, %0\n"
370 " nop\n"
371 " .previous\n"
372 : "=r" (mask)
373 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
374
375 return __pte(pte_val(pte) | mask);
376}
377#endif
378
379static inline pte_t pte_mkdirty(pte_t pte)
380{
381 unsigned long val = pte_val(pte), tmp;
382
383 __asm__ __volatile__(
384 "\n661: or %0, %3, %0\n"
385 " nop\n"
386 "\n662: nop\n"
387 " nop\n"
388 " .section .sun4v_2insn_patch, \"ax\"\n"
389 " .word 661b\n"
390 " sethi %%uhi(%4), %1\n"
391 " sllx %1, 32, %1\n"
392 " .word 662b\n"
393 " or %1, %%lo(%4), %1\n"
394 " or %0, %1, %0\n"
395 " .previous\n"
396 : "=r" (val), "=r" (tmp)
397 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
398 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
399
400 return __pte(val);
401}
402
403static inline pte_t pte_mkclean(pte_t pte)
404{
405 unsigned long val = pte_val(pte), tmp;
406
407 __asm__ __volatile__(
408 "\n661: andn %0, %3, %0\n"
409 " nop\n"
410 "\n662: nop\n"
411 " nop\n"
412 " .section .sun4v_2insn_patch, \"ax\"\n"
413 " .word 661b\n"
414 " sethi %%uhi(%4), %1\n"
415 " sllx %1, 32, %1\n"
416 " .word 662b\n"
417 " or %1, %%lo(%4), %1\n"
418 " andn %0, %1, %0\n"
419 " .previous\n"
420 : "=r" (val), "=r" (tmp)
421 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
422 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
423
424 return __pte(val);
425}
426
427static inline pte_t pte_mkwrite(pte_t pte)
428{
429 unsigned long val = pte_val(pte), mask;
430
431 __asm__ __volatile__(
432 "\n661: mov %1, %0\n"
433 " nop\n"
434 " .section .sun4v_2insn_patch, \"ax\"\n"
435 " .word 661b\n"
436 " sethi %%uhi(%2), %0\n"
437 " sllx %0, 32, %0\n"
438 " .previous\n"
439 : "=r" (mask)
440 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
441
442 return __pte(val | mask);
443}
444
445static inline pte_t pte_wrprotect(pte_t pte)
446{
447 unsigned long val = pte_val(pte), tmp;
448
449 __asm__ __volatile__(
450 "\n661: andn %0, %3, %0\n"
451 " nop\n"
452 "\n662: nop\n"
453 " nop\n"
454 " .section .sun4v_2insn_patch, \"ax\"\n"
455 " .word 661b\n"
456 " sethi %%uhi(%4), %1\n"
457 " sllx %1, 32, %1\n"
458 " .word 662b\n"
459 " or %1, %%lo(%4), %1\n"
460 " andn %0, %1, %0\n"
461 " .previous\n"
462 : "=r" (val), "=r" (tmp)
463 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
464 "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
465
466 return __pte(val);
467}
468
469static inline pte_t pte_mkold(pte_t pte)
470{
471 unsigned long mask;
472
473 __asm__ __volatile__(
474 "\n661: mov %1, %0\n"
475 " nop\n"
476 " .section .sun4v_2insn_patch, \"ax\"\n"
477 " .word 661b\n"
478 " sethi %%uhi(%2), %0\n"
479 " sllx %0, 32, %0\n"
480 " .previous\n"
481 : "=r" (mask)
482 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
483
484 mask |= _PAGE_R;
485
486 return __pte(pte_val(pte) & ~mask);
487}
488
489static inline pte_t pte_mkyoung(pte_t pte)
490{
491 unsigned long mask;
492
493 __asm__ __volatile__(
494 "\n661: mov %1, %0\n"
495 " nop\n"
496 " .section .sun4v_2insn_patch, \"ax\"\n"
497 " .word 661b\n"
498 " sethi %%uhi(%2), %0\n"
499 " sllx %0, 32, %0\n"
500 " .previous\n"
501 : "=r" (mask)
502 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
503
504 mask |= _PAGE_R;
505
506 return __pte(pte_val(pte) | mask);
507}
508
509static inline pte_t pte_mkspecial(pte_t pte)
510{
511 return pte;
512}
513
514static inline unsigned long pte_young(pte_t pte)
515{
516 unsigned long mask;
517
518 __asm__ __volatile__(
519 "\n661: mov %1, %0\n"
520 " nop\n"
521 " .section .sun4v_2insn_patch, \"ax\"\n"
522 " .word 661b\n"
523 " sethi %%uhi(%2), %0\n"
524 " sllx %0, 32, %0\n"
525 " .previous\n"
526 : "=r" (mask)
527 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
528
529 return (pte_val(pte) & mask);
530}
531
532static inline unsigned long pte_dirty(pte_t pte)
533{
534 unsigned long mask;
535
536 __asm__ __volatile__(
537 "\n661: mov %1, %0\n"
538 " nop\n"
539 " .section .sun4v_2insn_patch, \"ax\"\n"
540 " .word 661b\n"
541 " sethi %%uhi(%2), %0\n"
542 " sllx %0, 32, %0\n"
543 " .previous\n"
544 : "=r" (mask)
545 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
546
547 return (pte_val(pte) & mask);
548}
549
550static inline unsigned long pte_write(pte_t pte)
551{
552 unsigned long mask;
553
554 __asm__ __volatile__(
555 "\n661: mov %1, %0\n"
556 " nop\n"
557 " .section .sun4v_2insn_patch, \"ax\"\n"
558 " .word 661b\n"
559 " sethi %%uhi(%2), %0\n"
560 " sllx %0, 32, %0\n"
561 " .previous\n"
562 : "=r" (mask)
563 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
564
565 return (pte_val(pte) & mask);
566}
567
568static inline unsigned long pte_exec(pte_t pte)
569{
570 unsigned long mask;
571
572 __asm__ __volatile__(
573 "\n661: sethi %%hi(%1), %0\n"
574 " .section .sun4v_1insn_patch, \"ax\"\n"
575 " .word 661b\n"
576 " mov %2, %0\n"
577 " .previous\n"
578 : "=r" (mask)
579 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
580
581 return (pte_val(pte) & mask);
582}
583
584static inline unsigned long pte_file(pte_t pte)
585{
586 unsigned long val = pte_val(pte);
587
588 __asm__ __volatile__(
589 "\n661: and %0, %2, %0\n"
590 " .section .sun4v_1insn_patch, \"ax\"\n"
591 " .word 661b\n"
592 " and %0, %3, %0\n"
593 " .previous\n"
594 : "=r" (val)
595 : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
596
597 return val;
598}
599
600static inline unsigned long pte_present(pte_t pte)
601{
602 unsigned long val = pte_val(pte);
603
604 __asm__ __volatile__(
605 "\n661: and %0, %2, %0\n"
606 " .section .sun4v_1insn_patch, \"ax\"\n"
607 " .word 661b\n"
608 " and %0, %3, %0\n"
609 " .previous\n"
610 : "=r" (val)
611 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
612
613 return val;
614}
615
616static inline int pte_special(pte_t pte)
617{
618 return 0;
619}
620
621#define pmd_set(pmdp, ptep) \
622 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
623#define pud_set(pudp, pmdp) \
624 (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
625#define __pmd_page(pmd) \
626 ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
627#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
628#define pud_page_vaddr(pud) \
629 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
630#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
631#define pmd_none(pmd) (!pmd_val(pmd))
632#define pmd_bad(pmd) (0)
633#define pmd_present(pmd) (pmd_val(pmd) != 0U)
634#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
635#define pud_none(pud) (!pud_val(pud))
636#define pud_bad(pud) (0)
637#define pud_present(pud) (pud_val(pud) != 0U)
638#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
639
640/* Same in both SUN4V and SUN4U. */
641#define pte_none(pte) (!pte_val(pte))
642
643/* to find an entry in a page-table-directory. */
644#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
645#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
646
647/* to find an entry in a kernel page-table-directory */
648#define pgd_offset_k(address) pgd_offset(&init_mm, address)
649
650/* Find an entry in the second-level page table.. */
651#define pmd_offset(pudp, address) \
652 ((pmd_t *) pud_page_vaddr(*(pudp)) + \
653 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
654
655/* Find an entry in the third-level page table.. */
656#define pte_index(dir, address) \
657 ((pte_t *) __pmd_page(*(dir)) + \
658 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
659#define pte_offset_kernel pte_index
660#define pte_offset_map pte_index
661#define pte_offset_map_nested pte_index
662#define pte_unmap(pte) do { } while (0)
663#define pte_unmap_nested(pte) do { } while (0)
664
665/* Actual page table PTE updates. */
666extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
667
668static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
669{
670 pte_t orig = *ptep;
671
672 *ptep = pte;
673
674 /* It is more efficient to let flush_tlb_kernel_range()
675 * handle init_mm tlb flushes.
676 *
677 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
678 * and SUN4V pte layout, so this inline test is fine.
679 */
680 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
681 tlb_batch_add(mm, addr, ptep, orig);
682}
683
684#define pte_clear(mm,addr,ptep) \
685 set_pte_at((mm), (addr), (ptep), __pte(0UL))
686
687#ifdef DCACHE_ALIASING_POSSIBLE
688#define __HAVE_ARCH_MOVE_PTE
689#define move_pte(pte, prot, old_addr, new_addr) \
690({ \
691 pte_t newpte = (pte); \
692 if (tlb_type != hypervisor && pte_present(pte)) { \
693 unsigned long this_pfn = pte_pfn(pte); \
694 \
695 if (pfn_valid(this_pfn) && \
696 (((old_addr) ^ (new_addr)) & (1 << 13))) \
697 flush_dcache_page_all(current->mm, \
698 pfn_to_page(this_pfn)); \
699 } \
700 newpte; \
701})
702#endif
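
The (1 << 13) test compares the D-cache color of the old and new virtual addresses: with 8K pages and a virtually indexed D-cache larger than a page (the DCACHE_ALIASING_POSSIBLE case), two mappings can alias unless address bit 13 matches, so the page is flushed only when a remap flips that bit. A small sketch of just that predicate; the bit number is the 8K-page assumption:

/* Does moving a user mapping from old_addr to new_addr change its
 * D-cache color?  Assumes 8K pages, i.e. the color bit is bit 13.
 */
static int dcache_color_changes(unsigned long old_addr, unsigned long new_addr)
{
        return ((old_addr ^ new_addr) & (1UL << 13)) != 0;
}
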
703
704extern pgd_t swapper_pg_dir[2048];
705extern pmd_t swapper_low_pmd_dir[2048];
706
707extern void paging_init(void);
708extern unsigned long find_ecache_flush_span(unsigned long size);
709
710/* These do nothing with the way I have things setup. */
711#define mmu_lockarea(vaddr, len) (vaddr)
712#define mmu_unlockarea(vaddr, len) do { } while(0)
713
714struct vm_area_struct;
715extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
716
717/* Encode and de-code a swap entry */
718#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
719#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
720#define __swp_entry(type, offset) \
721 ( (swp_entry_t) \
722 { \
723 (((long)(type) << PAGE_SHIFT) | \
724 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
725 } )
726#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
727#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
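
With the usual PAGE_SHIFT of 13 this packs the 8-bit swap type into pte bits 20:13 and the swap offset into the bits above, leaving the low 13 bits clear, so the present and file bits of both the sun4u and sun4v layouts stay zero in a swap entry. A standalone round-trip check under that page-size assumption:

#include <assert.h>

#define PAGE_SHIFT 13UL

int main(void)
{
        unsigned long type   = 0x5aUL;          /* fits in 8 bits */
        unsigned long offset = 0x123456UL;
        unsigned long val = (type << PAGE_SHIFT) |
                            (offset << (PAGE_SHIFT + 8UL));

        assert(((val >> PAGE_SHIFT) & 0xffUL) == type);
        assert((val >> (PAGE_SHIFT + 8UL)) == offset);
        return 0;
}
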
728
729/* File offset in PTE support. */
730extern unsigned long pte_file(pte_t);
731#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
732extern pte_t pgoff_to_pte(unsigned long);
733#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
734
735extern unsigned long *sparc64_valid_addr_bitmap;
736
737/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
738#define kern_addr_valid(addr) \
739 (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
740
741extern int page_in_phys_avail(unsigned long paddr);
742
743extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
744 unsigned long pfn,
745 unsigned long size, pgprot_t prot);
746
747/*
748 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
749 * its high 4 bits. These macros/functions put it there or get it from there.
750 */
751#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
752#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
753#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
754
755#include <asm-generic/pgtable.h>
756
757/* We provide our own get_unmapped_area to cope with VA holes and
758 * SHM area cache aliasing for userland.
759 */
760#define HAVE_ARCH_UNMAPPED_AREA
761#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
762
763/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
764 * the largest alignment possible such that larger PTEs can be used.
765 */
766extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
767 unsigned long, unsigned long,
768 unsigned long);
769#define HAVE_ARCH_FB_UNMAPPED_AREA
770
771extern void pgtable_cache_init(void);
772extern void sun4v_register_fault_status(void);
773extern void sun4v_ktsb_register(void);
774extern void __init cheetah_ecache_flush_init(void);
775extern void sun4v_patch_tlb_handlers(void);
776
777extern unsigned long cmdline_memory_size;
778
779#endif /* !(__ASSEMBLY__) */
780
781#endif /* !(_SPARC64_PGTABLE_H) */
diff --git a/include/asm-sparc/posix_types.h b/include/asm-sparc/posix_types.h
index dcc07eb5e181..58c820d75e83 100644
--- a/include/asm-sparc/posix_types.h
+++ b/include/asm-sparc/posix_types.h
@@ -1,118 +1,8 @@
1#ifndef ___ASM_SPARC_POSIX_TYPES_H
2#define ___ASM_SPARC_POSIX_TYPES_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/posix_types_64.h>
5#else
6#include <asm-sparc/posix_types_32.h>
7#endif
8#endif
1#ifndef __ARCH_SPARC_POSIX_TYPES_H
2#define __ARCH_SPARC_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned int __kernel_size_t;
11typedef int __kernel_ssize_t;
12typedef long int __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_suseconds_t;
15typedef long __kernel_clock_t;
16typedef int __kernel_pid_t;
17typedef unsigned short __kernel_ipc_pid_t;
18typedef unsigned short __kernel_uid_t;
19typedef unsigned short __kernel_gid_t;
20typedef unsigned long __kernel_ino_t;
21typedef unsigned short __kernel_mode_t;
22typedef unsigned short __kernel_umode_t;
23typedef short __kernel_nlink_t;
24typedef long __kernel_daddr_t;
25typedef long __kernel_off_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned short __kernel_uid16_t;
28typedef unsigned short __kernel_gid16_t;
29typedef unsigned int __kernel_uid32_t;
30typedef unsigned int __kernel_gid32_t;
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef unsigned short __kernel_old_dev_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_timer_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
49{
50 unsigned long _tmp = fd / __NFDBITS;
51 unsigned long _rem = fd % __NFDBITS;
52 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
53}
54
55#undef __FD_CLR
56static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
57{
58 unsigned long _tmp = fd / __NFDBITS;
59 unsigned long _rem = fd % __NFDBITS;
60 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
61}
62
63#undef __FD_ISSET
64static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
65{
66 unsigned long _tmp = fd / __NFDBITS;
67 unsigned long _rem = fd % __NFDBITS;
68 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
69}
70
71/*
72 * This will unroll the loop for the normal constant cases (8 or 32 longs,
73 * for 256 and 1024-bit fd_sets respectively)
74 */
75#undef __FD_ZERO
76static inline void __FD_ZERO(__kernel_fd_set *p)
77{
78 unsigned long *tmp = p->fds_bits;
79 int i;
80
81 if (__builtin_constant_p(__FDSET_LONGS)) {
82 switch (__FDSET_LONGS) {
83 case 32:
84 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
85 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
86 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
87 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
88 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
89 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
90 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
91 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
92 return;
93 case 16:
94 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
95 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
96 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
97 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
98 return;
99 case 8:
100 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
101 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
102 return;
103 case 4:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 return;
106 }
107 }
108 i = __FDSET_LONGS;
109 while (i) {
110 i--;
111 *tmp = 0;
112 tmp++;
113 }
114}
115
116#endif /* defined(__KERNEL__) */
117
118#endif /* !(__ARCH_SPARC_POSIX_TYPES_H) */
diff --git a/include/asm-sparc/posix_types_32.h b/include/asm-sparc/posix_types_32.h
new file mode 100644
index 000000000000..6bb6eb1ca0f2
--- /dev/null
+++ b/include/asm-sparc/posix_types_32.h
@@ -0,0 +1,118 @@
1#ifndef __ARCH_SPARC_POSIX_TYPES_H
2#define __ARCH_SPARC_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned int __kernel_size_t;
11typedef int __kernel_ssize_t;
12typedef long int __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_suseconds_t;
15typedef long __kernel_clock_t;
16typedef int __kernel_pid_t;
17typedef unsigned short __kernel_ipc_pid_t;
18typedef unsigned short __kernel_uid_t;
19typedef unsigned short __kernel_gid_t;
20typedef unsigned long __kernel_ino_t;
21typedef unsigned short __kernel_mode_t;
22typedef unsigned short __kernel_umode_t;
23typedef short __kernel_nlink_t;
24typedef long __kernel_daddr_t;
25typedef long __kernel_off_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned short __kernel_uid16_t;
28typedef unsigned short __kernel_gid16_t;
29typedef unsigned int __kernel_uid32_t;
30typedef unsigned int __kernel_gid32_t;
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef unsigned short __kernel_old_dev_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_timer_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
49{
50 unsigned long _tmp = fd / __NFDBITS;
51 unsigned long _rem = fd % __NFDBITS;
52 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
53}
54
55#undef __FD_CLR
56static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
57{
58 unsigned long _tmp = fd / __NFDBITS;
59 unsigned long _rem = fd % __NFDBITS;
60 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
61}
62
63#undef __FD_ISSET
64static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
65{
66 unsigned long _tmp = fd / __NFDBITS;
67 unsigned long _rem = fd % __NFDBITS;
68 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
69}
70
71/*
72 * This will unroll the loop for the normal constant cases (8 or 32 longs,
73 * for 256 and 1024-bit fd_sets respectively)
74 */
75#undef __FD_ZERO
76static inline void __FD_ZERO(__kernel_fd_set *p)
77{
78 unsigned long *tmp = p->fds_bits;
79 int i;
80
81 if (__builtin_constant_p(__FDSET_LONGS)) {
82 switch (__FDSET_LONGS) {
83 case 32:
84 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
85 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
86 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
87 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
88 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
89 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
90 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
91 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
92 return;
93 case 16:
94 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
95 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
96 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
97 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
98 return;
99 case 8:
100 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
101 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
102 return;
103 case 4:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 return;
106 }
107 }
108 i = __FDSET_LONGS;
109 while (i) {
110 i--;
111 *tmp = 0;
112 tmp++;
113 }
114}
115
116#endif /* defined(__KERNEL__) */
117
118#endif /* !(__ARCH_SPARC_POSIX_TYPES_H) */
diff --git a/include/asm-sparc/posix_types_64.h b/include/asm-sparc/posix_types_64.h
new file mode 100644
index 000000000000..ba8f93295763
--- /dev/null
+++ b/include/asm-sparc/posix_types_64.h
@@ -0,0 +1,122 @@
1#ifndef __ARCH_SPARC64_POSIX_TYPES_H
2#define __ARCH_SPARC64_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_size_t;
11typedef long __kernel_ssize_t;
12typedef long __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_clock_t;
15typedef int __kernel_pid_t;
16typedef int __kernel_ipc_pid_t;
17typedef unsigned int __kernel_uid_t;
18typedef unsigned int __kernel_gid_t;
19typedef unsigned long __kernel_ino_t;
20typedef unsigned int __kernel_mode_t;
21typedef unsigned short __kernel_umode_t;
22typedef unsigned int __kernel_nlink_t;
23typedef int __kernel_daddr_t;
24typedef long __kernel_off_t;
25typedef char * __kernel_caddr_t;
26typedef unsigned short __kernel_uid16_t;
27typedef unsigned short __kernel_gid16_t;
28typedef int __kernel_clockid_t;
29typedef int __kernel_timer_t;
30
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef __kernel_uid_t __kernel_uid32_t;
34typedef __kernel_gid_t __kernel_gid32_t;
35
36typedef unsigned int __kernel_old_dev_t;
37
38/* Note this piece of asymmetry from the v9 ABI. */
39typedef int __kernel_suseconds_t;
40
41#ifdef __GNUC__
42typedef long long __kernel_loff_t;
43#endif
44
45typedef struct {
46 int val[2];
47} __kernel_fsid_t;
48
49#if defined(__KERNEL__)
50
51#undef __FD_SET
52static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
53{
54 unsigned long _tmp = fd / __NFDBITS;
55 unsigned long _rem = fd % __NFDBITS;
56 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
57}
58
59#undef __FD_CLR
60static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
61{
62 unsigned long _tmp = fd / __NFDBITS;
63 unsigned long _rem = fd % __NFDBITS;
64 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
65}
66
67#undef __FD_ISSET
68static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
69{
70 unsigned long _tmp = fd / __NFDBITS;
71 unsigned long _rem = fd % __NFDBITS;
72 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
73}
74
75/*
76 * This will unroll the loop for the normal constant cases (8 or 32 longs,
77 * for 256 and 1024-bit fd_sets respectively)
78 */
79#undef __FD_ZERO
80static inline void __FD_ZERO(__kernel_fd_set *p)
81{
82 unsigned long *tmp = p->fds_bits;
83 int i;
84
85 if (__builtin_constant_p(__FDSET_LONGS)) {
86 switch (__FDSET_LONGS) {
87 case 32:
88 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
89 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
90 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
91 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
92 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
93 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
94 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
95 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
96 return;
97 case 16:
98 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
99 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
100 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
101 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
102 return;
103 case 8:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
106 return;
107 case 4:
108 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
109 return;
110 }
111 }
112 i = __FDSET_LONGS;
113 while (i) {
114 i--;
115 *tmp = 0;
116 tmp++;
117 }
118}
119
120#endif /* defined(__KERNEL__) */
121
122#endif /* !(__ARCH_SPARC64_POSIX_TYPES_H) */
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index 8898efbbbe07..11a66bb02eaa 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -1,128 +1,8 @@
1#ifndef ___ASM_SPARC_PROCESSOR_H
2#define ___ASM_SPARC_PROCESSOR_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/processor_64.h>
5#else
6#include <asm-sparc/processor_32.h>
7#endif
8#endif
1/* include/asm-sparc/processor.h
2 *
3 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __ASM_SPARC_PROCESSOR_H
7#define __ASM_SPARC_PROCESSOR_H
8
9/*
10 * Sparc32 implementation of macro that returns current
11 * instruction pointer ("program counter").
12 */
13#define current_text_addr() ({ void *pc; __asm__("sethi %%hi(1f), %0; or %0, %%lo(1f), %0;\n1:" : "=r" (pc)); pc; })
14
15#include <asm/psr.h>
16#include <asm/ptrace.h>
17#include <asm/head.h>
18#include <asm/signal.h>
19#include <asm/btfixup.h>
20#include <asm/page.h>
21
22/*
23 * The sparc has no problems with write protection
24 */
25#define wp_works_ok 1
26#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
27
28/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
29 * That one page is used to protect kernel from intruders, so that
30 * we can make our access_ok test faster
31 */
32#define TASK_SIZE PAGE_OFFSET
33#ifdef __KERNEL__
34#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
35#define STACK_TOP_MAX STACK_TOP
36#endif /* __KERNEL__ */
37
38struct task_struct;
39
40#ifdef __KERNEL__
41struct fpq {
42 unsigned long *insn_addr;
43 unsigned long insn;
44};
45#endif
46
47typedef struct {
48 int seg;
49} mm_segment_t;
50
51/* The Sparc processor specific thread struct. */
52struct thread_struct {
53 struct pt_regs *kregs;
54 unsigned int _pad1;
55
56 /* Special child fork kpsr/kwim values. */
57 unsigned long fork_kpsr __attribute__ ((aligned (8)));
58 unsigned long fork_kwim;
59
60 /* Floating point regs */
61 unsigned long float_regs[32] __attribute__ ((aligned (8)));
62 unsigned long fsr;
63 unsigned long fpqdepth;
64 struct fpq fpqueue[16];
65 unsigned long flags;
66 mm_segment_t current_ds;
67};
68
69#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
70#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
71
72#define INIT_THREAD { \
73 .flags = SPARC_FLAG_KTHREAD, \
74 .current_ds = KERNEL_DS, \
75}
76
77/* Return saved PC of a blocked thread. */
78extern unsigned long thread_saved_pc(struct task_struct *t);
79
80/* Do necessary setup to start up a newly executed thread. */
81static inline void start_thread(struct pt_regs * regs, unsigned long pc,
82 unsigned long sp)
83{
84 register unsigned long zero asm("g1");
85
86 regs->psr = (regs->psr & (PSR_CWP)) | PSR_S;
87 regs->pc = ((pc & (~3)) - 4);
88 regs->npc = regs->pc + 4;
89 regs->y = 0;
90 zero = 0;
91 __asm__ __volatile__("std\t%%g0, [%0 + %3 + 0x00]\n\t"
92 "std\t%%g0, [%0 + %3 + 0x08]\n\t"
93 "std\t%%g0, [%0 + %3 + 0x10]\n\t"
94 "std\t%%g0, [%0 + %3 + 0x18]\n\t"
95 "std\t%%g0, [%0 + %3 + 0x20]\n\t"
96 "std\t%%g0, [%0 + %3 + 0x28]\n\t"
97 "std\t%%g0, [%0 + %3 + 0x30]\n\t"
98 "st\t%1, [%0 + %3 + 0x38]\n\t"
99 "st\t%%g0, [%0 + %3 + 0x3c]"
100 : /* no outputs */
101 : "r" (regs),
102 "r" (sp - sizeof(struct reg_window)),
103 "r" (zero),
104 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))
105 : "memory");
106}
107
108/* Free all resources held by a thread. */
109#define release_thread(tsk) do { } while(0)
110extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
111
112/* Prepare to copy thread state - unlazy all lazy status */
113#define prepare_to_copy(tsk) do { } while (0)
114
115extern unsigned long get_wchan(struct task_struct *);
116
117#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
118#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
119
120#ifdef __KERNEL__
121
122extern struct task_struct *last_task_used_math;
123
124#define cpu_relax() barrier()
125
126#endif
127
128#endif /* __ASM_SPARC_PROCESSOR_H */
diff --git a/include/asm-sparc/processor_32.h b/include/asm-sparc/processor_32.h
new file mode 100644
index 000000000000..562c0d69c537
--- /dev/null
+++ b/include/asm-sparc/processor_32.h
@@ -0,0 +1,128 @@
1/* include/asm-sparc/processor.h
2 *
3 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __ASM_SPARC_PROCESSOR_H
7#define __ASM_SPARC_PROCESSOR_H
8
9/*
10 * Sparc32 implementation of macro that returns current
11 * instruction pointer ("program counter").
12 */
13#define current_text_addr() ({ void *pc; __asm__("sethi %%hi(1f), %0; or %0, %%lo(1f), %0;\n1:" : "=r" (pc)); pc; })
14
15#include <asm/psr.h>
16#include <asm/ptrace.h>
17#include <asm/head.h>
18#include <asm/signal.h>
19#include <asm/btfixup.h>
20#include <asm/page.h>
21
22/*
23 * The sparc has no problems with write protection
24 */
25#define wp_works_ok 1
26#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
27
28/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
29 * That one page is used to protect kernel from intruders, so that
30 * we can make our access_ok test faster
31 */
32#define TASK_SIZE PAGE_OFFSET
33#ifdef __KERNEL__
34#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
35#define STACK_TOP_MAX STACK_TOP
36#endif /* __KERNEL__ */
37
38struct task_struct;
39
40#ifdef __KERNEL__
41struct fpq {
42 unsigned long *insn_addr;
43 unsigned long insn;
44};
45#endif
46
47typedef struct {
48 int seg;
49} mm_segment_t;
50
51/* The Sparc processor specific thread struct. */
52struct thread_struct {
53 struct pt_regs *kregs;
54 unsigned int _pad1;
55
56 /* Special child fork kpsr/kwim values. */
57 unsigned long fork_kpsr __attribute__ ((aligned (8)));
58 unsigned long fork_kwim;
59
60 /* Floating point regs */
61 unsigned long float_regs[32] __attribute__ ((aligned (8)));
62 unsigned long fsr;
63 unsigned long fpqdepth;
64 struct fpq fpqueue[16];
65 unsigned long flags;
66 mm_segment_t current_ds;
67};
68
69#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
70#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
71
72#define INIT_THREAD { \
73 .flags = SPARC_FLAG_KTHREAD, \
74 .current_ds = KERNEL_DS, \
75}
76
77/* Return saved PC of a blocked thread. */
78extern unsigned long thread_saved_pc(struct task_struct *t);
79
80/* Do necessary setup to start up a newly executed thread. */
81static inline void start_thread(struct pt_regs * regs, unsigned long pc,
82 unsigned long sp)
83{
84 register unsigned long zero asm("g1");
85
86 regs->psr = (regs->psr & (PSR_CWP)) | PSR_S;
87 regs->pc = ((pc & (~3)) - 4);
88 regs->npc = regs->pc + 4;
89 regs->y = 0;
90 zero = 0;
91 __asm__ __volatile__("std\t%%g0, [%0 + %3 + 0x00]\n\t"
92 "std\t%%g0, [%0 + %3 + 0x08]\n\t"
93 "std\t%%g0, [%0 + %3 + 0x10]\n\t"
94 "std\t%%g0, [%0 + %3 + 0x18]\n\t"
95 "std\t%%g0, [%0 + %3 + 0x20]\n\t"
96 "std\t%%g0, [%0 + %3 + 0x28]\n\t"
97 "std\t%%g0, [%0 + %3 + 0x30]\n\t"
98 "st\t%1, [%0 + %3 + 0x38]\n\t"
99 "st\t%%g0, [%0 + %3 + 0x3c]"
100 : /* no outputs */
101 : "r" (regs),
102 "r" (sp - sizeof(struct reg_window)),
103 "r" (zero),
104 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))
105 : "memory");
106}
107
108/* Free all resources held by a thread. */
109#define release_thread(tsk) do { } while(0)
110extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
111
112/* Prepare to copy thread state - unlazy all lazy status */
113#define prepare_to_copy(tsk) do { } while (0)
114
115extern unsigned long get_wchan(struct task_struct *);
116
117#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
118#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
119
120#ifdef __KERNEL__
121
122extern struct task_struct *last_task_used_math;
123
124#define cpu_relax() barrier()
125
126#endif
127
128#endif /* __ASM_SPARC_PROCESSOR_H */
diff --git a/include/asm-sparc/processor_64.h b/include/asm-sparc/processor_64.h
new file mode 100644
index 000000000000..70d42801a0d2
--- /dev/null
+++ b/include/asm-sparc/processor_64.h
@@ -0,0 +1,237 @@
1/*
2 * include/asm-sparc64/processor.h
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef __ASM_SPARC64_PROCESSOR_H
8#define __ASM_SPARC64_PROCESSOR_H
9
10/*
11 * Sparc64 implementation of macro that returns current
12 * instruction pointer ("program counter").
13 */
14#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
15
16#include <asm/asi.h>
17#include <asm/pstate.h>
18#include <asm/ptrace.h>
19#include <asm/page.h>
20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/*
26 * User lives in his very own context, and cannot reference us. Note
27 * that TASK_SIZE is a misnomer, it really gives maximum user virtual
28 * address that the kernel will allocate out.
29 *
30 * XXX No longer using virtual page tables, kill this upper limit...
31 */
32#define VA_BITS 44
33#ifndef __ASSEMBLY__
34#define VPTE_SIZE (1UL << (VA_BITS - PAGE_SHIFT + 3))
35#else
36#define VPTE_SIZE (1 << (VA_BITS - PAGE_SHIFT + 3))
37#endif
38
39#define TASK_SIZE ((unsigned long)-VPTE_SIZE)
40#define TASK_SIZE_OF(tsk) \
41 (test_tsk_thread_flag(tsk,TIF_32BIT) ? \
42 (1UL << 32UL) : TASK_SIZE)
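
Plugging in the numbers (assuming PAGE_SHIFT == 13): VPTE_SIZE is 1UL << (44 - 13 + 3), i.e. 16GB, so TASK_SIZE for a 64-bit task is 2^64 minus 16GB, the top slice once reserved for the virtual page tables, while a 32-bit task is capped at 4GB. A standalone check of that arithmetic:

#include <assert.h>

#define VA_BITS         44
#define PAGE_SHIFT      13
#define VPTE_SIZE       (1UL << (VA_BITS - PAGE_SHIFT + 3))
#define TASK_SIZE       ((unsigned long)-VPTE_SIZE)

int main(void)
{
        assert(VPTE_SIZE == (1UL << 34));               /* 16GB */
        assert(TASK_SIZE == 0xfffffffc00000000UL);      /* 2^64 - 2^34 */
        assert((1UL << 32UL) == 0x100000000UL);         /* 32-bit task cap */
        return 0;
}
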
43#ifdef __KERNEL__
44
45#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
46#define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL))
47
48#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
49 STACK_TOP32 : STACK_TOP64)
50
51#define STACK_TOP_MAX STACK_TOP64
52
53#endif
54
55#ifndef __ASSEMBLY__
56
57typedef struct {
58 unsigned char seg;
59} mm_segment_t;
60
61/* The Sparc processor specific thread struct. */
62/* XXX This should die, everything can go into thread_info now. */
63struct thread_struct {
64#ifdef CONFIG_DEBUG_SPINLOCK
65 /* How many spinlocks held by this thread.
66 * Used with spin lock debugging to catch tasks
67 * sleeping illegally with locks held.
68 */
69 int smp_lock_count;
70 unsigned int smp_lock_pc;
71#else
72 int dummy; /* f'in gcc bug... */
73#endif
74};
75
76#endif /* !(__ASSEMBLY__) */
77
78#ifndef CONFIG_DEBUG_SPINLOCK
79#define INIT_THREAD { \
80 0, \
81}
82#else /* CONFIG_DEBUG_SPINLOCK */
83#define INIT_THREAD { \
84/* smp_lock_count, smp_lock_pc, */ \
85 0, 0, \
86}
87#endif /* !(CONFIG_DEBUG_SPINLOCK) */
88
89#ifndef __ASSEMBLY__
90
91#include <linux/types.h>
92
93/* Return saved PC of a blocked thread. */
94struct task_struct;
95extern unsigned long thread_saved_pc(struct task_struct *);
96
97/* On Uniprocessor, even in RMO processes see TSO semantics */
98#ifdef CONFIG_SMP
99#define TSTATE_INITIAL_MM TSTATE_TSO
100#else
101#define TSTATE_INITIAL_MM TSTATE_RMO
102#endif
103
104/* Do necessary setup to start up a newly executed thread. */
105#define start_thread(regs, pc, sp) \
106do { \
107 unsigned long __asi = ASI_PNF; \
108 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
109 regs->tpc = ((pc & (~3)) - 4); \
110 regs->tnpc = regs->tpc + 4; \
111 regs->y = 0; \
112 set_thread_wstate(1 << 3); \
113 if (current_thread_info()->utraps) { \
114 if (*(current_thread_info()->utraps) < 2) \
115 kfree(current_thread_info()->utraps); \
116 else \
117 (*(current_thread_info()->utraps))--; \
118 current_thread_info()->utraps = NULL; \
119 } \
120 __asm__ __volatile__( \
121 "stx %%g0, [%0 + %2 + 0x00]\n\t" \
122 "stx %%g0, [%0 + %2 + 0x08]\n\t" \
123 "stx %%g0, [%0 + %2 + 0x10]\n\t" \
124 "stx %%g0, [%0 + %2 + 0x18]\n\t" \
125 "stx %%g0, [%0 + %2 + 0x20]\n\t" \
126 "stx %%g0, [%0 + %2 + 0x28]\n\t" \
127 "stx %%g0, [%0 + %2 + 0x30]\n\t" \
128 "stx %%g0, [%0 + %2 + 0x38]\n\t" \
129 "stx %%g0, [%0 + %2 + 0x40]\n\t" \
130 "stx %%g0, [%0 + %2 + 0x48]\n\t" \
131 "stx %%g0, [%0 + %2 + 0x50]\n\t" \
132 "stx %%g0, [%0 + %2 + 0x58]\n\t" \
133 "stx %%g0, [%0 + %2 + 0x60]\n\t" \
134 "stx %%g0, [%0 + %2 + 0x68]\n\t" \
135 "stx %1, [%0 + %2 + 0x70]\n\t" \
136 "stx %%g0, [%0 + %2 + 0x78]\n\t" \
137 "wrpr %%g0, (1 << 3), %%wstate\n\t" \
138 : \
139 : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
140 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
141} while (0)
142
143#define start_thread32(regs, pc, sp) \
144do { \
145 unsigned long __asi = ASI_PNF; \
146 pc &= 0x00000000ffffffffUL; \
147 sp &= 0x00000000ffffffffUL; \
148 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
149 regs->tpc = ((pc & (~3)) - 4); \
150 regs->tnpc = regs->tpc + 4; \
151 regs->y = 0; \
152 set_thread_wstate(2 << 3); \
153 if (current_thread_info()->utraps) { \
154 if (*(current_thread_info()->utraps) < 2) \
155 kfree(current_thread_info()->utraps); \
156 else \
157 (*(current_thread_info()->utraps))--; \
158 current_thread_info()->utraps = NULL; \
159 } \
160 __asm__ __volatile__( \
161 "stx %%g0, [%0 + %2 + 0x00]\n\t" \
162 "stx %%g0, [%0 + %2 + 0x08]\n\t" \
163 "stx %%g0, [%0 + %2 + 0x10]\n\t" \
164 "stx %%g0, [%0 + %2 + 0x18]\n\t" \
165 "stx %%g0, [%0 + %2 + 0x20]\n\t" \
166 "stx %%g0, [%0 + %2 + 0x28]\n\t" \
167 "stx %%g0, [%0 + %2 + 0x30]\n\t" \
168 "stx %%g0, [%0 + %2 + 0x38]\n\t" \
169 "stx %%g0, [%0 + %2 + 0x40]\n\t" \
170 "stx %%g0, [%0 + %2 + 0x48]\n\t" \
171 "stx %%g0, [%0 + %2 + 0x50]\n\t" \
172 "stx %%g0, [%0 + %2 + 0x58]\n\t" \
173 "stx %%g0, [%0 + %2 + 0x60]\n\t" \
174 "stx %%g0, [%0 + %2 + 0x68]\n\t" \
175 "stx %1, [%0 + %2 + 0x70]\n\t" \
176 "stx %%g0, [%0 + %2 + 0x78]\n\t" \
177 "wrpr %%g0, (2 << 3), %%wstate\n\t" \
178 : \
179 : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
180 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
181} while (0)
182
183/* Free all resources held by a thread. */
184#define release_thread(tsk) do { } while (0)
185
186/* Prepare to copy thread state - unlazy all lazy status */
187#define prepare_to_copy(tsk) do { } while (0)
188
189extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
190
191extern unsigned long get_wchan(struct task_struct *task);
192
193#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
194#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
195#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
196
197#define cpu_relax() barrier()
198
199/* Prefetch support. This is tuned for UltraSPARC-III and later.
200 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
201 * a shallower prefetch queue than later chips.
202 */
203#define ARCH_HAS_PREFETCH
204#define ARCH_HAS_PREFETCHW
205#define ARCH_HAS_SPINLOCK_PREFETCH
206
207static inline void prefetch(const void *x)
208{
209 /* We do not use the read prefetch mnemonic because that
210 * prefetches into the prefetch-cache which only is accessible
211 * by floating point operations in UltraSPARC-III and later.
212 * By contrast, "#one_write" prefetches into the L2 cache
213 * in shared state.
214 */
215 __asm__ __volatile__("prefetch [%0], #one_write"
216 : /* no outputs */
217 : "r" (x));
218}
219
220static inline void prefetchw(const void *x)
221{
 222 /* The optimal prefetch to use for writes is
223 * "#n_writes". This brings the cacheline into the
224 * L2 cache in "owned" state.
225 */
226 __asm__ __volatile__("prefetch [%0], #n_writes"
227 : /* no outputs */
228 : "r" (x));
229}
230
231#define spin_lock_prefetch(x) prefetchw(x)
232
233#define HAVE_ARCH_PICK_MMAP_LAYOUT
234
235#endif /* !(__ASSEMBLY__) */
236
237#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
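
The prefetch helpers above are meant to be dropped in front of stores; the fragment below is a minimal sketch only, assuming a hypothetical linked-list walk (struct node and bump_all() are invented for illustration, prefetchw() is the helper defined in this header):

/* Prefetch the next node for writing while the current one is updated,
 * so its cache line reaches the L2 in owned state before the store.
 */
struct node {
	struct node *next;
	unsigned long count;
};

static void bump_all(struct node *head)
{
	struct node *p;

	for (p = head; p; p = p->next) {
		if (p->next)
			prefetchw(p->next);	/* emits "prefetch [...], #n_writes" */
		p->count++;
	}
}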
diff --git a/include/asm-sparc/ptrace.h b/include/asm-sparc/ptrace.h
index 11f3bc2bb3f5..f36ab6c30ff3 100644
--- a/include/asm-sparc/ptrace.h
+++ b/include/asm-sparc/ptrace.h
@@ -1,175 +1,8 @@
1#ifndef _SPARC_PTRACE_H 1#ifndef ___ASM_SPARC_PTRACE_H
2#define _SPARC_PTRACE_H 2#define ___ASM_SPARC_PTRACE_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/psr.h> 4#include <asm-sparc/ptrace_64.h>
5 5#else
6/* This struct defines the way the registers are stored on the 6#include <asm-sparc/ptrace_32.h>
7 * stack during a system call and basically all traps.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14struct pt_regs {
15 unsigned long psr;
16 unsigned long pc;
17 unsigned long npc;
18 unsigned long y;
19 unsigned long u_regs[16]; /* globals and ins */
20};
21
22#define UREG_G0 0
23#define UREG_G1 1
24#define UREG_G2 2
25#define UREG_G3 3
26#define UREG_G4 4
27#define UREG_G5 5
28#define UREG_G6 6
29#define UREG_G7 7
30#define UREG_I0 8
31#define UREG_I1 9
32#define UREG_I2 10
33#define UREG_I3 11
34#define UREG_I4 12
35#define UREG_I5 13
36#define UREG_I6 14
37#define UREG_I7 15
38#define UREG_WIM UREG_G0
39#define UREG_FADDR UREG_G0
40#define UREG_FP UREG_I6
41#define UREG_RETPC UREG_I7
42
43static inline bool pt_regs_is_syscall(struct pt_regs *regs)
44{
45 return (regs->psr & PSR_SYSCALL);
46}
47
48static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
49{
50 return (regs->psr &= ~PSR_SYSCALL);
51}
52
53/* A register window */
54struct reg_window {
55 unsigned long locals[8];
56 unsigned long ins[8];
57};
58
59/* A Sparc stack frame */
60struct sparc_stackf {
61 unsigned long locals[8];
62 unsigned long ins[6];
63 struct sparc_stackf *fp;
64 unsigned long callers_pc;
65 char *structptr;
66 unsigned long xargs[6];
67 unsigned long xxargs[1];
68};
69
70#define TRACEREG_SZ sizeof(struct pt_regs)
71#define STACKFRAME_SZ sizeof(struct sparc_stackf)
72
73#ifdef __KERNEL__
74
75#define user_mode(regs) (!((regs)->psr & PSR_PS))
76#define instruction_pointer(regs) ((regs)->pc)
77unsigned long profile_pc(struct pt_regs *);
78extern void show_regs(struct pt_regs *);
79#endif 7#endif
80
81#else /* __ASSEMBLY__ */
82/* For assembly code. */
83#define TRACEREG_SZ 0x50
84#define STACKFRAME_SZ 0x60
85#endif 8#endif
86
87/*
88 * The asm-offsets.h is a generated file, so we cannot include it.
89 * It may be OK for glibc headers, but it's utterly pointless for C code.
90 * The assembly code using those offsets has to include it explicitly.
91 */
92/* #include <asm/asm-offsets.h> */
93
94/* These are for pt_regs. */
95#define PT_PSR 0x0
96#define PT_PC 0x4
97#define PT_NPC 0x8
98#define PT_Y 0xc
99#define PT_G0 0x10
100#define PT_WIM PT_G0
101#define PT_G1 0x14
102#define PT_G2 0x18
103#define PT_G3 0x1c
104#define PT_G4 0x20
105#define PT_G5 0x24
106#define PT_G6 0x28
107#define PT_G7 0x2c
108#define PT_I0 0x30
109#define PT_I1 0x34
110#define PT_I2 0x38
111#define PT_I3 0x3c
112#define PT_I4 0x40
113#define PT_I5 0x44
114#define PT_I6 0x48
115#define PT_FP PT_I6
116#define PT_I7 0x4c
117
118/* Reg_window offsets */
119#define RW_L0 0x00
120#define RW_L1 0x04
121#define RW_L2 0x08
122#define RW_L3 0x0c
123#define RW_L4 0x10
124#define RW_L5 0x14
125#define RW_L6 0x18
126#define RW_L7 0x1c
127#define RW_I0 0x20
128#define RW_I1 0x24
129#define RW_I2 0x28
130#define RW_I3 0x2c
131#define RW_I4 0x30
132#define RW_I5 0x34
133#define RW_I6 0x38
134#define RW_I7 0x3c
135
136/* Stack_frame offsets */
137#define SF_L0 0x00
138#define SF_L1 0x04
139#define SF_L2 0x08
140#define SF_L3 0x0c
141#define SF_L4 0x10
142#define SF_L5 0x14
143#define SF_L6 0x18
144#define SF_L7 0x1c
145#define SF_I0 0x20
146#define SF_I1 0x24
147#define SF_I2 0x28
148#define SF_I3 0x2c
149#define SF_I4 0x30
150#define SF_I5 0x34
151#define SF_FP 0x38
152#define SF_PC 0x3c
153#define SF_RETP 0x40
154#define SF_XARG0 0x44
155#define SF_XARG1 0x48
156#define SF_XARG2 0x4c
157#define SF_XARG3 0x50
158#define SF_XARG4 0x54
159#define SF_XARG5 0x58
160#define SF_XXARG 0x5c
161
162/* Stuff for the ptrace system call */
163#define PTRACE_SPARC_DETACH 11
164#define PTRACE_GETREGS 12
165#define PTRACE_SETREGS 13
166#define PTRACE_GETFPREGS 14
167#define PTRACE_SETFPREGS 15
168#define PTRACE_READDATA 16
169#define PTRACE_WRITEDATA 17
170#define PTRACE_READTEXT 18
171#define PTRACE_WRITETEXT 19
172#define PTRACE_GETFPAREGS 20
173#define PTRACE_SETFPAREGS 21
174
175#endif /* !(_SPARC_PTRACE_H) */
diff --git a/include/asm-sparc/ptrace_32.h b/include/asm-sparc/ptrace_32.h
new file mode 100644
index 000000000000..0401cc7ec38e
--- /dev/null
+++ b/include/asm-sparc/ptrace_32.h
@@ -0,0 +1,175 @@
1#ifndef _SPARC_PTRACE_H
2#define _SPARC_PTRACE_H
3
4#include <asm/psr.h>
5
6/* This struct defines the way the registers are stored on the
7 * stack during a system call and basically all traps.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14struct pt_regs {
15 unsigned long psr;
16 unsigned long pc;
17 unsigned long npc;
18 unsigned long y;
19 unsigned long u_regs[16]; /* globals and ins */
20};
21
22#define UREG_G0 0
23#define UREG_G1 1
24#define UREG_G2 2
25#define UREG_G3 3
26#define UREG_G4 4
27#define UREG_G5 5
28#define UREG_G6 6
29#define UREG_G7 7
30#define UREG_I0 8
31#define UREG_I1 9
32#define UREG_I2 10
33#define UREG_I3 11
34#define UREG_I4 12
35#define UREG_I5 13
36#define UREG_I6 14
37#define UREG_I7 15
38#define UREG_WIM UREG_G0
39#define UREG_FADDR UREG_G0
40#define UREG_FP UREG_I6
41#define UREG_RETPC UREG_I7
42
43static inline bool pt_regs_is_syscall(struct pt_regs *regs)
44{
45 return (regs->psr & PSR_SYSCALL);
46}
47
48static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
49{
50 return (regs->psr &= ~PSR_SYSCALL);
51}
52
53/* A register window */
54struct reg_window {
55 unsigned long locals[8];
56 unsigned long ins[8];
57};
58
59/* A Sparc stack frame */
60struct sparc_stackf {
61 unsigned long locals[8];
62 unsigned long ins[6];
63 struct sparc_stackf *fp;
64 unsigned long callers_pc;
65 char *structptr;
66 unsigned long xargs[6];
67 unsigned long xxargs[1];
68};
69
70#define TRACEREG_SZ sizeof(struct pt_regs)
71#define STACKFRAME_SZ sizeof(struct sparc_stackf)
72
73#ifdef __KERNEL__
74
75#define user_mode(regs) (!((regs)->psr & PSR_PS))
76#define instruction_pointer(regs) ((regs)->pc)
77unsigned long profile_pc(struct pt_regs *);
78extern void show_regs(struct pt_regs *);
79#endif
80
81#else /* __ASSEMBLY__ */
82/* For assembly code. */
83#define TRACEREG_SZ 0x50
84#define STACKFRAME_SZ 0x60
85#endif
86
87/*
88 * The asm-offsets.h is a generated file, so we cannot include it.
89 * It may be OK for glibc headers, but it's utterly pointless for C code.
90 * The assembly code using those offsets has to include it explicitly.
91 */
92/* #include <asm/asm-offsets.h> */
93
94/* These are for pt_regs. */
95#define PT_PSR 0x0
96#define PT_PC 0x4
97#define PT_NPC 0x8
98#define PT_Y 0xc
99#define PT_G0 0x10
100#define PT_WIM PT_G0
101#define PT_G1 0x14
102#define PT_G2 0x18
103#define PT_G3 0x1c
104#define PT_G4 0x20
105#define PT_G5 0x24
106#define PT_G6 0x28
107#define PT_G7 0x2c
108#define PT_I0 0x30
109#define PT_I1 0x34
110#define PT_I2 0x38
111#define PT_I3 0x3c
112#define PT_I4 0x40
113#define PT_I5 0x44
114#define PT_I6 0x48
115#define PT_FP PT_I6
116#define PT_I7 0x4c
117
118/* Reg_window offsets */
119#define RW_L0 0x00
120#define RW_L1 0x04
121#define RW_L2 0x08
122#define RW_L3 0x0c
123#define RW_L4 0x10
124#define RW_L5 0x14
125#define RW_L6 0x18
126#define RW_L7 0x1c
127#define RW_I0 0x20
128#define RW_I1 0x24
129#define RW_I2 0x28
130#define RW_I3 0x2c
131#define RW_I4 0x30
132#define RW_I5 0x34
133#define RW_I6 0x38
134#define RW_I7 0x3c
135
136/* Stack_frame offsets */
137#define SF_L0 0x00
138#define SF_L1 0x04
139#define SF_L2 0x08
140#define SF_L3 0x0c
141#define SF_L4 0x10
142#define SF_L5 0x14
143#define SF_L6 0x18
144#define SF_L7 0x1c
145#define SF_I0 0x20
146#define SF_I1 0x24
147#define SF_I2 0x28
148#define SF_I3 0x2c
149#define SF_I4 0x30
150#define SF_I5 0x34
151#define SF_FP 0x38
152#define SF_PC 0x3c
153#define SF_RETP 0x40
154#define SF_XARG0 0x44
155#define SF_XARG1 0x48
156#define SF_XARG2 0x4c
157#define SF_XARG3 0x50
158#define SF_XARG4 0x54
159#define SF_XARG5 0x58
160#define SF_XXARG 0x5c
161
162/* Stuff for the ptrace system call */
163#define PTRACE_SPARC_DETACH 11
164#define PTRACE_GETREGS 12
165#define PTRACE_SETREGS 13
166#define PTRACE_GETFPREGS 14
167#define PTRACE_SETFPREGS 15
168#define PTRACE_READDATA 16
169#define PTRACE_WRITEDATA 17
170#define PTRACE_READTEXT 18
171#define PTRACE_WRITETEXT 19
172#define PTRACE_GETFPAREGS 20
173#define PTRACE_SETFPAREGS 21
174
175#endif /* !(_SPARC_PTRACE_H) */
diff --git a/include/asm-sparc/ptrace_64.h b/include/asm-sparc/ptrace_64.h
new file mode 100644
index 000000000000..a682e66d5c4a
--- /dev/null
+++ b/include/asm-sparc/ptrace_64.h
@@ -0,0 +1,346 @@
1#ifndef _SPARC64_PTRACE_H
2#define _SPARC64_PTRACE_H
3
4#include <asm/pstate.h>
5
6/* This struct defines the way the registers are stored on the
7 * stack during a system call and basically all traps.
8 */
9
10/* This magic value must have the low 9 bits clear,
11 * as that is where we encode the %tt value, see below.
12 */
13#define PT_REGS_MAGIC 0x57ac6c00
14
15#ifndef __ASSEMBLY__
16
17#include <linux/types.h>
18
19struct pt_regs {
20 unsigned long u_regs[16]; /* globals and ins */
21 unsigned long tstate;
22 unsigned long tpc;
23 unsigned long tnpc;
24 unsigned int y;
25
26 /* We encode a magic number, PT_REGS_MAGIC, along
27 * with the %tt (trap type) register value at trap
28 * entry time. The magic number allows us to identify
29 * accurately a trap stack frame in the stack
30 * unwinder, and the %tt value allows us to test
 31 * things like "in a system call" etc. for an arbitrary
32 * process.
33 *
 34 * The PT_REGS_MAGIC is chosen such that it can be
35 * loaded completely using just a sethi instruction.
36 */
37 unsigned int magic;
38};
39
40static inline int pt_regs_trap_type(struct pt_regs *regs)
41{
42 return regs->magic & 0x1ff;
43}
44
45static inline bool pt_regs_is_syscall(struct pt_regs *regs)
46{
47 return (regs->tstate & TSTATE_SYSCALL);
48}
49
50static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
51{
52 return (regs->tstate &= ~TSTATE_SYSCALL);
53}
54
55struct pt_regs32 {
56 unsigned int psr;
57 unsigned int pc;
58 unsigned int npc;
59 unsigned int y;
60 unsigned int u_regs[16]; /* globals and ins */
61};
62
63#define UREG_G0 0
64#define UREG_G1 1
65#define UREG_G2 2
66#define UREG_G3 3
67#define UREG_G4 4
68#define UREG_G5 5
69#define UREG_G6 6
70#define UREG_G7 7
71#define UREG_I0 8
72#define UREG_I1 9
73#define UREG_I2 10
74#define UREG_I3 11
75#define UREG_I4 12
76#define UREG_I5 13
77#define UREG_I6 14
78#define UREG_I7 15
79#define UREG_FP UREG_I6
80#define UREG_RETPC UREG_I7
81
82/* A V9 register window */
83struct reg_window {
84 unsigned long locals[8];
85 unsigned long ins[8];
86};
87
88/* A 32-bit register window. */
89struct reg_window32 {
90 unsigned int locals[8];
91 unsigned int ins[8];
92};
93
94/* A V9 Sparc stack frame */
95struct sparc_stackf {
96 unsigned long locals[8];
97 unsigned long ins[6];
98 struct sparc_stackf *fp;
99 unsigned long callers_pc;
100 char *structptr;
101 unsigned long xargs[6];
102 unsigned long xxargs[1];
103};
104
105/* A 32-bit Sparc stack frame */
106struct sparc_stackf32 {
107 unsigned int locals[8];
108 unsigned int ins[6];
109 unsigned int fp;
110 unsigned int callers_pc;
111 unsigned int structptr;
112 unsigned int xargs[6];
113 unsigned int xxargs[1];
114};
115
116struct sparc_trapf {
117 unsigned long locals[8];
118 unsigned long ins[8];
119 unsigned long _unused;
120 struct pt_regs *regs;
121};
122
123#define TRACEREG_SZ sizeof(struct pt_regs)
124#define STACKFRAME_SZ sizeof(struct sparc_stackf)
125
126#define TRACEREG32_SZ sizeof(struct pt_regs32)
127#define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
128
129#ifdef __KERNEL__
130
131struct global_reg_snapshot {
132 unsigned long tstate;
133 unsigned long tpc;
134 unsigned long tnpc;
135 unsigned long o7;
136 unsigned long i7;
137 struct thread_info *thread;
138 unsigned long pad1;
139 unsigned long pad2;
140};
141
142#define __ARCH_WANT_COMPAT_SYS_PTRACE
143
144#define force_successful_syscall_return() \
145do { current_thread_info()->syscall_noerror = 1; \
146} while (0)
147#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
148#define instruction_pointer(regs) ((regs)->tpc)
149#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
150#ifdef CONFIG_SMP
151extern unsigned long profile_pc(struct pt_regs *);
152#else
153#define profile_pc(regs) instruction_pointer(regs)
154#endif
155extern void show_regs(struct pt_regs *);
156extern void __show_regs(struct pt_regs *);
157#endif
158
159#else /* __ASSEMBLY__ */
160/* For assembly code. */
161#define TRACEREG_SZ 0xa0
162#define STACKFRAME_SZ 0xc0
163
164#define TRACEREG32_SZ 0x50
165#define STACKFRAME32_SZ 0x60
166#endif
167
168#ifdef __KERNEL__
169#define STACK_BIAS 2047
170#endif
171
172/* These are for pt_regs. */
173#define PT_V9_G0 0x00
174#define PT_V9_G1 0x08
175#define PT_V9_G2 0x10
176#define PT_V9_G3 0x18
177#define PT_V9_G4 0x20
178#define PT_V9_G5 0x28
179#define PT_V9_G6 0x30
180#define PT_V9_G7 0x38
181#define PT_V9_I0 0x40
182#define PT_V9_I1 0x48
183#define PT_V9_I2 0x50
184#define PT_V9_I3 0x58
185#define PT_V9_I4 0x60
186#define PT_V9_I5 0x68
187#define PT_V9_I6 0x70
188#define PT_V9_FP PT_V9_I6
189#define PT_V9_I7 0x78
190#define PT_V9_TSTATE 0x80
191#define PT_V9_TPC 0x88
192#define PT_V9_TNPC 0x90
193#define PT_V9_Y 0x98
194#define PT_V9_MAGIC 0x9c
195#define PT_TSTATE PT_V9_TSTATE
196#define PT_TPC PT_V9_TPC
197#define PT_TNPC PT_V9_TNPC
198
199/* These for pt_regs32. */
200#define PT_PSR 0x0
201#define PT_PC 0x4
202#define PT_NPC 0x8
203#define PT_Y 0xc
204#define PT_G0 0x10
205#define PT_WIM PT_G0
206#define PT_G1 0x14
207#define PT_G2 0x18
208#define PT_G3 0x1c
209#define PT_G4 0x20
210#define PT_G5 0x24
211#define PT_G6 0x28
212#define PT_G7 0x2c
213#define PT_I0 0x30
214#define PT_I1 0x34
215#define PT_I2 0x38
216#define PT_I3 0x3c
217#define PT_I4 0x40
218#define PT_I5 0x44
219#define PT_I6 0x48
220#define PT_FP PT_I6
221#define PT_I7 0x4c
222
223/* Reg_window offsets */
224#define RW_V9_L0 0x00
225#define RW_V9_L1 0x08
226#define RW_V9_L2 0x10
227#define RW_V9_L3 0x18
228#define RW_V9_L4 0x20
229#define RW_V9_L5 0x28
230#define RW_V9_L6 0x30
231#define RW_V9_L7 0x38
232#define RW_V9_I0 0x40
233#define RW_V9_I1 0x48
234#define RW_V9_I2 0x50
235#define RW_V9_I3 0x58
236#define RW_V9_I4 0x60
237#define RW_V9_I5 0x68
238#define RW_V9_I6 0x70
239#define RW_V9_I7 0x78
240
241#define RW_L0 0x00
242#define RW_L1 0x04
243#define RW_L2 0x08
244#define RW_L3 0x0c
245#define RW_L4 0x10
246#define RW_L5 0x14
247#define RW_L6 0x18
248#define RW_L7 0x1c
249#define RW_I0 0x20
250#define RW_I1 0x24
251#define RW_I2 0x28
252#define RW_I3 0x2c
253#define RW_I4 0x30
254#define RW_I5 0x34
255#define RW_I6 0x38
256#define RW_I7 0x3c
257
258/* Stack_frame offsets */
259#define SF_V9_L0 0x00
260#define SF_V9_L1 0x08
261#define SF_V9_L2 0x10
262#define SF_V9_L3 0x18
263#define SF_V9_L4 0x20
264#define SF_V9_L5 0x28
265#define SF_V9_L6 0x30
266#define SF_V9_L7 0x38
267#define SF_V9_I0 0x40
268#define SF_V9_I1 0x48
269#define SF_V9_I2 0x50
270#define SF_V9_I3 0x58
271#define SF_V9_I4 0x60
272#define SF_V9_I5 0x68
273#define SF_V9_FP 0x70
274#define SF_V9_PC 0x78
275#define SF_V9_RETP 0x80
276#define SF_V9_XARG0 0x88
277#define SF_V9_XARG1 0x90
278#define SF_V9_XARG2 0x98
279#define SF_V9_XARG3 0xa0
280#define SF_V9_XARG4 0xa8
281#define SF_V9_XARG5 0xb0
282#define SF_V9_XXARG 0xb8
283
284#define SF_L0 0x00
285#define SF_L1 0x04
286#define SF_L2 0x08
287#define SF_L3 0x0c
288#define SF_L4 0x10
289#define SF_L5 0x14
290#define SF_L6 0x18
291#define SF_L7 0x1c
292#define SF_I0 0x20
293#define SF_I1 0x24
294#define SF_I2 0x28
295#define SF_I3 0x2c
296#define SF_I4 0x30
297#define SF_I5 0x34
298#define SF_FP 0x38
299#define SF_PC 0x3c
300#define SF_RETP 0x40
301#define SF_XARG0 0x44
302#define SF_XARG1 0x48
303#define SF_XARG2 0x4c
304#define SF_XARG3 0x50
305#define SF_XARG4 0x54
306#define SF_XARG5 0x58
307#define SF_XXARG 0x5c
308
309#ifdef __KERNEL__
310
311/* global_reg_snapshot offsets */
312#define GR_SNAP_TSTATE 0x00
313#define GR_SNAP_TPC 0x08
314#define GR_SNAP_TNPC 0x10
315#define GR_SNAP_O7 0x18
316#define GR_SNAP_I7 0x20
317#define GR_SNAP_THREAD 0x28
318#define GR_SNAP_PAD1 0x30
319#define GR_SNAP_PAD2 0x38
320
321#endif /* __KERNEL__ */
322
323/* Stuff for the ptrace system call */
324#define PTRACE_SPARC_DETACH 11
325#define PTRACE_GETREGS 12
326#define PTRACE_SETREGS 13
327#define PTRACE_GETFPREGS 14
328#define PTRACE_SETFPREGS 15
329#define PTRACE_READDATA 16
330#define PTRACE_WRITEDATA 17
331#define PTRACE_READTEXT 18
332#define PTRACE_WRITETEXT 19
333#define PTRACE_GETFPAREGS 20
334#define PTRACE_SETFPAREGS 21
335
 336/* These are for debugging 64-bit processes, either from a 32-bit or 64-bit
337 * parent. Thus their complements are for debugging 32-bit processes only.
338 */
339
340#define PTRACE_GETREGS64 22
341#define PTRACE_SETREGS64 23
342/* PTRACE_SYSCALL is 24 */
343#define PTRACE_GETFPREGS64 25
344#define PTRACE_SETFPREGS64 26
345
346#endif /* !(_SPARC64_PTRACE_H) */
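
As a hedged illustration of the PT_REGS_MAGIC scheme described in the header above: the upper bits of regs->magic must equal PT_REGS_MAGIC for the frame to be a trap frame, and the low 9 bits hold the %tt value that pt_regs_trap_type() returns. The helper below is hypothetical and not part of the header:

/* Sketch: return the trap type if 'regs' looks like a real trap frame,
 * or -1 if the magic check fails.
 */
static int trap_type_or_invalid(struct pt_regs *regs)
{
	if ((regs->magic & ~0x1ff) != PT_REGS_MAGIC)
		return -1;			/* not a trap stack frame */
	return pt_regs_trap_type(regs);		/* %tt recorded at trap entry */
}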
diff --git a/include/asm-sparc/reg.h b/include/asm-sparc/reg.h
index ea0a7e590bb3..cb34b0a49aad 100644
--- a/include/asm-sparc/reg.h
+++ b/include/asm-sparc/reg.h
@@ -1,79 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_REG_H
2 * linux/include/asm-sparc/reg.h 2#define ___ASM_SPARC_REG_H
3 * Layout of the registers as expected by gdb on the Sparc 3#if defined(__sparc__) && defined(__arch64__)
4 * we should replace the user.h definitions with those in 4#include <asm-sparc/reg_64.h>
5 * this file, we don't even use the other 5#else
6 * -miguel 6#include <asm-sparc/reg_32.h>
7 * 7#endif
8 * The names of the structures, constants and aliases in this file 8#endif
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC_REG_H
15#define __SPARC_REG_H
16
17struct regs {
18 int r_psr;
19#define r_ps r_psr
20 int r_pc;
21 int r_npc;
22 int r_y;
23 int r_g1;
24 int r_g2;
25 int r_g3;
26 int r_g4;
27 int r_g5;
28 int r_g6;
29 int r_g7;
30 int r_o0;
31 int r_o1;
32 int r_o2;
33 int r_o3;
34 int r_o4;
35 int r_o5;
36 int r_o6;
37 int r_o7;
38};
39
40struct fpq {
41 unsigned long *addr;
42 unsigned long instr;
43};
44
45struct fq {
46 union {
47 double whole;
48 struct fpq fpq;
49 } FQu;
50};
51
52#define FPU_REGS_TYPE unsigned int
53#define FPU_FSR_TYPE unsigned
54
55struct fp_status {
56 union {
57 FPU_REGS_TYPE Fpu_regs[32];
58 double Fpu_dregs[16];
59 } fpu_fr;
60 FPU_FSR_TYPE Fpu_fsr;
61 unsigned Fpu_flags;
62 unsigned Fpu_extra;
63 unsigned Fpu_qcnt;
64 struct fq Fpu_q[16];
65};
66
67#define fpu_regs f_fpstatus.fpu_fr.Fpu_regs
68#define fpu_dregs f_fpstatus.fpu_fr.Fpu_dregs
69#define fpu_fsr f_fpstatus.Fpu_fsr
70#define fpu_flags f_fpstatus.Fpu_flags
71#define fpu_extra f_fpstatus.Fpu_extra
72#define fpu_q f_fpstatus.Fpu_q
73#define fpu_qcnt f_fpstatus.Fpu_qcnt
74
75struct fpu {
76 struct fp_status f_fpstatus;
77};
78
79#endif /* __SPARC_REG_H */
diff --git a/include/asm-sparc/reg_32.h b/include/asm-sparc/reg_32.h
new file mode 100644
index 000000000000..42fecfcd97e7
--- /dev/null
+++ b/include/asm-sparc/reg_32.h
@@ -0,0 +1,79 @@
1/*
2 * linux/include/asm-sparc/reg.h
3 * Layout of the registers as expected by gdb on the Sparc
4 * we should replace the user.h definitions with those in
5 * this file, we don't even use the other
6 * -miguel
7 *
8 * The names of the structures, constants and aliases in this file
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC_REG_H
15#define __SPARC_REG_H
16
17struct regs {
18 int r_psr;
19#define r_ps r_psr
20 int r_pc;
21 int r_npc;
22 int r_y;
23 int r_g1;
24 int r_g2;
25 int r_g3;
26 int r_g4;
27 int r_g5;
28 int r_g6;
29 int r_g7;
30 int r_o0;
31 int r_o1;
32 int r_o2;
33 int r_o3;
34 int r_o4;
35 int r_o5;
36 int r_o6;
37 int r_o7;
38};
39
40struct fpq {
41 unsigned long *addr;
42 unsigned long instr;
43};
44
45struct fq {
46 union {
47 double whole;
48 struct fpq fpq;
49 } FQu;
50};
51
52#define FPU_REGS_TYPE unsigned int
53#define FPU_FSR_TYPE unsigned
54
55struct fp_status {
56 union {
57 FPU_REGS_TYPE Fpu_regs[32];
58 double Fpu_dregs[16];
59 } fpu_fr;
60 FPU_FSR_TYPE Fpu_fsr;
61 unsigned Fpu_flags;
62 unsigned Fpu_extra;
63 unsigned Fpu_qcnt;
64 struct fq Fpu_q[16];
65};
66
67#define fpu_regs f_fpstatus.fpu_fr.Fpu_regs
68#define fpu_dregs f_fpstatus.fpu_fr.Fpu_dregs
69#define fpu_fsr f_fpstatus.Fpu_fsr
70#define fpu_flags f_fpstatus.Fpu_flags
71#define fpu_extra f_fpstatus.Fpu_extra
72#define fpu_q f_fpstatus.Fpu_q
73#define fpu_qcnt f_fpstatus.Fpu_qcnt
74
75struct fpu {
76 struct fp_status f_fpstatus;
77};
78
79#endif /* __SPARC_REG_H */
diff --git a/include/asm-sparc/reg_64.h b/include/asm-sparc/reg_64.h
new file mode 100644
index 000000000000..eb24a07ff4d5
--- /dev/null
+++ b/include/asm-sparc/reg_64.h
@@ -0,0 +1,56 @@
1/*
2 * linux/asm-sparc64/reg.h
3 * Layout of the registers as expected by gdb on the Sparc
4 * we should replace the user.h definitions with those in
5 * this file, we don't even use the other
6 * -miguel
7 *
8 * The names of the structures, constants and aliases in this file
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC64_REG_H
15#define __SPARC64_REG_H
16
17struct regs {
18 unsigned long r_g1;
19 unsigned long r_g2;
20 unsigned long r_g3;
21 unsigned long r_g4;
22 unsigned long r_g5;
23 unsigned long r_g6;
24 unsigned long r_g7;
25 unsigned long r_o0;
26 unsigned long r_o1;
27 unsigned long r_o2;
28 unsigned long r_o3;
29 unsigned long r_o4;
30 unsigned long r_o5;
31 unsigned long r_o6;
32 unsigned long r_o7;
33 unsigned long __pad;
34 unsigned long r_tstate;
35 unsigned long r_tpc;
36 unsigned long r_tnpc;
37 unsigned int r_y;
38 unsigned int r_fprs;
39};
40
41#define FPU_REGS_TYPE unsigned int
42#define FPU_FSR_TYPE unsigned long
43
44struct fp_status {
45 unsigned long fpu_fr[32];
46 unsigned long Fpu_fsr;
47};
48
49struct fpu {
50 struct fp_status f_fpstatus;
51};
52
53#define fpu_regs f_fpstatus.fpu_fr
54#define fpu_fsr f_fpstatus.Fpu_fsr
55
56#endif /* __SPARC64_REG_H */
diff --git a/include/asm-sparc/sbus.h b/include/asm-sparc/sbus.h
index f1d2fe1c9a30..8f29a1979665 100644
--- a/include/asm-sparc/sbus.h
+++ b/include/asm-sparc/sbus.h
@@ -1,153 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_SBUS_H
2 * sbus.h: Defines for the Sun SBus. 2#define ___ASM_SPARC_SBUS_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4#include <asm-sparc/sbus_64.h>
5 */ 5#else
6 6#include <asm-sparc/sbus_32.h>
7#ifndef _SPARC_SBUS_H 7#endif
8#define _SPARC_SBUS_H 8#endif
9
10#include <linux/dma-mapping.h>
11#include <linux/ioport.h>
12
13#include <asm/oplib.h>
14#include <asm/prom.h>
15#include <asm/of_device.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0xf8000000
26#define SBUS_OFF_MASK 0x01ffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Link to devices on this SBus */
72 struct sbus_bus *next; /* next SBus, if more than one SBus */
73 int prom_node; /* PROM device tree node for this SBus */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int devid;
81 int board;
82};
83#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
84
85extern struct sbus_bus *sbus_root;
86
87static inline int
88sbus_is_slave(struct sbus_dev *dev)
89{
90 /* XXX Have to write this for sun4c's */
91 return 0;
92}
93
94/* Device probing routines could find these handy */
95#define for_each_sbus(bus) \
96 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
97
98#define for_each_sbusdev(device, bus) \
99 for((device) = (bus)->devices; (device); (device)=(device)->next)
100
101#define for_all_sbusdev(device, bus) \
102 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
103 for ((device) = (bus)->devices; (device); (device) = (device)->next)
104
105/* Driver DVMA interfaces. */
106#define sbus_can_dma_64bit(sdev) (0) /* actually, sparc_cpu_model==sun4d */
107#define sbus_can_burst64(sdev) (0) /* actually, sparc_cpu_model==sun4d */
108extern void sbus_set_sbus64(struct sbus_dev *, int);
109extern void sbus_fill_device_irq(struct sbus_dev *);
110
111/* These yield IOMMU mappings in consistent mode. */
112extern void *sbus_alloc_consistent(struct sbus_dev *, long, u32 *dma_addrp);
113extern void sbus_free_consistent(struct sbus_dev *, long, void *, u32);
114void prom_adjust_ranges(struct linux_prom_ranges *, int,
115 struct linux_prom_ranges *, int);
116
117#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
118#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
119#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
120#define SBUS_DMA_NONE DMA_NONE
121
122/* All the rest use streaming mode mappings. */
123extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
124extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
125extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
126extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
127
128/* Finally, allow explicit synchronization of streamable mappings. */
129extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
130#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
131extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
132extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
133#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
134extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
135
136/* Eric Brower (ebrower@usa.net)
137 * Translate SBus interrupt levels to ino values--
138 * this is used when converting sbus "interrupts" OBP
139 * node values to "intr" node values, and is platform
140 * dependent. If only we could call OBP with
141 * "sbus-intr>cpu (sbint -- ino)" from kernel...
142 * See .../drivers/sbus/sbus.c for details.
143 */
144BTFIXUPDEF_CALL(unsigned int, sbint_to_irq, struct sbus_dev *sdev, unsigned int)
145#define sbint_to_irq(sdev, sbint) BTFIXUP_CALL(sbint_to_irq)(sdev, sbint)
146
147extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
148extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
149extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
150extern int sbus_arch_preinit(void);
151extern void sbus_arch_postinit(void);
152
153#endif /* !(_SPARC_SBUS_H) */
diff --git a/include/asm-sparc/sbus_32.h b/include/asm-sparc/sbus_32.h
new file mode 100644
index 000000000000..77b5d3aadc99
--- /dev/null
+++ b/include/asm-sparc/sbus_32.h
@@ -0,0 +1,153 @@
1/*
2 * sbus.h: Defines for the Sun SBus.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC_SBUS_H
8#define _SPARC_SBUS_H
9
10#include <linux/dma-mapping.h>
11#include <linux/ioport.h>
12
13#include <asm/oplib.h>
14#include <asm/prom.h>
15#include <asm/of_device.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0xf8000000
26#define SBUS_OFF_MASK 0x01ffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Link to devices on this SBus */
72 struct sbus_bus *next; /* next SBus, if more than one SBus */
73 int prom_node; /* PROM device tree node for this SBus */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int devid;
81 int board;
82};
83#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
84
85extern struct sbus_bus *sbus_root;
86
87static inline int
88sbus_is_slave(struct sbus_dev *dev)
89{
90 /* XXX Have to write this for sun4c's */
91 return 0;
92}
93
94/* Device probing routines could find these handy */
95#define for_each_sbus(bus) \
96 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
97
98#define for_each_sbusdev(device, bus) \
99 for((device) = (bus)->devices; (device); (device)=(device)->next)
100
101#define for_all_sbusdev(device, bus) \
102 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
103 for ((device) = (bus)->devices; (device); (device) = (device)->next)
104
105/* Driver DVMA interfaces. */
106#define sbus_can_dma_64bit(sdev) (0) /* actually, sparc_cpu_model==sun4d */
107#define sbus_can_burst64(sdev) (0) /* actually, sparc_cpu_model==sun4d */
108extern void sbus_set_sbus64(struct sbus_dev *, int);
109extern void sbus_fill_device_irq(struct sbus_dev *);
110
111/* These yield IOMMU mappings in consistent mode. */
112extern void *sbus_alloc_consistent(struct sbus_dev *, long, u32 *dma_addrp);
113extern void sbus_free_consistent(struct sbus_dev *, long, void *, u32);
114void prom_adjust_ranges(struct linux_prom_ranges *, int,
115 struct linux_prom_ranges *, int);
116
117#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
118#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
119#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
120#define SBUS_DMA_NONE DMA_NONE
121
122/* All the rest use streaming mode mappings. */
123extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
124extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
125extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
126extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
127
128/* Finally, allow explicit synchronization of streamable mappings. */
129extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
130#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
131extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
132extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
133#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
134extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
135
136/* Eric Brower (ebrower@usa.net)
137 * Translate SBus interrupt levels to ino values--
138 * this is used when converting sbus "interrupts" OBP
139 * node values to "intr" node values, and is platform
140 * dependent. If only we could call OBP with
141 * "sbus-intr>cpu (sbint -- ino)" from kernel...
142 * See .../drivers/sbus/sbus.c for details.
143 */
144BTFIXUPDEF_CALL(unsigned int, sbint_to_irq, struct sbus_dev *sdev, unsigned int)
145#define sbint_to_irq(sdev, sbint) BTFIXUP_CALL(sbint_to_irq)(sdev, sbint)
146
147extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
148extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
149extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
150extern int sbus_arch_preinit(void);
151extern void sbus_arch_postinit(void);
152
153#endif /* !(_SPARC_SBUS_H) */
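
A quick worked example of the slot/offset arithmetic defined above, using the 32-bit constants (SUN_SBUS_BVADDR 0xf8000000 and the 25-bit slot shift); slot 2 and offset 0x4000 are arbitrary values chosen only to show the math:

/* sbus_devaddr(2, 0x4000) = 0xf8000000 + (2 << 25) + 0x4000
 *                         = 0xf8000000 + 0x04000000 + 0x4000
 *                         = 0xfc004000
 * sbus_dev_slot(0xfc004000) = (0xfc004000 - 0xf8000000) >> 25 = 2
 * The 64-bit variant below uses a zero base and a 28-bit shift instead.
 */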
diff --git a/include/asm-sparc/sbus_64.h b/include/asm-sparc/sbus_64.h
new file mode 100644
index 000000000000..0e16b6dd7e96
--- /dev/null
+++ b/include/asm-sparc/sbus_64.h
@@ -0,0 +1,190 @@
1/* sbus.h: Defines for the Sun SBus.
2 *
3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SBUS_H
7#define _SPARC64_SBUS_H
8
9#include <linux/dma-mapping.h>
10#include <linux/ioport.h>
11
12#include <asm/oplib.h>
13#include <asm/prom.h>
14#include <asm/of_device.h>
15#include <asm/iommu.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0x00000000
26#define SBUS_OFF_MASK 0x0fffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Tree of SBUS devices */
72 struct sbus_bus *next; /* Next SBUS in system */
73 int prom_node; /* OBP node of SBUS */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int portid;
81};
82#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
83
84extern struct sbus_bus *sbus_root;
85
86/* Device probing routines could find these handy */
87#define for_each_sbus(bus) \
88 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
89
90#define for_each_sbusdev(device, bus) \
91 for((device) = (bus)->devices; (device); (device)=(device)->next)
92
93#define for_all_sbusdev(device, bus) \
94 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
95 for ((device) = (bus)->devices; (device); (device) = (device)->next)
96
97/* Driver DVMA interfaces. */
98#define sbus_can_dma_64bit(sdev) (1)
99#define sbus_can_burst64(sdev) (1)
100extern void sbus_set_sbus64(struct sbus_dev *, int);
101extern void sbus_fill_device_irq(struct sbus_dev *);
102
103static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size,
104 dma_addr_t *dma_handle)
105{
106 return dma_alloc_coherent(&sdev->ofdev.dev, size,
107 dma_handle, GFP_ATOMIC);
108}
109
110static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
111 void *vaddr, dma_addr_t dma_handle)
112{
113 return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
114}
115
116#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
117#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
118#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
119#define SBUS_DMA_NONE DMA_NONE
120
121/* All the rest use streaming mode mappings. */
122static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
123 size_t size, int direction)
124{
125 return dma_map_single(&sdev->ofdev.dev, ptr, size,
126 (enum dma_data_direction) direction);
127}
128
129static inline void sbus_unmap_single(struct sbus_dev *sdev,
130 dma_addr_t dma_addr, size_t size,
131 int direction)
132{
133 dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
134 (enum dma_data_direction) direction);
135}
136
137static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
138 int nents, int direction)
139{
140 return dma_map_sg(&sdev->ofdev.dev, sg, nents,
141 (enum dma_data_direction) direction);
142}
143
144static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
145 int nents, int direction)
146{
147 dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
148 (enum dma_data_direction) direction);
149}
150
151/* Finally, allow explicit synchronization of streamable mappings. */
152static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
153 dma_addr_t dma_handle,
154 size_t size, int direction)
155{
156 dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
157 (enum dma_data_direction) direction);
158}
159#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
160
161static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
162 dma_addr_t dma_handle,
163 size_t size, int direction)
164{
165 /* No flushing needed to sync cpu writes to the device. */
166}
167
168static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
169 struct scatterlist *sg,
170 int nents, int direction)
171{
172 dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
173 (enum dma_data_direction) direction);
174}
175#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
176
177static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
178 struct scatterlist *sg,
179 int nents, int direction)
180{
181 /* No flushing needed to sync cpu writes to the device. */
182}
183
184extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
185extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
186extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
187extern int sbus_arch_preinit(void);
188extern void sbus_arch_postinit(void);
189
190#endif /* !(_SPARC64_SBUS_H) */
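
The streaming-mode wrappers above map directly onto the generic DMA API; the fragment below is a hedged sketch of the usual receive pattern (map, let the device fill the buffer, sync for the CPU, unmap). The driver, buffer, and hardware-programming step are hypothetical; only the sbus_* calls and SBUS_DMA_FROMDEVICE come from this header, and error handling is omitted:

/* Hypothetical driver fragment, for illustration only. */
static void recv_into(struct sbus_dev *sdev, void *buf, size_t len)
{
	dma_addr_t dvma;

	dvma = sbus_map_single(sdev, buf, len, SBUS_DMA_FROMDEVICE);

	/* ... program the hardware with 'dvma' and wait for completion ... */

	/* Make the device's writes visible to the CPU before touching 'buf'. */
	sbus_dma_sync_single_for_cpu(sdev, dvma, len, SBUS_DMA_FROMDEVICE);

	/* ... consume the data in 'buf' ... */

	sbus_unmap_single(sdev, dvma, len, SBUS_DMA_FROMDEVICE);
}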
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index c82609ca1d0f..b1a0e316c2b6 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -1,26 +1,8 @@
1#ifndef _SPARC_SCATTERLIST_H 1#ifndef ___ASM_SPARC_SCATTERLIST_H
2#define _SPARC_SCATTERLIST_H 2#define ___ASM_SPARC_SCATTERLIST_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/types.h> 4#include <asm-sparc/scatterlist_64.h>
5 5#else
6struct scatterlist { 6#include <asm-sparc/scatterlist_32.h>
7#ifdef CONFIG_DEBUG_SG 7#endif
8 unsigned long sg_magic;
9#endif 8#endif
10 unsigned long page_link;
11 unsigned int offset;
12
13 unsigned int length;
14
15 __u32 dvma_address; /* A place to hang host-specific addresses at. */
16 __u32 dvma_length;
17};
18
19#define sg_dma_address(sg) ((sg)->dvma_address)
20#define sg_dma_len(sg) ((sg)->dvma_length)
21
22#define ISA_DMA_THRESHOLD (~0UL)
23
24#define ARCH_HAS_SG_CHAIN
25
26#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc/scatterlist_32.h b/include/asm-sparc/scatterlist_32.h
new file mode 100644
index 000000000000..c82609ca1d0f
--- /dev/null
+++ b/include/asm-sparc/scatterlist_32.h
@@ -0,0 +1,26 @@
1#ifndef _SPARC_SCATTERLIST_H
2#define _SPARC_SCATTERLIST_H
3
4#include <linux/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12
13 unsigned int length;
14
15 __u32 dvma_address; /* A place to hang host-specific addresses at. */
16 __u32 dvma_length;
17};
18
19#define sg_dma_address(sg) ((sg)->dvma_address)
20#define sg_dma_len(sg) ((sg)->dvma_length)
21
22#define ISA_DMA_THRESHOLD (~0UL)
23
24#define ARCH_HAS_SG_CHAIN
25
26#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc/scatterlist_64.h b/include/asm-sparc/scatterlist_64.h
new file mode 100644
index 000000000000..81bd058f9382
--- /dev/null
+++ b/include/asm-sparc/scatterlist_64.h
@@ -0,0 +1,27 @@
1#ifndef _SPARC64_SCATTERLIST_H
2#define _SPARC64_SCATTERLIST_H
3
4#include <asm/page.h>
5#include <asm/types.h>
6
7struct scatterlist {
8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
12 unsigned int offset;
13
14 unsigned int length;
15
16 dma_addr_t dma_address;
17 __u32 dma_length;
18};
19
20#define sg_dma_address(sg) ((sg)->dma_address)
21#define sg_dma_len(sg) ((sg)->dma_length)
22
23#define ISA_DMA_THRESHOLD (~0UL)
24
25#define ARCH_HAS_SG_CHAIN
26
27#endif /* !(_SPARC64_SCATTERLIST_H) */
diff --git a/include/asm-sparc/sections.h b/include/asm-sparc/sections.h
index 6832841df051..cbd019162425 100644
--- a/include/asm-sparc/sections.h
+++ b/include/asm-sparc/sections.h
@@ -1,6 +1,8 @@
1#ifndef _SPARC_SECTIONS_H 1#ifndef ___ASM_SPARC_SECTIONS_H
2#define _SPARC_SECTIONS_H 2#define ___ASM_SPARC_SECTIONS_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-generic/sections.h> 4#include <asm-sparc/sections_64.h>
5 5#else
6#include <asm-sparc/sections_32.h>
7#endif
6#endif 8#endif
diff --git a/include/asm-sparc/sections_32.h b/include/asm-sparc/sections_32.h
new file mode 100644
index 000000000000..6832841df051
--- /dev/null
+++ b/include/asm-sparc/sections_32.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC_SECTIONS_H
2#define _SPARC_SECTIONS_H
3
4#include <asm-generic/sections.h>
5
6#endif
diff --git a/include/asm-sparc/sections_64.h b/include/asm-sparc/sections_64.h
new file mode 100644
index 000000000000..3f4b9fdc28d0
--- /dev/null
+++ b/include/asm-sparc/sections_64.h
@@ -0,0 +1,9 @@
1#ifndef _SPARC64_SECTIONS_H
2#define _SPARC64_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7extern char _start[];
8
9#endif
diff --git a/include/asm-sparc/sfp-machine.h b/include/asm-sparc/sfp-machine.h
index 266a42b8f99f..c676fcc2dd27 100644
--- a/include/asm-sparc/sfp-machine.h
+++ b/include/asm-sparc/sfp-machine.h
@@ -1,212 +1,8 @@
1/* Machine-dependent software floating-point definitions. 1#ifndef ___ASM_SPARC_SFP_MACHINE_H
2 Sparc userland (_Q_*) version. 2#define ___ASM_SPARC_SFP_MACHINE_H
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. 3#if defined(__sparc__) && defined(__arch64__)
4 This file is part of the GNU C Library. 4#include <asm-sparc/sfp-machine_64.h>
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz),
7 David S. Miller (davem@redhat.com) and
8 Peter Maydell (pmaydell@chiark.greenend.org.uk).
9
10 The GNU C Library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Library General Public License as
12 published by the Free Software Foundation; either version 2 of the
13 License, or (at your option) any later version.
14
15 The GNU C Library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Library General Public License for more details.
19
20 You should have received a copy of the GNU Library General Public
21 License along with the GNU C Library; see the file COPYING.LIB. If
22 not, write to the Free Software Foundation, Inc.,
23 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
24
25#ifndef _SFP_MACHINE_H
26#define _SFP_MACHINE_H
27
28
29#define _FP_W_TYPE_SIZE 32
30#define _FP_W_TYPE unsigned long
31#define _FP_WS_TYPE signed long
32#define _FP_I_TYPE long
33
34#define _FP_MUL_MEAT_S(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_D(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
38#define _FP_MUL_MEAT_Q(R,X,Y) \
39 _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
40
41#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
42#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
43#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
44
45#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
46#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
47#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
48#define _FP_NANSIGN_S 0
49#define _FP_NANSIGN_D 0
50#define _FP_NANSIGN_Q 0
51
52#define _FP_KEEPNANFRACP 1
53
54/* If one NaN is signaling and the other is not,
55 * we choose that one, otherwise we choose X.
56 */
57/* For _Qp_* and _Q_*, this should prefer X, for
58 * CPU instruction emulation this should prefer Y.
 59 * (see SPARCv9 B.2.2 section).
60 */
61#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
62 do { \
63 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
64 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
65 { \
66 R##_s = X##_s; \
67 _FP_FRAC_COPY_##wc(R,X); \
68 } \
69 else \
70 { \
71 R##_s = Y##_s; \
72 _FP_FRAC_COPY_##wc(R,Y); \
73 } \
74 R##_c = FP_CLS_NAN; \
75 } while (0)
76
77/* Some assembly to speed things up. */
78#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
79 __asm__ ("addcc %r7,%8,%2\n\t" \
80 "addxcc %r5,%6,%1\n\t" \
81 "addx %r3,%4,%0\n" \
82 : "=r" ((USItype)(r2)), \
83 "=&r" ((USItype)(r1)), \
84 "=&r" ((USItype)(r0)) \
85 : "%rJ" ((USItype)(x2)), \
86 "rI" ((USItype)(y2)), \
87 "%rJ" ((USItype)(x1)), \
88 "rI" ((USItype)(y1)), \
89 "%rJ" ((USItype)(x0)), \
90 "rI" ((USItype)(y0)) \
91 : "cc")
92
93#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
94 __asm__ ("subcc %r7,%8,%2\n\t" \
95 "subxcc %r5,%6,%1\n\t" \
96 "subx %r3,%4,%0\n" \
97 : "=r" ((USItype)(r2)), \
98 "=&r" ((USItype)(r1)), \
99 "=&r" ((USItype)(r0)) \
100 : "%rJ" ((USItype)(x2)), \
101 "rI" ((USItype)(y2)), \
102 "%rJ" ((USItype)(x1)), \
103 "rI" ((USItype)(y1)), \
104 "%rJ" ((USItype)(x0)), \
105 "rI" ((USItype)(y0)) \
106 : "cc")
107
108#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
109 do { \
110 /* We need to fool gcc, as we need to pass more than 10 \
111 input/outputs. */ \
112 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
113 __asm__ __volatile__ ( \
114 "addcc %r8,%9,%1\n\t" \
115 "addxcc %r6,%7,%0\n\t" \
116 "addxcc %r4,%5,%%g2\n\t" \
117 "addx %r2,%3,%%g1\n\t" \
118 : "=&r" ((USItype)(r1)), \
119 "=&r" ((USItype)(r0)) \
120 : "%rJ" ((USItype)(x3)), \
121 "rI" ((USItype)(y3)), \
122 "%rJ" ((USItype)(x2)), \
123 "rI" ((USItype)(y2)), \
124 "%rJ" ((USItype)(x1)), \
125 "rI" ((USItype)(y1)), \
126 "%rJ" ((USItype)(x0)), \
127 "rI" ((USItype)(y0)) \
128 : "cc", "g1", "g2"); \
129 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
130 r3 = _t1; r2 = _t2; \
131 } while (0)
132
133#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
134 do { \
135 /* We need to fool gcc, as we need to pass more than 10 \
136 input/outputs. */ \
137 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
138 __asm__ __volatile__ ( \
139 "subcc %r8,%9,%1\n\t" \
140 "subxcc %r6,%7,%0\n\t" \
141 "subxcc %r4,%5,%%g2\n\t" \
142 "subx %r2,%3,%%g1\n\t" \
143 : "=&r" ((USItype)(r1)), \
144 "=&r" ((USItype)(r0)) \
145 : "%rJ" ((USItype)(x3)), \
146 "rI" ((USItype)(y3)), \
147 "%rJ" ((USItype)(x2)), \
148 "rI" ((USItype)(y2)), \
149 "%rJ" ((USItype)(x1)), \
150 "rI" ((USItype)(y1)), \
151 "%rJ" ((USItype)(x0)), \
152 "rI" ((USItype)(y0)) \
153 : "cc", "g1", "g2"); \
154 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
155 r3 = _t1; r2 = _t2; \
156 } while (0)
157
158#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
159
160#define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y0)
161
162#define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \
163 __asm__ ("addcc %3,%4,%3\n\t" \
164 "addxcc %2,%%g0,%2\n\t" \
165 "addxcc %1,%%g0,%1\n\t" \
166 "addx %0,%%g0,%0\n\t" \
167 : "=&r" ((USItype)(x3)), \
168 "=&r" ((USItype)(x2)), \
169 "=&r" ((USItype)(x1)), \
170 "=&r" ((USItype)(x0)) \
171 : "rI" ((USItype)(i)), \
172 "0" ((USItype)(x3)), \
173 "1" ((USItype)(x2)), \
174 "2" ((USItype)(x1)), \
175 "3" ((USItype)(x0)) \
176 : "cc")
177
178#ifndef CONFIG_SMP
179extern struct task_struct *last_task_used_math;
180#endif
181
182/* Obtain the current rounding mode. */
183#ifndef FP_ROUNDMODE
184#ifdef CONFIG_SMP
185#define FP_ROUNDMODE ((current->thread.fsr >> 30) & 0x3)
186#else
187#define FP_ROUNDMODE ((last_task_used_math->thread.fsr >> 30) & 0x3)
188#endif
189#endif
190
191/* Exception flags. */
192#define FP_EX_INVALID (1 << 4)
193#define FP_EX_OVERFLOW (1 << 3)
194#define FP_EX_UNDERFLOW (1 << 2)
195#define FP_EX_DIVZERO (1 << 1)
196#define FP_EX_INEXACT (1 << 0)
197
198#define FP_HANDLE_EXCEPTIONS return _fex
199
200#ifdef CONFIG_SMP
201#define FP_INHIBIT_RESULTS ((current->thread.fsr >> 23) & _fex)
202#else
203#define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex)
204#endif
205
206#ifdef CONFIG_SMP
207#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f)
208#else 5#else
209#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f) 6#include <asm-sparc/sfp-machine_32.h>
210#endif 7#endif
211
212#endif 8#endif
diff --git a/include/asm-sparc/sfp-machine_32.h b/include/asm-sparc/sfp-machine_32.h
new file mode 100644
index 000000000000..01d9c3b5a73b
--- /dev/null
+++ b/include/asm-sparc/sfp-machine_32.h
@@ -0,0 +1,212 @@
1/* Machine-dependent software floating-point definitions.
2 Sparc userland (_Q_*) version.
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz),
7 David S. Miller (davem@redhat.com) and
8 Peter Maydell (pmaydell@chiark.greenend.org.uk).
9
10 The GNU C Library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Library General Public License as
12 published by the Free Software Foundation; either version 2 of the
13 License, or (at your option) any later version.
14
15 The GNU C Library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Library General Public License for more details.
19
20 You should have received a copy of the GNU Library General Public
21 License along with the GNU C Library; see the file COPYING.LIB. If
22 not, write to the Free Software Foundation, Inc.,
23 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
24
25#ifndef _SFP_MACHINE_H
26#define _SFP_MACHINE_H
27
28
29#define _FP_W_TYPE_SIZE 32
30#define _FP_W_TYPE unsigned long
31#define _FP_WS_TYPE signed long
32#define _FP_I_TYPE long
33
34#define _FP_MUL_MEAT_S(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_D(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
38#define _FP_MUL_MEAT_Q(R,X,Y) \
39 _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
40
41#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
42#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
43#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
44
45#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
46#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
47#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
48#define _FP_NANSIGN_S 0
49#define _FP_NANSIGN_D 0
50#define _FP_NANSIGN_Q 0
51
52#define _FP_KEEPNANFRACP 1
53
54/* If one NaN is signaling and the other is not,
55 * we choose that one, otherwise we choose X.
56 */
57/* For _Qp_* and _Q_*, this should prefer X, for
58 * CPU instruction emulation this should prefer Y.
 59 * (see SPARCv9 B.2.2 section).
60 */
61#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
62 do { \
63 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
64 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
65 { \
66 R##_s = X##_s; \
67 _FP_FRAC_COPY_##wc(R,X); \
68 } \
69 else \
70 { \
71 R##_s = Y##_s; \
72 _FP_FRAC_COPY_##wc(R,Y); \
73 } \
74 R##_c = FP_CLS_NAN; \
75 } while (0)
76
77/* Some assembly to speed things up. */
78#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
79 __asm__ ("addcc %r7,%8,%2\n\t" \
80 "addxcc %r5,%6,%1\n\t" \
81 "addx %r3,%4,%0\n" \
82 : "=r" ((USItype)(r2)), \
83 "=&r" ((USItype)(r1)), \
84 "=&r" ((USItype)(r0)) \
85 : "%rJ" ((USItype)(x2)), \
86 "rI" ((USItype)(y2)), \
87 "%rJ" ((USItype)(x1)), \
88 "rI" ((USItype)(y1)), \
89 "%rJ" ((USItype)(x0)), \
90 "rI" ((USItype)(y0)) \
91 : "cc")
92
93#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
94 __asm__ ("subcc %r7,%8,%2\n\t" \
95 "subxcc %r5,%6,%1\n\t" \
96 "subx %r3,%4,%0\n" \
97 : "=r" ((USItype)(r2)), \
98 "=&r" ((USItype)(r1)), \
99 "=&r" ((USItype)(r0)) \
100 : "%rJ" ((USItype)(x2)), \
101 "rI" ((USItype)(y2)), \
102 "%rJ" ((USItype)(x1)), \
103 "rI" ((USItype)(y1)), \
104 "%rJ" ((USItype)(x0)), \
105 "rI" ((USItype)(y0)) \
106 : "cc")
107
108#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
109 do { \
110 /* We need to fool gcc, as we need to pass more than 10 \
111 input/outputs. */ \
112 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
113 __asm__ __volatile__ ( \
114 "addcc %r8,%9,%1\n\t" \
115 "addxcc %r6,%7,%0\n\t" \
116 "addxcc %r4,%5,%%g2\n\t" \
117 "addx %r2,%3,%%g1\n\t" \
118 : "=&r" ((USItype)(r1)), \
119 "=&r" ((USItype)(r0)) \
120 : "%rJ" ((USItype)(x3)), \
121 "rI" ((USItype)(y3)), \
122 "%rJ" ((USItype)(x2)), \
123 "rI" ((USItype)(y2)), \
124 "%rJ" ((USItype)(x1)), \
125 "rI" ((USItype)(y1)), \
126 "%rJ" ((USItype)(x0)), \
127 "rI" ((USItype)(y0)) \
128 : "cc", "g1", "g2"); \
129 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
130 r3 = _t1; r2 = _t2; \
131 } while (0)
132
133#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
134 do { \
135 /* We need to fool gcc, as we need to pass more than 10 \
136 input/outputs. */ \
137 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
138 __asm__ __volatile__ ( \
139 "subcc %r8,%9,%1\n\t" \
140 "subxcc %r6,%7,%0\n\t" \
141 "subxcc %r4,%5,%%g2\n\t" \
142 "subx %r2,%3,%%g1\n\t" \
143 : "=&r" ((USItype)(r1)), \
144 "=&r" ((USItype)(r0)) \
145 : "%rJ" ((USItype)(x3)), \
146 "rI" ((USItype)(y3)), \
147 "%rJ" ((USItype)(x2)), \
148 "rI" ((USItype)(y2)), \
149 "%rJ" ((USItype)(x1)), \
150 "rI" ((USItype)(y1)), \
151 "%rJ" ((USItype)(x0)), \
152 "rI" ((USItype)(y0)) \
153 : "cc", "g1", "g2"); \
154 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
155 r3 = _t1; r2 = _t2; \
156 } while (0)
157
158#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
159
160#define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y0)
161
162#define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \
163 __asm__ ("addcc %3,%4,%3\n\t" \
164 "addxcc %2,%%g0,%2\n\t" \
165 "addxcc %1,%%g0,%1\n\t" \
166 "addx %0,%%g0,%0\n\t" \
167 : "=&r" ((USItype)(x3)), \
168 "=&r" ((USItype)(x2)), \
169 "=&r" ((USItype)(x1)), \
170 "=&r" ((USItype)(x0)) \
171 : "rI" ((USItype)(i)), \
172 "0" ((USItype)(x3)), \
173 "1" ((USItype)(x2)), \
174 "2" ((USItype)(x1)), \
175 "3" ((USItype)(x0)) \
176 : "cc")
177
178#ifndef CONFIG_SMP
179extern struct task_struct *last_task_used_math;
180#endif
181
182/* Obtain the current rounding mode. */
183#ifndef FP_ROUNDMODE
184#ifdef CONFIG_SMP
185#define FP_ROUNDMODE ((current->thread.fsr >> 30) & 0x3)
186#else
187#define FP_ROUNDMODE ((last_task_used_math->thread.fsr >> 30) & 0x3)
188#endif
189#endif
190
191/* Exception flags. */
192#define FP_EX_INVALID (1 << 4)
193#define FP_EX_OVERFLOW (1 << 3)
194#define FP_EX_UNDERFLOW (1 << 2)
195#define FP_EX_DIVZERO (1 << 1)
196#define FP_EX_INEXACT (1 << 0)
197
198#define FP_HANDLE_EXCEPTIONS return _fex
199
200#ifdef CONFIG_SMP
201#define FP_INHIBIT_RESULTS ((current->thread.fsr >> 23) & _fex)
202#else
203#define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex)
204#endif
205
206#ifdef CONFIG_SMP
207#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f)
208#else
209#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f)
210#endif
211
212#endif
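
For reference, a minimal userspace sketch (not part of this patch; the helper name and <stdint.h> usage are illustrative only) of the selection rule the _FP_CHOOSENAN macro above encodes for single precision: when Y is a quiet NaN while X is signaling, the result takes X's sign and fraction, otherwise Y's.

#include <stdint.h>

#define QNANBIT_S	(1u << 22)	/* quiet-NaN bit of an IEEE single fraction */

/* x and y are raw IEEE single-precision bit patterns, both NaNs. */
static uint32_t choosenan_s(uint32_t x, uint32_t y)
{
	/* Y quiet and X signaling: prefer X, otherwise prefer Y. */
	if ((y & QNANBIT_S) && !(x & QNANBIT_S))
		return x;
	return y;
}
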
diff --git a/include/asm-sparc/sfp-machine_64.h b/include/asm-sparc/sfp-machine_64.h
new file mode 100644
index 000000000000..ca913ef40bd5
--- /dev/null
+++ b/include/asm-sparc/sfp-machine_64.h
@@ -0,0 +1,93 @@
1/* Machine-dependent software floating-point definitions.
2 Sparc64 kernel version.
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz) and
7 David S. Miller (davem@redhat.com).
8
9 The GNU C Library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Library General Public License as
11 published by the Free Software Foundation; either version 2 of the
12 License, or (at your option) any later version.
13
14 The GNU C Library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Library General Public License for more details.
18
19 You should have received a copy of the GNU Library General Public
20 License along with the GNU C Library; see the file COPYING.LIB. If
21 not, write to the Free Software Foundation, Inc.,
22 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23
24#ifndef _SFP_MACHINE_H
25#define _SFP_MACHINE_H
26
27#define _FP_W_TYPE_SIZE 64
28#define _FP_W_TYPE unsigned long
29#define _FP_WS_TYPE signed long
30#define _FP_I_TYPE long
31
32#define _FP_MUL_MEAT_S(R,X,Y) \
33 _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
34#define _FP_MUL_MEAT_D(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_Q(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
38
39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
41#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
42
43#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
44#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1)
45#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1
46#define _FP_NANSIGN_S 0
47#define _FP_NANSIGN_D 0
48#define _FP_NANSIGN_Q 0
49
50#define _FP_KEEPNANFRACP 1
51
52/* If one NaN is signaling and the other is not,
53 * we choose that one, otherwise we choose X.
54 */
55/* For _Qp_* and _Q_*, this should prefer X, for
56 * CPU instruction emulation this should prefer Y.
 57 * (see SPARC V9 B.2.2 section).
58 */
59#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
60 do { \
61 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
62 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
63 { \
64 R##_s = X##_s; \
65 _FP_FRAC_COPY_##wc(R,X); \
66 } \
67 else \
68 { \
69 R##_s = Y##_s; \
70 _FP_FRAC_COPY_##wc(R,Y); \
71 } \
72 R##_c = FP_CLS_NAN; \
73 } while (0)
74
75/* Obtain the current rounding mode. */
76#ifndef FP_ROUNDMODE
77#define FP_ROUNDMODE ((current_thread_info()->xfsr[0] >> 30) & 0x3)
78#endif
79
80/* Exception flags. */
81#define FP_EX_INVALID (1 << 4)
82#define FP_EX_OVERFLOW (1 << 3)
83#define FP_EX_UNDERFLOW (1 << 2)
84#define FP_EX_DIVZERO (1 << 1)
85#define FP_EX_INEXACT (1 << 0)
86
87#define FP_HANDLE_EXCEPTIONS return _fex
88
89#define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex)
90
91#define FP_TRAPPING_EXCEPTIONS ((current_thread_info()->xfsr[0] >> 23) & 0x1f)
92
93#endif
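
As context for the FP_ROUNDMODE, FP_INHIBIT_RESULTS and FP_TRAPPING_EXCEPTIONS definitions in both sfp-machine variants, a hedged sketch (illustrative only, not kernel code) of the %fsr fields they extract: the rounding direction sits in bits 31:30 and the trap-enable mask in bits 27:23.

#include <stdio.h>

/* Decode the FSR fields used above: RD = bits 31:30, TEM = bits 27:23. */
static void decode_fsr(unsigned long fsr)
{
	unsigned int rd  = (fsr >> 30) & 0x3;	/* FP_ROUNDMODE */
	unsigned int tem = (fsr >> 23) & 0x1f;	/* FP_TRAPPING_EXCEPTIONS */

	/* The TEM bits line up with the FP_EX_* flags: bit 4 invalid,
	 * bit 3 overflow, bit 2 underflow, bit 1 divide-by-zero,
	 * bit 0 inexact. */
	printf("round mode %u, trap enables 0x%02x\n", rd, tem);
}
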
diff --git a/include/asm-sparc/shmparam.h b/include/asm-sparc/shmparam.h
index 59a1243c12f3..16fda7e9acc8 100644
--- a/include/asm-sparc/shmparam.h
+++ b/include/asm-sparc/shmparam.h
@@ -1,11 +1,8 @@
-#ifndef _ASMSPARC_SHMPARAM_H
-#define _ASMSPARC_SHMPARAM_H
-
-#define __ARCH_FORCE_SHMLBA 	1
-
-extern int vac_cache_size;
-#define SHMLBA (vac_cache_size ? vac_cache_size : \
-		(sparc_cpu_model == sun4c ? (64 * 1024) : \
-		 (sparc_cpu_model == sun4 ? (128 * 1024) : PAGE_SIZE)))
-
-#endif /* _ASMSPARC_SHMPARAM_H */
+#ifndef ___ASM_SPARC_SHMPARAM_H
+#define ___ASM_SPARC_SHMPARAM_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/shmparam_64.h>
+#else
+#include <asm-sparc/shmparam_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/shmparam_32.h b/include/asm-sparc/shmparam_32.h
new file mode 100644
index 000000000000..59a1243c12f3
--- /dev/null
+++ b/include/asm-sparc/shmparam_32.h
@@ -0,0 +1,11 @@
1#ifndef _ASMSPARC_SHMPARAM_H
2#define _ASMSPARC_SHMPARAM_H
3
4#define __ARCH_FORCE_SHMLBA 1
5
6extern int vac_cache_size;
7#define SHMLBA (vac_cache_size ? vac_cache_size : \
8 (sparc_cpu_model == sun4c ? (64 * 1024) : \
9 (sparc_cpu_model == sun4 ? (128 * 1024) : PAGE_SIZE)))
10
11#endif /* _ASMSPARC_SHMPARAM_H */
diff --git a/include/asm-sparc/shmparam_64.h b/include/asm-sparc/shmparam_64.h
new file mode 100644
index 000000000000..1ed0d6701a9b
--- /dev/null
+++ b/include/asm-sparc/shmparam_64.h
@@ -0,0 +1,10 @@
1#ifndef _ASMSPARC64_SHMPARAM_H
2#define _ASMSPARC64_SHMPARAM_H
3
4#include <asm/spitfire.h>
5
6#define __ARCH_FORCE_SHMLBA 1
7/* attach addr a multiple of this */
8#define SHMLBA ((PAGE_SIZE > L1DCACHE_SIZE) ? PAGE_SIZE : L1DCACHE_SIZE)
9
10#endif /* _ASMSPARC64_SHMPARAM_H */
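
Both SHMLBA definitions exist so that shared-memory attach addresses avoid cache aliasing; a brief illustrative sketch (not from the patch) of rounding a requested attach address up to the next SHMLBA boundary, assuming SHMLBA is a power of two as it is for the sparc64 definition above:

/* Illustrative only: align an attach address up to a SHMLBA multiple,
 * assuming shmlba is a power of two (PAGE_SIZE or L1DCACHE_SIZE here). */
static unsigned long shm_align_up(unsigned long addr, unsigned long shmlba)
{
	return (addr + shmlba - 1) & ~(shmlba - 1);
}
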
diff --git a/include/asm-sparc/sigcontext.h b/include/asm-sparc/sigcontext.h
index c5fb60dcbd75..82fc7d54a4fa 100644
--- a/include/asm-sparc/sigcontext.h
+++ b/include/asm-sparc/sigcontext.h
@@ -1,62 +1,8 @@
-#ifndef __SPARC_SIGCONTEXT_H
-#define __SPARC_SIGCONTEXT_H
-
-#ifdef __KERNEL__
-#include <asm/ptrace.h>
-
-#ifndef __ASSEMBLY__
-
-#define __SUNOS_MAXWIN   31
-
-/* This is what SunOS does, so shall I. */
-struct sigcontext {
-	int sigc_onstack;      /* state to restore */
-	int sigc_mask;         /* sigmask to restore */
-	int sigc_sp;           /* stack pointer */
-	int sigc_pc;           /* program counter */
-	int sigc_npc;          /* next program counter */
-	int sigc_psr;          /* for condition codes etc */
-	int sigc_g1;           /* User uses these two registers */
-	int sigc_o0;           /* within the trampoline code. */
-
-	/* Now comes information regarding the users window set
-	 * at the time of the signal.
-	 */
-	int sigc_oswins;       /* outstanding windows */
-
-	/* stack ptrs for each regwin buf */
-	char *sigc_spbuf[__SUNOS_MAXWIN];
-
-	/* Windows to restore after signal */
-	struct {
-		unsigned long locals[8];
-		unsigned long ins[8];
-	} sigc_wbuf[__SUNOS_MAXWIN];
-};
-
-typedef struct {
-	struct {
-		unsigned long psr;
-		unsigned long pc;
-		unsigned long npc;
-		unsigned long y;
-		unsigned long u_regs[16]; /* globals and ins */
-	} si_regs;
-	int si_mask;
-} __siginfo_t;
-
-typedef struct {
-	unsigned long si_float_regs [32];
-	unsigned long si_fsr;
-	unsigned long si_fpqdepth;
-	struct {
-		unsigned long *insn_addr;
-		unsigned long insn;
-	} si_fpqueue [16];
-} __siginfo_fpu_t;
-
-#endif /* !(__ASSEMBLY__) */
-
-#endif /* (__KERNEL__) */
-
-#endif /* !(__SPARC_SIGCONTEXT_H) */
+#ifndef ___ASM_SPARC_SIGCONTEXT_H
+#define ___ASM_SPARC_SIGCONTEXT_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/sigcontext_64.h>
+#else
+#include <asm-sparc/sigcontext_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/sigcontext_32.h b/include/asm-sparc/sigcontext_32.h
new file mode 100644
index 000000000000..c5fb60dcbd75
--- /dev/null
+++ b/include/asm-sparc/sigcontext_32.h
@@ -0,0 +1,62 @@
1#ifndef __SPARC_SIGCONTEXT_H
2#define __SPARC_SIGCONTEXT_H
3
4#ifdef __KERNEL__
5#include <asm/ptrace.h>
6
7#ifndef __ASSEMBLY__
8
9#define __SUNOS_MAXWIN 31
10
11/* This is what SunOS does, so shall I. */
12struct sigcontext {
13 int sigc_onstack; /* state to restore */
14 int sigc_mask; /* sigmask to restore */
15 int sigc_sp; /* stack pointer */
16 int sigc_pc; /* program counter */
17 int sigc_npc; /* next program counter */
18 int sigc_psr; /* for condition codes etc */
19 int sigc_g1; /* User uses these two registers */
20 int sigc_o0; /* within the trampoline code. */
21
22 /* Now comes information regarding the users window set
23 * at the time of the signal.
24 */
25 int sigc_oswins; /* outstanding windows */
26
27 /* stack ptrs for each regwin buf */
28 char *sigc_spbuf[__SUNOS_MAXWIN];
29
30 /* Windows to restore after signal */
31 struct {
32 unsigned long locals[8];
33 unsigned long ins[8];
34 } sigc_wbuf[__SUNOS_MAXWIN];
35};
36
37typedef struct {
38 struct {
39 unsigned long psr;
40 unsigned long pc;
41 unsigned long npc;
42 unsigned long y;
43 unsigned long u_regs[16]; /* globals and ins */
44 } si_regs;
45 int si_mask;
46} __siginfo_t;
47
48typedef struct {
49 unsigned long si_float_regs [32];
50 unsigned long si_fsr;
51 unsigned long si_fpqdepth;
52 struct {
53 unsigned long *insn_addr;
54 unsigned long insn;
55 } si_fpqueue [16];
56} __siginfo_fpu_t;
57
58#endif /* !(__ASSEMBLY__) */
59
60#endif /* (__KERNEL__) */
61
62#endif /* !(__SPARC_SIGCONTEXT_H) */
diff --git a/include/asm-sparc/sigcontext_64.h b/include/asm-sparc/sigcontext_64.h
new file mode 100644
index 000000000000..1c868d680cfc
--- /dev/null
+++ b/include/asm-sparc/sigcontext_64.h
@@ -0,0 +1,87 @@
1#ifndef __SPARC64_SIGCONTEXT_H
2#define __SPARC64_SIGCONTEXT_H
3
4#ifdef __KERNEL__
5#include <asm/ptrace.h>
6#endif
7
8#ifndef __ASSEMBLY__
9
10#ifdef __KERNEL__
11
12#define __SUNOS_MAXWIN 31
13
14/* This is what SunOS does, so shall I unless we use new 32bit signals or rt signals. */
15struct sigcontext32 {
16 int sigc_onstack; /* state to restore */
17 int sigc_mask; /* sigmask to restore */
18 int sigc_sp; /* stack pointer */
19 int sigc_pc; /* program counter */
20 int sigc_npc; /* next program counter */
21 int sigc_psr; /* for condition codes etc */
22 int sigc_g1; /* User uses these two registers */
23 int sigc_o0; /* within the trampoline code. */
24
25 /* Now comes information regarding the users window set
26 * at the time of the signal.
27 */
28 int sigc_oswins; /* outstanding windows */
29
30 /* stack ptrs for each regwin buf */
31 unsigned sigc_spbuf[__SUNOS_MAXWIN];
32
33 /* Windows to restore after signal */
34 struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN];
35};
36
37#endif
38
39#ifdef __KERNEL__
40
41/* This is what we use for 32bit new non-rt signals. */
42
43typedef struct {
44 struct {
45 unsigned int psr;
46 unsigned int pc;
47 unsigned int npc;
48 unsigned int y;
49 unsigned int u_regs[16]; /* globals and ins */
50 } si_regs;
51 int si_mask;
52} __siginfo32_t;
53
54#endif
55
56typedef struct {
57 unsigned int si_float_regs [64];
58 unsigned long si_fsr;
59 unsigned long si_gsr;
60 unsigned long si_fprs;
61} __siginfo_fpu_t;
62
63/* This is what SunOS doesn't, so we have to write this alone
64 and do it properly. */
65struct sigcontext {
66 /* The size of this array has to match SI_MAX_SIZE from siginfo.h */
67 char sigc_info[128];
68 struct {
69 unsigned long u_regs[16]; /* globals and ins */
70 unsigned long tstate;
71 unsigned long tpc;
72 unsigned long tnpc;
73 unsigned int y;
74 unsigned int fprs;
75 } sigc_regs;
76 __siginfo_fpu_t * sigc_fpu_save;
77 struct {
78 void * ss_sp;
79 int ss_flags;
80 unsigned long ss_size;
81 } sigc_stack;
82 unsigned long sigc_mask;
83};
84
85#endif /* !(__ASSEMBLY__) */
86
87#endif /* !(__SPARC64_SIGCONTEXT_H) */
diff --git a/include/asm-sparc/siginfo.h b/include/asm-sparc/siginfo.h
index 3c71af135c52..2c9fccf4ce18 100644
--- a/include/asm-sparc/siginfo.h
+++ b/include/asm-sparc/siginfo.h
@@ -1,17 +1,8 @@
-#ifndef _SPARC_SIGINFO_H
-#define _SPARC_SIGINFO_H
-
-#define __ARCH_SI_UID_T		unsigned int
-#define __ARCH_SI_TRAPNO
-
-#include <asm-generic/siginfo.h>
-
-#define SI_NOINFO	32767		/* no information in siginfo_t */
-
-/*
- * SIGEMT si_codes
- */
-#define EMT_TAGOVF	(__SI_FAULT|1)	/* tag overflow */
-#define NSIGEMT		1
-
-#endif /* !(_SPARC_SIGINFO_H) */
+#ifndef ___ASM_SPARC_SIGINFO_H
+#define ___ASM_SPARC_SIGINFO_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/siginfo_64.h>
+#else
+#include <asm-sparc/siginfo_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/siginfo_32.h b/include/asm-sparc/siginfo_32.h
new file mode 100644
index 000000000000..3c71af135c52
--- /dev/null
+++ b/include/asm-sparc/siginfo_32.h
@@ -0,0 +1,17 @@
1#ifndef _SPARC_SIGINFO_H
2#define _SPARC_SIGINFO_H
3
4#define __ARCH_SI_UID_T unsigned int
5#define __ARCH_SI_TRAPNO
6
7#include <asm-generic/siginfo.h>
8
9#define SI_NOINFO 32767 /* no information in siginfo_t */
10
11/*
12 * SIGEMT si_codes
13 */
14#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */
15#define NSIGEMT 1
16
17#endif /* !(_SPARC_SIGINFO_H) */
diff --git a/include/asm-sparc/siginfo_64.h b/include/asm-sparc/siginfo_64.h
new file mode 100644
index 000000000000..c96e6c30f8b0
--- /dev/null
+++ b/include/asm-sparc/siginfo_64.h
@@ -0,0 +1,32 @@
1#ifndef _SPARC64_SIGINFO_H
2#define _SPARC64_SIGINFO_H
3
4#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
5
6#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
7#define __ARCH_SI_TRAPNO
8#define __ARCH_SI_BAND_T int
9
10#include <asm-generic/siginfo.h>
11
12#ifdef __KERNEL__
13
14#include <linux/compat.h>
15
16#ifdef CONFIG_COMPAT
17
18struct compat_siginfo;
19
20#endif /* CONFIG_COMPAT */
21
22#endif /* __KERNEL__ */
23
24#define SI_NOINFO 32767 /* no information in siginfo_t */
25
26/*
27 * SIGEMT si_codes
28 */
29#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */
30#define NSIGEMT 1
31
32#endif
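
The SI_PAD_SIZE32 definition above is plain arithmetic on the fixed siginfo size: with SI_MAX_SIZE being 128 bytes (the same constant the 128-byte sigc_info array in sigcontext_64.h has to match) and 4-byte ints, it works out to (128 / 4) - 3 = 29 padding words for the 32-bit compat layout. A hedged compile-time restatement, assuming those values and using illustrative ASSUMED_* names:

/* Illustrative restatement only, assuming SI_MAX_SIZE == 128 as in
 * <asm-generic/siginfo.h>; not part of the patch. */
#define ASSUMED_SI_MAX_SIZE	128
#define ASSUMED_SI_PAD_SIZE32	((ASSUMED_SI_MAX_SIZE / sizeof(int)) - 3)

_Static_assert(ASSUMED_SI_PAD_SIZE32 == 29,
	       "three ints of siginfo header, 29 ints of padding");
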
diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h
index 683657d6e685..36f5f9e482f7 100644
--- a/include/asm-sparc/signal.h
+++ b/include/asm-sparc/signal.h
@@ -1,207 +1,8 @@
1#ifndef _ASMSPARC_SIGNAL_H 1#ifndef ___ASM_SPARC_SIGNAL_H
2#define _ASMSPARC_SIGNAL_H 2#define ___ASM_SPARC_SIGNAL_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/sigcontext.h> 4#include <asm-sparc/signal_64.h>
5#include <linux/compiler.h>
6
7#ifdef __KERNEL__
8#ifndef __ASSEMBLY__
9#include <linux/personality.h>
10#include <linux/types.h>
11#endif
12#endif
13
14/* On the Sparc the signal handlers get passed a 'sub-signal' code
15 * for certain signal types, which we document here.
16 */
17#define SIGHUP 1
18#define SIGINT 2
19#define SIGQUIT 3
20#define SIGILL 4
21#define SUBSIG_STACK 0
22#define SUBSIG_ILLINST 2
23#define SUBSIG_PRIVINST 3
24#define SUBSIG_BADTRAP(t) (0x80 + (t))
25
26#define SIGTRAP 5
27#define SIGABRT 6
28#define SIGIOT 6
29
30#define SIGEMT 7
31#define SUBSIG_TAG 10
32
33#define SIGFPE 8
34#define SUBSIG_FPDISABLED 0x400
35#define SUBSIG_FPERROR 0x404
36#define SUBSIG_FPINTOVFL 0x001
37#define SUBSIG_FPSTSIG 0x002
38#define SUBSIG_IDIVZERO 0x014
39#define SUBSIG_FPINEXACT 0x0c4
40#define SUBSIG_FPDIVZERO 0x0c8
41#define SUBSIG_FPUNFLOW 0x0cc
42#define SUBSIG_FPOPERROR 0x0d0
43#define SUBSIG_FPOVFLOW 0x0d4
44
45#define SIGKILL 9
46#define SIGBUS 10
47#define SUBSIG_BUSTIMEOUT 1
48#define SUBSIG_ALIGNMENT 2
49#define SUBSIG_MISCERROR 5
50
51#define SIGSEGV 11
52#define SUBSIG_NOMAPPING 3
53#define SUBSIG_PROTECTION 4
54#define SUBSIG_SEGERROR 5
55
56#define SIGSYS 12
57
58#define SIGPIPE 13
59#define SIGALRM 14
60#define SIGTERM 15
61#define SIGURG 16
62
63/* SunOS values which deviate from the Linux/i386 ones */
64#define SIGSTOP 17
65#define SIGTSTP 18
66#define SIGCONT 19
67#define SIGCHLD 20
68#define SIGTTIN 21
69#define SIGTTOU 22
70#define SIGIO 23
71#define SIGPOLL SIGIO /* SysV name for SIGIO */
72#define SIGXCPU 24
73#define SIGXFSZ 25
74#define SIGVTALRM 26
75#define SIGPROF 27
76#define SIGWINCH 28
77#define SIGLOST 29
78#define SIGPWR SIGLOST
79#define SIGUSR1 30
80#define SIGUSR2 31
81
82/* Most things should be clean enough to redefine this at will, if care
83 * is taken to make libc match.
84 */
85
86#define __OLD_NSIG 32
87#define __NEW_NSIG 64
88#define _NSIG_BPW 32
89#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
90
91#define SIGRTMIN 32
92#define SIGRTMAX __NEW_NSIG
93
94#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
95#define _NSIG __NEW_NSIG
96#define __new_sigset_t sigset_t
97#define __new_sigaction sigaction
98#define __old_sigset_t old_sigset_t
99#define __old_sigaction old_sigaction
100#else 5#else
101#define _NSIG __OLD_NSIG 6#include <asm-sparc/signal_32.h>
102#define __old_sigset_t sigset_t
103#define __old_sigaction sigaction
104#endif
105
106#ifndef __ASSEMBLY__
107
108typedef unsigned long __old_sigset_t;
109
110typedef struct {
111 unsigned long sig[_NSIG_WORDS];
112} __new_sigset_t;
113
114
115#ifdef __KERNEL__
116/* A SunOS sigstack */
117struct sigstack {
118 char *the_stack;
119 int cur_status;
120};
121#endif 7#endif
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145#define SIG_BLOCK 0x01 /* for blocking signals */
146#define SIG_UNBLOCK 0x02 /* for unblocking signals */
147#define SIG_SETMASK 0x04 /* for setting the signal mask */
148
149/*
150 * sigaltstack controls
151 */
152#define SS_ONSTACK 1
153#define SS_DISABLE 2
154
155#define MINSIGSTKSZ 4096
156#define SIGSTKSZ 16384
157
158#ifdef __KERNEL__
159/*
160 * DJHR
161 * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
162 * interrupt handler's irq structure should be statically allocated
163 * by the request_irq routine.
164 * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
165 * of interrupt usage and that sucks. Also without a flag like this
166 * it may be possible for the free_irq routine to attempt to free
167 * statically allocated data.. which is NOT GOOD.
168 *
169 */
170#define SA_STATIC_ALLOC 0x8000
171#endif 8#endif
172
173#include <asm-generic/signal.h>
174
175#ifdef __KERNEL__
176struct __new_sigaction {
177 __sighandler_t sa_handler;
178 unsigned long sa_flags;
179 void (*sa_restorer)(void); /* Not used by Linux/SPARC */
180 __new_sigset_t sa_mask;
181};
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188struct __old_sigaction {
189 __sighandler_t sa_handler;
190 __old_sigset_t sa_mask;
191 unsigned long sa_flags;
192 void (*sa_restorer) (void); /* not used by Linux/SPARC */
193};
194
195typedef struct sigaltstack {
196 void __user *ss_sp;
197 int ss_flags;
198 size_t ss_size;
199} stack_t;
200
201#define ptrace_signal_deliver(regs, cookie) do { } while (0)
202
203#endif /* !(__KERNEL__) */
204
205#endif /* !(__ASSEMBLY__) */
206
207#endif /* !(_ASMSPARC_SIGNAL_H) */
diff --git a/include/asm-sparc/signal_32.h b/include/asm-sparc/signal_32.h
new file mode 100644
index 000000000000..96a60ab03ca1
--- /dev/null
+++ b/include/asm-sparc/signal_32.h
@@ -0,0 +1,207 @@
1#ifndef _ASMSPARC_SIGNAL_H
2#define _ASMSPARC_SIGNAL_H
3
4#include <asm/sigcontext.h>
5#include <linux/compiler.h>
6
7#ifdef __KERNEL__
8#ifndef __ASSEMBLY__
9#include <linux/personality.h>
10#include <linux/types.h>
11#endif
12#endif
13
14/* On the Sparc the signal handlers get passed a 'sub-signal' code
15 * for certain signal types, which we document here.
16 */
17#define SIGHUP 1
18#define SIGINT 2
19#define SIGQUIT 3
20#define SIGILL 4
21#define SUBSIG_STACK 0
22#define SUBSIG_ILLINST 2
23#define SUBSIG_PRIVINST 3
24#define SUBSIG_BADTRAP(t) (0x80 + (t))
25
26#define SIGTRAP 5
27#define SIGABRT 6
28#define SIGIOT 6
29
30#define SIGEMT 7
31#define SUBSIG_TAG 10
32
33#define SIGFPE 8
34#define SUBSIG_FPDISABLED 0x400
35#define SUBSIG_FPERROR 0x404
36#define SUBSIG_FPINTOVFL 0x001
37#define SUBSIG_FPSTSIG 0x002
38#define SUBSIG_IDIVZERO 0x014
39#define SUBSIG_FPINEXACT 0x0c4
40#define SUBSIG_FPDIVZERO 0x0c8
41#define SUBSIG_FPUNFLOW 0x0cc
42#define SUBSIG_FPOPERROR 0x0d0
43#define SUBSIG_FPOVFLOW 0x0d4
44
45#define SIGKILL 9
46#define SIGBUS 10
47#define SUBSIG_BUSTIMEOUT 1
48#define SUBSIG_ALIGNMENT 2
49#define SUBSIG_MISCERROR 5
50
51#define SIGSEGV 11
52#define SUBSIG_NOMAPPING 3
53#define SUBSIG_PROTECTION 4
54#define SUBSIG_SEGERROR 5
55
56#define SIGSYS 12
57
58#define SIGPIPE 13
59#define SIGALRM 14
60#define SIGTERM 15
61#define SIGURG 16
62
63/* SunOS values which deviate from the Linux/i386 ones */
64#define SIGSTOP 17
65#define SIGTSTP 18
66#define SIGCONT 19
67#define SIGCHLD 20
68#define SIGTTIN 21
69#define SIGTTOU 22
70#define SIGIO 23
71#define SIGPOLL SIGIO /* SysV name for SIGIO */
72#define SIGXCPU 24
73#define SIGXFSZ 25
74#define SIGVTALRM 26
75#define SIGPROF 27
76#define SIGWINCH 28
77#define SIGLOST 29
78#define SIGPWR SIGLOST
79#define SIGUSR1 30
80#define SIGUSR2 31
81
82/* Most things should be clean enough to redefine this at will, if care
83 * is taken to make libc match.
84 */
85
86#define __OLD_NSIG 32
87#define __NEW_NSIG 64
88#define _NSIG_BPW 32
89#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
90
91#define SIGRTMIN 32
92#define SIGRTMAX __NEW_NSIG
93
94#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
95#define _NSIG __NEW_NSIG
96#define __new_sigset_t sigset_t
97#define __new_sigaction sigaction
98#define __old_sigset_t old_sigset_t
99#define __old_sigaction old_sigaction
100#else
101#define _NSIG __OLD_NSIG
102#define __old_sigset_t sigset_t
103#define __old_sigaction sigaction
104#endif
105
106#ifndef __ASSEMBLY__
107
108typedef unsigned long __old_sigset_t;
109
110typedef struct {
111 unsigned long sig[_NSIG_WORDS];
112} __new_sigset_t;
113
114
115#ifdef __KERNEL__
116/* A SunOS sigstack */
117struct sigstack {
118 char *the_stack;
119 int cur_status;
120};
121#endif
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145#define SIG_BLOCK 0x01 /* for blocking signals */
146#define SIG_UNBLOCK 0x02 /* for unblocking signals */
147#define SIG_SETMASK 0x04 /* for setting the signal mask */
148
149/*
150 * sigaltstack controls
151 */
152#define SS_ONSTACK 1
153#define SS_DISABLE 2
154
155#define MINSIGSTKSZ 4096
156#define SIGSTKSZ 16384
157
158#ifdef __KERNEL__
159/*
160 * DJHR
161 * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
162 * interrupt handler's irq structure should be statically allocated
163 * by the request_irq routine.
164 * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
165 * of interrupt usage and that sucks. Also without a flag like this
166 * it may be possible for the free_irq routine to attempt to free
167 * statically allocated data.. which is NOT GOOD.
168 *
169 */
170#define SA_STATIC_ALLOC 0x8000
171#endif
172
173#include <asm-generic/signal.h>
174
175#ifdef __KERNEL__
176struct __new_sigaction {
177 __sighandler_t sa_handler;
178 unsigned long sa_flags;
179 void (*sa_restorer)(void); /* Not used by Linux/SPARC */
180 __new_sigset_t sa_mask;
181};
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188struct __old_sigaction {
189 __sighandler_t sa_handler;
190 __old_sigset_t sa_mask;
191 unsigned long sa_flags;
192 void (*sa_restorer) (void); /* not used by Linux/SPARC */
193};
194
195typedef struct sigaltstack {
196 void __user *ss_sp;
197 int ss_flags;
198 size_t ss_size;
199} stack_t;
200
201#define ptrace_signal_deliver(regs, cookie) do { } while (0)
202
203#endif /* !(__KERNEL__) */
204
205#endif /* !(__ASSEMBLY__) */
206
207#endif /* !(_ASMSPARC_SIGNAL_H) */
diff --git a/include/asm-sparc/signal_64.h b/include/asm-sparc/signal_64.h
new file mode 100644
index 000000000000..ab1509a101c5
--- /dev/null
+++ b/include/asm-sparc/signal_64.h
@@ -0,0 +1,194 @@
1#ifndef _ASMSPARC64_SIGNAL_H
2#define _ASMSPARC64_SIGNAL_H
3
4#include <asm/sigcontext.h>
5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__
8#include <linux/personality.h>
9#include <linux/types.h>
10#endif
11#endif
12
13/* On the Sparc the signal handlers get passed a 'sub-signal' code
14 * for certain signal types, which we document here.
15 */
16#define SIGHUP 1
17#define SIGINT 2
18#define SIGQUIT 3
19#define SIGILL 4
20#define SUBSIG_STACK 0
21#define SUBSIG_ILLINST 2
22#define SUBSIG_PRIVINST 3
23#define SUBSIG_BADTRAP(t) (0x80 + (t))
24
25#define SIGTRAP 5
26#define SIGABRT 6
27#define SIGIOT 6
28
29#define SIGEMT 7
30#define SUBSIG_TAG 10
31
32#define SIGFPE 8
33#define SUBSIG_FPDISABLED 0x400
34#define SUBSIG_FPERROR 0x404
35#define SUBSIG_FPINTOVFL 0x001
36#define SUBSIG_FPSTSIG 0x002
37#define SUBSIG_IDIVZERO 0x014
38#define SUBSIG_FPINEXACT 0x0c4
39#define SUBSIG_FPDIVZERO 0x0c8
40#define SUBSIG_FPUNFLOW 0x0cc
41#define SUBSIG_FPOPERROR 0x0d0
42#define SUBSIG_FPOVFLOW 0x0d4
43
44#define SIGKILL 9
45#define SIGBUS 10
46#define SUBSIG_BUSTIMEOUT 1
47#define SUBSIG_ALIGNMENT 2
48#define SUBSIG_MISCERROR 5
49
50#define SIGSEGV 11
51#define SUBSIG_NOMAPPING 3
52#define SUBSIG_PROTECTION 4
53#define SUBSIG_SEGERROR 5
54
55#define SIGSYS 12
56
57#define SIGPIPE 13
58#define SIGALRM 14
59#define SIGTERM 15
60#define SIGURG 16
61
62/* SunOS values which deviate from the Linux/i386 ones */
63#define SIGSTOP 17
64#define SIGTSTP 18
65#define SIGCONT 19
66#define SIGCHLD 20
67#define SIGTTIN 21
68#define SIGTTOU 22
69#define SIGIO 23
70#define SIGPOLL SIGIO /* SysV name for SIGIO */
71#define SIGXCPU 24
72#define SIGXFSZ 25
73#define SIGVTALRM 26
74#define SIGPROF 27
75#define SIGWINCH 28
76#define SIGLOST 29
77#define SIGPWR SIGLOST
78#define SIGUSR1 30
79#define SIGUSR2 31
80
81/* Most things should be clean enough to redefine this at will, if care
82 is taken to make libc match. */
83
84#define __OLD_NSIG 32
85#define __NEW_NSIG 64
86#define _NSIG_BPW 64
87#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
88
89#define SIGRTMIN 32
90#define SIGRTMAX __NEW_NSIG
91
92#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
93#define _NSIG __NEW_NSIG
94#define __new_sigset_t sigset_t
95#define __new_sigaction sigaction
96#define __new_sigaction32 sigaction32
97#define __old_sigset_t old_sigset_t
98#define __old_sigaction old_sigaction
99#define __old_sigaction32 old_sigaction32
100#else
101#define _NSIG __OLD_NSIG
102#define NSIG _NSIG
103#define __old_sigset_t sigset_t
104#define __old_sigaction sigaction
105#define __old_sigaction32 sigaction32
106#endif
107
108#ifndef __ASSEMBLY__
109
110typedef unsigned long __old_sigset_t; /* at least 32 bits */
111
112typedef struct {
113 unsigned long sig[_NSIG_WORDS];
114} __new_sigset_t;
115
116/* A SunOS sigstack */
117struct sigstack {
118 /* XXX 32-bit pointers pinhead XXX */
119 char *the_stack;
120 int cur_status;
121};
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145
146#define SIG_BLOCK 0x01 /* for blocking signals */
147#define SIG_UNBLOCK 0x02 /* for unblocking signals */
148#define SIG_SETMASK 0x04 /* for setting the signal mask */
149
150/*
151 * sigaltstack controls
152 */
153#define SS_ONSTACK 1
154#define SS_DISABLE 2
155
156#define MINSIGSTKSZ 4096
157#define SIGSTKSZ 16384
158
159#include <asm-generic/signal.h>
160
161struct __new_sigaction {
162 __sighandler_t sa_handler;
163 unsigned long sa_flags;
164 __sigrestore_t sa_restorer; /* not used by Linux/SPARC yet */
165 __new_sigset_t sa_mask;
166};
167
168struct __old_sigaction {
169 __sighandler_t sa_handler;
170 __old_sigset_t sa_mask;
171 unsigned long sa_flags;
172 void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
173};
174
175typedef struct sigaltstack {
176 void __user *ss_sp;
177 int ss_flags;
178 size_t ss_size;
179} stack_t;
180
181#ifdef __KERNEL__
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188#define ptrace_signal_deliver(regs, cookie) do { } while (0)
189
190#endif /* !(__KERNEL__) */
191
192#endif /* !(__ASSEMBLY__) */
193
194#endif /* !(_ASMSPARC64_SIGNAL_H) */
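
One practical difference between the two signal headers is the word size of the signal mask: both keep 64 signals, but _NSIG_BPW is 32 on sparc32 (two mask words) and 64 on sparc64 (one word). A small illustrative sketch, not taken from the kernel, of how a signal number maps onto that __new_sigset_t layout:

/* Illustrative only: locate signal `sig` (1..64) inside the mask
 * for a given bits-per-word value. */
struct sigpos { unsigned int word; unsigned long bit; };

static struct sigpos sig_position(unsigned int sig, unsigned int nsig_bpw)
{
	struct sigpos p;

	p.word = (sig - 1) / nsig_bpw;		/* always 0 on sparc64 */
	p.bit  = 1UL << ((sig - 1) % nsig_bpw);
	return p;
}
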
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index b61e74bea06a..1f9dedfbabd8 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -1,173 +1,8 @@
1/* smp.h: Sparc specific SMP stuff. 1#ifndef ___ASM_SPARC_SMP_H
2 * 2#define ___ASM_SPARC_SMP_H
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/smp_64.h>
5
6#ifndef _SPARC_SMP_H
7#define _SPARC_SMP_H
8
9#include <linux/threads.h>
10#include <asm/head.h>
11#include <asm/btfixup.h>
12
13#ifndef __ASSEMBLY__
14
15#include <linux/cpumask.h>
16
17#endif /* __ASSEMBLY__ */
18
19#ifdef CONFIG_SMP
20
21#ifndef __ASSEMBLY__
22
23#include <asm/ptrace.h>
24#include <asm/asi.h>
25#include <asm/atomic.h>
26
27/*
28 * Private routines/data
29 */
30
31extern unsigned char boot_cpu_id;
32extern cpumask_t phys_cpu_present_map;
33#define cpu_possible_map phys_cpu_present_map
34
35typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long);
37
38/*
39 * General functions that each host system must provide.
40 */
41
42void sun4m_init_smp(void);
43void sun4d_init_smp(void);
44
45void smp_callin(void);
46void smp_boot_cpus(void);
47void smp_store_cpu_info(int);
48
49struct seq_file;
50void smp_bogo(struct seq_file *);
51void smp_info(struct seq_file *);
52
53BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
54BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
55BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
56BTFIXUPDEF_BLACKBOX(load_current)
57
58#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
59
60static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
61static inline void xc1(smpfunc_t func, unsigned long arg1)
62{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
63static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
64{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
65static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
66 unsigned long arg3)
67{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
68static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
69 unsigned long arg3, unsigned long arg4)
70{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
71static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
72 unsigned long arg3, unsigned long arg4, unsigned long arg5)
73{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
74
75static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
76{
77 xc1((smpfunc_t)func, (unsigned long)info);
78 return 0;
79}
80
81static inline int cpu_logical_map(int cpu)
82{
83 return cpu;
84}
85
86static inline int hard_smp4m_processor_id(void)
87{
88 int cpuid;
89
90 __asm__ __volatile__("rd %%tbr, %0\n\t"
91 "srl %0, 12, %0\n\t"
92 "and %0, 3, %0\n\t" :
93 "=&r" (cpuid));
94 return cpuid;
95}
96
97static inline int hard_smp4d_processor_id(void)
98{
99 int cpuid;
100
101 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
102 "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
103 return cpuid;
104}
105
106#ifndef MODULE
107static inline int hard_smp_processor_id(void)
108{
109 int cpuid;
110
111 /* Black box - sun4m
112 __asm__ __volatile__("rd %%tbr, %0\n\t"
113 "srl %0, 12, %0\n\t"
114 "and %0, 3, %0\n\t" :
115 "=&r" (cpuid));
116 - sun4d
117 __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
118 "nop; nop" :
119 "=&r" (cpuid));
120 See btfixup.h and btfixupprep.c to understand how a blackbox works.
121 */
122 __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
123 "sethi %%hi(boot_cpu_id), %0\n\t"
124 "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
125 "=&r" (cpuid));
126 return cpuid;
127}
128#else 5#else
129static inline int hard_smp_processor_id(void) 6#include <asm-sparc/smp_32.h>
130{ 7#endif
131 int cpuid;
132
133 __asm__ __volatile__("mov %%o7, %%g1\n\t"
134 "call ___f___hard_smp_processor_id\n\t"
135 " nop\n\t"
136 "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
137 return cpuid;
138}
139#endif 8#endif
140
141#define raw_smp_processor_id() (current_thread_info()->cpu)
142
143#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
144#define prof_counter(__cpu) cpu_data(__cpu).counter
145
146void smp_setup_cpu_possible_map(void);
147
148#endif /* !(__ASSEMBLY__) */
149
150/* Sparc specific messages. */
151#define MSG_CROSS_CALL 0x0005 /* run func on cpus */
152
153/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
154 * contains something other than one of these then the ipi is from
155 * Linux's active_kernel_processor. This facility exists so that
156 * the boot monitor can capture all the other cpus when one catches
157 * a watchdog reset or the user enters the monitor using L1-A keys.
158 */
159#define MBOX_STOPCPU 0xFB
160#define MBOX_IDLECPU 0xFC
161#define MBOX_IDLECPU2 0xFD
162#define MBOX_STOPCPU2 0xFE
163
164#else /* SMP */
165
166#define hard_smp_processor_id() 0
167#define smp_setup_cpu_possible_map() do { } while (0)
168
169#endif /* !(SMP) */
170
171#define NO_PROC_ID 0xFF
172
173#endif /* !(_SPARC_SMP_H) */
diff --git a/include/asm-sparc/smp_32.h b/include/asm-sparc/smp_32.h
new file mode 100644
index 000000000000..7201752cf934
--- /dev/null
+++ b/include/asm-sparc/smp_32.h
@@ -0,0 +1,173 @@
1/* smp.h: Sparc specific SMP stuff.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef _SPARC_SMP_H
7#define _SPARC_SMP_H
8
9#include <linux/threads.h>
10#include <asm/head.h>
11#include <asm/btfixup.h>
12
13#ifndef __ASSEMBLY__
14
15#include <linux/cpumask.h>
16
17#endif /* __ASSEMBLY__ */
18
19#ifdef CONFIG_SMP
20
21#ifndef __ASSEMBLY__
22
23#include <asm/ptrace.h>
24#include <asm/asi.h>
25#include <asm/atomic.h>
26
27/*
28 * Private routines/data
29 */
30
31extern unsigned char boot_cpu_id;
32extern cpumask_t phys_cpu_present_map;
33#define cpu_possible_map phys_cpu_present_map
34
35typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long);
37
38/*
39 * General functions that each host system must provide.
40 */
41
42void sun4m_init_smp(void);
43void sun4d_init_smp(void);
44
45void smp_callin(void);
46void smp_boot_cpus(void);
47void smp_store_cpu_info(int);
48
49struct seq_file;
50void smp_bogo(struct seq_file *);
51void smp_info(struct seq_file *);
52
53BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
54BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
55BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
56BTFIXUPDEF_BLACKBOX(load_current)
57
58#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
59
60static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
61static inline void xc1(smpfunc_t func, unsigned long arg1)
62{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
63static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
64{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
65static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
66 unsigned long arg3)
67{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
68static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
69 unsigned long arg3, unsigned long arg4)
70{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
71static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
72 unsigned long arg3, unsigned long arg4, unsigned long arg5)
73{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
74
75static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
76{
77 xc1((smpfunc_t)func, (unsigned long)info);
78 return 0;
79}
80
81static inline int cpu_logical_map(int cpu)
82{
83 return cpu;
84}
85
86static inline int hard_smp4m_processor_id(void)
87{
88 int cpuid;
89
90 __asm__ __volatile__("rd %%tbr, %0\n\t"
91 "srl %0, 12, %0\n\t"
92 "and %0, 3, %0\n\t" :
93 "=&r" (cpuid));
94 return cpuid;
95}
96
97static inline int hard_smp4d_processor_id(void)
98{
99 int cpuid;
100
101 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
102 "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
103 return cpuid;
104}
105
106#ifndef MODULE
107static inline int hard_smp_processor_id(void)
108{
109 int cpuid;
110
111 /* Black box - sun4m
112 __asm__ __volatile__("rd %%tbr, %0\n\t"
113 "srl %0, 12, %0\n\t"
114 "and %0, 3, %0\n\t" :
115 "=&r" (cpuid));
116 - sun4d
117 __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
118 "nop; nop" :
119 "=&r" (cpuid));
120 See btfixup.h and btfixupprep.c to understand how a blackbox works.
121 */
122 __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
123 "sethi %%hi(boot_cpu_id), %0\n\t"
124 "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
125 "=&r" (cpuid));
126 return cpuid;
127}
128#else
129static inline int hard_smp_processor_id(void)
130{
131 int cpuid;
132
133 __asm__ __volatile__("mov %%o7, %%g1\n\t"
134 "call ___f___hard_smp_processor_id\n\t"
135 " nop\n\t"
136 "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
137 return cpuid;
138}
139#endif
140
141#define raw_smp_processor_id() (current_thread_info()->cpu)
142
143#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
144#define prof_counter(__cpu) cpu_data(__cpu).counter
145
146void smp_setup_cpu_possible_map(void);
147
148#endif /* !(__ASSEMBLY__) */
149
150/* Sparc specific messages. */
151#define MSG_CROSS_CALL 0x0005 /* run func on cpus */
152
153/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
154 * contains something other than one of these then the ipi is from
155 * Linux's active_kernel_processor. This facility exists so that
156 * the boot monitor can capture all the other cpus when one catches
157 * a watchdog reset or the user enters the monitor using L1-A keys.
158 */
159#define MBOX_STOPCPU 0xFB
160#define MBOX_IDLECPU 0xFC
161#define MBOX_IDLECPU2 0xFD
162#define MBOX_STOPCPU2 0xFE
163
164#else /* SMP */
165
166#define hard_smp_processor_id() 0
167#define smp_setup_cpu_possible_map() do { } while (0)
168
169#endif /* !(SMP) */
170
171#define NO_PROC_ID 0xFF
172
173#endif /* !(_SPARC_SMP_H) */
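
The sun4m variant of hard_smp_processor_id() above reads %tbr and masks out the CPU number; as a plain-C restatement (illustrative only, not kernel code) of the same bit manipulation:

/* Same extraction as the "rd %tbr; srl 12; and 3" sequence above:
 * on sun4m the CPU id occupies bits 13:12 of the trap base register. */
static int sun4m_cpuid_from_tbr(unsigned long tbr)
{
	return (tbr >> 12) & 3;
}
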
diff --git a/include/asm-sparc/smp_64.h b/include/asm-sparc/smp_64.h
new file mode 100644
index 000000000000..4cfe09c51f1f
--- /dev/null
+++ b/include/asm-sparc/smp_64.h
@@ -0,0 +1,64 @@
1/* smp.h: Sparc64 specific SMP stuff.
2 *
3 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SMP_H
7#define _SPARC64_SMP_H
8
9#include <linux/threads.h>
10#include <asm/asi.h>
11#include <asm/starfire.h>
12#include <asm/spitfire.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/cpumask.h>
17#include <linux/cache.h>
18
19#endif /* !(__ASSEMBLY__) */
20
21#ifdef CONFIG_SMP
22
23#ifndef __ASSEMBLY__
24
25/*
26 * Private routines/data
27 */
28
29#include <linux/bitops.h>
30#include <asm/atomic.h>
31#include <asm/percpu.h>
32
33DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
34extern cpumask_t cpu_core_map[NR_CPUS];
35extern int sparc64_multi_core;
36
37/*
38 * General functions that each host system must provide.
39 */
40
41extern int hard_smp_processor_id(void);
42#define raw_smp_processor_id() (current_thread_info()->cpu)
43
44extern void smp_fill_in_sib_core_maps(void);
45extern void cpu_play_dead(void);
46
47extern void smp_fetch_global_regs(void);
48
49#ifdef CONFIG_HOTPLUG_CPU
50extern int __cpu_disable(void);
51extern void __cpu_die(unsigned int cpu);
52#endif
53
54#endif /* !(__ASSEMBLY__) */
55
56#else
57
58#define hard_smp_processor_id() 0
59#define smp_fill_in_sib_core_maps() do { } while (0)
60#define smp_fetch_global_regs() do { } while (0)
61
62#endif /* !(CONFIG_SMP) */
63
64#endif /* !(_SPARC64_SMP_H) */
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index de2249b267c6..3b71c50b72eb 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -1,192 +1,8 @@
1/* spinlock.h: 32-bit Sparc spinlock support. 1#ifndef ___ASM_SPARC_SPINLOCK_H
2 * 2#define ___ASM_SPARC_SPINLOCK_H
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/spinlock_64.h>
5 5#else
6#ifndef __SPARC_SPINLOCK_H 6#include <asm-sparc/spinlock_32.h>
7#define __SPARC_SPINLOCK_H 7#endif
8 8#endif
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13#include <asm/psr.h>
14
15#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
16
17#define __raw_spin_unlock_wait(lock) \
18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
19
20static inline void __raw_spin_lock(raw_spinlock_t *lock)
21{
22 __asm__ __volatile__(
23 "\n1:\n\t"
24 "ldstub [%0], %%g2\n\t"
25 "orcc %%g2, 0x0, %%g0\n\t"
26 "bne,a 2f\n\t"
27 " ldub [%0], %%g2\n\t"
28 ".subsection 2\n"
29 "2:\n\t"
30 "orcc %%g2, 0x0, %%g0\n\t"
31 "bne,a 2b\n\t"
32 " ldub [%0], %%g2\n\t"
33 "b,a 1b\n\t"
34 ".previous\n"
35 : /* no outputs */
36 : "r" (lock)
37 : "g2", "memory", "cc");
38}
39
40static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41{
42 unsigned int result;
43 __asm__ __volatile__("ldstub [%1], %0"
44 : "=r" (result)
45 : "r" (lock)
46 : "memory");
47 return (result == 0);
48}
49
50static inline void __raw_spin_unlock(raw_spinlock_t *lock)
51{
52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
53}
54
55/* Read-write spinlocks, allowing multiple readers
56 * but only one writer.
57 *
58 * NOTE! it is quite common to have readers in interrupts
59 * but no interrupt writers. For those circumstances we
60 * can "mix" irq-safe locks - any writer needs to get a
61 * irq-safe write-lock, but readers can get non-irqsafe
62 * read-locks.
63 *
64 * XXX This might create some problems with my dual spinlock
65 * XXX scheme, deadlocks etc. -DaveM
66 *
67 * Sort of like atomic_t's on Sparc, but even more clever.
68 *
69 * ------------------------------------
70 * | 24-bit counter | wlock | raw_rwlock_t
71 * ------------------------------------
72 * 31 8 7 0
73 *
74 * wlock signifies the one writer is in or somebody is updating
75 * counter. For a writer, if he successfully acquires the wlock,
76 * but counter is non-zero, he has to release the lock and wait,
77 * till both counter and wlock are zero.
78 *
79 * Unfortunately this scheme limits us to ~16,000,000 cpus.
80 */
81static inline void __read_lock(raw_rwlock_t *rw)
82{
83 register raw_rwlock_t *lp asm("g1");
84 lp = rw;
85 __asm__ __volatile__(
86 "mov %%o7, %%g4\n\t"
87 "call ___rw_read_enter\n\t"
88 " ldstub [%%g1 + 3], %%g2\n"
89 : /* no outputs */
90 : "r" (lp)
91 : "g2", "g4", "memory", "cc");
92}
93
94#define __raw_read_lock(lock) \
95do { unsigned long flags; \
96 local_irq_save(flags); \
97 __read_lock(lock); \
98 local_irq_restore(flags); \
99} while(0)
100
101static inline void __read_unlock(raw_rwlock_t *rw)
102{
103 register raw_rwlock_t *lp asm("g1");
104 lp = rw;
105 __asm__ __volatile__(
106 "mov %%o7, %%g4\n\t"
107 "call ___rw_read_exit\n\t"
108 " ldstub [%%g1 + 3], %%g2\n"
109 : /* no outputs */
110 : "r" (lp)
111 : "g2", "g4", "memory", "cc");
112}
113
114#define __raw_read_unlock(lock) \
115do { unsigned long flags; \
116 local_irq_save(flags); \
117 __read_unlock(lock); \
118 local_irq_restore(flags); \
119} while(0)
120
121static inline void __raw_write_lock(raw_rwlock_t *rw)
122{
123 register raw_rwlock_t *lp asm("g1");
124 lp = rw;
125 __asm__ __volatile__(
126 "mov %%o7, %%g4\n\t"
127 "call ___rw_write_enter\n\t"
128 " ldstub [%%g1 + 3], %%g2\n"
129 : /* no outputs */
130 : "r" (lp)
131 : "g2", "g4", "memory", "cc");
132 *(volatile __u32 *)&lp->lock = ~0U;
133}
134
135static inline int __raw_write_trylock(raw_rwlock_t *rw)
136{
137 unsigned int val;
138
139 __asm__ __volatile__("ldstub [%1 + 3], %0"
140 : "=r" (val)
141 : "r" (&rw->lock)
142 : "memory");
143
144 if (val == 0) {
145 val = rw->lock & ~0xff;
146 if (val)
147 ((volatile u8*)&rw->lock)[3] = 0;
148 else
149 *(volatile u32*)&rw->lock = ~0U;
150 }
151
152 return (val == 0);
153}
154
155static inline int __read_trylock(raw_rwlock_t *rw)
156{
157 register raw_rwlock_t *lp asm("g1");
158 register int res asm("o0");
159 lp = rw;
160 __asm__ __volatile__(
161 "mov %%o7, %%g4\n\t"
162 "call ___rw_read_try\n\t"
163 " ldstub [%%g1 + 3], %%g2\n"
164 : "=r" (res)
165 : "r" (lp)
166 : "g2", "g4", "memory", "cc");
167 return res;
168}
169
170#define __raw_read_trylock(lock) \
171({ unsigned long flags; \
172 int res; \
173 local_irq_save(flags); \
174 res = __read_trylock(lock); \
175 local_irq_restore(flags); \
176 res; \
177})
178
179#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
180
181#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
182
183#define _raw_spin_relax(lock) cpu_relax()
184#define _raw_read_relax(lock) cpu_relax()
185#define _raw_write_relax(lock) cpu_relax()
186
187#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
188#define __raw_write_can_lock(rw) (!(rw)->lock)
189
190#endif /* !(__ASSEMBLY__) */
191
192#endif /* __SPARC_SPINLOCK_H */
diff --git a/include/asm-sparc/spinlock_32.h b/include/asm-sparc/spinlock_32.h
new file mode 100644
index 000000000000..de2249b267c6
--- /dev/null
+++ b/include/asm-sparc/spinlock_32.h
@@ -0,0 +1,192 @@
1/* spinlock.h: 32-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC_SPINLOCK_H
7#define __SPARC_SPINLOCK_H
8
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13#include <asm/psr.h>
14
15#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
16
17#define __raw_spin_unlock_wait(lock) \
18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
19
20static inline void __raw_spin_lock(raw_spinlock_t *lock)
21{
22 __asm__ __volatile__(
23 "\n1:\n\t"
24 "ldstub [%0], %%g2\n\t"
25 "orcc %%g2, 0x0, %%g0\n\t"
26 "bne,a 2f\n\t"
27 " ldub [%0], %%g2\n\t"
28 ".subsection 2\n"
29 "2:\n\t"
30 "orcc %%g2, 0x0, %%g0\n\t"
31 "bne,a 2b\n\t"
32 " ldub [%0], %%g2\n\t"
33 "b,a 1b\n\t"
34 ".previous\n"
35 : /* no outputs */
36 : "r" (lock)
37 : "g2", "memory", "cc");
38}
39
40static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41{
42 unsigned int result;
43 __asm__ __volatile__("ldstub [%1], %0"
44 : "=r" (result)
45 : "r" (lock)
46 : "memory");
47 return (result == 0);
48}
49
50static inline void __raw_spin_unlock(raw_spinlock_t *lock)
51{
52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
53}
54
55/* Read-write spinlocks, allowing multiple readers
56 * but only one writer.
57 *
58 * NOTE! it is quite common to have readers in interrupts
59 * but no interrupt writers. For those circumstances we
60 * can "mix" irq-safe locks - any writer needs to get a
61 * irq-safe write-lock, but readers can get non-irqsafe
62 * read-locks.
63 *
64 * XXX This might create some problems with my dual spinlock
65 * XXX scheme, deadlocks etc. -DaveM
66 *
67 * Sort of like atomic_t's on Sparc, but even more clever.
68 *
69 * ------------------------------------
70 * | 24-bit counter | wlock | raw_rwlock_t
71 * ------------------------------------
72 * 31 8 7 0
73 *
74 * wlock signifies the one writer is in or somebody is updating
75 * counter. For a writer, if he successfully acquires the wlock,
76 * but counter is non-zero, he has to release the lock and wait,
77 * till both counter and wlock are zero.
78 *
79 * Unfortunately this scheme limits us to ~16,000,000 cpus.
80 */
81static inline void __read_lock(raw_rwlock_t *rw)
82{
83 register raw_rwlock_t *lp asm("g1");
84 lp = rw;
85 __asm__ __volatile__(
86 "mov %%o7, %%g4\n\t"
87 "call ___rw_read_enter\n\t"
88 " ldstub [%%g1 + 3], %%g2\n"
89 : /* no outputs */
90 : "r" (lp)
91 : "g2", "g4", "memory", "cc");
92}
93
94#define __raw_read_lock(lock) \
95do { unsigned long flags; \
96 local_irq_save(flags); \
97 __read_lock(lock); \
98 local_irq_restore(flags); \
99} while(0)
100
101static inline void __read_unlock(raw_rwlock_t *rw)
102{
103 register raw_rwlock_t *lp asm("g1");
104 lp = rw;
105 __asm__ __volatile__(
106 "mov %%o7, %%g4\n\t"
107 "call ___rw_read_exit\n\t"
108 " ldstub [%%g1 + 3], %%g2\n"
109 : /* no outputs */
110 : "r" (lp)
111 : "g2", "g4", "memory", "cc");
112}
113
114#define __raw_read_unlock(lock) \
115do { unsigned long flags; \
116 local_irq_save(flags); \
117 __read_unlock(lock); \
118 local_irq_restore(flags); \
119} while(0)
120
121static inline void __raw_write_lock(raw_rwlock_t *rw)
122{
123 register raw_rwlock_t *lp asm("g1");
124 lp = rw;
125 __asm__ __volatile__(
126 "mov %%o7, %%g4\n\t"
127 "call ___rw_write_enter\n\t"
128 " ldstub [%%g1 + 3], %%g2\n"
129 : /* no outputs */
130 : "r" (lp)
131 : "g2", "g4", "memory", "cc");
132 *(volatile __u32 *)&lp->lock = ~0U;
133}
134
135static inline int __raw_write_trylock(raw_rwlock_t *rw)
136{
137 unsigned int val;
138
139 __asm__ __volatile__("ldstub [%1 + 3], %0"
140 : "=r" (val)
141 : "r" (&rw->lock)
142 : "memory");
143
144 if (val == 0) {
145 val = rw->lock & ~0xff;
146 if (val)
147 ((volatile u8*)&rw->lock)[3] = 0;
148 else
149 *(volatile u32*)&rw->lock = ~0U;
150 }
151
152 return (val == 0);
153}
154
155static inline int __read_trylock(raw_rwlock_t *rw)
156{
157 register raw_rwlock_t *lp asm("g1");
158 register int res asm("o0");
159 lp = rw;
160 __asm__ __volatile__(
161 "mov %%o7, %%g4\n\t"
162 "call ___rw_read_try\n\t"
163 " ldstub [%%g1 + 3], %%g2\n"
164 : "=r" (res)
165 : "r" (lp)
166 : "g2", "g4", "memory", "cc");
167 return res;
168}
169
170#define __raw_read_trylock(lock) \
171({ unsigned long flags; \
172 int res; \
173 local_irq_save(flags); \
174 res = __read_trylock(lock); \
175 local_irq_restore(flags); \
176 res; \
177})
178
179#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
180
181#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
182
183#define _raw_spin_relax(lock) cpu_relax()
184#define _raw_read_relax(lock) cpu_relax()
185#define _raw_write_relax(lock) cpu_relax()
186
187#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
188#define __raw_write_can_lock(rw) (!(rw)->lock)
189
190#endif /* !(__ASSEMBLY__) */
191
192#endif /* __SPARC_SPINLOCK_H */
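
The comment block inside spinlock_32.h describes the raw_rwlock_t word as a 24-bit reader counter sitting above a one-byte write lock; a short illustrative decoder (not kernel code) showing how the __raw_read_can_lock()/__raw_write_can_lock() tests above relate to that layout:

#include <stdbool.h>

/* Layout from the comment above: bits 31:8 reader counter, bits 7:0 wlock. */
struct rw_state { unsigned int readers; unsigned int wlock; };

static struct rw_state decode_rwlock32(unsigned int lockword)
{
	struct rw_state s;

	s.readers = lockword >> 8;	/* 24-bit counter */
	s.wlock   = lockword & 0xff;	/* non-zero while a writer owns or updates it */
	return s;
}

/* Mirrors __raw_read_can_lock() and __raw_write_can_lock() above. */
static bool read_can_lock(unsigned int w)  { return (w & 0xff) == 0; }
static bool write_can_lock(unsigned int w) { return w == 0; }
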
diff --git a/include/asm-sparc/spinlock_64.h b/include/asm-sparc/spinlock_64.h
new file mode 100644
index 000000000000..0006fe9f8c7a
--- /dev/null
+++ b/include/asm-sparc/spinlock_64.h
@@ -0,0 +1,250 @@
1/* spinlock.h: 64-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC64_SPINLOCK_H
7#define __SPARC64_SPINLOCK_H
8
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13/* To get debugging spinlocks which detect and catch
14 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
15 * and rebuild your kernel.
16 */
17
18/* All of these locking primitives are expected to work properly
19 * even in an RMO memory model, which currently is what the kernel
20 * runs in.
21 *
22 * There is another issue. Because we play games to save cycles
23 * in the non-contention case, we need to be extra careful about
24 * branch targets into the "spinning" code. They live in their
25 * own section, but the newer V9 branches have a shorter range
26 * than the traditional 32-bit sparc branch variants. The rule
27 * is that the branches that go into and out of the spinner sections
28 * must be pre-V9 branches.
29 */
30
31#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
32
33#define __raw_spin_unlock_wait(lp) \
34 do { rmb(); \
35 } while((lp)->lock)
36
37static inline void __raw_spin_lock(raw_spinlock_t *lock)
38{
39 unsigned long tmp;
40
41 __asm__ __volatile__(
42"1: ldstub [%1], %0\n"
43" membar #StoreLoad | #StoreStore\n"
44" brnz,pn %0, 2f\n"
45" nop\n"
46" .subsection 2\n"
47"2: ldub [%1], %0\n"
48" membar #LoadLoad\n"
49" brnz,pt %0, 2b\n"
50" nop\n"
51" ba,a,pt %%xcc, 1b\n"
52" .previous"
53 : "=&r" (tmp)
54 : "r" (lock)
55 : "memory");
56}
57
58static inline int __raw_spin_trylock(raw_spinlock_t *lock)
59{
60 unsigned long result;
61
62 __asm__ __volatile__(
63" ldstub [%1], %0\n"
64" membar #StoreLoad | #StoreStore"
65 : "=r" (result)
66 : "r" (lock)
67 : "memory");
68
69 return (result == 0UL);
70}
71
72static inline void __raw_spin_unlock(raw_spinlock_t *lock)
73{
74 __asm__ __volatile__(
75" membar #StoreStore | #LoadStore\n"
76" stb %%g0, [%0]"
77 : /* No outputs */
78 : "r" (lock)
79 : "memory");
80}
81
82static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
83{
84 unsigned long tmp1, tmp2;
85
86 __asm__ __volatile__(
87"1: ldstub [%2], %0\n"
88" membar #StoreLoad | #StoreStore\n"
89" brnz,pn %0, 2f\n"
90" nop\n"
91" .subsection 2\n"
92"2: rdpr %%pil, %1\n"
93" wrpr %3, %%pil\n"
94"3: ldub [%2], %0\n"
95" membar #LoadLoad\n"
96" brnz,pt %0, 3b\n"
97" nop\n"
98" ba,pt %%xcc, 1b\n"
99" wrpr %1, %%pil\n"
100" .previous"
101 : "=&r" (tmp1), "=&r" (tmp2)
102 : "r"(lock), "r"(flags)
103 : "memory");
104}
105
106/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
107
108static void inline __read_lock(raw_rwlock_t *lock)
109{
110 unsigned long tmp1, tmp2;
111
112 __asm__ __volatile__ (
113"1: ldsw [%2], %0\n"
114" brlz,pn %0, 2f\n"
115"4: add %0, 1, %1\n"
116" cas [%2], %0, %1\n"
117" cmp %0, %1\n"
118" membar #StoreLoad | #StoreStore\n"
119" bne,pn %%icc, 1b\n"
120" nop\n"
121" .subsection 2\n"
122"2: ldsw [%2], %0\n"
123" membar #LoadLoad\n"
124" brlz,pt %0, 2b\n"
125" nop\n"
126" ba,a,pt %%xcc, 4b\n"
127" .previous"
128 : "=&r" (tmp1), "=&r" (tmp2)
129 : "r" (lock)
130 : "memory");
131}
132
133static int inline __read_trylock(raw_rwlock_t *lock)
134{
135 int tmp1, tmp2;
136
137 __asm__ __volatile__ (
138"1: ldsw [%2], %0\n"
139" brlz,a,pn %0, 2f\n"
140" mov 0, %0\n"
141" add %0, 1, %1\n"
142" cas [%2], %0, %1\n"
143" cmp %0, %1\n"
144" membar #StoreLoad | #StoreStore\n"
145" bne,pn %%icc, 1b\n"
146" mov 1, %0\n"
147"2:"
148 : "=&r" (tmp1), "=&r" (tmp2)
149 : "r" (lock)
150 : "memory");
151
152 return tmp1;
153}
154
155static void inline __read_unlock(raw_rwlock_t *lock)
156{
157 unsigned long tmp1, tmp2;
158
159 __asm__ __volatile__(
160" membar #StoreLoad | #LoadLoad\n"
161"1: lduw [%2], %0\n"
162" sub %0, 1, %1\n"
163" cas [%2], %0, %1\n"
164" cmp %0, %1\n"
165" bne,pn %%xcc, 1b\n"
166" nop"
167 : "=&r" (tmp1), "=&r" (tmp2)
168 : "r" (lock)
169 : "memory");
170}
171
172static void inline __write_lock(raw_rwlock_t *lock)
173{
174 unsigned long mask, tmp1, tmp2;
175
176 mask = 0x80000000UL;
177
178 __asm__ __volatile__(
179"1: lduw [%2], %0\n"
180" brnz,pn %0, 2f\n"
181"4: or %0, %3, %1\n"
182" cas [%2], %0, %1\n"
183" cmp %0, %1\n"
184" membar #StoreLoad | #StoreStore\n"
185" bne,pn %%icc, 1b\n"
186" nop\n"
187" .subsection 2\n"
188"2: lduw [%2], %0\n"
189" membar #LoadLoad\n"
190" brnz,pt %0, 2b\n"
191" nop\n"
192" ba,a,pt %%xcc, 4b\n"
193" .previous"
194 : "=&r" (tmp1), "=&r" (tmp2)
195 : "r" (lock), "r" (mask)
196 : "memory");
197}
198
199static void inline __write_unlock(raw_rwlock_t *lock)
200{
201 __asm__ __volatile__(
202" membar #LoadStore | #StoreStore\n"
203" stw %%g0, [%0]"
204 : /* no outputs */
205 : "r" (lock)
206 : "memory");
207}
208
209static int inline __write_trylock(raw_rwlock_t *lock)
210{
211 unsigned long mask, tmp1, tmp2, result;
212
213 mask = 0x80000000UL;
214
215 __asm__ __volatile__(
216" mov 0, %2\n"
217"1: lduw [%3], %0\n"
218" brnz,pn %0, 2f\n"
219" or %0, %4, %1\n"
220" cas [%3], %0, %1\n"
221" cmp %0, %1\n"
222" membar #StoreLoad | #StoreStore\n"
223" bne,pn %%icc, 1b\n"
224" nop\n"
225" mov 1, %2\n"
226"2:"
227 : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
228 : "r" (lock), "r" (mask)
229 : "memory");
230
231 return result;
232}
233
234#define __raw_read_lock(p) __read_lock(p)
235#define __raw_read_trylock(p) __read_trylock(p)
236#define __raw_read_unlock(p) __read_unlock(p)
237#define __raw_write_lock(p) __write_lock(p)
238#define __raw_write_unlock(p) __write_unlock(p)
239#define __raw_write_trylock(p) __write_trylock(p)
240
241#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
242#define __raw_write_can_lock(rw) (!(rw)->lock)
243
244#define _raw_spin_relax(lock) cpu_relax()
245#define _raw_read_relax(lock) cpu_relax()
246#define _raw_write_relax(lock) cpu_relax()
247
248#endif /* !(__ASSEMBLY__) */
249
250#endif /* !(__SPARC64_SPINLOCK_H) */
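The ldstub loops in spinlock_64.h above follow the test-and-set-then-spin-on-loads pattern the header comment describes: take the lock byte atomically, and on contention fall into an out-of-line loop of plain loads until the byte reads zero before retrying. Here is a rough portable rendering of that strategy using C11 atomics in place of ldstub/membar (editor's sketch; the sketch_* names are illustrative).

#include <stdatomic.h>

typedef struct { atomic_uchar lock; } sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *lp)
{
        for (;;) {
                /* like "ldstub [lock], %tmp": set the byte, look at the old value */
                if (atomic_exchange_explicit(&lp->lock, 0xff,
                                             memory_order_acquire) == 0)
                        return;
                /* contended: spin on plain loads (the ".subsection 2" loop) */
                while (atomic_load_explicit(&lp->lock, memory_order_relaxed))
                        ;
        }
}

static void sketch_spin_unlock(sketch_spinlock_t *lp)
{
        /* like "membar ...; stb %g0, [lock]" */
        atomic_store_explicit(&lp->lock, 0, memory_order_release);
}

int main(void)
{
        sketch_spinlock_t l = { 0 };

        sketch_spin_lock(&l);
        sketch_spin_unlock(&l);
        return 0;
}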
diff --git a/include/asm-sparc/stat.h b/include/asm-sparc/stat.h
index 2299e1d5d94c..9fdcaf8c9cd3 100644
--- a/include/asm-sparc/stat.h
+++ b/include/asm-sparc/stat.h
@@ -1,76 +1,8 @@
+#ifndef ___ASM_SPARC_STAT_H
+#define ___ASM_SPARC_STAT_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/stat_64.h>
+#else
+#include <asm-sparc/stat_32.h>
+#endif
+#endif
1#ifndef _SPARC_STAT_H
2#define _SPARC_STAT_H
3
4#include <linux/types.h>
5
6struct __old_kernel_stat {
7	unsigned short st_dev;
8 unsigned short st_ino;
9 unsigned short st_mode;
10 unsigned short st_nlink;
11 unsigned short st_uid;
12 unsigned short st_gid;
13 unsigned short st_rdev;
14 unsigned long st_size;
15 unsigned long st_atime;
16 unsigned long st_mtime;
17 unsigned long st_ctime;
18};
19
20struct stat {
21 unsigned short st_dev;
22 unsigned long st_ino;
23 unsigned short st_mode;
24 short st_nlink;
25 unsigned short st_uid;
26 unsigned short st_gid;
27 unsigned short st_rdev;
28 long st_size;
29 long st_atime;
30 unsigned long st_atime_nsec;
31 long st_mtime;
32 unsigned long st_mtime_nsec;
33 long st_ctime;
34 unsigned long st_ctime_nsec;
35 long st_blksize;
36 long st_blocks;
37 unsigned long __unused4[2];
38};
39
40#define STAT_HAVE_NSEC 1
41
42struct stat64 {
43 unsigned long long st_dev;
44
45 unsigned long long st_ino;
46
47 unsigned int st_mode;
48 unsigned int st_nlink;
49
50 unsigned int st_uid;
51 unsigned int st_gid;
52
53 unsigned long long st_rdev;
54
55 unsigned char __pad3[8];
56
57 long long st_size;
58 unsigned int st_blksize;
59
60 unsigned char __pad4[8];
61 unsigned int st_blocks;
62
63 unsigned int st_atime;
64 unsigned int st_atime_nsec;
65
66 unsigned int st_mtime;
67 unsigned int st_mtime_nsec;
68
69 unsigned int st_ctime;
70 unsigned int st_ctime_nsec;
71
72 unsigned int __unused4;
73 unsigned int __unused5;
74};
75
76#endif
diff --git a/include/asm-sparc/stat_32.h b/include/asm-sparc/stat_32.h
new file mode 100644
index 000000000000..2299e1d5d94c
--- /dev/null
+++ b/include/asm-sparc/stat_32.h
@@ -0,0 +1,76 @@
1#ifndef _SPARC_STAT_H
2#define _SPARC_STAT_H
3
4#include <linux/types.h>
5
6struct __old_kernel_stat {
7 unsigned short st_dev;
8 unsigned short st_ino;
9 unsigned short st_mode;
10 unsigned short st_nlink;
11 unsigned short st_uid;
12 unsigned short st_gid;
13 unsigned short st_rdev;
14 unsigned long st_size;
15 unsigned long st_atime;
16 unsigned long st_mtime;
17 unsigned long st_ctime;
18};
19
20struct stat {
21 unsigned short st_dev;
22 unsigned long st_ino;
23 unsigned short st_mode;
24 short st_nlink;
25 unsigned short st_uid;
26 unsigned short st_gid;
27 unsigned short st_rdev;
28 long st_size;
29 long st_atime;
30 unsigned long st_atime_nsec;
31 long st_mtime;
32 unsigned long st_mtime_nsec;
33 long st_ctime;
34 unsigned long st_ctime_nsec;
35 long st_blksize;
36 long st_blocks;
37 unsigned long __unused4[2];
38};
39
40#define STAT_HAVE_NSEC 1
41
42struct stat64 {
43 unsigned long long st_dev;
44
45 unsigned long long st_ino;
46
47 unsigned int st_mode;
48 unsigned int st_nlink;
49
50 unsigned int st_uid;
51 unsigned int st_gid;
52
53 unsigned long long st_rdev;
54
55 unsigned char __pad3[8];
56
57 long long st_size;
58 unsigned int st_blksize;
59
60 unsigned char __pad4[8];
61 unsigned int st_blocks;
62
63 unsigned int st_atime;
64 unsigned int st_atime_nsec;
65
66 unsigned int st_mtime;
67 unsigned int st_mtime_nsec;
68
69 unsigned int st_ctime;
70 unsigned int st_ctime_nsec;
71
72 unsigned int __unused4;
73 unsigned int __unused5;
74};
75
76#endif
diff --git a/include/asm-sparc/stat_64.h b/include/asm-sparc/stat_64.h
new file mode 100644
index 000000000000..9650fdea847f
--- /dev/null
+++ b/include/asm-sparc/stat_64.h
@@ -0,0 +1,47 @@
1#ifndef _SPARC64_STAT_H
2#define _SPARC64_STAT_H
3
4#include <linux/types.h>
5
6struct stat {
7 unsigned st_dev;
8 ino_t st_ino;
9 mode_t st_mode;
10 short st_nlink;
11 uid_t st_uid;
12 gid_t st_gid;
13 unsigned st_rdev;
14 off_t st_size;
15 time_t st_atime;
16 time_t st_mtime;
17 time_t st_ctime;
18 off_t st_blksize;
19 off_t st_blocks;
20 unsigned long __unused4[2];
21};
22
23struct stat64 {
24 unsigned long st_dev;
25 unsigned long st_ino;
26 unsigned long st_nlink;
27
28 unsigned int st_mode;
29 unsigned int st_uid;
30 unsigned int st_gid;
31 unsigned int __pad0;
32
33 unsigned long st_rdev;
34 long st_size;
35 long st_blksize;
36 long st_blocks;
37
38 unsigned long st_atime;
39 unsigned long st_atime_nsec;
40 unsigned long st_mtime;
41 unsigned long st_mtime_nsec;
42 unsigned long st_ctime;
43 unsigned long st_ctime_nsec;
44 long __unused[3];
45};
46
47#endif
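The 32-bit and 64-bit 'struct stat' definitions above differ in field widths right from the first members (unsigned short versus unsigned int st_dev, and so on), which is why the dispatching stat.h has to commit to exactly one layout at preprocessing time. The fragment below mirrors just the leading fields of each layout to make the disagreement visible with offsetof() (editor's sketch; the *_head structs are hypothetical and ino_t/mode_t are approximated by plain integer types).

#include <stddef.h>
#include <stdio.h>

struct stat32_head {                    /* leading fields of the sparc32 'struct stat' above */
        unsigned short st_dev;
        unsigned long  st_ino;
        unsigned short st_mode;
        short          st_nlink;
};

struct stat64_head {                    /* leading fields of the sparc64 'struct stat' above */
        unsigned int   st_dev;
        unsigned long  st_ino;          /* approximates ino_t */
        unsigned int   st_mode;         /* approximates mode_t */
        short          st_nlink;
};

int main(void)
{
        printf("st_nlink offset: %zu in the 32-bit layout, %zu in the 64-bit layout\n",
               offsetof(struct stat32_head, st_nlink),
               offsetof(struct stat64_head, st_nlink));
        return 0;
}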
diff --git a/include/asm-sparc/statfs.h b/include/asm-sparc/statfs.h
index 304520fa8863..a70cc52e7018 100644
--- a/include/asm-sparc/statfs.h
+++ b/include/asm-sparc/statfs.h
@@ -1,6 +1,8 @@
+#ifndef ___ASM_SPARC_STATFS_H
+#define ___ASM_SPARC_STATFS_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/statfs_64.h>
+#else
+#include <asm-sparc/statfs_32.h>
+#endif
+#endif
1#ifndef _SPARC_STATFS_H
2#define _SPARC_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/include/asm-sparc/statfs_32.h b/include/asm-sparc/statfs_32.h
new file mode 100644
index 000000000000..304520fa8863
--- /dev/null
+++ b/include/asm-sparc/statfs_32.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC_STATFS_H
2#define _SPARC_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/include/asm-sparc/statfs_64.h b/include/asm-sparc/statfs_64.h
new file mode 100644
index 000000000000..79b3c890a5fa
--- /dev/null
+++ b/include/asm-sparc/statfs_64.h
@@ -0,0 +1,54 @@
1#ifndef _SPARC64_STATFS_H
2#define _SPARC64_STATFS_H
3
4#ifndef __KERNEL_STRICT_NAMES
5
6#include <linux/types.h>
7
8typedef __kernel_fsid_t fsid_t;
9
10#endif
11
12struct statfs {
13 long f_type;
14 long f_bsize;
15 long f_blocks;
16 long f_bfree;
17 long f_bavail;
18 long f_files;
19 long f_ffree;
20 __kernel_fsid_t f_fsid;
21 long f_namelen;
22 long f_frsize;
23 long f_spare[5];
24};
25
26struct statfs64 {
27 long f_type;
28 long f_bsize;
29 long f_blocks;
30 long f_bfree;
31 long f_bavail;
32 long f_files;
33 long f_ffree;
34 __kernel_fsid_t f_fsid;
35 long f_namelen;
36 long f_frsize;
37 long f_spare[5];
38};
39
40struct compat_statfs64 {
41 __u32 f_type;
42 __u32 f_bsize;
43 __u64 f_blocks;
44 __u64 f_bfree;
45 __u64 f_bavail;
46 __u64 f_files;
47 __u64 f_ffree;
48 __kernel_fsid_t f_fsid;
49 __u32 f_namelen;
50 __u32 f_frsize;
51 __u32 f_spare[5];
52};
53
54#endif
diff --git a/include/asm-sparc/string.h b/include/asm-sparc/string.h
index 8d7c0dd4f299..14c04c7697a5 100644
--- a/include/asm-sparc/string.h
+++ b/include/asm-sparc/string.h
@@ -1,205 +1,8 @@
+#ifndef ___ASM_SPARC_STRING_H
+#define ___ASM_SPARC_STRING_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/string_64.h>
+#else
+#include <asm-sparc/string_32.h>
+#endif
+#endif
1/*
2 * string.h: External definitions for optimized assembly string
3 * routines for the Linux Kernel.
4 *
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#ifndef __SPARC_STRING_H__
10#define __SPARC_STRING_H__
11
12#include <asm/page.h>
13
14/* Really, userland/ksyms should not see any of this stuff. */
15
16#ifdef __KERNEL__
17
18extern void __memmove(void *,const void *,__kernel_size_t);
19extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
20extern __kernel_size_t __memset(void *,int,__kernel_size_t);
21
22#ifndef EXPORT_SYMTAB_STROPS
23
24/* First the mem*() things. */
25#define __HAVE_ARCH_MEMMOVE
26#undef memmove
27#define memmove(_to, _from, _n) \
28({ \
29 void *_t = (_to); \
30 __memmove(_t, (_from), (_n)); \
31 _t; \
32})
33
34#define __HAVE_ARCH_MEMCPY
35
36static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
37{
38 extern void __copy_1page(void *, const void *);
39
40 if(n <= 32) {
41 __builtin_memcpy(to, from, n);
42 } else if (((unsigned int) to & 7) != 0) {
43 /* Destination is not aligned on the double-word boundary */
44 __memcpy(to, from, n);
45 } else {
46 switch(n) {
47 case PAGE_SIZE:
48 __copy_1page(to, from);
49 break;
50 default:
51 __memcpy(to, from, n);
52 break;
53 }
54 }
55 return to;
56}
57
58static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
59{
60 __memcpy(to, from, n);
61 return to;
62}
63
64#undef memcpy
65#define memcpy(t, f, n) \
66(__builtin_constant_p(n) ? \
67 __constant_memcpy((t),(f),(n)) : \
68 __nonconstant_memcpy((t),(f),(n)))
69
70#define __HAVE_ARCH_MEMSET
71
72static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
73{
74 extern void bzero_1page(void *);
75 extern __kernel_size_t __bzero(void *, __kernel_size_t);
76
77 if(!c) {
78 if(count == PAGE_SIZE)
79 bzero_1page(s);
80 else
81 __bzero(s, count);
82 } else {
83 __memset(s, c, count);
84 }
85 return s;
86}
87
88static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
89{
90 extern __kernel_size_t __bzero(void *, __kernel_size_t);
91
92 if(!c)
93 __bzero(s, count);
94 else
95 __memset(s, c, count);
96 return s;
97}
98
99static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
100{
101 __memset(s, c, count);
102 return s;
103}
104
105#undef memset
106#define memset(s, c, count) \
107(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
108 __constant_c_and_count_memset((s), (c), (count)) : \
109 __constant_c_memset((s), (c), (count))) \
110 : __nonconstant_memset((s), (c), (count)))
111
112#define __HAVE_ARCH_MEMSCAN
113
114#undef memscan
115#define memscan(__arg0, __char, __arg2) \
116({ \
117 extern void *__memscan_zero(void *, size_t); \
118 extern void *__memscan_generic(void *, int, size_t); \
119 void *__retval, *__addr = (__arg0); \
120 size_t __size = (__arg2); \
121 \
122 if(__builtin_constant_p(__char) && !(__char)) \
123 __retval = __memscan_zero(__addr, __size); \
124 else \
125 __retval = __memscan_generic(__addr, (__char), __size); \
126 \
127 __retval; \
128})
129
130#define __HAVE_ARCH_MEMCMP
131extern int memcmp(const void *,const void *,__kernel_size_t);
132
133/* Now the str*() stuff... */
134#define __HAVE_ARCH_STRLEN
135extern __kernel_size_t strlen(const char *);
136
137#define __HAVE_ARCH_STRNCMP
138
139extern int __strncmp(const char *, const char *, __kernel_size_t);
140
141static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
142{
143 register int retval;
144 switch(count) {
145 case 0: return 0;
146 case 1: return (src[0] - dest[0]);
147 case 2: retval = (src[0] - dest[0]);
148 if(!retval && src[0])
149 retval = (src[1] - dest[1]);
150 return retval;
151 case 3: retval = (src[0] - dest[0]);
152 if(!retval && src[0]) {
153 retval = (src[1] - dest[1]);
154 if(!retval && src[1])
155 retval = (src[2] - dest[2]);
156 }
157 return retval;
158 case 4: retval = (src[0] - dest[0]);
159 if(!retval && src[0]) {
160 retval = (src[1] - dest[1]);
161 if(!retval && src[1]) {
162 retval = (src[2] - dest[2]);
163 if (!retval && src[2])
164 retval = (src[3] - dest[3]);
165 }
166 }
167 return retval;
168 case 5: retval = (src[0] - dest[0]);
169 if(!retval && src[0]) {
170 retval = (src[1] - dest[1]);
171 if(!retval && src[1]) {
172 retval = (src[2] - dest[2]);
173 if (!retval && src[2]) {
174 retval = (src[3] - dest[3]);
175 if (!retval && src[3])
176 retval = (src[4] - dest[4]);
177 }
178 }
179 }
180 return retval;
181 default:
182 retval = (src[0] - dest[0]);
183 if(!retval && src[0]) {
184 retval = (src[1] - dest[1]);
185 if(!retval && src[1]) {
186 retval = (src[2] - dest[2]);
187 if(!retval && src[2])
188 retval = __strncmp(src+3,dest+3,count-3);
189 }
190 }
191 return retval;
192 }
193}
194
195#undef strncmp
196#define strncmp(__arg0, __arg1, __arg2) \
197(__builtin_constant_p(__arg2) ? \
198 __constant_strncmp(__arg0, __arg1, __arg2) : \
199 __strncmp(__arg0, __arg1, __arg2))
200
201#endif /* !EXPORT_SYMTAB_STROPS */
202
203#endif /* __KERNEL__ */
204
205#endif /* !(__SPARC_STRING_H__) */
diff --git a/include/asm-sparc/string_32.h b/include/asm-sparc/string_32.h
new file mode 100644
index 000000000000..6c5fddb7e6b5
--- /dev/null
+++ b/include/asm-sparc/string_32.h
@@ -0,0 +1,205 @@
1/*
2 * string.h: External definitions for optimized assembly string
3 * routines for the Linux Kernel.
4 *
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#ifndef __SPARC_STRING_H__
10#define __SPARC_STRING_H__
11
12#include <asm/page.h>
13
14/* Really, userland/ksyms should not see any of this stuff. */
15
16#ifdef __KERNEL__
17
18extern void __memmove(void *,const void *,__kernel_size_t);
19extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
20extern __kernel_size_t __memset(void *,int,__kernel_size_t);
21
22#ifndef EXPORT_SYMTAB_STROPS
23
24/* First the mem*() things. */
25#define __HAVE_ARCH_MEMMOVE
26#undef memmove
27#define memmove(_to, _from, _n) \
28({ \
29 void *_t = (_to); \
30 __memmove(_t, (_from), (_n)); \
31 _t; \
32})
33
34#define __HAVE_ARCH_MEMCPY
35
36static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
37{
38 extern void __copy_1page(void *, const void *);
39
40 if(n <= 32) {
41 __builtin_memcpy(to, from, n);
42 } else if (((unsigned int) to & 7) != 0) {
43 /* Destination is not aligned on the double-word boundary */
44 __memcpy(to, from, n);
45 } else {
46 switch(n) {
47 case PAGE_SIZE:
48 __copy_1page(to, from);
49 break;
50 default:
51 __memcpy(to, from, n);
52 break;
53 }
54 }
55 return to;
56}
57
58static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
59{
60 __memcpy(to, from, n);
61 return to;
62}
63
64#undef memcpy
65#define memcpy(t, f, n) \
66(__builtin_constant_p(n) ? \
67 __constant_memcpy((t),(f),(n)) : \
68 __nonconstant_memcpy((t),(f),(n)))
69
70#define __HAVE_ARCH_MEMSET
71
72static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
73{
74 extern void bzero_1page(void *);
75 extern __kernel_size_t __bzero(void *, __kernel_size_t);
76
77 if(!c) {
78 if(count == PAGE_SIZE)
79 bzero_1page(s);
80 else
81 __bzero(s, count);
82 } else {
83 __memset(s, c, count);
84 }
85 return s;
86}
87
88static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
89{
90 extern __kernel_size_t __bzero(void *, __kernel_size_t);
91
92 if(!c)
93 __bzero(s, count);
94 else
95 __memset(s, c, count);
96 return s;
97}
98
99static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
100{
101 __memset(s, c, count);
102 return s;
103}
104
105#undef memset
106#define memset(s, c, count) \
107(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
108 __constant_c_and_count_memset((s), (c), (count)) : \
109 __constant_c_memset((s), (c), (count))) \
110 : __nonconstant_memset((s), (c), (count)))
111
112#define __HAVE_ARCH_MEMSCAN
113
114#undef memscan
115#define memscan(__arg0, __char, __arg2) \
116({ \
117 extern void *__memscan_zero(void *, size_t); \
118 extern void *__memscan_generic(void *, int, size_t); \
119 void *__retval, *__addr = (__arg0); \
120 size_t __size = (__arg2); \
121 \
122 if(__builtin_constant_p(__char) && !(__char)) \
123 __retval = __memscan_zero(__addr, __size); \
124 else \
125 __retval = __memscan_generic(__addr, (__char), __size); \
126 \
127 __retval; \
128})
129
130#define __HAVE_ARCH_MEMCMP
131extern int memcmp(const void *,const void *,__kernel_size_t);
132
133/* Now the str*() stuff... */
134#define __HAVE_ARCH_STRLEN
135extern __kernel_size_t strlen(const char *);
136
137#define __HAVE_ARCH_STRNCMP
138
139extern int __strncmp(const char *, const char *, __kernel_size_t);
140
141static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
142{
143 register int retval;
144 switch(count) {
145 case 0: return 0;
146 case 1: return (src[0] - dest[0]);
147 case 2: retval = (src[0] - dest[0]);
148 if(!retval && src[0])
149 retval = (src[1] - dest[1]);
150 return retval;
151 case 3: retval = (src[0] - dest[0]);
152 if(!retval && src[0]) {
153 retval = (src[1] - dest[1]);
154 if(!retval && src[1])
155 retval = (src[2] - dest[2]);
156 }
157 return retval;
158 case 4: retval = (src[0] - dest[0]);
159 if(!retval && src[0]) {
160 retval = (src[1] - dest[1]);
161 if(!retval && src[1]) {
162 retval = (src[2] - dest[2]);
163 if (!retval && src[2])
164 retval = (src[3] - dest[3]);
165 }
166 }
167 return retval;
168 case 5: retval = (src[0] - dest[0]);
169 if(!retval && src[0]) {
170 retval = (src[1] - dest[1]);
171 if(!retval && src[1]) {
172 retval = (src[2] - dest[2]);
173 if (!retval && src[2]) {
174 retval = (src[3] - dest[3]);
175 if (!retval && src[3])
176 retval = (src[4] - dest[4]);
177 }
178 }
179 }
180 return retval;
181 default:
182 retval = (src[0] - dest[0]);
183 if(!retval && src[0]) {
184 retval = (src[1] - dest[1]);
185 if(!retval && src[1]) {
186 retval = (src[2] - dest[2]);
187 if(!retval && src[2])
188 retval = __strncmp(src+3,dest+3,count-3);
189 }
190 }
191 return retval;
192 }
193}
194
195#undef strncmp
196#define strncmp(__arg0, __arg1, __arg2) \
197(__builtin_constant_p(__arg2) ? \
198 __constant_strncmp(__arg0, __arg1, __arg2) : \
199 __strncmp(__arg0, __arg1, __arg2))
200
201#endif /* !EXPORT_SYMTAB_STROPS */
202
203#endif /* __KERNEL__ */
204
205#endif /* !(__SPARC_STRING_H__) */
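string_32.h above leans on __builtin_constant_p() to pick, at compile time, between a routine specialised for constant arguments and the generic assembly routine. The sketch below shows the same dispatch idiom in isolation (editor's illustration; copy_constant_sized()/copy_generic() are placeholders that both forward to libc memcpy rather than the tuned sparc routines).

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void *copy_constant_sized(void *to, const void *from, size_t n)
{
        return memcpy(to, from, n);   /* placeholder for the constant-size path */
}

static void *copy_generic(void *to, const void *from, size_t n)
{
        return memcpy(to, from, n);   /* placeholder for __memcpy() */
}

#define sketch_memcpy(t, f, n)                                  \
        (__builtin_constant_p(n) ?                              \
                copy_constant_sized((t), (f), (n)) :            \
                copy_generic((t), (f), (n)))

int main(void)
{
        char dst[8], src[8] = "abc";
        size_t runtime_n = 4;

        sketch_memcpy(dst, src, 4);          /* constant size: tuned path */
        sketch_memcpy(dst, src, runtime_n);  /* variable size: generic path */
        puts(dst);
        return 0;
}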
diff --git a/include/asm-sparc/string_64.h b/include/asm-sparc/string_64.h
new file mode 100644
index 000000000000..43161f2d17eb
--- /dev/null
+++ b/include/asm-sparc/string_64.h
@@ -0,0 +1,83 @@
1/*
2 * string.h: External definitions for optimized assembly string
3 * routines for the Linux Kernel.
4 *
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997,1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#ifndef __SPARC64_STRING_H__
10#define __SPARC64_STRING_H__
11
12/* Really, userland/ksyms should not see any of this stuff. */
13
14#ifdef __KERNEL__
15
16#include <asm/asi.h>
17
18extern void *__memset(void *,int,__kernel_size_t);
19
20#ifndef EXPORT_SYMTAB_STROPS
21
22/* First the mem*() things. */
23#define __HAVE_ARCH_MEMMOVE
24extern void *memmove(void *, const void *, __kernel_size_t);
25
26#define __HAVE_ARCH_MEMCPY
27extern void *memcpy(void *, const void *, __kernel_size_t);
28
29#define __HAVE_ARCH_MEMSET
30extern void *__builtin_memset(void *,int,__kernel_size_t);
31
32static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
33{
34 extern __kernel_size_t __bzero(void *, __kernel_size_t);
35
36 if (!c) {
37 __bzero(s, count);
38 return s;
39 } else
40 return __memset(s, c, count);
41}
42
43#undef memset
44#define memset(s, c, count) \
45((__builtin_constant_p(count) && (count) <= 32) ? \
46 __builtin_memset((s), (c), (count)) : \
47 (__builtin_constant_p(c) ? \
48 __constant_memset((s), (c), (count)) : \
49 __memset((s), (c), (count))))
50
51#define __HAVE_ARCH_MEMSCAN
52
53#undef memscan
54#define memscan(__arg0, __char, __arg2) \
55({ \
56 extern void *__memscan_zero(void *, size_t); \
57 extern void *__memscan_generic(void *, int, size_t); \
58 void *__retval, *__addr = (__arg0); \
59 size_t __size = (__arg2); \
60 \
61 if(__builtin_constant_p(__char) && !(__char)) \
62 __retval = __memscan_zero(__addr, __size); \
63 else \
64 __retval = __memscan_generic(__addr, (__char), __size); \
65 \
66 __retval; \
67})
68
69#define __HAVE_ARCH_MEMCMP
70extern int memcmp(const void *,const void *,__kernel_size_t);
71
72/* Now the str*() stuff... */
73#define __HAVE_ARCH_STRLEN
74extern __kernel_size_t strlen(const char *);
75
76#define __HAVE_ARCH_STRNCMP
77extern int strncmp(const char *, const char *, __kernel_size_t);
78
79#endif /* !EXPORT_SYMTAB_STROPS */
80
81#endif /* __KERNEL__ */
82
83#endif /* !(__SPARC64_STRING_H__) */
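The memset() macro in string_64.h above is effectively a three-way dispatch: small constant sizes expand to __builtin_memset(), constant fill values are routed so that zero fills can reach __bzero(), and anything else goes to __memset(). A simplified portable version of that decision tree follows (editor's sketch; zero_fill()/generic_set() stand in for __bzero()/__memset(), and the constant-zero test is folded directly into the macro).

#include <stddef.h>
#include <string.h>

static void *zero_fill(void *s, size_t n)           /* stand-in for __bzero() */
{
        return memset(s, 0, n);
}

static void *generic_set(void *s, int c, size_t n)  /* stand-in for __memset() */
{
        return memset(s, c, n);
}

#define sketch_memset(s, c, n)                                          \
        ((__builtin_constant_p(n) && (n) <= 32) ?                       \
                __builtin_memset((s), (c), (n)) :                       \
                ((__builtin_constant_p(c) && (c) == 0) ?                \
                        zero_fill((s), (n)) :                           \
                        generic_set((s), (c), (n))))

int main(void)
{
        char buf[64];

        sketch_memset(buf, 0, sizeof(buf));   /* large constant zero fill -> zero_fill() */
        sketch_memset(buf, 'x', 16);          /* small constant size -> __builtin_memset() */
        return 0;
}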
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index b4b024445fc9..15e2a3bc4f61 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -1,288 +1,8 @@
+#ifndef ___ASM_SPARC_SYSTEM_H
+#define ___ASM_SPARC_SYSTEM_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/system_64.h>
+#else
+#include <asm-sparc/system_32.h>
+#endif
+#endif
1#ifndef __SPARC_SYSTEM_H
2#define __SPARC_SYSTEM_H
3
4#include <linux/kernel.h>
5#include <linux/threads.h> /* NR_CPUS */
6#include <linux/thread_info.h>
7
8#include <asm/page.h>
9#include <asm/psr.h>
10#include <asm/ptrace.h>
11#include <asm/btfixup.h>
12#include <asm/smp.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/irqflags.h>
17
18/*
19 * Sparc (general) CPU types
20 */
21enum sparc_cpu {
22 sun4 = 0x00,
23 sun4c = 0x01,
24 sun4m = 0x02,
25 sun4d = 0x03,
26 sun4e = 0x04,
27 sun4u = 0x05, /* V8 ploos ploos */
28 sun_unknown = 0x06,
29 ap1000 = 0x07, /* almost a sun4m */
30};
31
32/* Really, userland should not be looking at any of this... */
33#ifdef __KERNEL__
34
35extern enum sparc_cpu sparc_cpu_model;
36
37#ifndef CONFIG_SUN4
38#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
39#define ARCH_SUN4 0
40#else
41#define ARCH_SUN4C_SUN4 1
42#define ARCH_SUN4 1
43#endif
44
45#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
46
47extern char reboot_command[];
48
49extern struct thread_info *current_set[NR_CPUS];
50
51extern unsigned long empty_bad_page;
52extern unsigned long empty_bad_page_table;
53extern unsigned long empty_zero_page;
54
55extern void sun_do_break(void);
56extern int serial_console;
57extern int stop_a_enabled;
58
59static inline int con_is_present(void)
60{
61 return serial_console ? 0 : 1;
62}
63
64/* When a context switch happens we must flush all user windows so that
65 * the windows of the current process are flushed onto its stack. This
66 * way the windows are all clean for the next process and the stack
67 * frames are up to date.
68 */
69extern void flush_user_windows(void);
70extern void kill_user_windows(void);
71extern void synchronize_user_stack(void);
72extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
73 void *fpqueue, unsigned long *fpqdepth);
74
75#ifdef CONFIG_SMP
76#define SWITCH_ENTER(prv) \
77 do { \
78 if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
79 put_psr(get_psr() | PSR_EF); \
80 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
81 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
82 clear_tsk_thread_flag(prv, TIF_USEDFPU); \
83 (prv)->thread.kregs->psr &= ~PSR_EF; \
84 } \
85 } while(0)
86
87#define SWITCH_DO_LAZY_FPU(next) /* */
88#else
89#define SWITCH_ENTER(prv) /* */
90#define SWITCH_DO_LAZY_FPU(nxt) \
91 do { \
92 if (last_task_used_math != (nxt)) \
93 (nxt)->thread.kregs->psr&=~PSR_EF; \
94 } while(0)
95#endif
96
97extern void flushw_all(void);
98
99/*
100 * Flush windows so that the VM switch which follows
101 * would not pull the stack from under us.
102 *
103 * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
104 * XXX WTF is the above comment? Found in late teen 2.4.x.
105 */
106#define prepare_arch_switch(next) do { \
107 __asm__ __volatile__( \
108 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
109 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
110 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
111 "save %sp, -0x40, %sp\n\t" \
112 "restore; restore; restore; restore; restore; restore; restore"); \
113} while(0)
114
115 /* Much care has gone into this code, do not touch it.
116 *
117 * We need to loadup regs l0/l1 for the newly forked child
118 * case because the trap return path relies on those registers
119 * holding certain values, gcc is told that they are clobbered.
120 * Gcc needs registers for 3 values in and 1 value out, so we
121 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
122 *
123 * Hey Dave, that do not touch sign is too much of an incentive
124 * - Anton & Pete
125 */
126#define switch_to(prev, next, last) do { \
127 SWITCH_ENTER(prev); \
128 SWITCH_DO_LAZY_FPU(next); \
129 cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
130 __asm__ __volatile__( \
131 "sethi %%hi(here - 0x8), %%o7\n\t" \
132 "mov %%g6, %%g3\n\t" \
133 "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
134 "rd %%psr, %%g4\n\t" \
135 "std %%sp, [%%g6 + %4]\n\t" \
136 "rd %%wim, %%g5\n\t" \
137 "wr %%g4, 0x20, %%psr\n\t" \
138 "nop\n\t" \
139 "std %%g4, [%%g6 + %3]\n\t" \
140 "ldd [%2 + %3], %%g4\n\t" \
141 "mov %2, %%g6\n\t" \
142 ".globl patchme_store_new_current\n" \
143"patchme_store_new_current:\n\t" \
144 "st %2, [%1]\n\t" \
145 "wr %%g4, 0x20, %%psr\n\t" \
146 "nop\n\t" \
147 "nop\n\t" \
148 "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
149 "ldd [%%g6 + %4], %%sp\n\t" \
150 "wr %%g5, 0x0, %%wim\n\t" \
151 "ldd [%%sp + 0x00], %%l0\n\t" \
152 "ldd [%%sp + 0x38], %%i6\n\t" \
153 "wr %%g4, 0x0, %%psr\n\t" \
154 "nop\n\t" \
155 "nop\n\t" \
156 "jmpl %%o7 + 0x8, %%g0\n\t" \
157 " ld [%%g3 + %5], %0\n\t" \
158 "here:\n" \
159 : "=&r" (last) \
160 : "r" (&(current_set[hard_smp_processor_id()])), \
161 "r" (task_thread_info(next)), \
162 "i" (TI_KPSR), \
163 "i" (TI_KSP), \
164 "i" (TI_TASK) \
165 : "g1", "g2", "g3", "g4", "g5", "g7", \
166 "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
167 "i0", "i1", "i2", "i3", "i4", "i5", \
168 "o0", "o1", "o2", "o3", "o7"); \
169 } while(0)
170
171/* XXX Change this if we ever use a PSO mode kernel. */
172#define mb() __asm__ __volatile__ ("" : : : "memory")
173#define rmb() mb()
174#define wmb() mb()
175#define read_barrier_depends() do { } while(0)
176#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
177#define smp_mb() __asm__ __volatile__("":::"memory")
178#define smp_rmb() __asm__ __volatile__("":::"memory")
179#define smp_wmb() __asm__ __volatile__("":::"memory")
180#define smp_read_barrier_depends() do { } while(0)
181
182#define nop() __asm__ __volatile__ ("nop")
183
184/* This has special calling conventions */
185#ifndef CONFIG_SMP
186BTFIXUPDEF_CALL(void, ___xchg32, void)
187#endif
188
189static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
190{
191#ifdef CONFIG_SMP
192 __asm__ __volatile__("swap [%2], %0"
193 : "=&r" (val)
194 : "0" (val), "r" (m)
195 : "memory");
196 return val;
197#else
198 register unsigned long *ptr asm("g1");
199 register unsigned long ret asm("g2");
200
201 ptr = (unsigned long *) m;
202 ret = val;
203
204 /* Note: this is magic and the nop there is
205 really needed. */
206 __asm__ __volatile__(
207 "mov %%o7, %%g4\n\t"
208 "call ___f____xchg32\n\t"
209 " nop\n\t"
210 : "=&r" (ret)
211 : "0" (ret), "r" (ptr)
212 : "g3", "g4", "g7", "memory", "cc");
213
214 return ret;
215#endif
216}
217
218#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
219
220extern void __xchg_called_with_bad_pointer(void);
221
222static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
223{
224 switch (size) {
225 case 4:
226 return xchg_u32(ptr, x);
227 };
228 __xchg_called_with_bad_pointer();
229 return x;
230}
231
232/* Emulate cmpxchg() the same way we emulate atomics,
233 * by hashing the object address and indexing into an array
234 * of spinlocks to get a bit of performance...
235 *
236 * See arch/sparc/lib/atomic32.c for implementation.
237 *
238 * Cribbed from <asm-parisc/atomic.h>
239 */
240#define __HAVE_ARCH_CMPXCHG 1
241
242/* bug catcher for when unsupported size is used - won't link */
243extern void __cmpxchg_called_with_bad_pointer(void);
244/* we only need to support cmpxchg of a u32 on sparc */
245extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
246
247/* don't worry...optimizer will get rid of most of this */
248static inline unsigned long
249__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
250{
251 switch (size) {
252 case 4:
253 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
254 default:
255 __cmpxchg_called_with_bad_pointer();
256 break;
257 }
258 return old;
259}
260
261#define cmpxchg(ptr, o, n) \
262({ \
263 __typeof__(*(ptr)) _o_ = (o); \
264 __typeof__(*(ptr)) _n_ = (n); \
265 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
266 (unsigned long)_n_, sizeof(*(ptr))); \
267})
268
269#include <asm-generic/cmpxchg-local.h>
270
271/*
272 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
273 * them available.
274 */
275#define cmpxchg_local(ptr, o, n) \
276 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
277 (unsigned long)(n), sizeof(*(ptr))))
278#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
279
280extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
281
282#endif /* __KERNEL__ */
283
284#endif /* __ASSEMBLY__ */
285
286#define arch_align_stack(x) (x)
287
288#endif /* !(__SPARC_SYSTEM_H) */
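The xchg()/__xchg() pair above shows a recurring kernel idiom: the macro feeds sizeof(*(ptr)) into a switch, and unsupported sizes reach __xchg_called_with_bad_pointer(), which is declared but never defined, so the mistake surfaces at link time once the dead branch is optimised away for good sizes. A portable sketch of that idiom (editor's illustration; a GCC builtin replaces the sparc swap instruction, and the bad-size hook aborts instead of being left undefined so the sketch links at any optimisation level).

#include <stdio.h>
#include <stdlib.h>

/* the kernel leaves its counterpart undefined so a bad size becomes a
 * link error; here it aborts so the sketch builds and runs everywhere */
static void sketch_xchg_called_with_bad_pointer(void)
{
        abort();
}

static unsigned long sketch_xchg(volatile void *ptr, unsigned long x, int size)
{
        switch (size) {
        case 4:
                /* stands in for the sparc32 "swap [ptr], reg" instruction */
                return __atomic_exchange_n((volatile unsigned int *)ptr,
                                           (unsigned int)x, __ATOMIC_SEQ_CST);
        default:
                sketch_xchg_called_with_bad_pointer();
                return x;
        }
}

#define SKETCH_XCHG(ptr, x) \
        ((__typeof__(*(ptr)))sketch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))

int main(void)
{
        unsigned int v = 1;

        printf("previous value: %u\n", SKETCH_XCHG(&v, 2u));
        return 0;
}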
diff --git a/include/asm-sparc/system_32.h b/include/asm-sparc/system_32.h
new file mode 100644
index 000000000000..b4b024445fc9
--- /dev/null
+++ b/include/asm-sparc/system_32.h
@@ -0,0 +1,288 @@
1#ifndef __SPARC_SYSTEM_H
2#define __SPARC_SYSTEM_H
3
4#include <linux/kernel.h>
5#include <linux/threads.h> /* NR_CPUS */
6#include <linux/thread_info.h>
7
8#include <asm/page.h>
9#include <asm/psr.h>
10#include <asm/ptrace.h>
11#include <asm/btfixup.h>
12#include <asm/smp.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/irqflags.h>
17
18/*
19 * Sparc (general) CPU types
20 */
21enum sparc_cpu {
22 sun4 = 0x00,
23 sun4c = 0x01,
24 sun4m = 0x02,
25 sun4d = 0x03,
26 sun4e = 0x04,
27 sun4u = 0x05, /* V8 ploos ploos */
28 sun_unknown = 0x06,
29 ap1000 = 0x07, /* almost a sun4m */
30};
31
32/* Really, userland should not be looking at any of this... */
33#ifdef __KERNEL__
34
35extern enum sparc_cpu sparc_cpu_model;
36
37#ifndef CONFIG_SUN4
38#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
39#define ARCH_SUN4 0
40#else
41#define ARCH_SUN4C_SUN4 1
42#define ARCH_SUN4 1
43#endif
44
45#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
46
47extern char reboot_command[];
48
49extern struct thread_info *current_set[NR_CPUS];
50
51extern unsigned long empty_bad_page;
52extern unsigned long empty_bad_page_table;
53extern unsigned long empty_zero_page;
54
55extern void sun_do_break(void);
56extern int serial_console;
57extern int stop_a_enabled;
58
59static inline int con_is_present(void)
60{
61 return serial_console ? 0 : 1;
62}
63
64/* When a context switch happens we must flush all user windows so that
65 * the windows of the current process are flushed onto its stack. This
66 * way the windows are all clean for the next process and the stack
67 * frames are up to date.
68 */
69extern void flush_user_windows(void);
70extern void kill_user_windows(void);
71extern void synchronize_user_stack(void);
72extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
73 void *fpqueue, unsigned long *fpqdepth);
74
75#ifdef CONFIG_SMP
76#define SWITCH_ENTER(prv) \
77 do { \
78 if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
79 put_psr(get_psr() | PSR_EF); \
80 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
81 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
82 clear_tsk_thread_flag(prv, TIF_USEDFPU); \
83 (prv)->thread.kregs->psr &= ~PSR_EF; \
84 } \
85 } while(0)
86
87#define SWITCH_DO_LAZY_FPU(next) /* */
88#else
89#define SWITCH_ENTER(prv) /* */
90#define SWITCH_DO_LAZY_FPU(nxt) \
91 do { \
92 if (last_task_used_math != (nxt)) \
93 (nxt)->thread.kregs->psr&=~PSR_EF; \
94 } while(0)
95#endif
96
97extern void flushw_all(void);
98
99/*
100 * Flush windows so that the VM switch which follows
101 * would not pull the stack from under us.
102 *
103 * SWITCH_ENTER and SWITH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
104 * XXX WTF is the above comment? Found in late teen 2.4.x.
105 */
106#define prepare_arch_switch(next) do { \
107 __asm__ __volatile__( \
108 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
109 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
110 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
111 "save %sp, -0x40, %sp\n\t" \
112 "restore; restore; restore; restore; restore; restore; restore"); \
113} while(0)
114
115 /* Much care has gone into this code, do not touch it.
116 *
117 * We need to loadup regs l0/l1 for the newly forked child
118 * case because the trap return path relies on those registers
119 * holding certain values, gcc is told that they are clobbered.
120 * Gcc needs registers for 3 values in and 1 value out, so we
121 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
122 *
123 * Hey Dave, that do not touch sign is too much of an incentive
124 * - Anton & Pete
125 */
126#define switch_to(prev, next, last) do { \
127 SWITCH_ENTER(prev); \
128 SWITCH_DO_LAZY_FPU(next); \
129 cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
130 __asm__ __volatile__( \
131 "sethi %%hi(here - 0x8), %%o7\n\t" \
132 "mov %%g6, %%g3\n\t" \
133 "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
134 "rd %%psr, %%g4\n\t" \
135 "std %%sp, [%%g6 + %4]\n\t" \
136 "rd %%wim, %%g5\n\t" \
137 "wr %%g4, 0x20, %%psr\n\t" \
138 "nop\n\t" \
139 "std %%g4, [%%g6 + %3]\n\t" \
140 "ldd [%2 + %3], %%g4\n\t" \
141 "mov %2, %%g6\n\t" \
142 ".globl patchme_store_new_current\n" \
143"patchme_store_new_current:\n\t" \
144 "st %2, [%1]\n\t" \
145 "wr %%g4, 0x20, %%psr\n\t" \
146 "nop\n\t" \
147 "nop\n\t" \
148 "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
149 "ldd [%%g6 + %4], %%sp\n\t" \
150 "wr %%g5, 0x0, %%wim\n\t" \
151 "ldd [%%sp + 0x00], %%l0\n\t" \
152 "ldd [%%sp + 0x38], %%i6\n\t" \
153 "wr %%g4, 0x0, %%psr\n\t" \
154 "nop\n\t" \
155 "nop\n\t" \
156 "jmpl %%o7 + 0x8, %%g0\n\t" \
157 " ld [%%g3 + %5], %0\n\t" \
158 "here:\n" \
159 : "=&r" (last) \
160 : "r" (&(current_set[hard_smp_processor_id()])), \
161 "r" (task_thread_info(next)), \
162 "i" (TI_KPSR), \
163 "i" (TI_KSP), \
164 "i" (TI_TASK) \
165 : "g1", "g2", "g3", "g4", "g5", "g7", \
166 "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
167 "i0", "i1", "i2", "i3", "i4", "i5", \
168 "o0", "o1", "o2", "o3", "o7"); \
169 } while(0)
170
171/* XXX Change this if we ever use a PSO mode kernel. */
172#define mb() __asm__ __volatile__ ("" : : : "memory")
173#define rmb() mb()
174#define wmb() mb()
175#define read_barrier_depends() do { } while(0)
176#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
177#define smp_mb() __asm__ __volatile__("":::"memory")
178#define smp_rmb() __asm__ __volatile__("":::"memory")
179#define smp_wmb() __asm__ __volatile__("":::"memory")
180#define smp_read_barrier_depends() do { } while(0)
181
182#define nop() __asm__ __volatile__ ("nop")
183
184/* This has special calling conventions */
185#ifndef CONFIG_SMP
186BTFIXUPDEF_CALL(void, ___xchg32, void)
187#endif
188
189static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
190{
191#ifdef CONFIG_SMP
192 __asm__ __volatile__("swap [%2], %0"
193 : "=&r" (val)
194 : "0" (val), "r" (m)
195 : "memory");
196 return val;
197#else
198 register unsigned long *ptr asm("g1");
199 register unsigned long ret asm("g2");
200
201 ptr = (unsigned long *) m;
202 ret = val;
203
204 /* Note: this is magic and the nop there is
205 really needed. */
206 __asm__ __volatile__(
207 "mov %%o7, %%g4\n\t"
208 "call ___f____xchg32\n\t"
209 " nop\n\t"
210 : "=&r" (ret)
211 : "0" (ret), "r" (ptr)
212 : "g3", "g4", "g7", "memory", "cc");
213
214 return ret;
215#endif
216}
217
218#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
219
220extern void __xchg_called_with_bad_pointer(void);
221
222static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
223{
224 switch (size) {
225 case 4:
226 return xchg_u32(ptr, x);
227 };
228 __xchg_called_with_bad_pointer();
229 return x;
230}
231
232/* Emulate cmpxchg() the same way we emulate atomics,
233 * by hashing the object address and indexing into an array
234 * of spinlocks to get a bit of performance...
235 *
236 * See arch/sparc/lib/atomic32.c for implementation.
237 *
238 * Cribbed from <asm-parisc/atomic.h>
239 */
240#define __HAVE_ARCH_CMPXCHG 1
241
242/* bug catcher for when unsupported size is used - won't link */
243extern void __cmpxchg_called_with_bad_pointer(void);
244/* we only need to support cmpxchg of a u32 on sparc */
245extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
246
247/* don't worry...optimizer will get rid of most of this */
248static inline unsigned long
249__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
250{
251 switch (size) {
252 case 4:
253 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
254 default:
255 __cmpxchg_called_with_bad_pointer();
256 break;
257 }
258 return old;
259}
260
261#define cmpxchg(ptr, o, n) \
262({ \
263 __typeof__(*(ptr)) _o_ = (o); \
264 __typeof__(*(ptr)) _n_ = (n); \
265 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
266 (unsigned long)_n_, sizeof(*(ptr))); \
267})
268
269#include <asm-generic/cmpxchg-local.h>
270
271/*
272 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
273 * them available.
274 */
275#define cmpxchg_local(ptr, o, n) \
276 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
277 (unsigned long)(n), sizeof(*(ptr))))
278#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
279
280extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
281
282#endif /* __KERNEL__ */
283
284#endif /* __ASSEMBLY__ */
285
286#define arch_align_stack(x) (x)
287
288#endif /* !(__SPARC_SYSTEM_H) */
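As the comment in system_32.h notes, sparc32 emulates cmpxchg() the same way it emulates the atomics: the object's address is hashed into an array of spinlocks and the read-compare-store is done under the selected lock (see arch/sparc/lib/atomic32.c). A userspace approximation of that scheme, with pthread mutexes standing in for the spinlocks, is sketched below (editor's illustration; the lock count, the hash and the names are arbitrary; build with -pthread).

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NR_LOCKS 16

static pthread_mutex_t sketch_locks[SKETCH_NR_LOCKS] = {
        [0 ... SKETCH_NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER  /* GNU range init */
};

static pthread_mutex_t *sketch_lock_for(volatile void *addr)
{
        /* hash the object address into the lock array */
        return &sketch_locks[((uintptr_t)addr >> 4) % SKETCH_NR_LOCKS];
}

static uint32_t sketch_cmpxchg_u32(volatile uint32_t *m, uint32_t old, uint32_t new_)
{
        pthread_mutex_t *lk = sketch_lock_for(m);
        uint32_t prev;

        pthread_mutex_lock(lk);
        prev = *m;
        if (prev == old)
                *m = new_;
        pthread_mutex_unlock(lk);

        return prev;            /* success when the return value equals 'old' */
}

int main(void)
{
        uint32_t word = 5;
        uint32_t prev = sketch_cmpxchg_u32(&word, 5, 9);

        printf("prev=%u word=%u\n", prev, word);
        return 0;
}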
diff --git a/include/asm-sparc/system_64.h b/include/asm-sparc/system_64.h
new file mode 100644
index 000000000000..db9e742a406a
--- /dev/null
+++ b/include/asm-sparc/system_64.h
@@ -0,0 +1,355 @@
1#ifndef __SPARC64_SYSTEM_H
2#define __SPARC64_SYSTEM_H
3
4#include <asm/ptrace.h>
5#include <asm/processor.h>
6#include <asm/visasm.h>
7
8#ifndef __ASSEMBLY__
9
10#include <linux/irqflags.h>
11#include <asm-generic/cmpxchg-local.h>
12
13/*
14 * Sparc (general) CPU types
15 */
16enum sparc_cpu {
17 sun4 = 0x00,
18 sun4c = 0x01,
19 sun4m = 0x02,
20 sun4d = 0x03,
21 sun4e = 0x04,
22 sun4u = 0x05, /* V8 ploos ploos */
23 sun_unknown = 0x06,
24 ap1000 = 0x07, /* almost a sun4m */
25};
26
27#define sparc_cpu_model sun4u
28
29/* This cannot ever be a sun4c nor sun4 :) That's just history. */
30#define ARCH_SUN4C_SUN4 0
31#define ARCH_SUN4 0
32
33extern char reboot_command[];
34
35/* These are here in an effort to more fully work around Spitfire Errata
36 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
37 * branch, the chip can stop executing instructions until a trap occurs.
38 * Therefore, if interrupts are disabled, the chip can hang forever.
39 *
40 * It used to be believed that the memory barrier had to be right in the
41 * delay slot, but a case has been traced recently wherein the memory barrier
42 * was one instruction after the branch delay slot and the chip still hung.
43 * The offending sequence was the following in sym_wakeup_done() of the
44 * sym53c8xx_2 driver:
45 *
46 * call sym_ccb_from_dsa, 0
47 * movge %icc, 0, %l0
48 * brz,pn %o0, .LL1303
49 * mov %o0, %l2
50 * membar #LoadLoad
51 *
52 * The branch has to be mispredicted for the bug to occur. Therefore, we put
53 * the memory barrier explicitly into a "branch always, predicted taken"
54 * delay slot to avoid the problem case.
55 */
56#define membar_safe(type) \
57do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
58 " membar " type "\n" \
59 "1:\n" \
60 : : : "memory"); \
61} while (0)
62
63#define mb() \
64 membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
65#define rmb() \
66 membar_safe("#LoadLoad")
67#define wmb() \
68 membar_safe("#StoreStore")
69#define membar_storeload() \
70 membar_safe("#StoreLoad")
71#define membar_storeload_storestore() \
72 membar_safe("#StoreLoad | #StoreStore")
73#define membar_storeload_loadload() \
74 membar_safe("#StoreLoad | #LoadLoad")
75#define membar_storestore_loadstore() \
76 membar_safe("#StoreStore | #LoadStore")
77
78#endif
79
80#define nop() __asm__ __volatile__ ("nop")
81
82#define read_barrier_depends() do { } while(0)
83#define set_mb(__var, __value) \
84 do { __var = __value; membar_storeload_storestore(); } while(0)
85
86#ifdef CONFIG_SMP
87#define smp_mb() mb()
88#define smp_rmb() rmb()
89#define smp_wmb() wmb()
90#define smp_read_barrier_depends() read_barrier_depends()
91#else
92#define smp_mb() __asm__ __volatile__("":::"memory")
93#define smp_rmb() __asm__ __volatile__("":::"memory")
94#define smp_wmb() __asm__ __volatile__("":::"memory")
95#define smp_read_barrier_depends() do { } while(0)
96#endif
97
98#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
99
100#define flushw_all() __asm__ __volatile__("flushw")
101
102/* Performance counter register access. */
103#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
104#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
105#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
106
107/* Blackbird errata workaround. See commentary in
108 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
109 * for more information.
110 */
111#define reset_pic() \
112 __asm__ __volatile__("ba,pt %xcc, 99f\n\t" \
113 ".align 64\n" \
114 "99:wr %g0, 0x0, %pic\n\t" \
115 "rd %pic, %g0")
116
117#ifndef __ASSEMBLY__
118
119extern void sun_do_break(void);
120extern int stop_a_enabled;
121
122extern void fault_in_user_windows(void);
123extern void synchronize_user_stack(void);
124
125extern void __flushw_user(void);
126#define flushw_user() __flushw_user()
127
128#define flush_user_windows flushw_user
129#define flush_register_windows flushw_all
130
131/* Don't hold the runqueue lock over context switch */
132#define __ARCH_WANT_UNLOCKED_CTXSW
133#define prepare_arch_switch(next) \
134do { \
135 flushw_all(); \
136} while (0)
137
138 /* See what happens when you design the chip correctly?
139 *
140 * We tell gcc we clobber all non-fixed-usage registers except
141 * for l0/l1. It will use one for 'next' and the other to hold
142 * the output value of 'last'. 'next' is not referenced again
143 * past the invocation of switch_to in the scheduler, so we need
144 * not preserve it's value. Hairy, but it lets us remove 2 loads
145 * and 2 stores in this critical code path. -DaveM
146 */
147#define switch_to(prev, next, last) \
148do { if (test_thread_flag(TIF_PERFCTR)) { \
149 unsigned long __tmp; \
150 read_pcr(__tmp); \
151 current_thread_info()->pcr_reg = __tmp; \
152 read_pic(__tmp); \
153 current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
154 current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
155 } \
156 flush_tlb_pending(); \
157 save_and_clear_fpu(); \
158 /* If you are tempted to conditionalize the following */ \
159 /* so that ASI is only written if it changes, think again. */ \
160 __asm__ __volatile__("wr %%g0, %0, %%asi" \
161 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
162 trap_block[current_thread_info()->cpu].thread = \
163 task_thread_info(next); \
164 __asm__ __volatile__( \
165 "mov %%g4, %%g7\n\t" \
166 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
167 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
168 "rdpr %%wstate, %%o5\n\t" \
169 "stx %%o6, [%%g6 + %6]\n\t" \
170 "stb %%o5, [%%g6 + %5]\n\t" \
171 "rdpr %%cwp, %%o5\n\t" \
172 "stb %%o5, [%%g6 + %8]\n\t" \
173 "mov %4, %%g6\n\t" \
174 "ldub [%4 + %8], %%g1\n\t" \
175 "wrpr %%g1, %%cwp\n\t" \
176 "ldx [%%g6 + %6], %%o6\n\t" \
177 "ldub [%%g6 + %5], %%o5\n\t" \
178 "ldub [%%g6 + %7], %%o7\n\t" \
179 "wrpr %%o5, 0x0, %%wstate\n\t" \
180 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
181 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
182 "ldx [%%g6 + %9], %%g4\n\t" \
183 "brz,pt %%o7, switch_to_pc\n\t" \
184 " mov %%g7, %0\n\t" \
185 "sethi %%hi(ret_from_syscall), %%g1\n\t" \
186 "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
187 " nop\n\t" \
188 ".globl switch_to_pc\n\t" \
189 "switch_to_pc:\n\t" \
190 : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
191 "=r" (__local_per_cpu_offset) \
192 : "0" (task_thread_info(next)), \
193 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
194 "i" (TI_CWP), "i" (TI_TASK) \
195 : "cc", \
196 "g1", "g2", "g3", "g7", \
197 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
198 "i0", "i1", "i2", "i3", "i4", "i5", \
199 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
200 /* If you fuck with this, update ret_from_syscall code too. */ \
201 if (test_thread_flag(TIF_PERFCTR)) { \
202 write_pcr(current_thread_info()->pcr_reg); \
203 reset_pic(); \
204 } \
205} while(0)
206
207static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
208{
209 unsigned long tmp1, tmp2;
210
211 __asm__ __volatile__(
212" membar #StoreLoad | #LoadLoad\n"
213" mov %0, %1\n"
214"1: lduw [%4], %2\n"
215" cas [%4], %2, %0\n"
216" cmp %2, %0\n"
217" bne,a,pn %%icc, 1b\n"
218" mov %1, %0\n"
219" membar #StoreLoad | #StoreStore\n"
220 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
221 : "0" (val), "r" (m)
222 : "cc", "memory");
223 return val;
224}
225
226static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
227{
228 unsigned long tmp1, tmp2;
229
230 __asm__ __volatile__(
231" membar #StoreLoad | #LoadLoad\n"
232" mov %0, %1\n"
233"1: ldx [%4], %2\n"
234" casx [%4], %2, %0\n"
235" cmp %2, %0\n"
236" bne,a,pn %%xcc, 1b\n"
237" mov %1, %0\n"
238" membar #StoreLoad | #StoreStore\n"
239 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
240 : "0" (val), "r" (m)
241 : "cc", "memory");
242 return val;
243}
244
245#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
246
247extern void __xchg_called_with_bad_pointer(void);
248
249static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
250 int size)
251{
252 switch (size) {
253 case 4:
254 return xchg32(ptr, x);
255 case 8:
256 return xchg64(ptr, x);
257 };
258 __xchg_called_with_bad_pointer();
259 return x;
260}
261
262extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
263
264/*
265 * Atomic compare and exchange. Compare OLD with MEM, if identical,
266 * store NEW in MEM. Return the initial value in MEM. Success is
267 * indicated by comparing RETURN with OLD.
268 */
269
270#define __HAVE_ARCH_CMPXCHG 1
271
272static inline unsigned long
273__cmpxchg_u32(volatile int *m, int old, int new)
274{
275 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
276 "cas [%2], %3, %0\n\t"
277 "membar #StoreLoad | #StoreStore"
278 : "=&r" (new)
279 : "0" (new), "r" (m), "r" (old)
280 : "memory");
281
282 return new;
283}
284
285static inline unsigned long
286__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
287{
288 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
289 "casx [%2], %3, %0\n\t"
290 "membar #StoreLoad | #StoreStore"
291 : "=&r" (new)
292 : "0" (new), "r" (m), "r" (old)
293 : "memory");
294
295 return new;
296}
297
298/* This function doesn't exist, so you'll get a linker error
299 if something tries to do an invalid cmpxchg(). */
300extern void __cmpxchg_called_with_bad_pointer(void);
301
302static inline unsigned long
303__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
304{
305 switch (size) {
306 case 4:
307 return __cmpxchg_u32(ptr, old, new);
308 case 8:
309 return __cmpxchg_u64(ptr, old, new);
310 }
311 __cmpxchg_called_with_bad_pointer();
312 return old;
313}
314
315#define cmpxchg(ptr,o,n) \
316 ({ \
317 __typeof__(*(ptr)) _o_ = (o); \
318 __typeof__(*(ptr)) _n_ = (n); \
319 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
320 (unsigned long)_n_, sizeof(*(ptr))); \
321 })
322
323/*
324 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
325 * them available.
326 */
327
328static inline unsigned long __cmpxchg_local(volatile void *ptr,
329 unsigned long old,
330 unsigned long new, int size)
331{
332 switch (size) {
333 case 4:
334 case 8: return __cmpxchg(ptr, old, new, size);
335 default:
336 return __cmpxchg_local_generic(ptr, old, new, size);
337 }
338
339 return old;
340}
341
342#define cmpxchg_local(ptr, o, n) \
343 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
344 (unsigned long)(n), sizeof(*(ptr))))
345#define cmpxchg64_local(ptr, o, n) \
346 ({ \
347 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
348 cmpxchg_local((ptr), (o), (n)); \
349 })
350
351#endif /* !(__ASSEMBLY__) */
352
353#define arch_align_stack(x) (x)
354
355#endif /* !(__SPARC64_SYSTEM_H) */
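xchg32()/xchg64() in system_64.h above implement exchange as a cas/casx retry loop wrapped in membars: read the current value, try to install the new one, and loop until the compare-and-swap sees no interference. The same loop in portable form (editor's sketch; the GCC builtin supplies the ordering that the explicit membar sequences provide on sparc64).

#include <stdint.h>
#include <stdio.h>

static uint64_t sketch_xchg64(volatile uint64_t *m, uint64_t val)
{
        uint64_t seen = *m;

        /* keep proposing 'val' until the compare-and-swap finds *m unchanged;
         * on failure 'seen' is refreshed with the value that was there */
        while (!__atomic_compare_exchange_n(m, &seen, val, 0,
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                ;

        return seen;            /* the value that got replaced */
}

int main(void)
{
        uint64_t v = 10;
        uint64_t old = sketch_xchg64(&v, 42);

        printf("old=%llu new=%llu\n",
               (unsigned long long)old, (unsigned long long)v);
        return 0;
}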
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index 91b9f5888c85..64155cf89f37 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -1,151 +1,8 @@
+#ifndef ___ASM_SPARC_THREAD_INFO_H
+#define ___ASM_SPARC_THREAD_INFO_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/thread_info_64.h>
+#else
+#include <asm-sparc/thread_info_32.h>
+#endif
+#endif
1/*
2 * thread_info.h: sparc low-level thread information
3 * adapted from the ppc version by Pete Zaitcev, which was
4 * adapted from the i386 version by Paul Mackerras
5 *
6 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
7 * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
8 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
9 */
10
11#ifndef _ASM_THREAD_INFO_H
12#define _ASM_THREAD_INFO_H
13
14#ifdef __KERNEL__
15
16#ifndef __ASSEMBLY__
17
18#include <asm/btfixup.h>
19#include <asm/ptrace.h>
20#include <asm/page.h>
21
22/*
23 * Low level task data.
24 *
25 * If you change this, change the TI_* offsets below to match.
26 */
27#define NSWINS 8
28struct thread_info {
29 unsigned long uwinmask;
30 struct task_struct *task; /* main task structure */
31 struct exec_domain *exec_domain; /* execution domain */
32 unsigned long flags; /* low level flags */
33 int cpu; /* cpu we're on */
34 int preempt_count; /* 0 => preemptable,
35 <0 => BUG */
36 int softirq_count;
37 int hardirq_count;
38
39 /* Context switch saved kernel state. */
40 unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
41 unsigned long kpc;
42 unsigned long kpsr;
43 unsigned long kwim;
44
45 /* A place to store user windows and stack pointers
46 * when the stack needs inspection.
47 */
48 struct reg_window reg_window[NSWINS]; /* align for ldd! */
49 unsigned long rwbuf_stkptrs[NSWINS];
50 unsigned long w_saved;
51
52 struct restart_block restart_block;
53};
54
55/*
56 * macros/functions for gaining access to the thread information structure
57 *
58 * preempt_count needs to be 1 initially, until the scheduler is functional.
59 */
60#define INIT_THREAD_INFO(tsk) \
61{ \
62 .uwinmask = 0, \
63 .task = &tsk, \
64 .exec_domain = &default_exec_domain, \
65 .flags = 0, \
66 .cpu = 0, \
67 .preempt_count = 1, \
68 .restart_block = { \
69 .fn = do_no_restart_syscall, \
70 }, \
71}
72
73#define init_thread_info (init_thread_union.thread_info)
74#define init_stack (init_thread_union.stack)
75
76/* how to get the thread information struct from C */
77register struct thread_info *current_thread_info_reg asm("g6");
78#define current_thread_info() (current_thread_info_reg)
79
80/*
81 * thread information allocation
82 */
83#if PAGE_SHIFT == 13
84#define THREAD_INFO_ORDER 0
85#else /* PAGE_SHIFT */
86#define THREAD_INFO_ORDER 1
87#endif
88
89BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
90#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
91
92BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
93#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
94
95#endif /* __ASSEMBLY__ */
96
97/*
98 * Size of kernel stack for each process.
99 * Observe the order of get_free_pages() in alloc_thread_info().
100 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
101 */
102#define THREAD_SIZE 8192
103
104/*
105 * Offsets in thread_info structure, used in assembly code
106 * The "#define REGWIN_SZ 0x40" was abolished, so no multiplications.
107 */
108#define TI_UWINMASK 0x00 /* uwinmask */
109#define TI_TASK 0x04
110#define TI_EXECDOMAIN 0x08 /* exec_domain */
111#define TI_FLAGS 0x0c
112#define TI_CPU 0x10
113#define TI_PREEMPT 0x14 /* preempt_count */
114#define TI_SOFTIRQ 0x18 /* softirq_count */
115#define TI_HARDIRQ 0x1c /* hardirq_count */
116#define TI_KSP 0x20 /* ksp */
117#define TI_KPC 0x24 /* kpc (ldd'ed with kpc) */
118#define TI_KPSR 0x28 /* kpsr */
119#define TI_KWIM 0x2c /* kwim (ldd'ed with kpsr) */
120#define TI_REG_WINDOW 0x30
121#define TI_RWIN_SPTRS 0x230
122#define TI_W_SAVED 0x250
123/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
124
125#define PREEMPT_ACTIVE 0x4000000
126
127/*
128 * thread information flag bit numbers
129 */
130#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
131/* flag bit 1 is available */
132#define TIF_SIGPENDING 2 /* signal pending */
133#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
134#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
135#define TIF_USEDFPU 8 /* FPU was used by this task
136 * this quantum (SMP) */
137#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
138 * TIF_NEED_RESCHED */
139#define TIF_MEMDIE 10
140
141/* as above, but as bit values */
142#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
143#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
144#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
145#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
146#define _TIF_USEDFPU (1<<TIF_USEDFPU)
147#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
148
149#endif /* __KERNEL__ */
150
151#endif /* _ASM_THREAD_INFO_H */
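The old thread_info.h above warns "If you change this, change the TI_* offsets below to match", because TI_TASK, TI_FLAGS and friends are hand-maintained byte offsets consumed by assembly. One way to keep such constants honest is a compile-time offsetof() check, sketched below against a trimmed mirror of the 32-bit structure (editor's illustration; ti_model and the pointer-width guard are assumptions, and the asserts only line up on a 32-bit data model).

#include <stddef.h>
#include <stdint.h>

struct ti_model {                       /* trimmed mirror of the 32-bit thread_info */
        unsigned long uwinmask;         /* TI_UWINMASK   0x00 */
        void *task;                     /* TI_TASK       0x04 with 4-byte longs/pointers */
        void *exec_domain;              /* TI_EXECDOMAIN 0x08 */
        unsigned long flags;            /* TI_FLAGS      0x0c */
};

_Static_assert(offsetof(struct ti_model, uwinmask) == 0x00, "TI_UWINMASK mismatch");

#if UINTPTR_MAX == 0xffffffffu          /* only meaningful on a 32-bit data model */
_Static_assert(offsetof(struct ti_model, task)  == 0x04, "TI_TASK mismatch");
_Static_assert(offsetof(struct ti_model, flags) == 0x0c, "TI_FLAGS mismatch");
#endif

int main(void)
{
        return 0;
}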
diff --git a/include/asm-sparc/thread_info_32.h b/include/asm-sparc/thread_info_32.h
new file mode 100644
index 000000000000..91b9f5888c85
--- /dev/null
+++ b/include/asm-sparc/thread_info_32.h
@@ -0,0 +1,151 @@
1/*
2 * thread_info.h: sparc low-level thread information
3 * adapted from the ppc version by Pete Zaitcev, which was
4 * adapted from the i386 version by Paul Mackerras
5 *
6 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
7 * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
8 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
9 */
10
11#ifndef _ASM_THREAD_INFO_H
12#define _ASM_THREAD_INFO_H
13
14#ifdef __KERNEL__
15
16#ifndef __ASSEMBLY__
17
18#include <asm/btfixup.h>
19#include <asm/ptrace.h>
20#include <asm/page.h>
21
22/*
23 * Low level task data.
24 *
25 * If you change this, change the TI_* offsets below to match.
26 */
27#define NSWINS 8
28struct thread_info {
29 unsigned long uwinmask;
30 struct task_struct *task; /* main task structure */
31 struct exec_domain *exec_domain; /* execution domain */
32 unsigned long flags; /* low level flags */
33 int cpu; /* cpu we're on */
34 int preempt_count; /* 0 => preemptable,
35 <0 => BUG */
36 int softirq_count;
37 int hardirq_count;
38
39 /* Context switch saved kernel state. */
40 unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
41 unsigned long kpc;
42 unsigned long kpsr;
43 unsigned long kwim;
44
45 /* A place to store user windows and stack pointers
46 * when the stack needs inspection.
47 */
48 struct reg_window reg_window[NSWINS]; /* align for ldd! */
49 unsigned long rwbuf_stkptrs[NSWINS];
50 unsigned long w_saved;
51
52 struct restart_block restart_block;
53};
54
55/*
56 * macros/functions for gaining access to the thread information structure
57 *
58 * preempt_count needs to be 1 initially, until the scheduler is functional.
59 */
60#define INIT_THREAD_INFO(tsk) \
61{ \
62 .uwinmask = 0, \
63 .task = &tsk, \
64 .exec_domain = &default_exec_domain, \
65 .flags = 0, \
66 .cpu = 0, \
67 .preempt_count = 1, \
68 .restart_block = { \
69 .fn = do_no_restart_syscall, \
70 }, \
71}
72
73#define init_thread_info (init_thread_union.thread_info)
74#define init_stack (init_thread_union.stack)
75
76/* how to get the thread information struct from C */
77register struct thread_info *current_thread_info_reg asm("g6");
78#define current_thread_info() (current_thread_info_reg)
79
80/*
81 * thread information allocation
82 */
83#if PAGE_SHIFT == 13
84#define THREAD_INFO_ORDER 0
85#else /* PAGE_SHIFT */
86#define THREAD_INFO_ORDER 1
87#endif
88
89BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
90#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
91
92BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
93#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
94
95#endif /* __ASSEMBLY__ */
96
97/*
98 * Size of kernel stack for each process.
99 * Observe the order of get_free_pages() in alloc_thread_info().
100 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
101 */
102#define THREAD_SIZE 8192
103
104/*
105 * Offsets in thread_info structure, used in assembly code
106 * The "#define REGWIN_SZ 0x40" was abolished, so no multiplications.
107 */
108#define TI_UWINMASK 0x00 /* uwinmask */
109#define TI_TASK 0x04
110#define TI_EXECDOMAIN 0x08 /* exec_domain */
111#define TI_FLAGS 0x0c
112#define TI_CPU 0x10
113#define TI_PREEMPT 0x14 /* preempt_count */
114#define TI_SOFTIRQ 0x18 /* softirq_count */
115#define TI_HARDIRQ 0x1c /* hardirq_count */
116#define TI_KSP 0x20 /* ksp */
117#define TI_KPC 0x24 /* kpc (ldd'ed with kpc) */
118#define TI_KPSR 0x28 /* kpsr */
119#define TI_KWIM 0x2c /* kwim (ldd'ed with kpsr) */
120#define TI_REG_WINDOW 0x30
121#define TI_RWIN_SPTRS 0x230
122#define TI_W_SAVED 0x250
123/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
124
125#define PREEMPT_ACTIVE 0x4000000
126
127/*
128 * thread information flag bit numbers
129 */
130#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
131/* flag bit 1 is available */
132#define TIF_SIGPENDING 2 /* signal pending */
133#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
134#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
135#define TIF_USEDFPU 8 /* FPU was used by this task
136 * this quantum (SMP) */
137#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
138 * TIF_NEED_RESCHED */
139#define TIF_MEMDIE 10
140
141/* as above, but as bit values */
142#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
143#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
144#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
145#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
146#define _TIF_USEDFPU (1<<TIF_USEDFPU)
147#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
148
149#endif /* __KERNEL__ */
150
151#endif /* _ASM_THREAD_INFO_H */
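
A minimal sketch (not part of this patch; kernel build context assumed, helper name hypothetical) of how the TI_* offsets above could be cross-checked against struct thread_info from C, which is exactly the invariant the "If you change this, change the TI_* offsets below to match" comment asks for:

/* Illustrative only -- not part of the patch. */
#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */
#include <asm/thread_info.h>

static void __maybe_unused check_thread_info_offsets(void)
{
	/* If struct thread_info changes, these asserts catch stale TI_* values. */
	BUILD_BUG_ON(offsetof(struct thread_info, uwinmask) != TI_UWINMASK);
	BUILD_BUG_ON(offsetof(struct thread_info, task)     != TI_TASK);
	BUILD_BUG_ON(offsetof(struct thread_info, flags)    != TI_FLAGS);
	BUILD_BUG_ON(offsetof(struct thread_info, w_saved)  != TI_W_SAVED);
}
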
diff --git a/include/asm-sparc/thread_info_64.h b/include/asm-sparc/thread_info_64.h
new file mode 100644
index 000000000000..c6d2e6c7f844
--- /dev/null
+++ b/include/asm-sparc/thread_info_64.h
@@ -0,0 +1,277 @@
1/* thread_info.h: sparc64 low-level thread information
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 */
5
6#ifndef _ASM_THREAD_INFO_H
7#define _ASM_THREAD_INFO_H
8
9#ifdef __KERNEL__
10
11#define NSWINS 7
12
13#define TI_FLAG_BYTE_FAULT_CODE 0
14#define TI_FLAG_FAULT_CODE_SHIFT 56
15#define TI_FLAG_BYTE_WSTATE 1
16#define TI_FLAG_WSTATE_SHIFT 48
17#define TI_FLAG_BYTE_CWP 2
18#define TI_FLAG_CWP_SHIFT 40
19#define TI_FLAG_BYTE_CURRENT_DS 3
20#define TI_FLAG_CURRENT_DS_SHIFT 32
21#define TI_FLAG_BYTE_FPDEPTH 4
22#define TI_FLAG_FPDEPTH_SHIFT 24
23#define TI_FLAG_BYTE_WSAVED 5
24#define TI_FLAG_WSAVED_SHIFT 16
25
26#include <asm/page.h>
27
28#ifndef __ASSEMBLY__
29
30#include <asm/ptrace.h>
31#include <asm/types.h>
32
33struct task_struct;
34struct exec_domain;
35
36struct thread_info {
37 /* D$ line 1 */
38 struct task_struct *task;
39 unsigned long flags;
40 __u8 fpsaved[7];
41 __u8 status;
42 unsigned long ksp;
43
44 /* D$ line 2 */
45 unsigned long fault_address;
46 struct pt_regs *kregs;
47 struct exec_domain *exec_domain;
48 int preempt_count; /* 0 => preemptable, <0 => BUG */
49 __u8 new_child;
50 __u8 syscall_noerror;
51 __u16 cpu;
52
53 unsigned long *utraps;
54
55 struct reg_window reg_window[NSWINS];
56 unsigned long rwbuf_stkptrs[NSWINS];
57
58 unsigned long gsr[7];
59 unsigned long xfsr[7];
60
61 __u64 __user *user_cntd0;
62 __u64 __user *user_cntd1;
63 __u64 kernel_cntd0, kernel_cntd1;
64 __u64 pcr_reg;
65
66 struct restart_block restart_block;
67
68 struct pt_regs *kern_una_regs;
69 unsigned int kern_una_insn;
70
71 unsigned long fpregs[0] __attribute__ ((aligned(64)));
72};
73
74#endif /* !(__ASSEMBLY__) */
75
76/* offsets into the thread_info struct for assembly code access */
77#define TI_TASK 0x00000000
78#define TI_FLAGS 0x00000008
79#define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE)
80#define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE)
81#define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP)
82#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
83#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
84#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED)
85#define TI_FPSAVED 0x00000010
86#define TI_KSP 0x00000018
87#define TI_FAULT_ADDR 0x00000020
88#define TI_KREGS 0x00000028
89#define TI_EXEC_DOMAIN 0x00000030
90#define TI_PRE_COUNT 0x00000038
91#define TI_NEW_CHILD 0x0000003c
92#define TI_SYS_NOERROR 0x0000003d
93#define TI_CPU 0x0000003e
94#define TI_UTRAPS 0x00000040
95#define TI_REG_WINDOW 0x00000048
96#define TI_RWIN_SPTRS 0x000003c8
97#define TI_GSR 0x00000400
98#define TI_XFSR 0x00000438
99#define TI_USER_CNTD0 0x00000470
100#define TI_USER_CNTD1 0x00000478
101#define TI_KERN_CNTD0 0x00000480
102#define TI_KERN_CNTD1 0x00000488
103#define TI_PCR 0x00000490
104#define TI_RESTART_BLOCK 0x00000498
105#define TI_KUNA_REGS 0x000004c0
106#define TI_KUNA_INSN 0x000004c8
107#define TI_FPREGS 0x00000500
108
109/* We embed this in the uppermost byte of thread_info->flags */
110#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
111#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
112#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
113#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
114#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
115
116#if PAGE_SHIFT == 13
117#define THREAD_SIZE (2*PAGE_SIZE)
118#define THREAD_SHIFT (PAGE_SHIFT + 1)
119#else /* PAGE_SHIFT == 13 */
120#define THREAD_SIZE PAGE_SIZE
121#define THREAD_SHIFT PAGE_SHIFT
122#endif /* PAGE_SHIFT == 13 */
123
124#define PREEMPT_ACTIVE 0x4000000
125
126/*
127 * macros/functions for gaining access to the thread information structure
128 *
129 * preempt_count needs to be 1 initially, until the scheduler is functional.
130 */
131#ifndef __ASSEMBLY__
132
133#define INIT_THREAD_INFO(tsk) \
134{ \
135 .task = &tsk, \
136 .flags = ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
137 .exec_domain = &default_exec_domain, \
138 .preempt_count = 1, \
139 .restart_block = { \
140 .fn = do_no_restart_syscall, \
141 }, \
142}
143
144#define init_thread_info (init_thread_union.thread_info)
145#define init_stack (init_thread_union.stack)
146
147/* how to get the thread information struct from C */
148register struct thread_info *current_thread_info_reg asm("g6");
149#define current_thread_info() (current_thread_info_reg)
150
151/* thread information allocation */
152#if PAGE_SHIFT == 13
153#define __THREAD_INFO_ORDER 1
154#else /* PAGE_SHIFT == 13 */
155#define __THREAD_INFO_ORDER 0
156#endif /* PAGE_SHIFT == 13 */
157
158#ifdef CONFIG_DEBUG_STACK_USAGE
159#define alloc_thread_info(tsk) \
160({ \
161 struct thread_info *ret; \
162 \
163 ret = (struct thread_info *) \
164 __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER); \
165 if (ret) \
166 memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER); \
167 ret; \
168})
169#else
170#define alloc_thread_info(tsk) \
171 ((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER))
172#endif
173
174#define free_thread_info(ti) \
175 free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
176
177#define __thread_flag_byte_ptr(ti) \
178 ((unsigned char *)(&((ti)->flags)))
179#define __cur_thread_flag_byte_ptr __thread_flag_byte_ptr(current_thread_info())
180
181#define get_thread_fault_code() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE])
182#define set_thread_fault_code(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE] = (val))
183#define get_thread_wstate() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE])
184#define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val))
185#define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP])
186#define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val))
187#define get_thread_current_ds() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS])
188#define set_thread_current_ds(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val))
189#define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH])
190#define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val))
191#define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED])
192#define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val))
193
194#endif /* !(__ASSEMBLY__) */
195
196/*
197 * Thread information flags, only 16 bits are available as we encode
198 * other values into the upper 6 bytes.
199 *
200 * On trap return we need to test several values:
201 *
202 * user: need_resched, notify_resume, sigpending, wsaved, perfctr
203 * kernel: fpdepth
204 *
205 * So to check for work in the kernel case we simply load the fpdepth
206 * byte out of the flags and test it. For the user case we encode the
207 * lower 3 bytes of flags as follows:
208 * ----------------------------------------
209 * | wsaved | flags byte 1 | flags byte 2 |
210 * ----------------------------------------
211 * This optimizes the user test into:
212 * ldx [%g6 + TI_FLAGS], REG1
213 * sethi %hi(_TIF_USER_WORK_MASK), REG2
214 * or REG2, %lo(_TIF_USER_WORK_MASK), REG2
215 * andcc REG1, REG2, %g0
216 * be,pt no_work_to_do
217 * nop
218 */
219#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
220/* flags bit 1 is available */
221#define TIF_SIGPENDING 2 /* signal pending */
222#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
223#define TIF_PERFCTR 4 /* performance counters active */
224#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
225/* flag bit 6 is available */
226#define TIF_32BIT 7 /* 32-bit binary */
227/* flag bit 8 is available */
228#define TIF_SECCOMP 9 /* secure computing */
229#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
230/* flag bit 11 is available */
231/* NOTE: Thread flags >= 12 should be ones we have no interest
232 * in using in assembly, else we can't use the mask as
233 * an immediate value in instructions such as andcc.
234 */
235#define TIF_ABI_PENDING 12
236#define TIF_MEMDIE 13
237#define TIF_POLLING_NRFLAG 14
238
239#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
240#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
241#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
242#define _TIF_PERFCTR (1<<TIF_PERFCTR)
243#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
244#define _TIF_32BIT (1<<TIF_32BIT)
245#define _TIF_SECCOMP (1<<TIF_SECCOMP)
246#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
247#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
248#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
249
250#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
251 (_TIF_SIGPENDING | \
252 _TIF_NEED_RESCHED | _TIF_PERFCTR))
253
254/*
255 * Thread-synchronous status.
256 *
257 * This is different from the flags in that nobody else
258 * ever touches our thread-synchronous status, so we don't
259 * have to worry about atomic accesses.
260 *
261 * Note that there are only 8 bits available.
262 */
263#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
264
265#ifndef __ASSEMBLY__
266#define HAVE_SET_RESTORE_SIGMASK 1
267static inline void set_restore_sigmask(void)
268{
269 struct thread_info *ti = current_thread_info();
270 ti->status |= TS_RESTORE_SIGMASK;
271 set_bit(TIF_SIGPENDING, &ti->flags);
272}
273#endif /* !__ASSEMBLY__ */
274
275#endif /* __KERNEL__ */
276
277#endif /* _ASM_THREAD_INFO_H */
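
The sparc64 variant packs several per-thread byte fields into the upper bytes of the 64-bit flags word, addressed through __thread_flag_byte_ptr(). A minimal sketch (not part of this patch; function name hypothetical, kernel context assumed) of how the accessors defined above are meant to be used:

/* Illustrative only -- not part of the patch. */
#include <asm/thread_info.h>

static void __maybe_unused fault_code_byte_demo(void)
{
	/* TI_FLAG_BYTE_FAULT_CODE indexes the most significant byte of the
	 * big-endian 64-bit flags word, i.e. bits 56-63 per
	 * TI_FLAG_FAULT_CODE_SHIFT above. */
	set_thread_fault_code(FAULT_CODE_DTLB | FAULT_CODE_WRITE);

	/* Reading goes through the same byte pointer. */
	if (get_thread_fault_code() & FAULT_CODE_WRITE)
		set_thread_fault_code(0);
}
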
diff --git a/include/asm-sparc/timer.h b/include/asm-sparc/timer.h
index 361e53898dd7..475baa05a96e 100644
--- a/include/asm-sparc/timer.h
+++ b/include/asm-sparc/timer.h
@@ -1,107 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_TIMER_H
2 * timer.h: Definitions for the timer chips on the Sparc. 2#define ___ASM_SPARC_TIMER_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4#include <asm-sparc/timer_64.h>
5 */
6
7
8#ifndef _SPARC_TIMER_H
9#define _SPARC_TIMER_H
10
11#include <asm/system.h> /* For SUN4M_NCPUS */
12#include <asm/sun4paddr.h>
13#include <asm/btfixup.h>
14
15/* Timer structures. The interrupt timer has two properties which
16 * are the counter (which is handled in do_timer in sched.c) and the limit.
17 * This limit is where the timer's counter 'wraps' around. Oddly enough,
18 * the sun4c timer when it hits the limit wraps back to 1 and not zero
19 * thus when calculating the value at which it will fire a microsecond you
20 * must adjust by one. Thanks SUN for designing such great hardware ;(
21 */
22
23/* Note that I am only going to use the timer that interrupts at
24 * Sparc IRQ 10. There is another one available that can fire at
25 * IRQ 14. Currently it is left untouched, we keep the PROM's limit
26 * register value and let the prom take these interrupts. This allows
27 * L1-A to work.
28 */
29
30struct sun4c_timer_info {
31 __volatile__ unsigned int cur_count10;
32 __volatile__ unsigned int timer_limit10;
33 __volatile__ unsigned int cur_count14;
34 __volatile__ unsigned int timer_limit14;
35};
36
37#define SUN4C_TIMER_PHYSADDR 0xf3000000
38#ifdef CONFIG_SUN4
39#define SUN_TIMER_PHYSADDR SUN4_300_TIMER_PHYSADDR
40#else 5#else
41#define SUN_TIMER_PHYSADDR SUN4C_TIMER_PHYSADDR 6#include <asm-sparc/timer_32.h>
7#endif
42#endif 8#endif
43
44/* A sun4m has two blocks of registers which are probably of the same
45 * structure. LSI Logic's L64851 is told to _decrement_ from the limit
46 * value. Aurora behaves similarly but its limit value is compacted in
47 * other fashion (it's wider). Documented fields are defined here.
48 */
49
50/* As with the interrupt register, we have two classes of timer registers
51 * which are per-cpu and master. Per-cpu timers only hit that cpu and are
52 * only level 14 ticks, master timer hits all cpus and is level 10.
53 */
54
55#define SUN4M_PRM_CNT_L 0x80000000
56#define SUN4M_PRM_CNT_LVALUE 0x7FFFFC00
57
58struct sun4m_timer_percpu_info {
59 __volatile__ unsigned int l14_timer_limit; /* Initial value is 0x009c4000 */
60 __volatile__ unsigned int l14_cur_count;
61
62 /* This register appears to be write only and/or inaccessible
63 * on Uni-Processor sun4m machines.
64 */
65 __volatile__ unsigned int l14_limit_noclear; /* Data access error is here */
66
67 __volatile__ unsigned int cntrl; /* =1 after POST on Aurora */
68 __volatile__ unsigned char space[PAGE_SIZE - 16];
69};
70
71struct sun4m_timer_regs {
72 struct sun4m_timer_percpu_info cpu_timers[SUN4M_NCPUS];
73 volatile unsigned int l10_timer_limit;
74 volatile unsigned int l10_cur_count;
75
76 /* Again, this appears to be write only and/or inaccessible
77 * on uni-processor sun4m machines.
78 */
79 volatile unsigned int l10_limit_noclear;
80
81 /* This register too, it must be magic. */
82 volatile unsigned int foobar;
83
84 volatile unsigned int cfg; /* equals zero at boot time... */
85};
86
87#define SUN4D_PRM_CNT_L 0x80000000
88#define SUN4D_PRM_CNT_LVALUE 0x7FFFFC00
89
90struct sun4d_timer_regs {
91 volatile unsigned int l10_timer_limit;
92 volatile unsigned int l10_cur_countx;
93 volatile unsigned int l10_limit_noclear;
94 volatile unsigned int ctrl;
95 volatile unsigned int l10_cur_count;
96};
97
98extern struct sun4d_timer_regs *sun4d_timers;
99
100extern __volatile__ unsigned int *master_l10_counter;
101extern __volatile__ unsigned int *master_l10_limit;
102
103/* FIXME: Make do_[gs]ettimeofday btfixup calls */
104BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
105#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
106
107#endif /* !(_SPARC_TIMER_H) */
diff --git a/include/asm-sparc/timer_32.h b/include/asm-sparc/timer_32.h
new file mode 100644
index 000000000000..361e53898dd7
--- /dev/null
+++ b/include/asm-sparc/timer_32.h
@@ -0,0 +1,107 @@
1/*
2 * timer.h: Definitions for the timer chips on the Sparc.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7
8#ifndef _SPARC_TIMER_H
9#define _SPARC_TIMER_H
10
11#include <asm/system.h> /* For SUN4M_NCPUS */
12#include <asm/sun4paddr.h>
13#include <asm/btfixup.h>
14
15/* Timer structures. The interrupt timer has two properties which
16 * are the counter (which is handled in do_timer in sched.c) and the limit.
17 * This limit is where the timer's counter 'wraps' around. Oddly enough,
18 * the sun4c timer when it hits the limit wraps back to 1 and not zero
19 * thus when calculating the value at which it will fire a microsecond you
20 * must adjust by one. Thanks SUN for designing such great hardware ;(
21 */
22
23/* Note that I am only going to use the timer that interrupts at
24 * Sparc IRQ 10. There is another one available that can fire at
25 * IRQ 14. Currently it is left untouched, we keep the PROM's limit
26 * register value and let the prom take these interrupts. This allows
27 * L1-A to work.
28 */
29
30struct sun4c_timer_info {
31 __volatile__ unsigned int cur_count10;
32 __volatile__ unsigned int timer_limit10;
33 __volatile__ unsigned int cur_count14;
34 __volatile__ unsigned int timer_limit14;
35};
36
37#define SUN4C_TIMER_PHYSADDR 0xf3000000
38#ifdef CONFIG_SUN4
39#define SUN_TIMER_PHYSADDR SUN4_300_TIMER_PHYSADDR
40#else
41#define SUN_TIMER_PHYSADDR SUN4C_TIMER_PHYSADDR
42#endif
43
44/* A sun4m has two blocks of registers which are probably of the same
45 * structure. LSI Logic's L64851 is told to _decrement_ from the limit
46 * value. Aurora behaves similarly but its limit value is compacted in
47 * other fashion (it's wider). Documented fields are defined here.
48 */
49
50/* As with the interrupt register, we have two classes of timer registers
51 * which are per-cpu and master. Per-cpu timers only hit that cpu and are
52 * only level 14 ticks, master timer hits all cpus and is level 10.
53 */
54
55#define SUN4M_PRM_CNT_L 0x80000000
56#define SUN4M_PRM_CNT_LVALUE 0x7FFFFC00
57
58struct sun4m_timer_percpu_info {
59 __volatile__ unsigned int l14_timer_limit; /* Initial value is 0x009c4000 */
60 __volatile__ unsigned int l14_cur_count;
61
62 /* This register appears to be write only and/or inaccessible
63 * on Uni-Processor sun4m machines.
64 */
65 __volatile__ unsigned int l14_limit_noclear; /* Data access error is here */
66
67 __volatile__ unsigned int cntrl; /* =1 after POST on Aurora */
68 __volatile__ unsigned char space[PAGE_SIZE - 16];
69};
70
71struct sun4m_timer_regs {
72 struct sun4m_timer_percpu_info cpu_timers[SUN4M_NCPUS];
73 volatile unsigned int l10_timer_limit;
74 volatile unsigned int l10_cur_count;
75
76 /* Again, this appears to be write only and/or inaccessible
77 * on uni-processor sun4m machines.
78 */
79 volatile unsigned int l10_limit_noclear;
80
81 /* This register too, it must be magic. */
82 volatile unsigned int foobar;
83
84 volatile unsigned int cfg; /* equals zero at boot time... */
85};
86
87#define SUN4D_PRM_CNT_L 0x80000000
88#define SUN4D_PRM_CNT_LVALUE 0x7FFFFC00
89
90struct sun4d_timer_regs {
91 volatile unsigned int l10_timer_limit;
92 volatile unsigned int l10_cur_countx;
93 volatile unsigned int l10_limit_noclear;
94 volatile unsigned int ctrl;
95 volatile unsigned int l10_cur_count;
96};
97
98extern struct sun4d_timer_regs *sun4d_timers;
99
100extern __volatile__ unsigned int *master_l10_counter;
101extern __volatile__ unsigned int *master_l10_limit;
102
103/* FIXME: Make do_[gs]ettimeofday btfixup calls */
104BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
105#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
106
107#endif /* !(_SPARC_TIMER_H) */
diff --git a/include/asm-sparc/timer_64.h b/include/asm-sparc/timer_64.h
new file mode 100644
index 000000000000..5b779fd1f788
--- /dev/null
+++ b/include/asm-sparc/timer_64.h
@@ -0,0 +1,30 @@
1/* timer.h: System timer definitions for sun5.
2 *
3 * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_TIMER_H
7#define _SPARC64_TIMER_H
8
9#include <linux/types.h>
10#include <linux/init.h>
11
12struct sparc64_tick_ops {
13 unsigned long (*get_tick)(void);
14 int (*add_compare)(unsigned long);
15 unsigned long softint_mask;
16 void (*disable_irq)(void);
17
18 void (*init_tick)(void);
19 unsigned long (*add_tick)(unsigned long);
20
21 char *name;
22};
23
24extern struct sparc64_tick_ops *tick_ops;
25
26extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
27extern void __devinit setup_sparc64_timer(void);
28extern void __init time_init(void);
29
30#endif /* _SPARC64_TIMER_H */
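
A minimal sketch (not part of this patch; function name hypothetical, kernel context assumed) of how callers consume sparc64_tick_ops: the timer code selects a backend at boot and publishes it through the tick_ops pointer declared above.

/* Illustrative only -- not part of the patch. */
#include <asm/timer.h>

static unsigned long __maybe_unused elapsed_ticks_demo(void)
{
	/* get_tick() reads the free-running counter of whichever backend
	 * the boot code installed in tick_ops. */
	unsigned long t0 = tick_ops->get_tick();
	unsigned long t1 = tick_ops->get_tick();

	return t1 - t0;	/* ticks elapsed between the two reads */
}
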
diff --git a/include/asm-sparc/timex.h b/include/asm-sparc/timex.h
index 71b45c90ccae..01d9f199d452 100644
--- a/include/asm-sparc/timex.h
+++ b/include/asm-sparc/timex.h
@@ -1,15 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_TIMEX_H
2 * linux/include/asm-sparc/timex.h 2#define ___ASM_SPARC_TIMEX_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * sparc architecture timex specifications 4#include <asm-sparc/timex_64.h>
5 */ 5#else
6#ifndef _ASMsparc_TIMEX_H 6#include <asm-sparc/timex_32.h>
7#define _ASMsparc_TIMEX_H 7#endif
8
9#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
10
11/* XXX Maybe do something better at some point... -DaveM */
12typedef unsigned long cycles_t;
13#define get_cycles() (0)
14
15#endif 8#endif
diff --git a/include/asm-sparc/timex_32.h b/include/asm-sparc/timex_32.h
new file mode 100644
index 000000000000..71b45c90ccae
--- /dev/null
+++ b/include/asm-sparc/timex_32.h
@@ -0,0 +1,15 @@
1/*
2 * linux/include/asm-sparc/timex.h
3 *
4 * sparc architecture timex specifications
5 */
6#ifndef _ASMsparc_TIMEX_H
7#define _ASMsparc_TIMEX_H
8
9#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
10
11/* XXX Maybe do something better at some point... -DaveM */
12typedef unsigned long cycles_t;
13#define get_cycles() (0)
14
15#endif
diff --git a/include/asm-sparc/timex_64.h b/include/asm-sparc/timex_64.h
new file mode 100644
index 000000000000..c622535c4560
--- /dev/null
+++ b/include/asm-sparc/timex_64.h
@@ -0,0 +1,19 @@
1/*
2 * linux/include/asm-sparc64/timex.h
3 *
4 * sparc64 architecture timex specifications
5 */
6#ifndef _ASMsparc64_TIMEX_H
7#define _ASMsparc64_TIMEX_H
8
9#include <asm/timer.h>
10
11#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
12
13/* Getting on the cycle counter on sparc64. */
14typedef unsigned long cycles_t;
15#define get_cycles() tick_ops->get_tick()
16
17#define ARCH_HAS_READ_CURRENT_TIMER
18
19#endif
diff --git a/include/asm-sparc/tlb.h b/include/asm-sparc/tlb.h
index 6d02d1ce53f3..a821057327c4 100644
--- a/include/asm-sparc/tlb.h
+++ b/include/asm-sparc/tlb.h
@@ -1,24 +1,8 @@
1#ifndef _SPARC_TLB_H 1#ifndef ___ASM_SPARC_TLB_H
2#define _SPARC_TLB_H 2#define ___ASM_SPARC_TLB_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#define tlb_start_vma(tlb, vma) \ 4#include <asm-sparc/tlb_64.h>
5do { \ 5#else
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 6#include <asm-sparc/tlb_32.h>
7} while (0) 7#endif
8 8#endif
9#define tlb_end_vma(tlb, vma) \
10do { \
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
12} while (0)
13
14#define __tlb_remove_tlb_entry(tlb, pte, address) \
15 do { } while (0)
16
17#define tlb_flush(tlb) \
18do { \
19 flush_tlb_mm((tlb)->mm); \
20} while (0)
21
22#include <asm-generic/tlb.h>
23
24#endif /* _SPARC_TLB_H */
diff --git a/include/asm-sparc/tlb_32.h b/include/asm-sparc/tlb_32.h
new file mode 100644
index 000000000000..6d02d1ce53f3
--- /dev/null
+++ b/include/asm-sparc/tlb_32.h
@@ -0,0 +1,24 @@
1#ifndef _SPARC_TLB_H
2#define _SPARC_TLB_H
3
4#define tlb_start_vma(tlb, vma) \
5do { \
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
7} while (0)
8
9#define tlb_end_vma(tlb, vma) \
10do { \
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
12} while (0)
13
14#define __tlb_remove_tlb_entry(tlb, pte, address) \
15 do { } while (0)
16
17#define tlb_flush(tlb) \
18do { \
19 flush_tlb_mm((tlb)->mm); \
20} while (0)
21
22#include <asm-generic/tlb.h>
23
24#endif /* _SPARC_TLB_H */
diff --git a/include/asm-sparc/tlb_64.h b/include/asm-sparc/tlb_64.h
new file mode 100644
index 000000000000..ec81cdedef2c
--- /dev/null
+++ b/include/asm-sparc/tlb_64.h
@@ -0,0 +1,111 @@
1#ifndef _SPARC64_TLB_H
2#define _SPARC64_TLB_H
3
4#include <linux/swap.h>
5#include <linux/pagemap.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/mmu_context.h>
9
10#define TLB_BATCH_NR 192
11
12/*
13 * For UP we don't need to worry about TLB flush
14 * and page free order so much..
15 */
16#ifdef CONFIG_SMP
17 #define FREE_PTE_NR 506
18 #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
19#else
20 #define FREE_PTE_NR 1
21 #define tlb_fast_mode(bp) 1
22#endif
23
24struct mmu_gather {
25 struct mm_struct *mm;
26 unsigned int pages_nr;
27 unsigned int need_flush;
28 unsigned int fullmm;
29 unsigned int tlb_nr;
30 unsigned long vaddrs[TLB_BATCH_NR];
31 struct page *pages[FREE_PTE_NR];
32};
33
34DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36#ifdef CONFIG_SMP
37extern void smp_flush_tlb_pending(struct mm_struct *,
38 unsigned long, unsigned long *);
39#endif
40
41extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
42extern void flush_tlb_pending(void);
43
44static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
45{
46 struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
47
48 BUG_ON(mp->tlb_nr);
49
50 mp->mm = mm;
51 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
52 mp->fullmm = full_mm_flush;
53
54 return mp;
55}
56
57
58static inline void tlb_flush_mmu(struct mmu_gather *mp)
59{
60 if (mp->need_flush) {
61 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
62 mp->pages_nr = 0;
63 mp->need_flush = 0;
64 }
65
66}
67
68#ifdef CONFIG_SMP
69extern void smp_flush_tlb_mm(struct mm_struct *mm);
70#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
71#else
72#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
73#endif
74
75static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
76{
77 tlb_flush_mmu(mp);
78
79 if (mp->fullmm)
80 mp->fullmm = 0;
81 else
82 flush_tlb_pending();
83
84 /* keep the page table cache within bounds */
85 check_pgt_cache();
86
87 put_cpu_var(mmu_gathers);
88}
89
90static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
91{
92 if (tlb_fast_mode(mp)) {
93 free_page_and_swap_cache(page);
94 return;
95 }
96 mp->need_flush = 1;
97 mp->pages[mp->pages_nr++] = page;
98 if (mp->pages_nr >= FREE_PTE_NR)
99 tlb_flush_mmu(mp);
100}
101
102#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
103#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
104#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
105#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
106
107#define tlb_migrate_finish(mm) do { } while (0)
108#define tlb_start_vma(tlb, vma) do { } while (0)
109#define tlb_end_vma(tlb, vma) do { } while (0)
110
111#endif /* _SPARC64_TLB_H */
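
A minimal sketch (not part of this patch; function name hypothetical, kernel context assumed) of the calling sequence the mmu_gather helpers above are designed for:

/* Illustrative only -- not part of the patch. */
#include <asm/tlb.h>

static void __maybe_unused tlb_batch_demo(struct mm_struct *mm,
					  struct page *page,
					  unsigned long start, unsigned long end)
{
	/* Grab this CPU's batch structure; fullmm=0 means a partial teardown. */
	struct mmu_gather *mp = tlb_gather_mmu(mm, 0);

	/* Queue pages freed while unmapping; once FREE_PTE_NR pages are
	 * queued, tlb_remove_page() calls tlb_flush_mmu() itself. */
	tlb_remove_page(mp, page);

	/* Flush whatever is still pending and release the per-cpu batch. */
	tlb_finish_mmu(mp, start, end);
}
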
diff --git a/include/asm-sparc/tlbflush.h b/include/asm-sparc/tlbflush.h
index b957e29d2ae1..6e6bc12227b8 100644
--- a/include/asm-sparc/tlbflush.h
+++ b/include/asm-sparc/tlbflush.h
@@ -1,60 +1,8 @@
1#ifndef _SPARC_TLBFLUSH_H 1#ifndef ___ASM_SPARC_TLBFLUSH_H
2#define _SPARC_TLBFLUSH_H 2#define ___ASM_SPARC_TLBFLUSH_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/mm.h> 4#include <asm-sparc/tlbflush_64.h>
5// #include <asm/processor.h> 5#else
6 6#include <asm-sparc/tlbflush_32.h>
7/* 7#endif
8 * TLB flushing: 8#endif
9 *
10 * - flush_tlb() flushes the current mm struct TLBs XXX Exists?
11 * - flush_tlb_all() flushes all processes TLBs
12 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
13 * - flush_tlb_page(vma, vmaddr) flushes one page
14 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16 */
17
18#ifdef CONFIG_SMP
19
20BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
21BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
22BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
23BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
24
25#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
26#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
27#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
28#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
29
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_mm(struct mm_struct *mm);
32extern void smp_flush_tlb_range(struct vm_area_struct *vma,
33 unsigned long start,
34 unsigned long end);
35extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
36
37#endif /* CONFIG_SMP */
38
39BTFIXUPDEF_CALL(void, flush_tlb_all, void)
40BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
41BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
42BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
43
44#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
45#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
46#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
47#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
48
49// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
50
51/*
52 * This is a kludge, until I know better. --zaitcev XXX
53 */
54static inline void flush_tlb_kernel_range(unsigned long start,
55 unsigned long end)
56{
57 flush_tlb_all();
58}
59
60#endif /* _SPARC_TLBFLUSH_H */
diff --git a/include/asm-sparc/tlbflush_32.h b/include/asm-sparc/tlbflush_32.h
new file mode 100644
index 000000000000..fe0a71abc9bb
--- /dev/null
+++ b/include/asm-sparc/tlbflush_32.h
@@ -0,0 +1,60 @@
1#ifndef _SPARC_TLBFLUSH_H
2#define _SPARC_TLBFLUSH_H
3
4#include <linux/mm.h>
5// #include <asm/processor.h>
6
7/*
8 * TLB flushing:
9 *
10 * - flush_tlb() flushes the current mm struct TLBs XXX Exists?
11 * - flush_tlb_all() flushes all processes TLBs
12 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
13 * - flush_tlb_page(vma, vmaddr) flushes one page
14 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16 */
17
18#ifdef CONFIG_SMP
19
20BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
21BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
22BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
23BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
24
25#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
26#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
27#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
28#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
29
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_mm(struct mm_struct *mm);
32extern void smp_flush_tlb_range(struct vm_area_struct *vma,
33 unsigned long start,
34 unsigned long end);
35extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
36
37#endif /* CONFIG_SMP */
38
39BTFIXUPDEF_CALL(void, flush_tlb_all, void)
40BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
41BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
42BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
43
44#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
45#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
46#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
47#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
48
49// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
50
51/*
52 * This is a kludge, until I know better. --zaitcev XXX
53 */
54static inline void flush_tlb_kernel_range(unsigned long start,
55 unsigned long end)
56{
57 flush_tlb_all();
58}
59
60#endif /* _SPARC_TLBFLUSH_H */
diff --git a/include/asm-sparc/tlbflush_64.h b/include/asm-sparc/tlbflush_64.h
new file mode 100644
index 000000000000..fbb675dbe0c9
--- /dev/null
+++ b/include/asm-sparc/tlbflush_64.h
@@ -0,0 +1,44 @@
1#ifndef _SPARC64_TLBFLUSH_H
2#define _SPARC64_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <asm/mmu_context.h>
6
7/* TSB flush operations. */
8struct mmu_gather;
9extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
10extern void flush_tsb_user(struct mmu_gather *mp);
11
12/* TLB flush operations. */
13
14extern void flush_tlb_pending(void);
15
16#define flush_tlb_range(vma,start,end) \
17 do { (void)(start); flush_tlb_pending(); } while (0)
18#define flush_tlb_page(vma,addr) flush_tlb_pending()
19#define flush_tlb_mm(mm) flush_tlb_pending()
20
21/* Local cpu only. */
22extern void __flush_tlb_all(void);
23
24extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
25
26#ifndef CONFIG_SMP
27
28#define flush_tlb_kernel_range(start,end) \
29do { flush_tsb_kernel_range(start,end); \
30 __flush_tlb_kernel_range(start,end); \
31} while (0)
32
33#else /* CONFIG_SMP */
34
35extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
36
37#define flush_tlb_kernel_range(start, end) \
38do { flush_tsb_kernel_range(start,end); \
39 smp_flush_tlb_kernel_range(start, end); \
40} while (0)
41
42#endif /* ! CONFIG_SMP */
43
44#endif /* _SPARC64_TLBFLUSH_H */
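
A minimal sketch (not part of this patch; function name hypothetical, kernel context assumed) showing the one call site pattern for the kernel-range flush defined above:

/* Illustrative only -- not part of the patch. */
#include <asm/tlbflush.h>

static void __maybe_unused flush_after_kernel_remap(unsigned long start,
						    unsigned long end)
{
	/* Both the SMP and UP expansions above first purge the TSB for the
	 * range, then shoot down the corresponding TLB entries. */
	flush_tlb_kernel_range(start, end);
}
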
diff --git a/include/asm-sparc/topology.h b/include/asm-sparc/topology.h
index ee5ac9c9da28..ed13630f32e2 100644
--- a/include/asm-sparc/topology.h
+++ b/include/asm-sparc/topology.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_SPARC_TOPOLOGY_H 1#ifndef ___ASM_SPARC_TOPOLOGY_H
2#define _ASM_SPARC_TOPOLOGY_H 2#define ___ASM_SPARC_TOPOLOGY_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-generic/topology.h> 4#include <asm-sparc/topology_64.h>
5 5#else
6#endif /* _ASM_SPARC_TOPOLOGY_H */ 6#include <asm-sparc/topology_32.h>
7#endif
8#endif
diff --git a/include/asm-sparc/topology_32.h b/include/asm-sparc/topology_32.h
new file mode 100644
index 000000000000..ee5ac9c9da28
--- /dev/null
+++ b/include/asm-sparc/topology_32.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_SPARC_TOPOLOGY_H
2#define _ASM_SPARC_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* _ASM_SPARC_TOPOLOGY_H */
diff --git a/include/asm-sparc/topology_64.h b/include/asm-sparc/topology_64.h
new file mode 100644
index 000000000000..001c04027c82
--- /dev/null
+++ b/include/asm-sparc/topology_64.h
@@ -0,0 +1,86 @@
1#ifndef _ASM_SPARC64_TOPOLOGY_H
2#define _ASM_SPARC64_TOPOLOGY_H
3
4#ifdef CONFIG_NUMA
5
6#include <asm/mmzone.h>
7
8static inline int cpu_to_node(int cpu)
9{
10 return numa_cpu_lookup_table[cpu];
11}
12
13#define parent_node(node) (node)
14
15static inline cpumask_t node_to_cpumask(int node)
16{
17 return numa_cpumask_lookup_table[node];
18}
19
20/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
21#define node_to_cpumask_ptr(v, node) \
22 cpumask_t *v = &(numa_cpumask_lookup_table[node])
23
24#define node_to_cpumask_ptr_next(v, node) \
25 v = &(numa_cpumask_lookup_table[node])
26
27static inline int node_to_first_cpu(int node)
28{
29 cpumask_t tmp;
30 tmp = node_to_cpumask(node);
31 return first_cpu(tmp);
32}
33
34struct pci_bus;
35#ifdef CONFIG_PCI
36extern int pcibus_to_node(struct pci_bus *pbus);
37#else
38static inline int pcibus_to_node(struct pci_bus *pbus)
39{
40 return -1;
41}
42#endif
43
44#define pcibus_to_cpumask(bus) \
45 (pcibus_to_node(bus) == -1 ? \
46 CPU_MASK_ALL : \
47 node_to_cpumask(pcibus_to_node(bus)))
48
49#define SD_NODE_INIT (struct sched_domain) { \
50 .min_interval = 8, \
51 .max_interval = 32, \
52 .busy_factor = 32, \
53 .imbalance_pct = 125, \
54 .cache_nice_tries = 2, \
55 .busy_idx = 3, \
56 .idle_idx = 2, \
57 .newidle_idx = 0, \
58 .wake_idx = 1, \
59 .forkexec_idx = 1, \
60 .flags = SD_LOAD_BALANCE \
61 | SD_BALANCE_FORK \
62 | SD_BALANCE_EXEC \
63 | SD_SERIALIZE \
64 | SD_WAKE_BALANCE, \
65 .last_balance = jiffies, \
66 .balance_interval = 1, \
67}
68
69#else /* CONFIG_NUMA */
70
71#include <asm-generic/topology.h>
72
73#endif /* !(CONFIG_NUMA) */
74
75#ifdef CONFIG_SMP
76#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
77#define topology_core_id(cpu) (cpu_data(cpu).core_id)
78#define topology_core_siblings(cpu) (cpu_core_map[cpu])
79#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
80#define mc_capable() (sparc64_multi_core)
81#define smt_capable() (sparc64_multi_core)
82#endif /* CONFIG_SMP */
83
84#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
85
86#endif /* _ASM_SPARC64_TOPOLOGY_H */
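
A minimal sketch (not part of this patch; function name hypothetical, assumes CONFIG_NUMA on sparc64) of how the node/cpumask helpers above are typically consumed:

/* Illustrative only -- not part of the patch. */
#include <linux/cpumask.h>
#include <asm/topology.h>

static int __maybe_unused count_cpus_on_node(int node)
{
	/* node_to_cpumask() returns the lookup-table mask by value,
	 * as defined above. */
	cpumask_t mask = node_to_cpumask(node);

	return cpus_weight(mask);	/* number of CPUs attached to this node */
}
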
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index 47d5619d43fa..424facce5238 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -1,336 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_UACCESS_H
 2 * uaccess.h: User space memory access functions. 2#define ___ASM_SPARC_UACCESS_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 4#include <asm-sparc/uaccess_64.h>
5 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 5#else
6 */ 6#include <asm-sparc/uaccess_32.h>
7#ifndef _ASM_UACCESS_H 7#endif
8#define _ASM_UACCESS_H
9
10#ifdef __KERNEL__
11#include <linux/compiler.h>
12#include <linux/sched.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <asm/vac-ops.h>
16#endif 8#endif
17
18#ifndef __ASSEMBLY__
19
20/* Sparc is not segmented, however we need to be able to fool access_ok()
21 * when doing system calls from kernel mode legitimately.
22 *
23 * "For historical reasons, these macros are grossly misnamed." -Linus
24 */
25
26#define KERNEL_DS ((mm_segment_t) { 0 })
27#define USER_DS ((mm_segment_t) { -1 })
28
29#define VERIFY_READ 0
30#define VERIFY_WRITE 1
31
32#define get_ds() (KERNEL_DS)
33#define get_fs() (current->thread.current_ds)
34#define set_fs(val) ((current->thread.current_ds) = (val))
35
36#define segment_eq(a,b) ((a).seg == (b).seg)
37
38/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
39 * can be fairly lightweight.
40 * No one can read/write anything from userland in the kernel space by setting
41 * large size and address near to PAGE_OFFSET - a fault will break his intentions.
42 */
43#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
44#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
45#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
46#define access_ok(type, addr, size) \
47 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
48
49/*
50 * The exception table consists of pairs of addresses: the first is the
51 * address of an instruction that is allowed to fault, and the second is
52 * the address at which the program should continue. No registers are
53 * modified, so it is entirely up to the continuation code to figure out
54 * what to do.
55 *
56 * All the routines below use bits of fixup code that are out of line
57 * with the main instruction path. This means when everything is well,
58 * we don't even have to jump over them. Further, they do not intrude
59 * on our cache or tlb entries.
60 *
 61 * There is a special way to mark a range of potentially faulting
 62 * insns (like twenty ldd/std's with no intervening other instructions).
 63 * You specify the address of the first insn and 0 in fixup, and in the next
64 * exception_table_entry you specify last potentially faulting insn + 1
65 * and in fixup the routine which should handle the fault.
66 * That fixup code will get
67 * (faulting_insn_address - first_insn_in_the_range_address)/4
68 * in %g2 (ie. index of the faulting instruction in the range).
69 */
70
71struct exception_table_entry
72{
73 unsigned long insn, fixup;
74};
75
76/* Returns 0 if exception not found and fixup otherwise. */
77extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
78
79extern void __ret_efault(void);
80
81/* Uh, these should become the main single-value transfer routines..
82 * They automatically use the right size if we just have the right
83 * pointer type..
84 *
85 * This gets kind of ugly. We want to return _two_ values in "get_user()"
86 * and yet we don't want to do any pointers, because that is too much
87 * of a performance impact. Thus we have a few rather ugly macros here,
88 * and hide all the ugliness from the user.
89 */
90#define put_user(x,ptr) ({ \
91unsigned long __pu_addr = (unsigned long)(ptr); \
92__chk_user_ptr(ptr); \
93__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
94
95#define get_user(x,ptr) ({ \
96unsigned long __gu_addr = (unsigned long)(ptr); \
97__chk_user_ptr(ptr); \
98__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
99
100/*
101 * The "__xxx" versions do not do address space checking, useful when
102 * doing multiple accesses to the same area (the user has to do the
103 * checks by hand with "access_ok()")
104 */
105#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
106#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
107
108struct __large_struct { unsigned long buf[100]; };
109#define __m(x) ((struct __large_struct __user *)(x))
110
111#define __put_user_check(x,addr,size) ({ \
112register int __pu_ret; \
113if (__access_ok(addr,size)) { \
114switch (size) { \
115case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
116case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
117case 4: __put_user_asm(x,,addr,__pu_ret); break; \
118case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
119default: __pu_ret = __put_user_bad(); break; \
120} } else { __pu_ret = -EFAULT; } __pu_ret; })
121
122#define __put_user_nocheck(x,addr,size) ({ \
123register int __pu_ret; \
124switch (size) { \
125case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
126case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
127case 4: __put_user_asm(x,,addr,__pu_ret); break; \
128case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
129default: __pu_ret = __put_user_bad(); break; \
130} __pu_ret; })
131
132#define __put_user_asm(x,size,addr,ret) \
133__asm__ __volatile__( \
134 "/* Put user asm, inline. */\n" \
135"1:\t" "st"#size " %1, %2\n\t" \
136 "clr %0\n" \
137"2:\n\n\t" \
138 ".section .fixup,#alloc,#execinstr\n\t" \
139 ".align 4\n" \
140"3:\n\t" \
141 "b 2b\n\t" \
142 " mov %3, %0\n\t" \
143 ".previous\n\n\t" \
144 ".section __ex_table,#alloc\n\t" \
145 ".align 4\n\t" \
146 ".word 1b, 3b\n\t" \
147 ".previous\n\n\t" \
148 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
149 "i" (-EFAULT))
150
151extern int __put_user_bad(void);
152
153#define __get_user_check(x,addr,size,type) ({ \
154register int __gu_ret; \
155register unsigned long __gu_val; \
156if (__access_ok(addr,size)) { \
157switch (size) { \
158case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
159case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
160case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
161case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
162default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
163} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
164
165#define __get_user_check_ret(x,addr,size,type,retval) ({ \
166register unsigned long __gu_val __asm__ ("l1"); \
167if (__access_ok(addr,size)) { \
168switch (size) { \
169case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
170case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
171case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
172case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
173default: if (__get_user_bad()) return retval; \
174} x = (type) __gu_val; } else return retval; })
175
176#define __get_user_nocheck(x,addr,size,type) ({ \
177register int __gu_ret; \
178register unsigned long __gu_val; \
179switch (size) { \
180case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
181case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
182case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
183case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
184default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
185} x = (type) __gu_val; __gu_ret; })
186
187#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
188register unsigned long __gu_val __asm__ ("l1"); \
189switch (size) { \
190case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
191case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
192case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
193case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
194default: if (__get_user_bad()) return retval; \
195} x = (type) __gu_val; })
196
197#define __get_user_asm(x,size,addr,ret) \
198__asm__ __volatile__( \
199 "/* Get user asm, inline. */\n" \
200"1:\t" "ld"#size " %2, %1\n\t" \
201 "clr %0\n" \
202"2:\n\n\t" \
203 ".section .fixup,#alloc,#execinstr\n\t" \
204 ".align 4\n" \
205"3:\n\t" \
206 "clr %1\n\t" \
207 "b 2b\n\t" \
208 " mov %3, %0\n\n\t" \
209 ".previous\n\t" \
210 ".section __ex_table,#alloc\n\t" \
211 ".align 4\n\t" \
212 ".word 1b, 3b\n\n\t" \
213 ".previous\n\t" \
214 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
215 "i" (-EFAULT))
216
217#define __get_user_asm_ret(x,size,addr,retval) \
218if (__builtin_constant_p(retval) && retval == -EFAULT) \
219__asm__ __volatile__( \
220 "/* Get user asm ret, inline. */\n" \
221"1:\t" "ld"#size " %1, %0\n\n\t" \
222 ".section __ex_table,#alloc\n\t" \
223 ".align 4\n\t" \
224 ".word 1b,__ret_efault\n\n\t" \
225 ".previous\n\t" \
226 : "=&r" (x) : "m" (*__m(addr))); \
227else \
228__asm__ __volatile__( \
229 "/* Get user asm ret, inline. */\n" \
230"1:\t" "ld"#size " %1, %0\n\n\t" \
231 ".section .fixup,#alloc,#execinstr\n\t" \
232 ".align 4\n" \
233"3:\n\t" \
234 "ret\n\t" \
235 " restore %%g0, %2, %%o0\n\n\t" \
236 ".previous\n\t" \
237 ".section __ex_table,#alloc\n\t" \
238 ".align 4\n\t" \
239 ".word 1b, 3b\n\n\t" \
240 ".previous\n\t" \
241 : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
242
243extern int __get_user_bad(void);
244
245extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
246
247static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
248{
249 if (n && __access_ok((unsigned long) to, n))
250 return __copy_user(to, (__force void __user *) from, n);
251 else
252 return n;
253}
254
255static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
256{
257 return __copy_user(to, (__force void __user *) from, n);
258}
259
260static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
261{
262 if (n && __access_ok((unsigned long) from, n))
263 return __copy_user((__force void __user *) to, from, n);
264 else
265 return n;
266}
267
268static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
269{
270 return __copy_user((__force void __user *) to, from, n);
271}
272
273#define __copy_to_user_inatomic __copy_to_user
274#define __copy_from_user_inatomic __copy_from_user
275
276static inline unsigned long __clear_user(void __user *addr, unsigned long size)
277{
278 unsigned long ret;
279
280 __asm__ __volatile__ (
281 ".section __ex_table,#alloc\n\t"
282 ".align 4\n\t"
283 ".word 1f,3\n\t"
284 ".previous\n\t"
285 "mov %2, %%o1\n"
286 "1:\n\t"
287 "call __bzero\n\t"
288 " mov %1, %%o0\n\t"
289 "mov %%o0, %0\n"
290 : "=r" (ret) : "r" (addr), "r" (size) :
291 "o0", "o1", "o2", "o3", "o4", "o5", "o7",
292 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
293
294 return ret;
295}
296
297static inline unsigned long clear_user(void __user *addr, unsigned long n)
298{
299 if (n && __access_ok((unsigned long) addr, n))
300 return __clear_user(addr, n);
301 else
302 return n;
303}
304
305extern long __strncpy_from_user(char *dest, const char __user *src, long count);
306
307static inline long strncpy_from_user(char *dest, const char __user *src, long count)
308{
309 if (__access_ok((unsigned long) src, count))
310 return __strncpy_from_user(dest, src, count);
311 else
312 return -EFAULT;
313}
314
315extern long __strlen_user(const char __user *);
316extern long __strnlen_user(const char __user *, long len);
317
318static inline long strlen_user(const char __user *str)
319{
320 if (!access_ok(VERIFY_READ, str, 0))
321 return 0;
322 else
323 return __strlen_user(str);
324}
325
326static inline long strnlen_user(const char __user *str, long len)
327{
328 if (!access_ok(VERIFY_READ, str, 0))
329 return 0;
330 else
331 return __strnlen_user(str, len);
332}
333
334#endif /* __ASSEMBLY__ */
335
336#endif /* _ASM_UACCESS_H */
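
A minimal sketch (not part of this patch; function name hypothetical, kernel context assumed) of the intended use of the accessors above: get_user()/put_user() expand to the checked variants, validate the address with __access_ok(), and return 0 on success or -EFAULT on failure.

/* Illustrative only -- not part of the patch. */
#include <linux/errno.h>
#include <asm/uaccess.h>

static int __maybe_unused read_user_word(int __user *uptr, int *out)
{
	int val;

	/* get_user() expands to __get_user_check(): address check plus the
	 * size-dispatched __get_user_asm() load, returning 0 or -EFAULT. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;

	/* put_user() follows the same pattern in the store direction. */
	return put_user(val + 1, uptr);
}
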
diff --git a/include/asm-sparc/uaccess_32.h b/include/asm-sparc/uaccess_32.h
new file mode 100644
index 000000000000..47d5619d43fa
--- /dev/null
+++ b/include/asm-sparc/uaccess_32.h
@@ -0,0 +1,336 @@
1/*
 2 * uaccess.h: User space memory access functions.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7#ifndef _ASM_UACCESS_H
8#define _ASM_UACCESS_H
9
10#ifdef __KERNEL__
11#include <linux/compiler.h>
12#include <linux/sched.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <asm/vac-ops.h>
16#endif
17
18#ifndef __ASSEMBLY__
19
20/* Sparc is not segmented, however we need to be able to fool access_ok()
21 * when doing system calls from kernel mode legitimately.
22 *
23 * "For historical reasons, these macros are grossly misnamed." -Linus
24 */
25
26#define KERNEL_DS ((mm_segment_t) { 0 })
27#define USER_DS ((mm_segment_t) { -1 })
28
29#define VERIFY_READ 0
30#define VERIFY_WRITE 1
31
32#define get_ds() (KERNEL_DS)
33#define get_fs() (current->thread.current_ds)
34#define set_fs(val) ((current->thread.current_ds) = (val))
35
36#define segment_eq(a,b) ((a).seg == (b).seg)
37
38/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
39 * can be fairly lightweight.
40 * No one can read/write anything from userland in the kernel space by setting
41 * large size and address near to PAGE_OFFSET - a fault will break his intentions.
42 */
43#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
44#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
45#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
46#define access_ok(type, addr, size) \
47 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
48
49/*
50 * The exception table consists of pairs of addresses: the first is the
51 * address of an instruction that is allowed to fault, and the second is
52 * the address at which the program should continue. No registers are
53 * modified, so it is entirely up to the continuation code to figure out
54 * what to do.
55 *
56 * All the routines below use bits of fixup code that are out of line
57 * with the main instruction path. This means when everything is well,
58 * we don't even have to jump over them. Further, they do not intrude
59 * on our cache or tlb entries.
60 *
 61 * There is a special way to mark a range of potentially faulting
 62 * insns (like twenty ldd/std's with no intervening other instructions).
 63 * You specify the address of the first insn and 0 in fixup, and in the next
64 * exception_table_entry you specify last potentially faulting insn + 1
65 * and in fixup the routine which should handle the fault.
66 * That fixup code will get
67 * (faulting_insn_address - first_insn_in_the_range_address)/4
68 * in %g2 (ie. index of the faulting instruction in the range).
69 */
70
71struct exception_table_entry
72{
73 unsigned long insn, fixup;
74};
75
76/* Returns 0 if exception not found and fixup otherwise. */
77extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
78
79extern void __ret_efault(void);
80
81/* Uh, these should become the main single-value transfer routines..
82 * They automatically use the right size if we just have the right
83 * pointer type..
84 *
85 * This gets kind of ugly. We want to return _two_ values in "get_user()"
86 * and yet we don't want to do any pointers, because that is too much
87 * of a performance impact. Thus we have a few rather ugly macros here,
88 * and hide all the ugliness from the user.
89 */
90#define put_user(x,ptr) ({ \
91unsigned long __pu_addr = (unsigned long)(ptr); \
92__chk_user_ptr(ptr); \
93__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
94
95#define get_user(x,ptr) ({ \
96unsigned long __gu_addr = (unsigned long)(ptr); \
97__chk_user_ptr(ptr); \
98__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
99
100/*
101 * The "__xxx" versions do not do address space checking, useful when
102 * doing multiple accesses to the same area (the user has to do the
103 * checks by hand with "access_ok()")
104 */
105#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
106#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
107
108struct __large_struct { unsigned long buf[100]; };
109#define __m(x) ((struct __large_struct __user *)(x))
110
111#define __put_user_check(x,addr,size) ({ \
112register int __pu_ret; \
113if (__access_ok(addr,size)) { \
114switch (size) { \
115case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
116case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
117case 4: __put_user_asm(x,,addr,__pu_ret); break; \
118case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
119default: __pu_ret = __put_user_bad(); break; \
120} } else { __pu_ret = -EFAULT; } __pu_ret; })
121
122#define __put_user_nocheck(x,addr,size) ({ \
123register int __pu_ret; \
124switch (size) { \
125case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
126case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
127case 4: __put_user_asm(x,,addr,__pu_ret); break; \
128case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
129default: __pu_ret = __put_user_bad(); break; \
130} __pu_ret; })
131
132#define __put_user_asm(x,size,addr,ret) \
133__asm__ __volatile__( \
134 "/* Put user asm, inline. */\n" \
135"1:\t" "st"#size " %1, %2\n\t" \
136 "clr %0\n" \
137"2:\n\n\t" \
138 ".section .fixup,#alloc,#execinstr\n\t" \
139 ".align 4\n" \
140"3:\n\t" \
141 "b 2b\n\t" \
142 " mov %3, %0\n\t" \
143 ".previous\n\n\t" \
144 ".section __ex_table,#alloc\n\t" \
145 ".align 4\n\t" \
146 ".word 1b, 3b\n\t" \
147 ".previous\n\n\t" \
148 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
149 "i" (-EFAULT))
150
151extern int __put_user_bad(void);
152
153#define __get_user_check(x,addr,size,type) ({ \
154register int __gu_ret; \
155register unsigned long __gu_val; \
156if (__access_ok(addr,size)) { \
157switch (size) { \
158case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
159case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
160case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
161case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
162default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
163} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
164
165#define __get_user_check_ret(x,addr,size,type,retval) ({ \
166register unsigned long __gu_val __asm__ ("l1"); \
167if (__access_ok(addr,size)) { \
168switch (size) { \
169case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
170case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
171case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
172case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
173default: if (__get_user_bad()) return retval; \
174} x = (type) __gu_val; } else return retval; })
175
176#define __get_user_nocheck(x,addr,size,type) ({ \
177register int __gu_ret; \
178register unsigned long __gu_val; \
179switch (size) { \
180case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
181case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
182case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
183case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
184default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
185} x = (type) __gu_val; __gu_ret; })
186
187#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
188register unsigned long __gu_val __asm__ ("l1"); \
189switch (size) { \
190case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
191case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
192case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
193case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
194default: if (__get_user_bad()) return retval; \
195} x = (type) __gu_val; })
196
197#define __get_user_asm(x,size,addr,ret) \
198__asm__ __volatile__( \
199 "/* Get user asm, inline. */\n" \
200"1:\t" "ld"#size " %2, %1\n\t" \
201 "clr %0\n" \
202"2:\n\n\t" \
203 ".section .fixup,#alloc,#execinstr\n\t" \
204 ".align 4\n" \
205"3:\n\t" \
206 "clr %1\n\t" \
207 "b 2b\n\t" \
208 " mov %3, %0\n\n\t" \
209 ".previous\n\t" \
210 ".section __ex_table,#alloc\n\t" \
211 ".align 4\n\t" \
212 ".word 1b, 3b\n\n\t" \
213 ".previous\n\t" \
214 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
215 "i" (-EFAULT))
216
217#define __get_user_asm_ret(x,size,addr,retval) \
218if (__builtin_constant_p(retval) && retval == -EFAULT) \
219__asm__ __volatile__( \
220 "/* Get user asm ret, inline. */\n" \
221"1:\t" "ld"#size " %1, %0\n\n\t" \
222 ".section __ex_table,#alloc\n\t" \
223 ".align 4\n\t" \
224 ".word 1b,__ret_efault\n\n\t" \
225 ".previous\n\t" \
226 : "=&r" (x) : "m" (*__m(addr))); \
227else \
228__asm__ __volatile__( \
229 "/* Get user asm ret, inline. */\n" \
230"1:\t" "ld"#size " %1, %0\n\n\t" \
231 ".section .fixup,#alloc,#execinstr\n\t" \
232 ".align 4\n" \
233"3:\n\t" \
234 "ret\n\t" \
235 " restore %%g0, %2, %%o0\n\n\t" \
236 ".previous\n\t" \
237 ".section __ex_table,#alloc\n\t" \
238 ".align 4\n\t" \
239 ".word 1b, 3b\n\n\t" \
240 ".previous\n\t" \
241 : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
242
243extern int __get_user_bad(void);
244
245extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
246
247static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
248{
249 if (n && __access_ok((unsigned long) to, n))
250 return __copy_user(to, (__force void __user *) from, n);
251 else
252 return n;
253}
254
255static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
256{
257 return __copy_user(to, (__force void __user *) from, n);
258}
259
260static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
261{
262 if (n && __access_ok((unsigned long) from, n))
263 return __copy_user((__force void __user *) to, from, n);
264 else
265 return n;
266}
267
268static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
269{
270 return __copy_user((__force void __user *) to, from, n);
271}
272
273#define __copy_to_user_inatomic __copy_to_user
274#define __copy_from_user_inatomic __copy_from_user
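A hedged usage sketch (example_read_config() is hypothetical): the checked copy helpers above return the number of bytes left uncopied, with the full length returned when the access check fails, so zero means the whole transfer succeeded.

/* Illustrative caller of copy_from_user() only. */
static int example_read_config(void *kbuf, const void __user *ubuf,
                               unsigned long len)
{
        if (copy_from_user(kbuf, ubuf, len))
                return -EFAULT;         /* some bytes were not copied */
        return 0;
}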
275
276static inline unsigned long __clear_user(void __user *addr, unsigned long size)
277{
278 unsigned long ret;
279
280 __asm__ __volatile__ (
281 ".section __ex_table,#alloc\n\t"
282 ".align 4\n\t"
283 ".word 1f,3\n\t"
284 ".previous\n\t"
285 "mov %2, %%o1\n"
286 "1:\n\t"
287 "call __bzero\n\t"
288 " mov %1, %%o0\n\t"
289 "mov %%o0, %0\n"
290 : "=r" (ret) : "r" (addr), "r" (size) :
291 "o0", "o1", "o2", "o3", "o4", "o5", "o7",
292 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
293
294 return ret;
295}
296
297static inline unsigned long clear_user(void __user *addr, unsigned long n)
298{
299 if (n && __access_ok((unsigned long) addr, n))
300 return __clear_user(addr, n);
301 else
302 return n;
303}
304
305extern long __strncpy_from_user(char *dest, const char __user *src, long count);
306
307static inline long strncpy_from_user(char *dest, const char __user *src, long count)
308{
309 if (__access_ok((unsigned long) src, count))
310 return __strncpy_from_user(dest, src, count);
311 else
312 return -EFAULT;
313}
314
315extern long __strlen_user(const char __user *);
316extern long __strnlen_user(const char __user *, long len);
317
318static inline long strlen_user(const char __user *str)
319{
320 if (!access_ok(VERIFY_READ, str, 0))
321 return 0;
322 else
323 return __strlen_user(str);
324}
325
326static inline long strnlen_user(const char __user *str, long len)
327{
328 if (!access_ok(VERIFY_READ, str, 0))
329 return 0;
330 else
331 return __strnlen_user(str, len);
332}
333
334#endif /* __ASSEMBLY__ */
335
336#endif /* _ASM_UACCESS_H */
diff --git a/include/asm-sparc/uaccess_64.h b/include/asm-sparc/uaccess_64.h
new file mode 100644
index 000000000000..296ef30e05c8
--- /dev/null
+++ b/include/asm-sparc/uaccess_64.h
@@ -0,0 +1,273 @@
1#ifndef _ASM_UACCESS_H
2#define _ASM_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
7
8#ifdef __KERNEL__
9#include <linux/compiler.h>
10#include <linux/sched.h>
11#include <linux/string.h>
12#include <asm/asi.h>
13#include <asm/system.h>
14#include <asm/spitfire.h>
15#include <asm-generic/uaccess.h>
16#endif
17
18#ifndef __ASSEMBLY__
19
20/*
21 * Sparc64 is segmented, though more like the M68K than the I386.
22 * We use the secondary ASI to address user memory, which references a
23 * completely different VM map, thus there is zero chance of the user
24 * doing something queer and tricking us into poking kernel memory.
25 *
26 * What is left here is basically what is needed for the other parts of
27 * the kernel that expect to be able to manipulate, erm, "segments".
28 * Or perhaps more properly, permissions.
29 *
30 * "For historical reasons, these macros are grossly misnamed." -Linus
31 */
32
33#define KERNEL_DS ((mm_segment_t) { ASI_P })
34#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */
35
36#define VERIFY_READ 0
37#define VERIFY_WRITE 1
38
39#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
40#define get_ds() (KERNEL_DS)
41
42#define segment_eq(a,b) ((a).seg == (b).seg)
43
44#define set_fs(val) \
45do { \
46 set_thread_current_ds((val).seg); \
47 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
48} while(0)
49
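As a hedged illustration of how these segment macros are typically used (example_with_kernel_ds() and its callback are hypothetical, not part of this header): save the current segment, widen it to KERNEL_DS so a kernel pointer passes the user-access machinery, then restore it.

/* Sketch of the classic save/override/restore idiom. */
static long example_with_kernel_ds(long (*op)(void __user *), void *kbuf)
{
        mm_segment_t old_fs = get_fs();
        long ret;

        set_fs(KERNEL_DS);      /* user accessors now use ASI_P via %asi */
        ret = op((__force void __user *) kbuf);
        set_fs(old_fs);         /* always restore the previous segment */
        return ret;
}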
50static inline int __access_ok(const void __user * addr, unsigned long size)
51{
52 return 1;
53}
54
55static inline int access_ok(int type, const void __user * addr, unsigned long size)
56{
57 return 1;
58}
59
60/*
61 * The exception table consists of pairs of addresses: the first is the
62 * address of an instruction that is allowed to fault, and the second is
63 * the address at which the program should continue. No registers are
64 * modified, so it is entirely up to the continuation code to figure out
65 * what to do.
66 *
67 * All the routines below use bits of fixup code that are out of line
68 * with the main instruction path. This means when everything is well,
69 * we don't even have to jump over them. Further, they do not intrude
70 * on our cache or tlb entries.
71 */
72
73struct exception_table_entry {
74 unsigned int insn, fixup;
75};
76
77extern void __ret_efault(void);
78extern void __retl_efault(void);
79
80/* Uh, these should become the main single-value transfer routines..
81 * They automatically use the right size if we just have the right
82 * pointer type..
83 *
84 * This gets kind of ugly. We want to return _two_ values in "get_user()"
85 * and yet we don't want to do any pointers, because that is too much
86 * of a performance impact. Thus we have a few rather ugly macros here,
87 * and hide all the ugliness from the user.
88 */
89#define put_user(x,ptr) ({ \
90unsigned long __pu_addr = (unsigned long)(ptr); \
91__chk_user_ptr(ptr); \
92__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
93
94#define get_user(x,ptr) ({ \
95unsigned long __gu_addr = (unsigned long)(ptr); \
96__chk_user_ptr(ptr); \
97__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
98
99#define __put_user(x,ptr) put_user(x,ptr)
100#define __get_user(x,ptr) get_user(x,ptr)
101
102struct __large_struct { unsigned long buf[100]; };
103#define __m(x) ((struct __large_struct *)(x))
104
105#define __put_user_nocheck(data,addr,size) ({ \
106register int __pu_ret; \
107switch (size) { \
108case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
109case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
110case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
111case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
112default: __pu_ret = __put_user_bad(); break; \
113} __pu_ret; })
114
115#define __put_user_asm(x,size,addr,ret) \
116__asm__ __volatile__( \
117 "/* Put user asm, inline. */\n" \
118"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
119 "clr %0\n" \
120"2:\n\n\t" \
121 ".section .fixup,#alloc,#execinstr\n\t" \
122 ".align 4\n" \
123"3:\n\t" \
124 "sethi %%hi(2b), %0\n\t" \
125 "jmpl %0 + %%lo(2b), %%g0\n\t" \
126 " mov %3, %0\n\n\t" \
127 ".previous\n\t" \
128 ".section __ex_table,\"a\"\n\t" \
129 ".align 4\n\t" \
130 ".word 1b, 3b\n\t" \
131 ".previous\n\n\t" \
132 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
133 "i" (-EFAULT))
134
135extern int __put_user_bad(void);
136
137#define __get_user_nocheck(data,addr,size,type) ({ \
138register int __gu_ret; \
139register unsigned long __gu_val; \
140switch (size) { \
141case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
142case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
143case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
144case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
145default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
146} data = (type) __gu_val; __gu_ret; })
147
148#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
149register unsigned long __gu_val __asm__ ("l1"); \
150switch (size) { \
151case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
152case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
153case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
154case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
155default: if (__get_user_bad()) return retval; \
156} data = (type) __gu_val; })
157
158#define __get_user_asm(x,size,addr,ret) \
159__asm__ __volatile__( \
160 "/* Get user asm, inline. */\n" \
161"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
162 "clr %0\n" \
163"2:\n\n\t" \
164 ".section .fixup,#alloc,#execinstr\n\t" \
165 ".align 4\n" \
166"3:\n\t" \
167 "sethi %%hi(2b), %0\n\t" \
168 "clr %1\n\t" \
169 "jmpl %0 + %%lo(2b), %%g0\n\t" \
170 " mov %3, %0\n\n\t" \
171 ".previous\n\t" \
172 ".section __ex_table,\"a\"\n\t" \
173 ".align 4\n\t" \
174 ".word 1b, 3b\n\n\t" \
175 ".previous\n\t" \
176 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
177 "i" (-EFAULT))
178
179#define __get_user_asm_ret(x,size,addr,retval) \
180if (__builtin_constant_p(retval) && retval == -EFAULT) \
181__asm__ __volatile__( \
182 "/* Get user asm ret, inline. */\n" \
183"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
184 ".section __ex_table,\"a\"\n\t" \
185 ".align 4\n\t" \
186 ".word 1b,__ret_efault\n\n\t" \
187 ".previous\n\t" \
188 : "=r" (x) : "r" (__m(addr))); \
189else \
190__asm__ __volatile__( \
191 "/* Get user asm ret, inline. */\n" \
192"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
193 ".section .fixup,#alloc,#execinstr\n\t" \
194 ".align 4\n" \
195"3:\n\t" \
196 "ret\n\t" \
197 " restore %%g0, %2, %%o0\n\n\t" \
198 ".previous\n\t" \
199 ".section __ex_table,\"a\"\n\t" \
200 ".align 4\n\t" \
201 ".word 1b, 3b\n\n\t" \
202 ".previous\n\t" \
203 : "=r" (x) : "r" (__m(addr)), "i" (retval))
204
205extern int __get_user_bad(void);
206
207extern unsigned long __must_check ___copy_from_user(void *to,
208 const void __user *from,
209 unsigned long size);
210extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
211 unsigned long size);
212static inline unsigned long __must_check
213copy_from_user(void *to, const void __user *from, unsigned long size)
214{
215 unsigned long ret = ___copy_from_user(to, from, size);
216
217 if (unlikely(ret))
218 ret = copy_from_user_fixup(to, from, size);
219 return ret;
220}
221#define __copy_from_user copy_from_user
222
223extern unsigned long __must_check ___copy_to_user(void __user *to,
224 const void *from,
225 unsigned long size);
226extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
227 unsigned long size);
228static inline unsigned long __must_check
229copy_to_user(void __user *to, const void *from, unsigned long size)
230{
231 unsigned long ret = ___copy_to_user(to, from, size);
232
233 if (unlikely(ret))
234 ret = copy_to_user_fixup(to, from, size);
235 return ret;
236}
237#define __copy_to_user copy_to_user
238
239extern unsigned long __must_check ___copy_in_user(void __user *to,
240 const void __user *from,
241 unsigned long size);
242extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
243 unsigned long size);
244static inline unsigned long __must_check
245copy_in_user(void __user *to, void __user *from, unsigned long size)
246{
247 unsigned long ret = ___copy_in_user(to, from, size);
248
249 if (unlikely(ret))
250 ret = copy_in_user_fixup(to, from, size);
251 return ret;
252}
253#define __copy_in_user copy_in_user
254
255extern unsigned long __must_check __clear_user(void __user *, unsigned long);
256
257#define clear_user __clear_user
258
259extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);
260
261#define strncpy_from_user __strncpy_from_user
262
263extern long __strlen_user(const char __user *);
264extern long __strnlen_user(const char __user *, long len);
265
266#define strlen_user __strlen_user
267#define strnlen_user __strnlen_user
268#define __copy_to_user_inatomic __copy_to_user
269#define __copy_from_user_inatomic __copy_from_user
270
271#endif /* __ASSEMBLY__ */
272
273#endif /* _ASM_UACCESS_H */
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 2338a0276377..3c2609618a09 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -1,378 +1,8 @@
1#ifndef _SPARC_UNISTD_H 1#ifndef ___ASM_SPARC_UNISTD_H
2#define _SPARC_UNISTD_H 2#define ___ASM_SPARC_UNISTD_H
3 3#if defined(__sparc__) && defined(__arch64__)
4/* 4#include <asm-sparc/unistd_64.h>
5 * System calls under the Sparc. 5#else
6 * 6#include <asm-sparc/unistd_32.h>
7 * Don't be scared by the ugly clobbers, it is the only way I can 7#endif
8 * think of right now to force the arguments into fixed registers 8#endif
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49#define __NR_lchown32 31 /* Linux sparc32 specific */
50#define __NR_fchown32 32 /* Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53#define __NR_chown32 35 /* Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62#define __NR_getuid32 44 /* Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70/* #define __NR_memory_ordering 52 Linux sparc64 specific */
71#define __NR_getgid32 53 /* Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74#define __NR_mmap2 56 /* Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87#define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */
88#define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90#define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95#define __NR_truncate64 77 /* Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100#define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102#define __NR_ftruncate64 84 /* Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105#define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107#define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109#define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112#define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */
130#define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133#define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173#define __NR_fcntl64 155 /* Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182/* #define __NR_utrap_install 164 Linux sparc64 specific */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#define __NR_time 231 /* Linux Specific */
250#define __NR_splice 232 /* Linux Specific */
251#define __NR_stime 233 /* Linux Specific */
252#define __NR_statfs64 234 /* Linux Specific */
253#define __NR_fstatfs64 235 /* Linux Specific */
254#define __NR__llseek 236 /* Linux Specific */
255#define __NR_mlock 237
256#define __NR_munlock 238
257#define __NR_mlockall 239
258#define __NR_munlockall 240
259#define __NR_sched_setparam 241
260#define __NR_sched_getparam 242
261#define __NR_sched_setscheduler 243
262#define __NR_sched_getscheduler 244
263#define __NR_sched_yield 245
264#define __NR_sched_get_priority_max 246
265#define __NR_sched_get_priority_min 247
266#define __NR_sched_rr_get_interval 248
267#define __NR_nanosleep 249
268#define __NR_mremap 250
269#define __NR__sysctl 251
270#define __NR_getsid 252
271#define __NR_fdatasync 253
272#define __NR_nfsservctl 254
273#define __NR_sync_file_range 255
274#define __NR_clock_settime 256
275#define __NR_clock_gettime 257
276#define __NR_clock_getres 258
277#define __NR_clock_nanosleep 259
278#define __NR_sched_getaffinity 260
279#define __NR_sched_setaffinity 261
280#define __NR_timer_settime 262
281#define __NR_timer_gettime 263
282#define __NR_timer_getoverrun 264
283#define __NR_timer_delete 265
284#define __NR_timer_create 266
285/* #define __NR_vserver 267 Reserved for VSERVER */
286#define __NR_io_setup 268
287#define __NR_io_destroy 269
288#define __NR_io_submit 270
289#define __NR_io_cancel 271
290#define __NR_io_getevents 272
291#define __NR_mq_open 273
292#define __NR_mq_unlink 274
293#define __NR_mq_timedsend 275
294#define __NR_mq_timedreceive 276
295#define __NR_mq_notify 277
296#define __NR_mq_getsetattr 278
297#define __NR_waitid 279
298#define __NR_tee 280
299#define __NR_add_key 281
300#define __NR_request_key 282
301#define __NR_keyctl 283
302#define __NR_openat 284
303#define __NR_mkdirat 285
304#define __NR_mknodat 286
305#define __NR_fchownat 287
306#define __NR_futimesat 288
307#define __NR_fstatat64 289
308#define __NR_unlinkat 290
309#define __NR_renameat 291
310#define __NR_linkat 292
311#define __NR_symlinkat 293
312#define __NR_readlinkat 294
313#define __NR_fchmodat 295
314#define __NR_faccessat 296
315#define __NR_pselect6 297
316#define __NR_ppoll 298
317#define __NR_unshare 299
318#define __NR_set_robust_list 300
319#define __NR_get_robust_list 301
320#define __NR_migrate_pages 302
321#define __NR_mbind 303
322#define __NR_get_mempolicy 304
323#define __NR_set_mempolicy 305
324#define __NR_kexec_load 306
325#define __NR_move_pages 307
326#define __NR_getcpu 308
327#define __NR_epoll_pwait 309
328#define __NR_utimensat 310
329#define __NR_signalfd 311
330#define __NR_timerfd_create 312
331#define __NR_eventfd 313
332#define __NR_fallocate 314
333#define __NR_timerfd_settime 315
334#define __NR_timerfd_gettime 316
335
336#define NR_SYSCALLS 317
337
338/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
339 * it never had the plain ones and there is no value to adding those
340 * old versions into the syscall table.
341 */
342#define __IGNORE_setresuid
343#define __IGNORE_getresuid
344#define __IGNORE_setresgid
345#define __IGNORE_getresgid
346
347#ifdef __KERNEL__
348#define __ARCH_WANT_IPC_PARSE_VERSION
349#define __ARCH_WANT_OLD_READDIR
350#define __ARCH_WANT_STAT64
351#define __ARCH_WANT_SYS_ALARM
352#define __ARCH_WANT_SYS_GETHOSTNAME
353#define __ARCH_WANT_SYS_PAUSE
354#define __ARCH_WANT_SYS_SGETMASK
355#define __ARCH_WANT_SYS_SIGNAL
356#define __ARCH_WANT_SYS_TIME
357#define __ARCH_WANT_SYS_UTIME
358#define __ARCH_WANT_SYS_WAITPID
359#define __ARCH_WANT_SYS_SOCKETCALL
360#define __ARCH_WANT_SYS_FADVISE64
361#define __ARCH_WANT_SYS_GETPGRP
362#define __ARCH_WANT_SYS_LLSEEK
363#define __ARCH_WANT_SYS_NICE
364#define __ARCH_WANT_SYS_OLDUMOUNT
365#define __ARCH_WANT_SYS_SIGPENDING
366#define __ARCH_WANT_SYS_SIGPROCMASK
367#define __ARCH_WANT_SYS_RT_SIGSUSPEND
368
369/*
370 * "Conditional" syscalls
371 *
372 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
373 * but it doesn't work on all toolchains, so we just do it by hand
374 */
375#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
376
377#endif /* __KERNEL__ */
378#endif /* _SPARC_UNISTD_H */
diff --git a/include/asm-sparc/unistd_32.h b/include/asm-sparc/unistd_32.h
new file mode 100644
index 000000000000..2338a0276377
--- /dev/null
+++ b/include/asm-sparc/unistd_32.h
@@ -0,0 +1,378 @@
1#ifndef _SPARC_UNISTD_H
2#define _SPARC_UNISTD_H
3
4/*
5 * System calls under the Sparc.
6 *
7 * Don't be scared by the ugly clobbers, it is the only way I can
8 * think of right now to force the arguments into fixed registers
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
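For illustration only (a hypothetical userspace snippet, not part of this header): these __NR_* numbers are what ultimately reach the kernel, though portable code normally goes through the libc wrappers or syscall(2) rather than hand-written trap asm.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>        /* SYS_* aliases of the __NR_* numbers */

int main(void)
{
        /* Same effect as getpid(), but issued by syscall number. */
        long pid = syscall(SYS_getpid);

        printf("pid via syscall(): %ld\n", pid);
        return 0;
}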
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49#define __NR_lchown32 31 /* Linux sparc32 specific */
50#define __NR_fchown32 32 /* Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53#define __NR_chown32 35 /* Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62#define __NR_getuid32 44 /* Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70/* #define __NR_memory_ordering 52 Linux sparc64 specific */
71#define __NR_getgid32 53 /* Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74#define __NR_mmap2 56 /* Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87#define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */
88#define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90#define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95#define __NR_truncate64 77 /* Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100#define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102#define __NR_ftruncate64 84 /* Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105#define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107#define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109#define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112#define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */
130#define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133#define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173#define __NR_fcntl64 155 /* Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182/* #define __NR_utrap_install 164 Linux sparc64 specific */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#define __NR_time 231 /* Linux Specific */
250#define __NR_splice 232 /* Linux Specific */
251#define __NR_stime 233 /* Linux Specific */
252#define __NR_statfs64 234 /* Linux Specific */
253#define __NR_fstatfs64 235 /* Linux Specific */
254#define __NR__llseek 236 /* Linux Specific */
255#define __NR_mlock 237
256#define __NR_munlock 238
257#define __NR_mlockall 239
258#define __NR_munlockall 240
259#define __NR_sched_setparam 241
260#define __NR_sched_getparam 242
261#define __NR_sched_setscheduler 243
262#define __NR_sched_getscheduler 244
263#define __NR_sched_yield 245
264#define __NR_sched_get_priority_max 246
265#define __NR_sched_get_priority_min 247
266#define __NR_sched_rr_get_interval 248
267#define __NR_nanosleep 249
268#define __NR_mremap 250
269#define __NR__sysctl 251
270#define __NR_getsid 252
271#define __NR_fdatasync 253
272#define __NR_nfsservctl 254
273#define __NR_sync_file_range 255
274#define __NR_clock_settime 256
275#define __NR_clock_gettime 257
276#define __NR_clock_getres 258
277#define __NR_clock_nanosleep 259
278#define __NR_sched_getaffinity 260
279#define __NR_sched_setaffinity 261
280#define __NR_timer_settime 262
281#define __NR_timer_gettime 263
282#define __NR_timer_getoverrun 264
283#define __NR_timer_delete 265
284#define __NR_timer_create 266
285/* #define __NR_vserver 267 Reserved for VSERVER */
286#define __NR_io_setup 268
287#define __NR_io_destroy 269
288#define __NR_io_submit 270
289#define __NR_io_cancel 271
290#define __NR_io_getevents 272
291#define __NR_mq_open 273
292#define __NR_mq_unlink 274
293#define __NR_mq_timedsend 275
294#define __NR_mq_timedreceive 276
295#define __NR_mq_notify 277
296#define __NR_mq_getsetattr 278
297#define __NR_waitid 279
298#define __NR_tee 280
299#define __NR_add_key 281
300#define __NR_request_key 282
301#define __NR_keyctl 283
302#define __NR_openat 284
303#define __NR_mkdirat 285
304#define __NR_mknodat 286
305#define __NR_fchownat 287
306#define __NR_futimesat 288
307#define __NR_fstatat64 289
308#define __NR_unlinkat 290
309#define __NR_renameat 291
310#define __NR_linkat 292
311#define __NR_symlinkat 293
312#define __NR_readlinkat 294
313#define __NR_fchmodat 295
314#define __NR_faccessat 296
315#define __NR_pselect6 297
316#define __NR_ppoll 298
317#define __NR_unshare 299
318#define __NR_set_robust_list 300
319#define __NR_get_robust_list 301
320#define __NR_migrate_pages 302
321#define __NR_mbind 303
322#define __NR_get_mempolicy 304
323#define __NR_set_mempolicy 305
324#define __NR_kexec_load 306
325#define __NR_move_pages 307
326#define __NR_getcpu 308
327#define __NR_epoll_pwait 309
328#define __NR_utimensat 310
329#define __NR_signalfd 311
330#define __NR_timerfd_create 312
331#define __NR_eventfd 313
332#define __NR_fallocate 314
333#define __NR_timerfd_settime 315
334#define __NR_timerfd_gettime 316
335
336#define NR_SYSCALLS 317
337
338/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
339 * it never had the plain ones and there is no value to adding those
340 * old versions into the syscall table.
341 */
342#define __IGNORE_setresuid
343#define __IGNORE_getresuid
344#define __IGNORE_setresgid
345#define __IGNORE_getresgid
346
347#ifdef __KERNEL__
348#define __ARCH_WANT_IPC_PARSE_VERSION
349#define __ARCH_WANT_OLD_READDIR
350#define __ARCH_WANT_STAT64
351#define __ARCH_WANT_SYS_ALARM
352#define __ARCH_WANT_SYS_GETHOSTNAME
353#define __ARCH_WANT_SYS_PAUSE
354#define __ARCH_WANT_SYS_SGETMASK
355#define __ARCH_WANT_SYS_SIGNAL
356#define __ARCH_WANT_SYS_TIME
357#define __ARCH_WANT_SYS_UTIME
358#define __ARCH_WANT_SYS_WAITPID
359#define __ARCH_WANT_SYS_SOCKETCALL
360#define __ARCH_WANT_SYS_FADVISE64
361#define __ARCH_WANT_SYS_GETPGRP
362#define __ARCH_WANT_SYS_LLSEEK
363#define __ARCH_WANT_SYS_NICE
364#define __ARCH_WANT_SYS_OLDUMOUNT
365#define __ARCH_WANT_SYS_SIGPENDING
366#define __ARCH_WANT_SYS_SIGPROCMASK
367#define __ARCH_WANT_SYS_RT_SIGSUSPEND
368
369/*
370 * "Conditional" syscalls
371 *
372 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
373 * but it doesn't work on all toolchains, so we just do it by hand
374 */
375#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
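A hedged usage note: cond_syscall() is used from a C file so that a syscall which may be compiled out still resolves, via the weak alias, to sys_ni_syscall; a hypothetical example:

/* Illustrative only: if no real sys_pciconfig_read() is linked in,
 * syscall-table references fall back to sys_ni_syscall. */
cond_syscall(sys_pciconfig_read);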
376
377#endif /* __KERNEL__ */
378#endif /* _SPARC_UNISTD_H */
diff --git a/include/asm-sparc/unistd_64.h b/include/asm-sparc/unistd_64.h
new file mode 100644
index 000000000000..13be4453a1f0
--- /dev/null
+++ b/include/asm-sparc/unistd_64.h
@@ -0,0 +1,373 @@
1#ifndef _SPARC64_UNISTD_H
2#define _SPARC64_UNISTD_H
3
4/*
5 * System calls under the Sparc.
6 *
7 * Don't be scared by the ugly clobbers, it is the only way I can
8 * think of right now to force the arguments into fixed registers
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49/* #define __NR_lchown32 31 Linux sparc32 specific */
50/* #define __NR_fchown32 32 Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53/* #define __NR_chown32 35 Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62/* #define __NR_getuid32 44 Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70#define __NR_memory_ordering 52 /* Linux Specific */
71/* #define __NR_getgid32 53 Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74/* #define __NR_mmap2 56 Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87/* #define __NR_geteuid32 69 Linux sparc32, sbrk under SunOS */
88/* #define __NR_getegid32 70 Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90/* #define __NR_setreuid32 72 Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95/* #define __NR_truncate64 77 Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100/* #define __NR_setgroups32 82 Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102/* #define __NR_ftruncate64 84 Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105/* #define __NR_setuid32 87 Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107/* #define __NR_setgid32 89 Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109/* #define __NR_setfsuid32 91 Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112/* #define __NR_setfsgid32 94 Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */
130/* #define __NR_setregid32 75 Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133/* #define __NR_getgroups32 115 Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173/* #define __NR_fcntl64 155 Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182#define __NR_utrap_install 164 /* SYSV ABI/v9 required */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#ifdef __KERNEL__
250#define __NR_time 231 /* Linux sparc32 */
251#endif
252#define __NR_splice 232 /* Linux Specific */
253#define __NR_stime 233 /* Linux Specific */
254#define __NR_statfs64 234 /* Linux Specific */
255#define __NR_fstatfs64 235 /* Linux Specific */
256#define __NR__llseek 236 /* Linux Specific */
257#define __NR_mlock 237
258#define __NR_munlock 238
259#define __NR_mlockall 239
260#define __NR_munlockall 240
261#define __NR_sched_setparam 241
262#define __NR_sched_getparam 242
263#define __NR_sched_setscheduler 243
264#define __NR_sched_getscheduler 244
265#define __NR_sched_yield 245
266#define __NR_sched_get_priority_max 246
267#define __NR_sched_get_priority_min 247
268#define __NR_sched_rr_get_interval 248
269#define __NR_nanosleep 249
270#define __NR_mremap 250
271#define __NR__sysctl 251
272#define __NR_getsid 252
273#define __NR_fdatasync 253
274#define __NR_nfsservctl 254
275#define __NR_sync_file_range 255
276#define __NR_clock_settime 256
277#define __NR_clock_gettime 257
278#define __NR_clock_getres 258
279#define __NR_clock_nanosleep 259
280#define __NR_sched_getaffinity 260
281#define __NR_sched_setaffinity 261
282#define __NR_timer_settime 262
283#define __NR_timer_gettime 263
284#define __NR_timer_getoverrun 264
285#define __NR_timer_delete 265
286#define __NR_timer_create 266
287/* #define __NR_vserver 267 Reserved for VSERVER */
288#define __NR_io_setup 268
289#define __NR_io_destroy 269
290#define __NR_io_submit 270
291#define __NR_io_cancel 271
292#define __NR_io_getevents 272
293#define __NR_mq_open 273
294#define __NR_mq_unlink 274
295#define __NR_mq_timedsend 275
296#define __NR_mq_timedreceive 276
297#define __NR_mq_notify 277
298#define __NR_mq_getsetattr 278
299#define __NR_waitid 279
300#define __NR_tee 280
301#define __NR_add_key 281
302#define __NR_request_key 282
303#define __NR_keyctl 283
304#define __NR_openat 284
305#define __NR_mkdirat 285
306#define __NR_mknodat 286
307#define __NR_fchownat 287
308#define __NR_futimesat 288
309#define __NR_fstatat64 289
310#define __NR_unlinkat 290
311#define __NR_renameat 291
312#define __NR_linkat 292
313#define __NR_symlinkat 293
314#define __NR_readlinkat 294
315#define __NR_fchmodat 295
316#define __NR_faccessat 296
317#define __NR_pselect6 297
318#define __NR_ppoll 298
319#define __NR_unshare 299
320#define __NR_set_robust_list 300
321#define __NR_get_robust_list 301
322#define __NR_migrate_pages 302
323#define __NR_mbind 303
324#define __NR_get_mempolicy 304
325#define __NR_set_mempolicy 305
326#define __NR_kexec_load 306
327#define __NR_move_pages 307
328#define __NR_getcpu 308
329#define __NR_epoll_pwait 309
330#define __NR_utimensat 310
331#define __NR_signalfd 311
332#define __NR_timerfd_create 312
333#define __NR_eventfd 313
334#define __NR_fallocate 314
335#define __NR_timerfd_settime 315
336#define __NR_timerfd_gettime 316
337
338#define NR_SYSCALLS 317
339
340#ifdef __KERNEL__
341#define __ARCH_WANT_IPC_PARSE_VERSION
342#define __ARCH_WANT_OLD_READDIR
343#define __ARCH_WANT_STAT64
344#define __ARCH_WANT_SYS_ALARM
345#define __ARCH_WANT_SYS_GETHOSTNAME
346#define __ARCH_WANT_SYS_PAUSE
347#define __ARCH_WANT_SYS_SGETMASK
348#define __ARCH_WANT_SYS_SIGNAL
349#define __ARCH_WANT_SYS_TIME
350#define __ARCH_WANT_COMPAT_SYS_TIME
351#define __ARCH_WANT_SYS_UTIME
352#define __ARCH_WANT_SYS_WAITPID
353#define __ARCH_WANT_SYS_SOCKETCALL
354#define __ARCH_WANT_SYS_FADVISE64
355#define __ARCH_WANT_SYS_GETPGRP
356#define __ARCH_WANT_SYS_LLSEEK
357#define __ARCH_WANT_SYS_NICE
358#define __ARCH_WANT_SYS_OLDUMOUNT
359#define __ARCH_WANT_SYS_SIGPENDING
360#define __ARCH_WANT_SYS_SIGPROCMASK
361#define __ARCH_WANT_SYS_RT_SIGSUSPEND
362#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
363
364/*
365 * "Conditional" syscalls
366 *
367 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
368 * but it doesn't work on all toolchains, so we just do it by hand
369 */
370#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
371
372#endif /* __KERNEL__ */
373#endif /* _SPARC64_UNISTD_H */
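
For reference, a minimal user-space sketch (illustrative only, not part of the patch) of how these numbers are consumed: glibc's syscall(2) wrapper takes the raw __NR_ value, here __NR_gettid (143) from the table above.

#include <unistd.h>
#include <sys/syscall.h>        /* pulls in the __NR_ numbers via asm/unistd.h */

int main(void)
{
        /* Invoke syscall 143 (__NR_gettid) directly; returns the thread id. */
        long tid = syscall(__NR_gettid);

        return tid > 0 ? 0 : 1;
}
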
diff --git a/include/asm-sparc/xor.h b/include/asm-sparc/xor.h
index f34b2cfa8206..35089a838c3f 100644
--- a/include/asm-sparc/xor.h
+++ b/include/asm-sparc/xor.h
@@ -1,269 +1,8 @@
1#ifndef ___ASM_SPARC_XOR_H
2#define ___ASM_SPARC_XOR_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/xor_64.h>
5#else
6#include <asm-sparc/xor_32.h>
7#endif
8#endif
1/*
2 * include/asm-sparc/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for 32-bit Sparc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16/*
17 * High speed xor_block operation for RAID4/5 utilizing the
18 * ldd/std SPARC instructions.
19 *
20 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
21 */
22
23static void
24sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
25{
26 int lines = bytes / (sizeof (long)) / 8;
27
28 do {
29 __asm__ __volatile__(
30 "ldd [%0 + 0x00], %%g2\n\t"
31 "ldd [%0 + 0x08], %%g4\n\t"
32 "ldd [%0 + 0x10], %%o0\n\t"
33 "ldd [%0 + 0x18], %%o2\n\t"
34 "ldd [%1 + 0x00], %%o4\n\t"
35 "ldd [%1 + 0x08], %%l0\n\t"
36 "ldd [%1 + 0x10], %%l2\n\t"
37 "ldd [%1 + 0x18], %%l4\n\t"
38 "xor %%g2, %%o4, %%g2\n\t"
39 "xor %%g3, %%o5, %%g3\n\t"
40 "xor %%g4, %%l0, %%g4\n\t"
41 "xor %%g5, %%l1, %%g5\n\t"
42 "xor %%o0, %%l2, %%o0\n\t"
43 "xor %%o1, %%l3, %%o1\n\t"
44 "xor %%o2, %%l4, %%o2\n\t"
45 "xor %%o3, %%l5, %%o3\n\t"
46 "std %%g2, [%0 + 0x00]\n\t"
47 "std %%g4, [%0 + 0x08]\n\t"
48 "std %%o0, [%0 + 0x10]\n\t"
49 "std %%o2, [%0 + 0x18]\n"
50 :
51 : "r" (p1), "r" (p2)
52 : "g2", "g3", "g4", "g5",
53 "o0", "o1", "o2", "o3", "o4", "o5",
54 "l0", "l1", "l2", "l3", "l4", "l5");
55 p1 += 8;
56 p2 += 8;
57 } while (--lines > 0);
58}
59
60static void
61sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
62 unsigned long *p3)
63{
64 int lines = bytes / (sizeof (long)) / 8;
65
66 do {
67 __asm__ __volatile__(
68 "ldd [%0 + 0x00], %%g2\n\t"
69 "ldd [%0 + 0x08], %%g4\n\t"
70 "ldd [%0 + 0x10], %%o0\n\t"
71 "ldd [%0 + 0x18], %%o2\n\t"
72 "ldd [%1 + 0x00], %%o4\n\t"
73 "ldd [%1 + 0x08], %%l0\n\t"
74 "ldd [%1 + 0x10], %%l2\n\t"
75 "ldd [%1 + 0x18], %%l4\n\t"
76 "xor %%g2, %%o4, %%g2\n\t"
77 "xor %%g3, %%o5, %%g3\n\t"
78 "ldd [%2 + 0x00], %%o4\n\t"
79 "xor %%g4, %%l0, %%g4\n\t"
80 "xor %%g5, %%l1, %%g5\n\t"
81 "ldd [%2 + 0x08], %%l0\n\t"
82 "xor %%o0, %%l2, %%o0\n\t"
83 "xor %%o1, %%l3, %%o1\n\t"
84 "ldd [%2 + 0x10], %%l2\n\t"
85 "xor %%o2, %%l4, %%o2\n\t"
86 "xor %%o3, %%l5, %%o3\n\t"
87 "ldd [%2 + 0x18], %%l4\n\t"
88 "xor %%g2, %%o4, %%g2\n\t"
89 "xor %%g3, %%o5, %%g3\n\t"
90 "xor %%g4, %%l0, %%g4\n\t"
91 "xor %%g5, %%l1, %%g5\n\t"
92 "xor %%o0, %%l2, %%o0\n\t"
93 "xor %%o1, %%l3, %%o1\n\t"
94 "xor %%o2, %%l4, %%o2\n\t"
95 "xor %%o3, %%l5, %%o3\n\t"
96 "std %%g2, [%0 + 0x00]\n\t"
97 "std %%g4, [%0 + 0x08]\n\t"
98 "std %%o0, [%0 + 0x10]\n\t"
99 "std %%o2, [%0 + 0x18]\n"
100 :
101 : "r" (p1), "r" (p2), "r" (p3)
102 : "g2", "g3", "g4", "g5",
103 "o0", "o1", "o2", "o3", "o4", "o5",
104 "l0", "l1", "l2", "l3", "l4", "l5");
105 p1 += 8;
106 p2 += 8;
107 p3 += 8;
108 } while (--lines > 0);
109}
110
111static void
112sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
113 unsigned long *p3, unsigned long *p4)
114{
115 int lines = bytes / (sizeof (long)) / 8;
116
117 do {
118 __asm__ __volatile__(
119 "ldd [%0 + 0x00], %%g2\n\t"
120 "ldd [%0 + 0x08], %%g4\n\t"
121 "ldd [%0 + 0x10], %%o0\n\t"
122 "ldd [%0 + 0x18], %%o2\n\t"
123 "ldd [%1 + 0x00], %%o4\n\t"
124 "ldd [%1 + 0x08], %%l0\n\t"
125 "ldd [%1 + 0x10], %%l2\n\t"
126 "ldd [%1 + 0x18], %%l4\n\t"
127 "xor %%g2, %%o4, %%g2\n\t"
128 "xor %%g3, %%o5, %%g3\n\t"
129 "ldd [%2 + 0x00], %%o4\n\t"
130 "xor %%g4, %%l0, %%g4\n\t"
131 "xor %%g5, %%l1, %%g5\n\t"
132 "ldd [%2 + 0x08], %%l0\n\t"
133 "xor %%o0, %%l2, %%o0\n\t"
134 "xor %%o1, %%l3, %%o1\n\t"
135 "ldd [%2 + 0x10], %%l2\n\t"
136 "xor %%o2, %%l4, %%o2\n\t"
137 "xor %%o3, %%l5, %%o3\n\t"
138 "ldd [%2 + 0x18], %%l4\n\t"
139 "xor %%g2, %%o4, %%g2\n\t"
140 "xor %%g3, %%o5, %%g3\n\t"
141 "ldd [%3 + 0x00], %%o4\n\t"
142 "xor %%g4, %%l0, %%g4\n\t"
143 "xor %%g5, %%l1, %%g5\n\t"
144 "ldd [%3 + 0x08], %%l0\n\t"
145 "xor %%o0, %%l2, %%o0\n\t"
146 "xor %%o1, %%l3, %%o1\n\t"
147 "ldd [%3 + 0x10], %%l2\n\t"
148 "xor %%o2, %%l4, %%o2\n\t"
149 "xor %%o3, %%l5, %%o3\n\t"
150 "ldd [%3 + 0x18], %%l4\n\t"
151 "xor %%g2, %%o4, %%g2\n\t"
152 "xor %%g3, %%o5, %%g3\n\t"
153 "xor %%g4, %%l0, %%g4\n\t"
154 "xor %%g5, %%l1, %%g5\n\t"
155 "xor %%o0, %%l2, %%o0\n\t"
156 "xor %%o1, %%l3, %%o1\n\t"
157 "xor %%o2, %%l4, %%o2\n\t"
158 "xor %%o3, %%l5, %%o3\n\t"
159 "std %%g2, [%0 + 0x00]\n\t"
160 "std %%g4, [%0 + 0x08]\n\t"
161 "std %%o0, [%0 + 0x10]\n\t"
162 "std %%o2, [%0 + 0x18]\n"
163 :
164 : "r" (p1), "r" (p2), "r" (p3), "r" (p4)
165 : "g2", "g3", "g4", "g5",
166 "o0", "o1", "o2", "o3", "o4", "o5",
167 "l0", "l1", "l2", "l3", "l4", "l5");
168 p1 += 8;
169 p2 += 8;
170 p3 += 8;
171 p4 += 8;
172 } while (--lines > 0);
173}
174
175static void
176sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
177 unsigned long *p3, unsigned long *p4, unsigned long *p5)
178{
179 int lines = bytes / (sizeof (long)) / 8;
180
181 do {
182 __asm__ __volatile__(
183 "ldd [%0 + 0x00], %%g2\n\t"
184 "ldd [%0 + 0x08], %%g4\n\t"
185 "ldd [%0 + 0x10], %%o0\n\t"
186 "ldd [%0 + 0x18], %%o2\n\t"
187 "ldd [%1 + 0x00], %%o4\n\t"
188 "ldd [%1 + 0x08], %%l0\n\t"
189 "ldd [%1 + 0x10], %%l2\n\t"
190 "ldd [%1 + 0x18], %%l4\n\t"
191 "xor %%g2, %%o4, %%g2\n\t"
192 "xor %%g3, %%o5, %%g3\n\t"
193 "ldd [%2 + 0x00], %%o4\n\t"
194 "xor %%g4, %%l0, %%g4\n\t"
195 "xor %%g5, %%l1, %%g5\n\t"
196 "ldd [%2 + 0x08], %%l0\n\t"
197 "xor %%o0, %%l2, %%o0\n\t"
198 "xor %%o1, %%l3, %%o1\n\t"
199 "ldd [%2 + 0x10], %%l2\n\t"
200 "xor %%o2, %%l4, %%o2\n\t"
201 "xor %%o3, %%l5, %%o3\n\t"
202 "ldd [%2 + 0x18], %%l4\n\t"
203 "xor %%g2, %%o4, %%g2\n\t"
204 "xor %%g3, %%o5, %%g3\n\t"
205 "ldd [%3 + 0x00], %%o4\n\t"
206 "xor %%g4, %%l0, %%g4\n\t"
207 "xor %%g5, %%l1, %%g5\n\t"
208 "ldd [%3 + 0x08], %%l0\n\t"
209 "xor %%o0, %%l2, %%o0\n\t"
210 "xor %%o1, %%l3, %%o1\n\t"
211 "ldd [%3 + 0x10], %%l2\n\t"
212 "xor %%o2, %%l4, %%o2\n\t"
213 "xor %%o3, %%l5, %%o3\n\t"
214 "ldd [%3 + 0x18], %%l4\n\t"
215 "xor %%g2, %%o4, %%g2\n\t"
216 "xor %%g3, %%o5, %%g3\n\t"
217 "ldd [%4 + 0x00], %%o4\n\t"
218 "xor %%g4, %%l0, %%g4\n\t"
219 "xor %%g5, %%l1, %%g5\n\t"
220 "ldd [%4 + 0x08], %%l0\n\t"
221 "xor %%o0, %%l2, %%o0\n\t"
222 "xor %%o1, %%l3, %%o1\n\t"
223 "ldd [%4 + 0x10], %%l2\n\t"
224 "xor %%o2, %%l4, %%o2\n\t"
225 "xor %%o3, %%l5, %%o3\n\t"
226 "ldd [%4 + 0x18], %%l4\n\t"
227 "xor %%g2, %%o4, %%g2\n\t"
228 "xor %%g3, %%o5, %%g3\n\t"
229 "xor %%g4, %%l0, %%g4\n\t"
230 "xor %%g5, %%l1, %%g5\n\t"
231 "xor %%o0, %%l2, %%o0\n\t"
232 "xor %%o1, %%l3, %%o1\n\t"
233 "xor %%o2, %%l4, %%o2\n\t"
234 "xor %%o3, %%l5, %%o3\n\t"
235 "std %%g2, [%0 + 0x00]\n\t"
236 "std %%g4, [%0 + 0x08]\n\t"
237 "std %%o0, [%0 + 0x10]\n\t"
238 "std %%o2, [%0 + 0x18]\n"
239 :
240 : "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
241 : "g2", "g3", "g4", "g5",
242 "o0", "o1", "o2", "o3", "o4", "o5",
243 "l0", "l1", "l2", "l3", "l4", "l5");
244 p1 += 8;
245 p2 += 8;
246 p3 += 8;
247 p4 += 8;
248 p5 += 8;
249 } while (--lines > 0);
250}
251
252static struct xor_block_template xor_block_SPARC = {
253 .name = "SPARC",
254 .do_2 = sparc_2,
255 .do_3 = sparc_3,
256 .do_4 = sparc_4,
257 .do_5 = sparc_5,
258};
259
260/* For grins, also test the generic routines. */
261#include <asm-generic/xor.h>
262
263#undef XOR_TRY_TEMPLATES
264#define XOR_TRY_TEMPLATES \
265 do { \
266 xor_speed(&xor_block_8regs); \
267 xor_speed(&xor_block_32regs); \
268 xor_speed(&xor_block_SPARC); \
269 } while (0)
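
As an aside, a portable C sketch (illustrative only, the function name is invented) of what sparc_2() above computes with its ldd/xor/std sequences: 32 bytes of p2 are XORed into p1 per loop iteration.

static void xor_2_portable(unsigned long bytes, unsigned long *p1,
                           const unsigned long *p2)
{
        long lines = bytes / (sizeof(long) * 8);        /* 8 longs per iteration */

        do {
                int i;

                for (i = 0; i < 8; i++)
                        p1[i] ^= p2[i];                 /* same effect as the ldd/xor/std */
                p1 += 8;
                p2 += 8;
        } while (--lines > 0);
}
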
diff --git a/include/asm-sparc/xor_32.h b/include/asm-sparc/xor_32.h
new file mode 100644
index 000000000000..f34b2cfa8206
--- /dev/null
+++ b/include/asm-sparc/xor_32.h
@@ -0,0 +1,269 @@
1/*
2 * include/asm-sparc/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for 32-bit Sparc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16/*
17 * High speed xor_block operation for RAID4/5 utilizing the
18 * ldd/std SPARC instructions.
19 *
20 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
21 */
22
23static void
24sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
25{
26 int lines = bytes / (sizeof (long)) / 8;
27
28 do {
29 __asm__ __volatile__(
30 "ldd [%0 + 0x00], %%g2\n\t"
31 "ldd [%0 + 0x08], %%g4\n\t"
32 "ldd [%0 + 0x10], %%o0\n\t"
33 "ldd [%0 + 0x18], %%o2\n\t"
34 "ldd [%1 + 0x00], %%o4\n\t"
35 "ldd [%1 + 0x08], %%l0\n\t"
36 "ldd [%1 + 0x10], %%l2\n\t"
37 "ldd [%1 + 0x18], %%l4\n\t"
38 "xor %%g2, %%o4, %%g2\n\t"
39 "xor %%g3, %%o5, %%g3\n\t"
40 "xor %%g4, %%l0, %%g4\n\t"
41 "xor %%g5, %%l1, %%g5\n\t"
42 "xor %%o0, %%l2, %%o0\n\t"
43 "xor %%o1, %%l3, %%o1\n\t"
44 "xor %%o2, %%l4, %%o2\n\t"
45 "xor %%o3, %%l5, %%o3\n\t"
46 "std %%g2, [%0 + 0x00]\n\t"
47 "std %%g4, [%0 + 0x08]\n\t"
48 "std %%o0, [%0 + 0x10]\n\t"
49 "std %%o2, [%0 + 0x18]\n"
50 :
51 : "r" (p1), "r" (p2)
52 : "g2", "g3", "g4", "g5",
53 "o0", "o1", "o2", "o3", "o4", "o5",
54 "l0", "l1", "l2", "l3", "l4", "l5");
55 p1 += 8;
56 p2 += 8;
57 } while (--lines > 0);
58}
59
60static void
61sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
62 unsigned long *p3)
63{
64 int lines = bytes / (sizeof (long)) / 8;
65
66 do {
67 __asm__ __volatile__(
68 "ldd [%0 + 0x00], %%g2\n\t"
69 "ldd [%0 + 0x08], %%g4\n\t"
70 "ldd [%0 + 0x10], %%o0\n\t"
71 "ldd [%0 + 0x18], %%o2\n\t"
72 "ldd [%1 + 0x00], %%o4\n\t"
73 "ldd [%1 + 0x08], %%l0\n\t"
74 "ldd [%1 + 0x10], %%l2\n\t"
75 "ldd [%1 + 0x18], %%l4\n\t"
76 "xor %%g2, %%o4, %%g2\n\t"
77 "xor %%g3, %%o5, %%g3\n\t"
78 "ldd [%2 + 0x00], %%o4\n\t"
79 "xor %%g4, %%l0, %%g4\n\t"
80 "xor %%g5, %%l1, %%g5\n\t"
81 "ldd [%2 + 0x08], %%l0\n\t"
82 "xor %%o0, %%l2, %%o0\n\t"
83 "xor %%o1, %%l3, %%o1\n\t"
84 "ldd [%2 + 0x10], %%l2\n\t"
85 "xor %%o2, %%l4, %%o2\n\t"
86 "xor %%o3, %%l5, %%o3\n\t"
87 "ldd [%2 + 0x18], %%l4\n\t"
88 "xor %%g2, %%o4, %%g2\n\t"
89 "xor %%g3, %%o5, %%g3\n\t"
90 "xor %%g4, %%l0, %%g4\n\t"
91 "xor %%g5, %%l1, %%g5\n\t"
92 "xor %%o0, %%l2, %%o0\n\t"
93 "xor %%o1, %%l3, %%o1\n\t"
94 "xor %%o2, %%l4, %%o2\n\t"
95 "xor %%o3, %%l5, %%o3\n\t"
96 "std %%g2, [%0 + 0x00]\n\t"
97 "std %%g4, [%0 + 0x08]\n\t"
98 "std %%o0, [%0 + 0x10]\n\t"
99 "std %%o2, [%0 + 0x18]\n"
100 :
101 : "r" (p1), "r" (p2), "r" (p3)
102 : "g2", "g3", "g4", "g5",
103 "o0", "o1", "o2", "o3", "o4", "o5",
104 "l0", "l1", "l2", "l3", "l4", "l5");
105 p1 += 8;
106 p2 += 8;
107 p3 += 8;
108 } while (--lines > 0);
109}
110
111static void
112sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
113 unsigned long *p3, unsigned long *p4)
114{
115 int lines = bytes / (sizeof (long)) / 8;
116
117 do {
118 __asm__ __volatile__(
119 "ldd [%0 + 0x00], %%g2\n\t"
120 "ldd [%0 + 0x08], %%g4\n\t"
121 "ldd [%0 + 0x10], %%o0\n\t"
122 "ldd [%0 + 0x18], %%o2\n\t"
123 "ldd [%1 + 0x00], %%o4\n\t"
124 "ldd [%1 + 0x08], %%l0\n\t"
125 "ldd [%1 + 0x10], %%l2\n\t"
126 "ldd [%1 + 0x18], %%l4\n\t"
127 "xor %%g2, %%o4, %%g2\n\t"
128 "xor %%g3, %%o5, %%g3\n\t"
129 "ldd [%2 + 0x00], %%o4\n\t"
130 "xor %%g4, %%l0, %%g4\n\t"
131 "xor %%g5, %%l1, %%g5\n\t"
132 "ldd [%2 + 0x08], %%l0\n\t"
133 "xor %%o0, %%l2, %%o0\n\t"
134 "xor %%o1, %%l3, %%o1\n\t"
135 "ldd [%2 + 0x10], %%l2\n\t"
136 "xor %%o2, %%l4, %%o2\n\t"
137 "xor %%o3, %%l5, %%o3\n\t"
138 "ldd [%2 + 0x18], %%l4\n\t"
139 "xor %%g2, %%o4, %%g2\n\t"
140 "xor %%g3, %%o5, %%g3\n\t"
141 "ldd [%3 + 0x00], %%o4\n\t"
142 "xor %%g4, %%l0, %%g4\n\t"
143 "xor %%g5, %%l1, %%g5\n\t"
144 "ldd [%3 + 0x08], %%l0\n\t"
145 "xor %%o0, %%l2, %%o0\n\t"
146 "xor %%o1, %%l3, %%o1\n\t"
147 "ldd [%3 + 0x10], %%l2\n\t"
148 "xor %%o2, %%l4, %%o2\n\t"
149 "xor %%o3, %%l5, %%o3\n\t"
150 "ldd [%3 + 0x18], %%l4\n\t"
151 "xor %%g2, %%o4, %%g2\n\t"
152 "xor %%g3, %%o5, %%g3\n\t"
153 "xor %%g4, %%l0, %%g4\n\t"
154 "xor %%g5, %%l1, %%g5\n\t"
155 "xor %%o0, %%l2, %%o0\n\t"
156 "xor %%o1, %%l3, %%o1\n\t"
157 "xor %%o2, %%l4, %%o2\n\t"
158 "xor %%o3, %%l5, %%o3\n\t"
159 "std %%g2, [%0 + 0x00]\n\t"
160 "std %%g4, [%0 + 0x08]\n\t"
161 "std %%o0, [%0 + 0x10]\n\t"
162 "std %%o2, [%0 + 0x18]\n"
163 :
164 : "r" (p1), "r" (p2), "r" (p3), "r" (p4)
165 : "g2", "g3", "g4", "g5",
166 "o0", "o1", "o2", "o3", "o4", "o5",
167 "l0", "l1", "l2", "l3", "l4", "l5");
168 p1 += 8;
169 p2 += 8;
170 p3 += 8;
171 p4 += 8;
172 } while (--lines > 0);
173}
174
175static void
176sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
177 unsigned long *p3, unsigned long *p4, unsigned long *p5)
178{
179 int lines = bytes / (sizeof (long)) / 8;
180
181 do {
182 __asm__ __volatile__(
183 "ldd [%0 + 0x00], %%g2\n\t"
184 "ldd [%0 + 0x08], %%g4\n\t"
185 "ldd [%0 + 0x10], %%o0\n\t"
186 "ldd [%0 + 0x18], %%o2\n\t"
187 "ldd [%1 + 0x00], %%o4\n\t"
188 "ldd [%1 + 0x08], %%l0\n\t"
189 "ldd [%1 + 0x10], %%l2\n\t"
190 "ldd [%1 + 0x18], %%l4\n\t"
191 "xor %%g2, %%o4, %%g2\n\t"
192 "xor %%g3, %%o5, %%g3\n\t"
193 "ldd [%2 + 0x00], %%o4\n\t"
194 "xor %%g4, %%l0, %%g4\n\t"
195 "xor %%g5, %%l1, %%g5\n\t"
196 "ldd [%2 + 0x08], %%l0\n\t"
197 "xor %%o0, %%l2, %%o0\n\t"
198 "xor %%o1, %%l3, %%o1\n\t"
199 "ldd [%2 + 0x10], %%l2\n\t"
200 "xor %%o2, %%l4, %%o2\n\t"
201 "xor %%o3, %%l5, %%o3\n\t"
202 "ldd [%2 + 0x18], %%l4\n\t"
203 "xor %%g2, %%o4, %%g2\n\t"
204 "xor %%g3, %%o5, %%g3\n\t"
205 "ldd [%3 + 0x00], %%o4\n\t"
206 "xor %%g4, %%l0, %%g4\n\t"
207 "xor %%g5, %%l1, %%g5\n\t"
208 "ldd [%3 + 0x08], %%l0\n\t"
209 "xor %%o0, %%l2, %%o0\n\t"
210 "xor %%o1, %%l3, %%o1\n\t"
211 "ldd [%3 + 0x10], %%l2\n\t"
212 "xor %%o2, %%l4, %%o2\n\t"
213 "xor %%o3, %%l5, %%o3\n\t"
214 "ldd [%3 + 0x18], %%l4\n\t"
215 "xor %%g2, %%o4, %%g2\n\t"
216 "xor %%g3, %%o5, %%g3\n\t"
217 "ldd [%4 + 0x00], %%o4\n\t"
218 "xor %%g4, %%l0, %%g4\n\t"
219 "xor %%g5, %%l1, %%g5\n\t"
220 "ldd [%4 + 0x08], %%l0\n\t"
221 "xor %%o0, %%l2, %%o0\n\t"
222 "xor %%o1, %%l3, %%o1\n\t"
223 "ldd [%4 + 0x10], %%l2\n\t"
224 "xor %%o2, %%l4, %%o2\n\t"
225 "xor %%o3, %%l5, %%o3\n\t"
226 "ldd [%4 + 0x18], %%l4\n\t"
227 "xor %%g2, %%o4, %%g2\n\t"
228 "xor %%g3, %%o5, %%g3\n\t"
229 "xor %%g4, %%l0, %%g4\n\t"
230 "xor %%g5, %%l1, %%g5\n\t"
231 "xor %%o0, %%l2, %%o0\n\t"
232 "xor %%o1, %%l3, %%o1\n\t"
233 "xor %%o2, %%l4, %%o2\n\t"
234 "xor %%o3, %%l5, %%o3\n\t"
235 "std %%g2, [%0 + 0x00]\n\t"
236 "std %%g4, [%0 + 0x08]\n\t"
237 "std %%o0, [%0 + 0x10]\n\t"
238 "std %%o2, [%0 + 0x18]\n"
239 :
240 : "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
241 : "g2", "g3", "g4", "g5",
242 "o0", "o1", "o2", "o3", "o4", "o5",
243 "l0", "l1", "l2", "l3", "l4", "l5");
244 p1 += 8;
245 p2 += 8;
246 p3 += 8;
247 p4 += 8;
248 p5 += 8;
249 } while (--lines > 0);
250}
251
252static struct xor_block_template xor_block_SPARC = {
253 .name = "SPARC",
254 .do_2 = sparc_2,
255 .do_3 = sparc_3,
256 .do_4 = sparc_4,
257 .do_5 = sparc_5,
258};
259
260/* For grins, also test the generic routines. */
261#include <asm-generic/xor.h>
262
263#undef XOR_TRY_TEMPLATES
264#define XOR_TRY_TEMPLATES \
265 do { \
266 xor_speed(&xor_block_8regs); \
267 xor_speed(&xor_block_32regs); \
268 xor_speed(&xor_block_SPARC); \
269 } while (0)
diff --git a/include/asm-sparc/xor_64.h b/include/asm-sparc/xor_64.h
new file mode 100644
index 000000000000..a0233884fc94
--- /dev/null
+++ b/include/asm-sparc/xor_64.h
@@ -0,0 +1,70 @@
1/*
2 * include/asm-sparc64/xor.h
3 *
4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set and Niagara block-init
6 * twin-load instructions.
7 *
8 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
9 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <asm/spitfire.h>
22
23extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
24extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
25 unsigned long *);
26extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
27 unsigned long *, unsigned long *);
28extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
29 unsigned long *, unsigned long *, unsigned long *);
30
31/* XXX Ugh, write cheetah versions... -DaveM */
32
33static struct xor_block_template xor_block_VIS = {
34 .name = "VIS",
35 .do_2 = xor_vis_2,
36 .do_3 = xor_vis_3,
37 .do_4 = xor_vis_4,
38 .do_5 = xor_vis_5,
39};
40
41extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
42extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
43 unsigned long *);
44extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
45 unsigned long *, unsigned long *);
46extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
47 unsigned long *, unsigned long *, unsigned long *);
48
49static struct xor_block_template xor_block_niagara = {
50 .name = "Niagara",
51 .do_2 = xor_niagara_2,
52 .do_3 = xor_niagara_3,
53 .do_4 = xor_niagara_4,
54 .do_5 = xor_niagara_5,
55};
56
57#undef XOR_TRY_TEMPLATES
58#define XOR_TRY_TEMPLATES \
59 do { \
60 xor_speed(&xor_block_VIS); \
61 xor_speed(&xor_block_niagara); \
62 } while (0)
63
64/* Use VIS for everything except Niagara. */
65#define XOR_SELECT_TEMPLATE(FASTEST) \
66 ((tlb_type == hypervisor && \
67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2)) ? \
69 &xor_block_niagara : \
70 &xor_block_VIS)
diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild
index 7e88c1292d10..6cdaf9d33b38 100644
--- a/include/asm-sparc64/Kbuild
+++ b/include/asm-sparc64/Kbuild
@@ -1,21 +1 @@
1# dummy file to avoid breaking make headers_install
1include include/asm-generic/Kbuild.asm
2
3ALTARCH := sparc
4ARCHDEF := defined __sparc__ && defined __arch64__
5ALTARCHDEF := defined __sparc__ && !defined __arch64__
6
7header-y += asi.h
8header-y += bpp.h
9header-y += display7seg.h
10header-y += envctrl.h
11header-y += openprom.h
12header-y += openpromio.h
13header-y += psrcompat.h
14header-y += pstate.h
15header-y += reg.h
16header-y += uctx.h
17header-y += utrap.h
18header-y += watchdog.h
19
20unifdef-y += fbio.h
21unifdef-y += perfctr.h
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 2c71ec4a3b18..f5126826ba34 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -1,128 +1 @@
1#include <asm-sparc/atomic.h>
1/* atomic.h: Thankfully the V9 is at least reasonable for this
2 * stuff.
3 *
4 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
7#ifndef __ARCH_SPARC64_ATOMIC__
8#define __ARCH_SPARC64_ATOMIC__
9
10#include <linux/types.h>
11#include <asm/system.h>
12
13typedef struct { volatile int counter; } atomic_t;
14typedef struct { volatile __s64 counter; } atomic64_t;
15
16#define ATOMIC_INIT(i) { (i) }
17#define ATOMIC64_INIT(i) { (i) }
18
19#define atomic_read(v) ((v)->counter)
20#define atomic64_read(v) ((v)->counter)
21
22#define atomic_set(v, i) (((v)->counter) = i)
23#define atomic64_set(v, i) (((v)->counter) = i)
24
25extern void atomic_add(int, atomic_t *);
26extern void atomic64_add(int, atomic64_t *);
27extern void atomic_sub(int, atomic_t *);
28extern void atomic64_sub(int, atomic64_t *);
29
30extern int atomic_add_ret(int, atomic_t *);
31extern int atomic64_add_ret(int, atomic64_t *);
32extern int atomic_sub_ret(int, atomic_t *);
33extern int atomic64_sub_ret(int, atomic64_t *);
34
35#define atomic_dec_return(v) atomic_sub_ret(1, v)
36#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
37
38#define atomic_inc_return(v) atomic_add_ret(1, v)
39#define atomic64_inc_return(v) atomic64_add_ret(1, v)
40
41#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
42#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
43
44#define atomic_add_return(i, v) atomic_add_ret(i, v)
45#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
46
47/*
48 * atomic_inc_and_test - increment and test
49 * @v: pointer of type atomic_t
50 *
51 * Atomically increments @v by 1
52 * and returns true if the result is zero, or false for all
53 * other cases.
54 */
55#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
56#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
57
58#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
59#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
60
61#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
62#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
63
64#define atomic_inc(v) atomic_add(1, v)
65#define atomic64_inc(v) atomic64_add(1, v)
66
67#define atomic_dec(v) atomic_sub(1, v)
68#define atomic64_dec(v) atomic64_sub(1, v)
69
70#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
71#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
72
73#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
74#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
75
76static inline int atomic_add_unless(atomic_t *v, int a, int u)
77{
78 int c, old;
79 c = atomic_read(v);
80 for (;;) {
81 if (unlikely(c == (u)))
82 break;
83 old = atomic_cmpxchg((v), c, c + (a));
84 if (likely(old == c))
85 break;
86 c = old;
87 }
88 return c != (u);
89}
90
91#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
92
93#define atomic64_cmpxchg(v, o, n) \
94 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
95#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
96
97static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
98{
99 long c, old;
100 c = atomic64_read(v);
101 for (;;) {
102 if (unlikely(c == (u)))
103 break;
104 old = atomic64_cmpxchg((v), c, c + (a));
105 if (likely(old == c))
106 break;
107 c = old;
108 }
109 return c != (u);
110}
111
112#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
113
114/* Atomic operations are already serializing */
115#ifdef CONFIG_SMP
116#define smp_mb__before_atomic_dec() membar_storeload_loadload();
117#define smp_mb__after_atomic_dec() membar_storeload_storestore();
118#define smp_mb__before_atomic_inc() membar_storeload_loadload();
119#define smp_mb__after_atomic_inc() membar_storeload_storestore();
120#else
121#define smp_mb__before_atomic_dec() barrier()
122#define smp_mb__after_atomic_dec() barrier()
123#define smp_mb__before_atomic_inc() barrier()
124#define smp_mb__after_atomic_inc() barrier()
125#endif
126
127#include <asm-generic/atomic.h>
128#endif /* !(__ARCH_SPARC64_ATOMIC__) */
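
A brief usage sketch (hypothetical structure and function, not from the kernel) of the atomic_add_unless()/atomic_inc_not_zero() pair defined above, which is the usual "take a reference only if the object is still live" idiom:

struct my_obj {                 /* hypothetical example structure */
        atomic_t refcnt;
};

static int get_object(struct my_obj *obj)
{
        /* Returns 1 and increments refcnt only while refcnt != 0, so an
         * object whose count has already reached zero is never resurrected. */
        return atomic_inc_not_zero(&obj->refcnt);
}
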
diff --git a/include/asm-sparc64/auxio.h b/include/asm-sparc64/auxio.h
index c4100494c7a5..46c9042f30b4 100644
--- a/include/asm-sparc64/auxio.h
+++ b/include/asm-sparc64/auxio.h
@@ -1,100 +1 @@
1#include <asm-sparc/auxio.h>
1/*
2 * auxio.h: Definitions and code for the Auxiliary I/O registers.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 *
6 * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
7 */
8#ifndef _SPARC64_AUXIO_H
9#define _SPARC64_AUXIO_H
10
11/* AUXIO implementations:
12 * sbus-based NCR89C105 "Slavio"
13 * LED/Floppy (AUX1) register
14 * Power (AUX2) register
15 *
16 * ebus-based auxio on PCIO
17 * LED Auxio Register
18 * Power Auxio Register
19 *
20 * Register definitions from NCR _NCR89C105 Chip Specification_
21 *
22 * SLAVIO AUX1 @ 0x1900000
23 * -------------------------------------------------
24 * | (R) | (R) | D | (R) | E | M | T | L |
25 * -------------------------------------------------
26 * (R) - bit 7:6,4 are reserved and should be masked in s/w
27 * D - Floppy Density Sense (1=high density) R/O
28 * E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
29 * M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
30 * T - Terminal Count: sends TC pulse to 82077 floppy controller
31 * L - System LED on front panel (0=off, 1=on)
32 */
33#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
34#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
35#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
36#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
37#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count, */
38#define AUXIO_AUX1_LED 0x01 /* System LED */
39
40/* SLAVIO AUX2 @ 0x1910000
41 * -------------------------------------------------
42 * | (R) | (R) | D | (R) | (R) | (R) | C | F |
43 * -------------------------------------------------
44 * (R) - bits 7:6,4:2 are reserved and should be masked in s/w
45 * D - Power Failure Detect (1=power fail)
46 * C - Clear Power Failure Detect Int (1=clear)
47 * F - Power Off (1=power off)
48 */
49#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
50#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
51#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
52#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
53
54/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
55 *
56 * PCIO LED Auxio @ 0x726000
57 * -------------------------------------------------
58 * | 31:1 Unused | LED |
59 * -------------------------------------------------
60 * Bits 31:1 unused
61 * LED - System LED on front panel (0=off, 1=on)
62 */
63#define AUXIO_PCIO_LED 0x01 /* System LED */
64
65/* PCIO Power Auxio @ 0x724000
66 * -------------------------------------------------
67 * | 31:2 Unused | CPO | SPO |
68 * -------------------------------------------------
69 * Bits 31:2 unused
70 * CPO - Courtesy Power Off (1=off)
71 * SPO - System Power Off (1=off)
72 */
73#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
74#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
75
76#ifndef __ASSEMBLY__
77
78extern void __iomem *auxio_register;
79
80#define AUXIO_LTE_ON 1
81#define AUXIO_LTE_OFF 0
82
83/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
84 *
85 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
86 */
87extern void auxio_set_lte(int on);
88
89#define AUXIO_LED_ON 1
90#define AUXIO_LED_OFF 0
91
92/* auxio_set_led - Set system front panel LED
93 *
94 * on - AUXIO_LED_ON or AUXIO_LED_OFF
95 */
96extern void auxio_set_led(int on);
97
98#endif /* ifndef __ASSEMBLY__ */
99
100#endif /* !(_SPARC64_AUXIO_H) */
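
For illustration only, a hypothetical caller of the helper declared above (the heartbeat name is invented):

static void heartbeat_led(int beat)
{
        /* Drive the front-panel LED through the exported helper. */
        auxio_set_led(beat ? AUXIO_LED_ON : AUXIO_LED_OFF);
}
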
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index bb87b8080220..204404355bdd 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -1,107 +1 @@
1#include <asm-sparc/bitops.h>
1/*
2 * bitops.h: Bit string operations on the V9.
3 *
4 * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC64_BITOPS_H
8#define _SPARC64_BITOPS_H
9
10#ifndef _LINUX_BITOPS_H
11#error only <linux/bitops.h> can be included directly
12#endif
13
14#include <linux/compiler.h>
15#include <asm/byteorder.h>
16
17extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
18extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
19extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
20extern void set_bit(unsigned long nr, volatile unsigned long *addr);
21extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
22extern void change_bit(unsigned long nr, volatile unsigned long *addr);
23
24#include <asm-generic/bitops/non-atomic.h>
25
26#ifdef CONFIG_SMP
27#define smp_mb__before_clear_bit() membar_storeload_loadload()
28#define smp_mb__after_clear_bit() membar_storeload_storestore()
29#else
30#define smp_mb__before_clear_bit() barrier()
31#define smp_mb__after_clear_bit() barrier()
32#endif
33
34#include <asm-generic/bitops/ffz.h>
35#include <asm-generic/bitops/__ffs.h>
36#include <asm-generic/bitops/fls.h>
37#include <asm-generic/bitops/__fls.h>
38#include <asm-generic/bitops/fls64.h>
39
40#ifdef __KERNEL__
41
42#include <asm-generic/bitops/sched.h>
43#include <asm-generic/bitops/ffs.h>
44
45/*
46 * hweightN: returns the hamming weight (i.e. the number
47 * of bits set) of a N-bit word
48 */
49
50#ifdef ULTRA_HAS_POPULATION_COUNT
51
52static inline unsigned int hweight64(unsigned long w)
53{
54 unsigned int res;
55
56 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
57 return res;
58}
59
60static inline unsigned int hweight32(unsigned int w)
61{
62 unsigned int res;
63
64 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
65 return res;
66}
67
68static inline unsigned int hweight16(unsigned int w)
69{
70 unsigned int res;
71
72 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
73 return res;
74}
75
76static inline unsigned int hweight8(unsigned int w)
77{
78 unsigned int res;
79
80 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
81 return res;
82}
83
84#else
85
86#include <asm-generic/bitops/hweight.h>
87
88#endif
89#include <asm-generic/bitops/lock.h>
90#endif /* __KERNEL__ */
91
92#include <asm-generic/bitops/find.h>
93
94#ifdef __KERNEL__
95
96#include <asm-generic/bitops/ext2-non-atomic.h>
97
98#define ext2_set_bit_atomic(lock,nr,addr) \
99 test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
100#define ext2_clear_bit_atomic(lock,nr,addr) \
101 test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
102
103#include <asm-generic/bitops/minix.h>
104
105#endif /* __KERNEL__ */
106
107#endif /* defined(_SPARC64_BITOPS_H) */
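
The '^ 0x38' in the ext2_*_bit_atomic() wrappers deserves a sketch (illustrative helper, not in the header): ext2 bitmaps are little-endian, so on this big-endian 64-bit port the byte index inside each 8-byte word has to be mirrored while the bit within the byte stays put.

static inline unsigned long ext2_bitnr_to_cpu_bitnr(unsigned long nr)
{
        /* Flip bits 3-5 of the bit number, i.e. map byte k of the 64-bit
         * word to byte 7-k; on-disk bit 0 thus lands on in-memory bit 56. */
        return nr ^ 0x38;
}
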
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index 122e4058dd9e..cf5b6b3e8a55 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -1,76 +1 @@
1#include <asm-sparc/cacheflush.h>
1#ifndef _SPARC64_CACHEFLUSH_H
2#define _SPARC64_CACHEFLUSH_H
3
4#include <asm/page.h>
5
6#ifndef __ASSEMBLY__
7
8#include <linux/mm.h>
9
10/* Cache flush operations. */
11
12/* These are the same regardless of whether this is an SMP kernel or not. */
13#define flush_cache_mm(__mm) \
14 do { if ((__mm) == current->mm) flushw_user(); } while(0)
15#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
16#define flush_cache_range(vma, start, end) \
17 flush_cache_mm((vma)->vm_mm)
18#define flush_cache_page(vma, page, pfn) \
19 flush_cache_mm((vma)->vm_mm)
20
21/*
22 * On spitfire, the icache doesn't snoop local stores and we don't
23 * use block commit stores (which invalidate icache lines) during
24 * module load, so we need this.
25 */
26extern void flush_icache_range(unsigned long start, unsigned long end);
27extern void __flush_icache_page(unsigned long);
28
29extern void __flush_dcache_page(void *addr, int flush_icache);
30extern void flush_dcache_page_impl(struct page *page);
31#ifdef CONFIG_SMP
32extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
33extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
34#else
35#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
36#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
37#endif
38
39extern void __flush_dcache_range(unsigned long start, unsigned long end);
40extern void flush_dcache_page(struct page *page);
41
42#define flush_icache_page(vma, pg) do { } while(0)
43#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
44
45extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
46 unsigned long uaddr, void *kaddr,
47 unsigned long len, int write);
48
49#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
50 do { \
51 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
52 memcpy(dst, src, len); \
53 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
54 } while (0)
55
56#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
57 do { \
58 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
59 memcpy(dst, src, len); \
60 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
61 } while (0)
62
63#define flush_dcache_mmap_lock(mapping) do { } while (0)
64#define flush_dcache_mmap_unlock(mapping) do { } while (0)
65
66#define flush_cache_vmap(start, end) do { } while (0)
67#define flush_cache_vunmap(start, end) do { } while (0)
68
69#ifdef CONFIG_DEBUG_PAGEALLOC
70/* internal debugging function */
71void kernel_map_pages(struct page *page, int numpages, int enable);
72#endif
73
74#endif /* !__ASSEMBLY__ */
75
76#endif /* _SPARC64_CACHEFLUSH_H */
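
A hedged sketch of the situation the spitfire comment above describes (kernel context assumed; dst, insns and len are invented names): after storing instructions with ordinary stores, the range has to be flushed explicitly because the I-cache does not snoop local stores.

static void install_insns(void *dst, const void *insns, size_t len)
{
        memcpy(dst, insns, len);                        /* plain stores */
        flush_icache_range((unsigned long)dst,          /* make them visible */
                           (unsigned long)dst + len);   /* to instruction fetch */
}
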
diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h
index b290564c8ce0..c3966c5e29d8 100644
--- a/include/asm-sparc64/checksum.h
+++ b/include/asm-sparc64/checksum.h
@@ -1,167 +1 @@
1#include <asm-sparc/checksum.h>
1#ifndef __SPARC64_CHECKSUM_H
2#define __SPARC64_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the V9.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void * buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from user space while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
41 int len, __wsum sum);
42
43extern long __csum_partial_copy_from_user(const void __user *src,
44 void *dst, int len,
45 __wsum sum);
46
47static inline __wsum
48csum_partial_copy_from_user(const void __user *src,
49 void *dst, int len,
50 __wsum sum, int *err)
51{
52 long ret = __csum_partial_copy_from_user(src, dst, len, sum);
53 if (ret < 0)
54 *err = -EFAULT;
55 return (__force __wsum) ret;
56}
57
58/*
59 * Copy and checksum to user
60 */
61#define HAVE_CSUM_COPY_USER
62extern long __csum_partial_copy_to_user(const void *src,
63 void __user *dst, int len,
64 __wsum sum);
65
66static inline __wsum
67csum_and_copy_to_user(const void *src,
68 void __user *dst, int len,
69 __wsum sum, int *err)
70{
71 long ret = __csum_partial_copy_to_user(src, dst, len, sum);
72 if (ret < 0)
73 *err = -EFAULT;
74 return (__force __wsum) ret;
75}
76
77/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
78 * the majority of the time.
79 */
80extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
81
82/* Fold a partial checksum without adding pseudo headers. */
83static inline __sum16 csum_fold(__wsum sum)
84{
85 unsigned int tmp;
86
87 __asm__ __volatile__(
88" addcc %0, %1, %1\n"
89" srl %1, 16, %1\n"
90" addc %1, %%g0, %1\n"
91" xnor %%g0, %1, %0\n"
92 : "=&r" (sum), "=r" (tmp)
93 : "0" (sum), "1" ((__force u32)sum<<16)
94 : "cc");
95 return (__force __sum16)sum;
96}
97
98static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
99 unsigned int len,
100 unsigned short proto,
101 __wsum sum)
102{
103 __asm__ __volatile__(
104" addcc %1, %0, %0\n"
105" addccc %2, %0, %0\n"
106" addccc %3, %0, %0\n"
107" addc %0, %%g0, %0\n"
108 : "=r" (sum), "=r" (saddr)
109 : "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
110 : "cc");
111 return sum;
112}
113
114/*
115 * computes the checksum of the TCP/UDP pseudo-header
116 * returns a 16-bit checksum, already complemented
117 */
118static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
119 unsigned short len,
120 unsigned short proto,
121 __wsum sum)
122{
123 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
124}
125
126#define _HAVE_ARCH_IPV6_CSUM
127
128static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
129 const struct in6_addr *daddr,
130 __u32 len, unsigned short proto,
131 __wsum sum)
132{
133 __asm__ __volatile__ (
134" addcc %3, %4, %%g7\n"
135" addccc %5, %%g7, %%g7\n"
136" lduw [%2 + 0x0c], %%g2\n"
137" lduw [%2 + 0x08], %%g3\n"
138" addccc %%g2, %%g7, %%g7\n"
139" lduw [%2 + 0x04], %%g2\n"
140" addccc %%g3, %%g7, %%g7\n"
141" lduw [%2 + 0x00], %%g3\n"
142" addccc %%g2, %%g7, %%g7\n"
143" lduw [%1 + 0x0c], %%g2\n"
144" addccc %%g3, %%g7, %%g7\n"
145" lduw [%1 + 0x08], %%g3\n"
146" addccc %%g2, %%g7, %%g7\n"
147" lduw [%1 + 0x04], %%g2\n"
148" addccc %%g3, %%g7, %%g7\n"
149" lduw [%1 + 0x00], %%g3\n"
150" addccc %%g2, %%g7, %%g7\n"
151" addccc %%g3, %%g7, %0\n"
152" addc 0, %0, %0\n"
153 : "=&r" (sum)
154 : "r" (saddr), "r" (daddr), "r"(htonl(len)),
155 "r"(htonl(proto)), "r"(sum)
156 : "g2", "g3", "g7", "cc");
157
158 return csum_fold(sum);
159}
160
161/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
162static inline __sum16 ip_compute_csum(const void *buff, int len)
163{
164 return csum_fold(csum_partial(buff, len, 0));
165}
166
167#endif /* !(__SPARC64_CHECKSUM_H) */
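
For comparison, a portable C rendition (illustrative, not the kernel code) of the folding that the csum_fold() assembly above performs: reduce the 32-bit partial sum to 16 bits with end-around carry, then complement.

static inline unsigned short csum_fold_portable(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold high half into low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb a possible carry */
        return (unsigned short)~sum;            /* one's-complement result */
}
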
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 532975ecfe10..3220e134a579 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -1,240 +1 @@
1#include <asm-sparc/cpudata.h>
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_CPUDATA_H
7#define _SPARC64_CPUDATA_H
8
9#include <asm/hypervisor.h>
10#include <asm/asi.h>
11
12#ifndef __ASSEMBLY__
13
14#include <linux/percpu.h>
15#include <linux/threads.h>
16
17typedef struct {
18 /* Dcache line 1 */
19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
20 unsigned int __pad0;
21 unsigned long clock_tick; /* %tick's per second */
22 unsigned long __pad;
23 unsigned int __pad1;
24 unsigned int __pad2;
25
26 /* Dcache line 2, rarely used */
27 unsigned int dcache_size;
28 unsigned int dcache_line_size;
29 unsigned int icache_size;
30 unsigned int icache_line_size;
31 unsigned int ecache_size;
32 unsigned int ecache_line_size;
33 int core_id;
34 int proc_id;
35} cpuinfo_sparc;
36
37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
39#define local_cpu_data() __get_cpu_var(__cpu_data)
40
41/* Trap handling code needs to get at a few critical values upon
42 * trap entry and to process TSB misses. These cannot be in the
43 * per_cpu() area as we really need to lock them into the TLB and
44 * thus make them part of the main kernel image. As a result we
45 * try to make this as small as possible.
46 *
47 * This is padded out and aligned to 64-bytes to avoid false sharing
48 * on SMP.
49 */
50
51/* If you modify the size of this structure, please update
52 * TRAP_BLOCK_SZ_SHIFT below.
53 */
54struct thread_info;
55struct trap_per_cpu {
56/* D-cache line 1: Basic thread information, cpu and device mondo queues */
57 struct thread_info *thread;
58 unsigned long pgd_paddr;
59 unsigned long cpu_mondo_pa;
60 unsigned long dev_mondo_pa;
61
62/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
63 unsigned long resum_mondo_pa;
64 unsigned long resum_kernel_buf_pa;
65 unsigned long nonresum_mondo_pa;
66 unsigned long nonresum_kernel_buf_pa;
67
68/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
69 struct hv_fault_status fault_info;
70
71/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
72 unsigned long cpu_mondo_block_pa;
73 unsigned long cpu_list_pa;
74 unsigned long tsb_huge;
75 unsigned long tsb_huge_temp;
76
77/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
78 unsigned long irq_worklist_pa;
79 unsigned int cpu_mondo_qmask;
80 unsigned int dev_mondo_qmask;
81 unsigned int resum_qmask;
82 unsigned int nonresum_qmask;
83 void *hdesc;
84} __attribute__((aligned(64)));
85extern struct trap_per_cpu trap_block[NR_CPUS];
86extern void init_cur_cpu_trap(struct thread_info *);
87extern void setup_tba(void);
88extern int ncpus_probed;
89extern void __init cpu_probe(void);
90extern const struct seq_operations cpuinfo_op;
91
92extern unsigned long real_hard_smp_processor_id(void);
93
94struct cpuid_patch_entry {
95 unsigned int addr;
96 unsigned int cheetah_safari[4];
97 unsigned int cheetah_jbus[4];
98 unsigned int starfire[4];
99 unsigned int sun4v[4];
100};
101extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
102
103struct sun4v_1insn_patch_entry {
104 unsigned int addr;
105 unsigned int insn;
106};
107extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
108 __sun4v_1insn_patch_end;
109
110struct sun4v_2insn_patch_entry {
111 unsigned int addr;
112 unsigned int insns[2];
113};
114extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
115 __sun4v_2insn_patch_end;
116
117#endif /* !(__ASSEMBLY__) */
118
119#define TRAP_PER_CPU_THREAD 0x00
120#define TRAP_PER_CPU_PGD_PADDR 0x08
121#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
122#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
123#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
124#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
125#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
126#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
127#define TRAP_PER_CPU_FAULT_INFO 0x40
128#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
129#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
130#define TRAP_PER_CPU_TSB_HUGE 0xd0
131#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
132#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
133#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
134#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
135#define TRAP_PER_CPU_RESUM_QMASK 0xf0
136#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
137
138#define TRAP_BLOCK_SZ_SHIFT 8
139
140#include <asm/scratchpad.h>
141
142#define __GET_CPUID(REG) \
143 /* Spitfire implementation (default). */ \
144661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
145 srlx REG, 17, REG; \
146 and REG, 0x1f, REG; \
147 nop; \
148 .section .cpuid_patch, "ax"; \
149 /* Instruction location. */ \
150 .word 661b; \
151 /* Cheetah Safari implementation. */ \
152 ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
153 srlx REG, 17, REG; \
154 and REG, 0x3ff, REG; \
155 nop; \
156 /* Cheetah JBUS implementation. */ \
157 ldxa [%g0] ASI_JBUS_CONFIG, REG; \
158 srlx REG, 17, REG; \
159 and REG, 0x1f, REG; \
160 nop; \
161 /* Starfire implementation. */ \
162 sethi %hi(0x1fff40000d0 >> 9), REG; \
163 sllx REG, 9, REG; \
164 or REG, 0xd0, REG; \
165 lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
166 /* sun4v implementation. */ \
167 mov SCRATCHPAD_CPUID, REG; \
168 ldxa [REG] ASI_SCRATCHPAD, REG; \
169 nop; \
170 nop; \
171 .previous;
172
173#ifdef CONFIG_SMP
174
175#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
176 __GET_CPUID(TMP) \
177 sethi %hi(trap_block), DEST; \
178 sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
179 or DEST, %lo(trap_block), DEST; \
180 add DEST, TMP, DEST; \
181
182/* Clobbers TMP, current address space PGD phys address into DEST. */
183#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
184 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
185 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
186
187/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
188#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
189 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
190 add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
191
192/* Clobbers TMP, loads DEST with current thread info pointer. */
193#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
194 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
195 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
196
197/* Given the current thread info pointer in THR, load the per-cpu
198 * area base of the current processor into DEST. REG1, REG2, and REG3 are
199 * clobbered.
200 *
201 * You absolutely cannot use DEST as a temporary in this code. The
202 * reason is that traps can happen during execution, and return from
203 * trap will load the fully resolved DEST per-cpu base. This can corrupt
204 * the calculations done by the macro mid-stream.
205 */
206#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
207 lduh [THR + TI_CPU], REG1; \
208 sethi %hi(__per_cpu_shift), REG3; \
209 sethi %hi(__per_cpu_base), REG2; \
210 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
211 ldx [REG2 + %lo(__per_cpu_base)], REG2; \
212 sllx REG1, REG3, REG3; \
213 add REG3, REG2, DEST;
214
215#else
216
217#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
218 sethi %hi(trap_block), DEST; \
219 or DEST, %lo(trap_block), DEST; \
220
221/* Uniprocessor versions, we know the cpuid is zero. */
222#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
223 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
224 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
225
226/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
227#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
228 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
229 add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
230
231#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
232 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
233 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
234
235/* No per-cpu areas on uniprocessor, so no need to load DEST. */
236#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
237
238#endif /* !(CONFIG_SMP) */
239
240#endif /* _SPARC64_CPUDATA_H */
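
The TRAP_PER_CPU_* constants mirror the C layout of struct trap_per_cpu for use from assembler; a hypothetical compile-time cross-check (not present in the header) could look like this:

static inline void trap_per_cpu_offsets_check(void)
{
        BUILD_BUG_ON(offsetof(struct trap_per_cpu, pgd_paddr) !=
                     TRAP_PER_CPU_PGD_PADDR);
        BUILD_BUG_ON(offsetof(struct trap_per_cpu, fault_info) !=
                     TRAP_PER_CPU_FAULT_INFO);
        BUILD_BUG_ON(offsetof(struct trap_per_cpu, irq_worklist_pa) !=
                     TRAP_PER_CPU_IRQ_WORKLIST_PA);
}
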
diff --git a/include/asm-sparc64/delay.h b/include/asm-sparc64/delay.h
index a77aa622d762..33dc5589d841 100644
--- a/include/asm-sparc64/delay.h
+++ b/include/asm-sparc64/delay.h
@@ -1,17 +1 @@
1#include <asm-sparc/delay.h>
1/* delay.h: Linux delay routines on sparc64.
2 *
3 * Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
4 */
5
6#ifndef _SPARC64_DELAY_H
7#define _SPARC64_DELAY_H
8
9#ifndef __ASSEMBLY__
10
11extern void __delay(unsigned long loops);
12extern void udelay(unsigned long usecs);
13#define mdelay(n) udelay((n) * 1000)
14
15#endif /* !__ASSEMBLY__ */
16
17#endif /* _SPARC64_DELAY_H */
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 38cbec76a33f..380b7b63147f 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -1,154 +1 @@
1#include <asm-sparc/dma-mapping.h>
1#ifndef _ASM_SPARC64_DMA_MAPPING_H
2#define _ASM_SPARC64_DMA_MAPPING_H
3
4#include <linux/scatterlist.h>
5#include <linux/mm.h>
6
7#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
8
9struct dma_ops {
10 void *(*alloc_coherent)(struct device *dev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flag);
12 void (*free_coherent)(struct device *dev, size_t size,
13 void *cpu_addr, dma_addr_t dma_handle);
14 dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
15 size_t size,
16 enum dma_data_direction direction);
17 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
18 size_t size,
19 enum dma_data_direction direction);
20 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
21 enum dma_data_direction direction);
22 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
23 int nhwentries,
24 enum dma_data_direction direction);
25 void (*sync_single_for_cpu)(struct device *dev,
26 dma_addr_t dma_handle, size_t size,
27 enum dma_data_direction direction);
28 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
29 int nelems,
30 enum dma_data_direction direction);
31};
32extern const struct dma_ops *dma_ops;
33
34extern int dma_supported(struct device *dev, u64 mask);
35extern int dma_set_mask(struct device *dev, u64 dma_mask);
36
37static inline void *dma_alloc_coherent(struct device *dev, size_t size,
38 dma_addr_t *dma_handle, gfp_t flag)
39{
40 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
41}
42
43static inline void dma_free_coherent(struct device *dev, size_t size,
44 void *cpu_addr, dma_addr_t dma_handle)
45{
46 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
47}
48
49static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
50 size_t size,
51 enum dma_data_direction direction)
52{
53 return dma_ops->map_single(dev, cpu_addr, size, direction);
54}
55
56static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
57 size_t size,
58 enum dma_data_direction direction)
59{
60 dma_ops->unmap_single(dev, dma_addr, size, direction);
61}
62
63static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64 unsigned long offset, size_t size,
65 enum dma_data_direction direction)
66{
67 return dma_ops->map_single(dev, page_address(page) + offset,
68 size, direction);
69}
70
71static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
72 size_t size,
73 enum dma_data_direction direction)
74{
75 dma_ops->unmap_single(dev, dma_address, size, direction);
76}
77
78static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
79 int nents, enum dma_data_direction direction)
80{
81 return dma_ops->map_sg(dev, sg, nents, direction);
82}
83
84static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
85 int nents, enum dma_data_direction direction)
86{
87 dma_ops->unmap_sg(dev, sg, nents, direction);
88}
89
90static inline void dma_sync_single_for_cpu(struct device *dev,
91 dma_addr_t dma_handle, size_t size,
92 enum dma_data_direction direction)
93{
94 dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
95}
96
97static inline void dma_sync_single_for_device(struct device *dev,
98 dma_addr_t dma_handle,
99 size_t size,
100 enum dma_data_direction direction)
101{
102 /* No flushing needed to sync cpu writes to the device. */
103}
104
105static inline void dma_sync_single_range_for_cpu(struct device *dev,
106 dma_addr_t dma_handle,
107 unsigned long offset,
108 size_t size,
109 enum dma_data_direction direction)
110{
111 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
112}
113
114static inline void dma_sync_single_range_for_device(struct device *dev,
115 dma_addr_t dma_handle,
116 unsigned long offset,
117 size_t size,
118 enum dma_data_direction direction)
119{
120 /* No flushing needed to sync cpu writes to the device. */
121}
122
123
124static inline void dma_sync_sg_for_cpu(struct device *dev,
125 struct scatterlist *sg, int nelems,
126 enum dma_data_direction direction)
127{
128 dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
129}
130
131static inline void dma_sync_sg_for_device(struct device *dev,
132 struct scatterlist *sg, int nelems,
133 enum dma_data_direction direction)
134{
135 /* No flushing needed to sync cpu writes to the device. */
136}
137
138static inline int dma_mapping_error(dma_addr_t dma_addr)
139{
140 return (dma_addr == DMA_ERROR_CODE);
141}
142
143static inline int dma_get_cache_alignment(void)
144{
145 /* no easy way to get cache size on all processors, so return
146 * the maximum possible, to be safe */
147 return (1 << INTERNODE_CACHE_SHIFT);
148}
149
150#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
151#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
152#define dma_is_consistent(d, h) (1)
153
154#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index 9d4c024bd3b3..2e36248e6b59 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -1,205 +1 @@
+#include <asm-sparc/dma.h>
1/*
2 * include/asm-sparc64/dma.h
3 *
4 * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _ASM_SPARC64_DMA_H
8#define _ASM_SPARC64_DMA_H
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/spinlock.h>
13
14#include <asm/sbus.h>
15#include <asm/delay.h>
16#include <asm/oplib.h>
17
18/* These are irrelevant for Sparc DMA, but we leave it in so that
19 * things can compile.
20 */
21#define MAX_DMA_CHANNELS 8
22#define DMA_MODE_READ 1
23#define DMA_MODE_WRITE 2
24#define MAX_DMA_ADDRESS (~0UL)
25
26/* Useful constants */
27#define SIZE_16MB (16*1024*1024)
28#define SIZE_64K (64*1024)
29
30/* SBUS DMA controller reg offsets */
31#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
32#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
33#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
34#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
35
36/* DVMA chip revisions */
37enum dvma_rev {
38 dvmarev0,
39 dvmaesc1,
40 dvmarev1,
41 dvmarev2,
42 dvmarev3,
43 dvmarevplus,
44 dvmahme
45};
46
47#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
48
49/* Linux DMA information structure, filled during probe. */
50struct sbus_dma {
51 struct sbus_dma *next;
52 struct sbus_dev *sdev;
53 void __iomem *regs;
54
55 /* Status, misc info */
56 int node; /* Prom node for this DMA device */
57 int running; /* Are we doing DMA now? */
58 int allocated; /* Are we "owned" by anyone yet? */
59
60 /* Transfer information. */
61 u32 addr; /* Start address of current transfer */
62 int nbytes; /* Size of current transfer */
63 int realbytes; /* For splitting up large transfers, etc. */
64
65 /* DMA revision */
66 enum dvma_rev revision;
67};
68
69extern struct sbus_dma *dma_chain;
70
71/* Broken hardware... */
72#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
73#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
74
75/* Main routines in dma.c */
76extern void dvma_init(struct sbus_bus *);
77
78/* Fields in the cond_reg register */
79/* First, the version identification bits */
80#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
81#define DMA_VERS0 0x00000000 /* Sunray DMA version */
82#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
83#define DMA_VERS1 0x80000000 /* DMA rev 1 */
84#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
85#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
86#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
87
88#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
89#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
90#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
91#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
92#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
93#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
94#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
95#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
96#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
97#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
98#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
99#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
100#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
101#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
102#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
103#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
104#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
105#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
106#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
107#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
108#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
109#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
110#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
111#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
112#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
113#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
114#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
115#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
116#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
117#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
118#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
119#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
120#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
121#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
122#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
123#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
124#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
125#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
126#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
127#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
128#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
129
130/* Values describing the burst-size property from the PROM */
131#define DMA_BURST1 0x01
132#define DMA_BURST2 0x02
133#define DMA_BURST4 0x04
134#define DMA_BURST8 0x08
135#define DMA_BURST16 0x10
136#define DMA_BURST32 0x20
137#define DMA_BURST64 0x40
138#define DMA_BURSTBITS 0x7f
139
140/* Determine highest possible final transfer address given a base */
141#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
142
143/* Yes, I hack a lot of elisp in my spare time... */
144#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
145#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
146#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
147#define DMA_OFF(__regs) \
148do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
149 tmp &= ~DMA_ENABLE; \
150 sbus_writel(tmp, (__regs) + DMA_CSR); \
151} while(0)
152#define DMA_INTSOFF(__regs) \
153do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
154 tmp &= ~DMA_INT_ENAB; \
155 sbus_writel(tmp, (__regs) + DMA_CSR); \
156} while(0)
157#define DMA_INTSON(__regs) \
158do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
159 tmp |= DMA_INT_ENAB; \
160 sbus_writel(tmp, (__regs) + DMA_CSR); \
161} while(0)
162#define DMA_PUNTFIFO(__regs) \
163do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
164 tmp |= DMA_FIFO_INV; \
165 sbus_writel(tmp, (__regs) + DMA_CSR); \
166} while(0)
167#define DMA_SETSTART(__regs, __addr) \
168 sbus_writel((u32)(__addr), (__regs) + DMA_ADDR);
169#define DMA_BEGINDMA_W(__regs) \
170do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
171 tmp |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB); \
172 sbus_writel(tmp, (__regs) + DMA_CSR); \
173} while(0)
174#define DMA_BEGINDMA_R(__regs) \
175do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
176 tmp |= (DMA_ENABLE|DMA_INT_ENAB); \
177 tmp &= ~DMA_ST_WRITE; \
178 sbus_writel(tmp, (__regs) + DMA_CSR); \
179} while(0)
180
181/* For certain DMA chips, we need to disable ints upon irq entry
182 * and turn them back on when we are done. So in any ESP interrupt
183 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
184 * when leaving the handler. You have been warned...
185 */
186#define DMA_IRQ_ENTRY(dma, dregs) do { \
187 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
188 } while (0)
189
190#define DMA_IRQ_EXIT(dma, dregs) do { \
191 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
192 } while(0)
193
194#define for_each_dvma(dma) \
195 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
196
197/* From PCI */
198
199#ifdef CONFIG_PCI
200extern int isa_dma_bridge_buggy;
201#else
202#define isa_dma_bridge_buggy (0)
203#endif
204
205#endif /* !(_ASM_SPARC64_DMA_H) */
diff --git a/include/asm-sparc64/ebus.h b/include/asm-sparc64/ebus.h
index fcc62b97ced5..d7d476158bd5 100644
--- a/include/asm-sparc64/ebus.h
+++ b/include/asm-sparc64/ebus.h
@@ -1,94 +1 @@
+#include <asm-sparc/ebus.h>
1/*
2 * ebus.h: PCI to Ebus pseudo driver software state.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8#ifndef __SPARC64_EBUS_H
9#define __SPARC64_EBUS_H
10
11#include <asm/oplib.h>
12#include <asm/prom.h>
13#include <asm/of_device.h>
14
15struct linux_ebus_child {
16 struct linux_ebus_child *next;
17 struct linux_ebus_device *parent;
18 struct linux_ebus *bus;
19 struct device_node *prom_node;
20 struct resource resource[PROMREG_MAX];
21 int num_addrs;
22 unsigned int irqs[PROMINTR_MAX];
23 int num_irqs;
24};
25
26struct linux_ebus_device {
27 struct of_device ofdev;
28 struct linux_ebus_device *next;
29 struct linux_ebus_child *children;
30 struct linux_ebus *bus;
31 struct device_node *prom_node;
32 struct resource resource[PROMREG_MAX];
33 int num_addrs;
34 unsigned int irqs[PROMINTR_MAX];
35 int num_irqs;
36};
37#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
38
39struct linux_ebus {
40 struct of_device ofdev;
41 struct linux_ebus *next;
42 struct linux_ebus_device *devices;
43 struct pci_dev *self;
44 int index;
45 int is_rio;
46 struct device_node *prom_node;
47};
48#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
49
50struct ebus_dma_info {
51 spinlock_t lock;
52 void __iomem *regs;
53
54 unsigned int flags;
55#define EBUS_DMA_FLAG_USE_EBDMA_HANDLER 0x00000001
56#define EBUS_DMA_FLAG_TCI_DISABLE 0x00000002
57
58 /* These are only valid is EBUS_DMA_FLAG_USE_EBDMA_HANDLER is
59 * set.
60 */
61 void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
62 void *client_cookie;
63 unsigned int irq;
64#define EBUS_DMA_EVENT_ERROR 1
65#define EBUS_DMA_EVENT_DMA 2
66#define EBUS_DMA_EVENT_DEVICE 4
67
68 unsigned char name[64];
69};
70
71extern int ebus_dma_register(struct ebus_dma_info *p);
72extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
73extern void ebus_dma_unregister(struct ebus_dma_info *p);
74extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
75 size_t len);
76extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
77extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
78extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
79extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
80
81extern struct linux_ebus *ebus_chain;
82
83extern void ebus_init(void);
84
85#define for_each_ebus(bus) \
86 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
87
88#define for_each_ebusdev(dev, bus) \
89 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
90
91#define for_each_edevchild(dev, child) \
92 for((child) = (dev)->children; (child); (child) = (child)->next)
93
94#endif /* !(__SPARC64_EBUS_H) */
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 0818a1308f4e..f256d9472c82 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -1,217 +1 @@
+#include <asm-sparc/elf.h>
1#ifndef __ASM_SPARC64_ELF_H
2#define __ASM_SPARC64_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11#include <asm/spitfire.h>
12
13/*
14 * Sparc section types
15 */
16#define STT_REGISTER 13
17
18/*
19 * Sparc ELF relocation types
20 */
21#define R_SPARC_NONE 0
22#define R_SPARC_8 1
23#define R_SPARC_16 2
24#define R_SPARC_32 3
25#define R_SPARC_DISP8 4
26#define R_SPARC_DISP16 5
27#define R_SPARC_DISP32 6
28#define R_SPARC_WDISP30 7
29#define R_SPARC_WDISP22 8
30#define R_SPARC_HI22 9
31#define R_SPARC_22 10
32#define R_SPARC_13 11
33#define R_SPARC_LO10 12
34#define R_SPARC_GOT10 13
35#define R_SPARC_GOT13 14
36#define R_SPARC_GOT22 15
37#define R_SPARC_PC10 16
38#define R_SPARC_PC22 17
39#define R_SPARC_WPLT30 18
40#define R_SPARC_COPY 19
41#define R_SPARC_GLOB_DAT 20
42#define R_SPARC_JMP_SLOT 21
43#define R_SPARC_RELATIVE 22
44#define R_SPARC_UA32 23
45#define R_SPARC_PLT32 24
46#define R_SPARC_HIPLT22 25
47#define R_SPARC_LOPLT10 26
48#define R_SPARC_PCPLT32 27
49#define R_SPARC_PCPLT22 28
50#define R_SPARC_PCPLT10 29
51#define R_SPARC_10 30
52#define R_SPARC_11 31
53#define R_SPARC_64 32
54#define R_SPARC_OLO10 33
55#define R_SPARC_WDISP16 40
56#define R_SPARC_WDISP19 41
57#define R_SPARC_7 43
58#define R_SPARC_5 44
59#define R_SPARC_6 45
60
61/* Bits present in AT_HWCAP, primarily for Sparc32. */
62
63#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
64#define HWCAP_SPARC_STBAR 2
65#define HWCAP_SPARC_SWAP 4
66#define HWCAP_SPARC_MULDIV 8
67#define HWCAP_SPARC_V9 16
68#define HWCAP_SPARC_ULTRA3 32
69#define HWCAP_SPARC_BLKINIT 64
70#define HWCAP_SPARC_N2 128
71
72#define CORE_DUMP_USE_REGSET
73
74/*
75 * These are used to set parameters in the core dumps.
76 */
77#define ELF_ARCH EM_SPARCV9
78#define ELF_CLASS ELFCLASS64
79#define ELF_DATA ELFDATA2MSB
80
81/* Format of 64-bit elf_gregset_t is:
82 * G0 --> G7
83 * O0 --> O7
84 * L0 --> L7
85 * I0 --> I7
86 * TSTATE
87 * TPC
88 * TNPC
89 * Y
90 */
91typedef unsigned long elf_greg_t;
92#define ELF_NGREG 36
93typedef elf_greg_t elf_gregset_t[ELF_NGREG];
94
95typedef struct {
96 unsigned long pr_regs[32];
97 unsigned long pr_fsr;
98 unsigned long pr_gsr;
99 unsigned long pr_fprs;
100} elf_fpregset_t;
101
102/* Format of 32-bit elf_gregset_t is:
103 * G0 --> G7
104 * O0 --> O7
105 * L0 --> L7
106 * I0 --> I7
107 * PSR, PC, nPC, Y, WIM, TBR
108 */
109typedef unsigned int compat_elf_greg_t;
110#define COMPAT_ELF_NGREG 38
111typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
112
113typedef struct {
114 union {
115 unsigned int pr_regs[32];
116 unsigned long pr_dregs[16];
117 } pr_fr;
118 unsigned int __unused;
119 unsigned int pr_fsr;
120 unsigned char pr_qcnt;
121 unsigned char pr_q_entrysize;
122 unsigned char pr_en;
123 unsigned int pr_q[64];
124} compat_elf_fpregset_t;
125
126/* UltraSparc extensions. Still unused, but will be eventually. */
127typedef struct {
128 unsigned int pr_type;
129 unsigned int pr_align;
130 union {
131 struct {
132 union {
133 unsigned int pr_regs[32];
134 unsigned long pr_dregs[16];
135 long double pr_qregs[8];
136 } pr_xfr;
137 } pr_v8p;
138 unsigned int pr_xfsr;
139 unsigned int pr_fprs;
140 unsigned int pr_xg[8];
141 unsigned int pr_xo[8];
142 unsigned long pr_tstate;
143 unsigned int pr_filler[8];
144 } pr_un;
145} elf_xregset_t;
146
147/*
148 * This is used to ensure we don't load something for the wrong architecture.
149 */
150#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
151#define compat_elf_check_arch(x) ((x)->e_machine == EM_SPARC || \
152 (x)->e_machine == EM_SPARC32PLUS)
153#define compat_start_thread start_thread32
154
155#define USE_ELF_CORE_DUMP
156#define ELF_EXEC_PAGESIZE PAGE_SIZE
157
158/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
159 use of this is to invoke "./ld.so someprog" to test out a new version of
160 the loader. We need to make sure that it is out of the way of the program
161 that it will "exec", and that there is sufficient room for the brk. */
162
163#define ELF_ET_DYN_BASE 0x0000010000000000UL
164#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
165
166
167/* This yields a mask that user programs can use to figure out what
168 instruction set this cpu supports. */
169
170/* On Ultra, we support all of the v8 capabilities. */
171static inline unsigned int sparc64_elf_hwcap(void)
172{
173 unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
174 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
175 HWCAP_SPARC_V9);
176
177 if (tlb_type == cheetah || tlb_type == cheetah_plus)
178 cap |= HWCAP_SPARC_ULTRA3;
179 else if (tlb_type == hypervisor) {
180 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
181 sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
182 cap |= HWCAP_SPARC_BLKINIT;
183 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
184 cap |= HWCAP_SPARC_N2;
185 }
186
187 return cap;
188}
189
190#define ELF_HWCAP sparc64_elf_hwcap();
191
192/* This yields a string that ld.so will use to load implementation
193 specific libraries for optimization. This is more specific in
194 intent than poking at uname or /proc/cpuinfo. */
195
196#define ELF_PLATFORM (NULL)
197
198#define SET_PERSONALITY(ex, ibcs2) \
199do { unsigned long new_flags = current_thread_info()->flags; \
200 new_flags &= _TIF_32BIT; \
201 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
202 new_flags |= _TIF_32BIT; \
203 else \
204 new_flags &= ~_TIF_32BIT; \
205 if ((current_thread_info()->flags & _TIF_32BIT) \
206 != new_flags) \
207 set_thread_flag(TIF_ABI_PENDING); \
208 else \
209 clear_thread_flag(TIF_ABI_PENDING); \
210 /* flush_thread will update pgd cache */ \
211 if (ibcs2) \
212 set_personality(PER_SVR4); \
213 else if (current->personality != PER_LINUX32) \
214 set_personality(PER_LINUX); \
215} while (0)
216
217#endif /* !(__ASM_SPARC64_ELF_H) */
diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h
index ca19f80a9b7d..214878114436 100644
--- a/include/asm-sparc64/floppy.h
+++ b/include/asm-sparc64/floppy.h
@@ -1,782 +1 @@
+#include <asm-sparc/floppy.h>
1/* floppy.h: Sparc specific parts of the Floppy driver.
2 *
3 * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 *
6 * Ultra/PCI support added: Sep 1997 Eddie C. Dost (ecd@skynet.be)
7 */
8
9#ifndef __ASM_SPARC64_FLOPPY_H
10#define __ASM_SPARC64_FLOPPY_H
11
12#include <linux/init.h>
13#include <linux/pci.h>
14
15#include <asm/page.h>
16#include <asm/pgtable.h>
17#include <asm/system.h>
18#include <asm/idprom.h>
19#include <asm/oplib.h>
20#include <asm/auxio.h>
21#include <asm/sbus.h>
22#include <asm/irq.h>
23
24
25/*
26 * Define this to enable exchanging drive 0 and 1 if only drive 1 is
27 * probed on PCI machines.
28 */
29#undef PCI_FDC_SWAP_DRIVES
30
31
32/* References:
33 * 1) Netbsd Sun floppy driver.
34 * 2) NCR 82077 controller manual
35 * 3) Intel 82077 controller manual
36 */
37struct sun_flpy_controller {
38 volatile unsigned char status1_82077; /* Auxiliary Status reg. 1 */
39 volatile unsigned char status2_82077; /* Auxiliary Status reg. 2 */
40 volatile unsigned char dor_82077; /* Digital Output reg. */
41 volatile unsigned char tapectl_82077; /* Tape Control reg */
42 volatile unsigned char status_82077; /* Main Status Register. */
43#define drs_82077 status_82077 /* Digital Rate Select reg. */
44 volatile unsigned char data_82077; /* Data fifo. */
45 volatile unsigned char ___unused;
46 volatile unsigned char dir_82077; /* Digital Input reg. */
47#define dcr_82077 dir_82077 /* Config Control reg. */
48};
49
50/* You'll only ever find one controller on an Ultra anyways. */
51static struct sun_flpy_controller *sun_fdc = (struct sun_flpy_controller *)-1;
52unsigned long fdc_status;
53static struct sbus_dev *floppy_sdev = NULL;
54
55struct sun_floppy_ops {
56 unsigned char (*fd_inb) (unsigned long port);
57 void (*fd_outb) (unsigned char value, unsigned long port);
58 void (*fd_enable_dma) (void);
59 void (*fd_disable_dma) (void);
60 void (*fd_set_dma_mode) (int);
61 void (*fd_set_dma_addr) (char *);
62 void (*fd_set_dma_count) (int);
63 unsigned int (*get_dma_residue) (void);
64 int (*fd_request_irq) (void);
65 void (*fd_free_irq) (void);
66 int (*fd_eject) (int);
67};
68
69static struct sun_floppy_ops sun_fdops;
70
71#define fd_inb(port) sun_fdops.fd_inb(port)
72#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
73#define fd_enable_dma() sun_fdops.fd_enable_dma()
74#define fd_disable_dma() sun_fdops.fd_disable_dma()
75#define fd_request_dma() (0) /* nothing... */
76#define fd_free_dma() /* nothing... */
77#define fd_clear_dma_ff() /* nothing... */
78#define fd_set_dma_mode(mode) sun_fdops.fd_set_dma_mode(mode)
79#define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr)
80#define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count)
81#define get_dma_residue(x) sun_fdops.get_dma_residue()
82#define fd_cacheflush(addr, size) /* nothing... */
83#define fd_request_irq() sun_fdops.fd_request_irq()
84#define fd_free_irq() sun_fdops.fd_free_irq()
85#define fd_eject(drive) sun_fdops.fd_eject(drive)
86
87/* Super paranoid... */
88#undef HAVE_DISABLE_HLT
89
90static int sun_floppy_types[2] = { 0, 0 };
91
92/* Here is where we catch the floppy driver trying to initialize,
93 * therefore this is where we call the PROM device tree probing
94 * routine etc. on the Sparc.
95 */
96#define FLOPPY0_TYPE sun_floppy_init()
97#define FLOPPY1_TYPE sun_floppy_types[1]
98
99#define FDC1 ((unsigned long)sun_fdc)
100
101#define N_FDC 1
102#define N_DRIVE 8
103
104/* No 64k boundary crossing problems on the Sparc. */
105#define CROSS_64KB(a,s) (0)
106
107static unsigned char sun_82077_fd_inb(unsigned long port)
108{
109 udelay(5);
110 switch(port & 7) {
111 default:
112 printk("floppy: Asked to read unknown port %lx\n", port);
113 panic("floppy: Port bolixed.");
114 case 4: /* FD_STATUS */
115 return sbus_readb(&sun_fdc->status_82077) & ~STATUS_DMA;
116 case 5: /* FD_DATA */
117 return sbus_readb(&sun_fdc->data_82077);
118 case 7: /* FD_DIR */
119 /* XXX: Is DCL on 0x80 in sun4m? */
120 return sbus_readb(&sun_fdc->dir_82077);
121 };
122 panic("sun_82072_fd_inb: How did I get here?");
123}
124
125static void sun_82077_fd_outb(unsigned char value, unsigned long port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to write to unknown port %lx\n", port);
131 panic("floppy: Port bolixed.");
132 case 2: /* FD_DOR */
133 /* Happily, the 82077 has a real DOR register. */
134 sbus_writeb(value, &sun_fdc->dor_82077);
135 break;
136 case 5: /* FD_DATA */
137 sbus_writeb(value, &sun_fdc->data_82077);
138 break;
139 case 7: /* FD_DCR */
140 sbus_writeb(value, &sun_fdc->dcr_82077);
141 break;
142 case 4: /* FD_STATUS */
143 sbus_writeb(value, &sun_fdc->status_82077);
144 break;
145 };
146 return;
147}
148
149/* For pseudo-dma (Sun floppy drives have no real DMA available to
150 * them so we must eat the data fifo bytes directly ourselves) we have
151 * three state variables. doing_pdma tells our inline low-level
152 * assembly floppy interrupt entry point whether it should sit and eat
153 * bytes from the fifo or just transfer control up to the higher level
154 * floppy interrupt c-code. I tried very hard but I could not get the
155 * pseudo-dma to work in c-code without getting many overruns and
156 * underruns. If non-zero, doing_pdma encodes the direction of
157 * the transfer for debugging. 1=read 2=write
158 */
159unsigned char *pdma_vaddr;
160unsigned long pdma_size;
161volatile int doing_pdma = 0;
162
163/* This is software state */
164char *pdma_base = NULL;
165unsigned long pdma_areasize;
166
167/* Common routines to all controller types on the Sparc. */
168static void sun_fd_disable_dma(void)
169{
170 doing_pdma = 0;
171 if (pdma_base) {
172 mmu_unlockarea(pdma_base, pdma_areasize);
173 pdma_base = NULL;
174 }
175}
176
177static void sun_fd_set_dma_mode(int mode)
178{
179 switch(mode) {
180 case DMA_MODE_READ:
181 doing_pdma = 1;
182 break;
183 case DMA_MODE_WRITE:
184 doing_pdma = 2;
185 break;
186 default:
187 printk("Unknown dma mode %d\n", mode);
188 panic("floppy: Giving up...");
189 }
190}
191
192static void sun_fd_set_dma_addr(char *buffer)
193{
194 pdma_vaddr = buffer;
195}
196
197static void sun_fd_set_dma_count(int length)
198{
199 pdma_size = length;
200}
201
202static void sun_fd_enable_dma(void)
203{
204 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
205 pdma_base = pdma_vaddr;
206 pdma_areasize = pdma_size;
207}
208
209irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie)
210{
211 if (likely(doing_pdma)) {
212 void __iomem *stat = (void __iomem *) fdc_status;
213 unsigned char *vaddr = pdma_vaddr;
214 unsigned long size = pdma_size;
215 u8 val;
216
217 while (size) {
218 val = readb(stat);
219 if (unlikely(!(val & 0x80))) {
220 pdma_vaddr = vaddr;
221 pdma_size = size;
222 return IRQ_HANDLED;
223 }
224 if (unlikely(!(val & 0x20))) {
225 pdma_vaddr = vaddr;
226 pdma_size = size;
227 doing_pdma = 0;
228 goto main_interrupt;
229 }
230 if (val & 0x40) {
231 /* read */
232 *vaddr++ = readb(stat + 1);
233 } else {
234 unsigned char data = *vaddr++;
235
236 /* write */
237 writeb(data, stat + 1);
238 }
239 size--;
240 }
241
242 pdma_vaddr = vaddr;
243 pdma_size = size;
244
245 /* Send Terminal Count pulse to floppy controller. */
246 val = readb(auxio_register);
247 val |= AUXIO_AUX1_FTCNT;
248 writeb(val, auxio_register);
249 val &= ~AUXIO_AUX1_FTCNT;
250 writeb(val, auxio_register);
251
252 doing_pdma = 0;
253 }
254
255main_interrupt:
256 return floppy_interrupt(irq, dev_cookie);
257}
258
259static int sun_fd_request_irq(void)
260{
261 static int once = 0;
262 int error;
263
264 if(!once) {
265 once = 1;
266
267 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
268 IRQF_DISABLED, "floppy", NULL);
269
270 return ((error == 0) ? 0 : -1);
271 }
272 return 0;
273}
274
275static void sun_fd_free_irq(void)
276{
277}
278
279static unsigned int sun_get_dma_residue(void)
280{
281 /* XXX This isn't really correct. XXX */
282 return 0;
283}
284
285static int sun_fd_eject(int drive)
286{
287 set_dor(0x00, 0xff, 0x90);
288 udelay(500);
289 set_dor(0x00, 0x6f, 0x00);
290 udelay(500);
291 return 0;
292}
293
294#ifdef CONFIG_PCI
295#include <asm/ebus.h>
296#include <asm/ns87303.h>
297
298static struct ebus_dma_info sun_pci_fd_ebus_dma;
299static struct pci_dev *sun_pci_ebus_dev;
300static int sun_pci_broken_drive = -1;
301
302struct sun_pci_dma_op {
303 unsigned int addr;
304 int len;
305 int direction;
306 char *buf;
307};
308static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
309static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
310
311extern irqreturn_t floppy_interrupt(int irq, void *dev_id);
312
313static unsigned char sun_pci_fd_inb(unsigned long port)
314{
315 udelay(5);
316 return inb(port);
317}
318
319static void sun_pci_fd_outb(unsigned char val, unsigned long port)
320{
321 udelay(5);
322 outb(val, port);
323}
324
325static void sun_pci_fd_broken_outb(unsigned char val, unsigned long port)
326{
327 udelay(5);
328 /*
329 * XXX: Due to SUN's broken floppy connector on AX and AXi
330 * we need to turn on MOTOR_0 also, if the floppy is
331 * jumpered to DS1 (like most PC floppies are). I hope
332 * this does not hurt correct hardware like the AXmp.
333 * (Eddie, Sep 12 1998).
334 */
335 if (port == ((unsigned long)sun_fdc) + 2) {
336 if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x20)) {
337 val |= 0x10;
338 }
339 }
340 outb(val, port);
341}
342
343#ifdef PCI_FDC_SWAP_DRIVES
344static void sun_pci_fd_lde_broken_outb(unsigned char val, unsigned long port)
345{
346 udelay(5);
347 /*
348 * XXX: Due to SUN's broken floppy connector on AX and AXi
349 * we need to turn on MOTOR_0 also, if the floppy is
350 * jumpered to DS1 (like most PC floppies are). I hope
351 * this does not hurt correct hardware like the AXmp.
352 * (Eddie, Sep 12 1998).
353 */
354 if (port == ((unsigned long)sun_fdc) + 2) {
355 if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x10)) {
356 val &= ~(0x03);
357 val |= 0x21;
358 }
359 }
360 outb(val, port);
361}
362#endif /* PCI_FDC_SWAP_DRIVES */
363
364static void sun_pci_fd_enable_dma(void)
365{
366 BUG_ON((NULL == sun_pci_dma_pending.buf) ||
367 (0 == sun_pci_dma_pending.len) ||
368 (0 == sun_pci_dma_pending.direction));
369
370 sun_pci_dma_current.buf = sun_pci_dma_pending.buf;
371 sun_pci_dma_current.len = sun_pci_dma_pending.len;
372 sun_pci_dma_current.direction = sun_pci_dma_pending.direction;
373
374 sun_pci_dma_pending.buf = NULL;
375 sun_pci_dma_pending.len = 0;
376 sun_pci_dma_pending.direction = 0;
377 sun_pci_dma_pending.addr = -1U;
378
379 sun_pci_dma_current.addr =
380 pci_map_single(sun_pci_ebus_dev,
381 sun_pci_dma_current.buf,
382 sun_pci_dma_current.len,
383 sun_pci_dma_current.direction);
384
385 ebus_dma_enable(&sun_pci_fd_ebus_dma, 1);
386
387 if (ebus_dma_request(&sun_pci_fd_ebus_dma,
388 sun_pci_dma_current.addr,
389 sun_pci_dma_current.len))
390 BUG();
391}
392
393static void sun_pci_fd_disable_dma(void)
394{
395 ebus_dma_enable(&sun_pci_fd_ebus_dma, 0);
396 if (sun_pci_dma_current.addr != -1U)
397 pci_unmap_single(sun_pci_ebus_dev,
398 sun_pci_dma_current.addr,
399 sun_pci_dma_current.len,
400 sun_pci_dma_current.direction);
401 sun_pci_dma_current.addr = -1U;
402}
403
404static void sun_pci_fd_set_dma_mode(int mode)
405{
406 if (mode == DMA_MODE_WRITE)
407 sun_pci_dma_pending.direction = PCI_DMA_TODEVICE;
408 else
409 sun_pci_dma_pending.direction = PCI_DMA_FROMDEVICE;
410
411 ebus_dma_prepare(&sun_pci_fd_ebus_dma, mode != DMA_MODE_WRITE);
412}
413
414static void sun_pci_fd_set_dma_count(int length)
415{
416 sun_pci_dma_pending.len = length;
417}
418
419static void sun_pci_fd_set_dma_addr(char *buffer)
420{
421 sun_pci_dma_pending.buf = buffer;
422}
423
424static unsigned int sun_pci_get_dma_residue(void)
425{
426 return ebus_dma_residue(&sun_pci_fd_ebus_dma);
427}
428
429static int sun_pci_fd_request_irq(void)
430{
431 return ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 1);
432}
433
434static void sun_pci_fd_free_irq(void)
435{
436 ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 0);
437}
438
439static int sun_pci_fd_eject(int drive)
440{
441 return -EINVAL;
442}
443
444void sun_pci_fd_dma_callback(struct ebus_dma_info *p, int event, void *cookie)
445{
446 floppy_interrupt(0, NULL);
447}
448
449/*
450 * Floppy probing, we'd like to use /dev/fd0 for a single Floppy on PCI,
451 * even if this is configured using DS1, thus looks like /dev/fd1 with
452 * the cabling used in Ultras.
453 */
454#define DOR (port + 2)
455#define MSR (port + 4)
456#define FIFO (port + 5)
457
458static void sun_pci_fd_out_byte(unsigned long port, unsigned char val,
459 unsigned long reg)
460{
461 unsigned char status;
462 int timeout = 1000;
463
464 while (!((status = inb(MSR)) & 0x80) && --timeout)
465 udelay(100);
466 outb(val, reg);
467}
468
469static unsigned char sun_pci_fd_sensei(unsigned long port)
470{
471 unsigned char result[2] = { 0x70, 0x00 };
472 unsigned char status;
473 int i = 0;
474
475 sun_pci_fd_out_byte(port, 0x08, FIFO);
476 do {
477 int timeout = 1000;
478
479 while (!((status = inb(MSR)) & 0x80) && --timeout)
480 udelay(100);
481
482 if (!timeout)
483 break;
484
485 if ((status & 0xf0) == 0xd0)
486 result[i++] = inb(FIFO);
487 else
488 break;
489 } while (i < 2);
490
491 return result[0];
492}
493
494static void sun_pci_fd_reset(unsigned long port)
495{
496 unsigned char mask = 0x00;
497 unsigned char status;
498 int timeout = 10000;
499
500 outb(0x80, MSR);
501 do {
502 status = sun_pci_fd_sensei(port);
503 if ((status & 0xc0) == 0xc0)
504 mask |= 1 << (status & 0x03);
505 else
506 udelay(100);
507 } while ((mask != 0x0f) && --timeout);
508}
509
510static int sun_pci_fd_test_drive(unsigned long port, int drive)
511{
512 unsigned char status, data;
513 int timeout = 1000;
514 int ready;
515
516 sun_pci_fd_reset(port);
517
518 data = (0x10 << drive) | 0x0c | drive;
519 sun_pci_fd_out_byte(port, data, DOR);
520
521 sun_pci_fd_out_byte(port, 0x07, FIFO);
522 sun_pci_fd_out_byte(port, drive & 0x03, FIFO);
523
524 do {
525 udelay(100);
526 status = sun_pci_fd_sensei(port);
527 } while (((status & 0xc0) == 0x80) && --timeout);
528
529 if (!timeout)
530 ready = 0;
531 else
532 ready = (status & 0x10) ? 0 : 1;
533
534 sun_pci_fd_reset(port);
535 return ready;
536}
537#undef FIFO
538#undef MSR
539#undef DOR
540
541#endif /* CONFIG_PCI */
542
543#ifdef CONFIG_PCI
544static int __init ebus_fdthree_p(struct linux_ebus_device *edev)
545{
546 if (!strcmp(edev->prom_node->name, "fdthree"))
547 return 1;
548 if (!strcmp(edev->prom_node->name, "floppy")) {
549 const char *compat;
550
551 compat = of_get_property(edev->prom_node,
552 "compatible", NULL);
553 if (compat && !strcmp(compat, "fdthree"))
554 return 1;
555 }
556 return 0;
557}
558#endif
559
560static unsigned long __init sun_floppy_init(void)
561{
562 char state[128];
563 struct sbus_bus *bus;
564 struct sbus_dev *sdev = NULL;
565 static int initialized = 0;
566
567 if (initialized)
568 return sun_floppy_types[0];
569 initialized = 1;
570
571 for_all_sbusdev (sdev, bus) {
572 if (!strcmp(sdev->prom_name, "SUNW,fdtwo"))
573 break;
574 }
575 if(sdev) {
576 floppy_sdev = sdev;
577 FLOPPY_IRQ = sdev->irqs[0];
578 } else {
579#ifdef CONFIG_PCI
580 struct linux_ebus *ebus;
581 struct linux_ebus_device *edev = NULL;
582 unsigned long config = 0;
583 void __iomem *auxio_reg;
584 const char *state_prop;
585
586 for_each_ebus(ebus) {
587 for_each_ebusdev(edev, ebus) {
588 if (ebus_fdthree_p(edev))
589 goto ebus_done;
590 }
591 }
592 ebus_done:
593 if (!edev)
594 return 0;
595
596 state_prop = of_get_property(edev->prom_node, "status", NULL);
597 if (state_prop && !strncmp(state_prop, "disabled", 8))
598 return 0;
599
600 FLOPPY_IRQ = edev->irqs[0];
601
602 /* Make sure the high density bit is set, some systems
603 * (most notably Ultra5/Ultra10) come up with it clear.
604 */
605 auxio_reg = (void __iomem *) edev->resource[2].start;
606 writel(readl(auxio_reg)|0x2, auxio_reg);
607
608 sun_pci_ebus_dev = ebus->self;
609
610 spin_lock_init(&sun_pci_fd_ebus_dma.lock);
611
612 /* XXX ioremap */
613 sun_pci_fd_ebus_dma.regs = (void __iomem *)
614 edev->resource[1].start;
615 if (!sun_pci_fd_ebus_dma.regs)
616 return 0;
617
618 sun_pci_fd_ebus_dma.flags = (EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
619 EBUS_DMA_FLAG_TCI_DISABLE);
620 sun_pci_fd_ebus_dma.callback = sun_pci_fd_dma_callback;
621 sun_pci_fd_ebus_dma.client_cookie = NULL;
622 sun_pci_fd_ebus_dma.irq = FLOPPY_IRQ;
623 strcpy(sun_pci_fd_ebus_dma.name, "floppy");
624 if (ebus_dma_register(&sun_pci_fd_ebus_dma))
625 return 0;
626
627 /* XXX ioremap */
628 sun_fdc = (struct sun_flpy_controller *)edev->resource[0].start;
629
630 sun_fdops.fd_inb = sun_pci_fd_inb;
631 sun_fdops.fd_outb = sun_pci_fd_outb;
632
633 can_use_virtual_dma = use_virtual_dma = 0;
634 sun_fdops.fd_enable_dma = sun_pci_fd_enable_dma;
635 sun_fdops.fd_disable_dma = sun_pci_fd_disable_dma;
636 sun_fdops.fd_set_dma_mode = sun_pci_fd_set_dma_mode;
637 sun_fdops.fd_set_dma_addr = sun_pci_fd_set_dma_addr;
638 sun_fdops.fd_set_dma_count = sun_pci_fd_set_dma_count;
639 sun_fdops.get_dma_residue = sun_pci_get_dma_residue;
640
641 sun_fdops.fd_request_irq = sun_pci_fd_request_irq;
642 sun_fdops.fd_free_irq = sun_pci_fd_free_irq;
643
644 sun_fdops.fd_eject = sun_pci_fd_eject;
645
646 fdc_status = (unsigned long) &sun_fdc->status_82077;
647
648 /*
649 * XXX: Find out on which machines this is really needed.
650 */
651 if (1) {
652 sun_pci_broken_drive = 1;
653 sun_fdops.fd_outb = sun_pci_fd_broken_outb;
654 }
655
656 allowed_drive_mask = 0;
657 if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 0))
658 sun_floppy_types[0] = 4;
659 if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 1))
660 sun_floppy_types[1] = 4;
661
662 /*
663 * Find NS87303 SuperIO config registers (through ecpp).
664 */
665 for_each_ebus(ebus) {
666 for_each_ebusdev(edev, ebus) {
667 if (!strcmp(edev->prom_node->name, "ecpp")) {
668 config = edev->resource[1].start;
669 goto config_done;
670 }
671 }
672 }
673 config_done:
674
675 /*
676 * Sanity check, is this really the NS87303?
677 */
678 switch (config & 0x3ff) {
679 case 0x02e:
680 case 0x15c:
681 case 0x26e:
682 case 0x398:
683 break;
684 default:
685 config = 0;
686 }
687
688 if (!config)
689 return sun_floppy_types[0];
690
691 /* Enable PC-AT mode. */
692 ns87303_modify(config, ASC, 0, 0xc0);
693
694#ifdef PCI_FDC_SWAP_DRIVES
695 /*
696 * If only Floppy 1 is present, swap drives.
697 */
698 if (!sun_floppy_types[0] && sun_floppy_types[1]) {
699 /*
700 * Set the drive exchange bit in FCR on NS87303,
701 * make sure other bits are sane before doing so.
702 */
703 ns87303_modify(config, FER, FER_EDM, 0);
704 ns87303_modify(config, ASC, ASC_DRV2_SEL, 0);
705 ns87303_modify(config, FCR, 0, FCR_LDE);
706
707 config = sun_floppy_types[0];
708 sun_floppy_types[0] = sun_floppy_types[1];
709 sun_floppy_types[1] = config;
710
711 if (sun_pci_broken_drive != -1) {
712 sun_pci_broken_drive = 1 - sun_pci_broken_drive;
713 sun_fdops.fd_outb = sun_pci_fd_lde_broken_outb;
714 }
715 }
716#endif /* PCI_FDC_SWAP_DRIVES */
717
718 return sun_floppy_types[0];
719#else
720 return 0;
721#endif
722 }
723 prom_getproperty(sdev->prom_node, "status", state, sizeof(state));
724 if(!strncmp(state, "disabled", 8))
725 return 0;
726
727 /*
728 * We cannot do sbus_ioremap here: it does request_region,
729 * which the generic floppy driver tries to do once again.
730 * But we must use the sdev resource values as they have
731 * had parent ranges applied.
732 */
733 sun_fdc = (struct sun_flpy_controller *)
734 (sdev->resource[0].start +
735 ((sdev->resource[0].flags & 0x1ffUL) << 32UL));
736
737 /* Last minute sanity check... */
738 if(sbus_readb(&sun_fdc->status1_82077) == 0xff) {
739 sun_fdc = (struct sun_flpy_controller *)-1;
740 return 0;
741 }
742
743 sun_fdops.fd_inb = sun_82077_fd_inb;
744 sun_fdops.fd_outb = sun_82077_fd_outb;
745
746 can_use_virtual_dma = use_virtual_dma = 1;
747 sun_fdops.fd_enable_dma = sun_fd_enable_dma;
748 sun_fdops.fd_disable_dma = sun_fd_disable_dma;
749 sun_fdops.fd_set_dma_mode = sun_fd_set_dma_mode;
750 sun_fdops.fd_set_dma_addr = sun_fd_set_dma_addr;
751 sun_fdops.fd_set_dma_count = sun_fd_set_dma_count;
752 sun_fdops.get_dma_residue = sun_get_dma_residue;
753
754 sun_fdops.fd_request_irq = sun_fd_request_irq;
755 sun_fdops.fd_free_irq = sun_fd_free_irq;
756
757 sun_fdops.fd_eject = sun_fd_eject;
758
759 fdc_status = (unsigned long) &sun_fdc->status_82077;
760
761 /* Success... */
762 allowed_drive_mask = 0x01;
763 sun_floppy_types[0] = 4;
764 sun_floppy_types[1] = 0;
765
766 return sun_floppy_types[0];
767}
768
769#define EXTRA_FLOPPY_PARAMS
770
771static DEFINE_SPINLOCK(dma_spin_lock);
772
773#define claim_dma_lock() \
774({ unsigned long flags; \
775 spin_lock_irqsave(&dma_spin_lock, flags); \
776 flags; \
777})
778
779#define release_dma_lock(__flags) \
780 spin_unlock_irqrestore(&dma_spin_lock, __flags);
781
782#endif /* !(__ASM_SPARC64_FLOPPY_H) */
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index d8378935ae90..1ceb0bb2fe53 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,110 +1 @@
+#include <asm-sparc/futex.h>
1#ifndef _SPARC64_FUTEX_H
2#define _SPARC64_FUTEX_H
3
4#include <linux/futex.h>
5#include <linux/uaccess.h>
6#include <asm/errno.h>
7#include <asm/system.h>
8
9#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
10 __asm__ __volatile__( \
11 "\n1: lduwa [%3] %%asi, %2\n" \
12 " " insn "\n" \
13 "2: casa [%3] %%asi, %2, %1\n" \
14 " cmp %2, %1\n" \
15 " bne,pn %%icc, 1b\n" \
16 " mov 0, %0\n" \
17 "3:\n" \
18 " .section .fixup,#alloc,#execinstr\n" \
19 " .align 4\n" \
20 "4: sethi %%hi(3b), %0\n" \
21 " jmpl %0 + %%lo(3b), %%g0\n" \
22 " mov %5, %0\n" \
23 " .previous\n" \
24 " .section __ex_table,\"a\"\n" \
25 " .align 4\n" \
26 " .word 1b, 4b\n" \
27 " .word 2b, 4b\n" \
28 " .previous\n" \
29 : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
30 : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
31 : "memory")
32
33static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
34{
35 int op = (encoded_op >> 28) & 7;
36 int cmp = (encoded_op >> 24) & 15;
37 int oparg = (encoded_op << 8) >> 20;
38 int cmparg = (encoded_op << 20) >> 20;
39 int oldval = 0, ret, tem;
40
41 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
42 return -EFAULT;
43 if (unlikely((((unsigned long) uaddr) & 0x3UL)))
44 return -EINVAL;
45
46 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
47 oparg = 1 << oparg;
48
49 pagefault_disable();
50
51 switch (op) {
52 case FUTEX_OP_SET:
53 __futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
54 break;
55 case FUTEX_OP_ADD:
56 __futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
57 break;
58 case FUTEX_OP_OR:
59 __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
60 break;
61 case FUTEX_OP_ANDN:
62 __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
63 break;
64 case FUTEX_OP_XOR:
65 __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
66 break;
67 default:
68 ret = -ENOSYS;
69 }
70
71 pagefault_enable();
72
73 if (!ret) {
74 switch (cmp) {
75 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
76 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
77 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
78 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
79 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
80 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
81 default: ret = -ENOSYS;
82 }
83 }
84 return ret;
85}
86
87static inline int
88futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
89{
90 __asm__ __volatile__(
91 "\n1: casa [%3] %%asi, %2, %0\n"
92 "2:\n"
93 " .section .fixup,#alloc,#execinstr\n"
94 " .align 4\n"
95 "3: sethi %%hi(2b), %0\n"
96 " jmpl %0 + %%lo(2b), %%g0\n"
97 " mov %4, %0\n"
98 " .previous\n"
99 " .section __ex_table,\"a\"\n"
100 " .align 4\n"
101 " .word 1b, 3b\n"
102 " .previous\n"
103 : "=r" (newval)
104 : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
105 : "memory");
106
107 return newval;
108}
109
110#endif /* !(_SPARC64_FUTEX_H) */
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index 7c29fd1a87aa..63dca3db11f3 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -1,19 +1 @@
+#include <asm-sparc/hardirq.h>
1/* hardirq.h: 64-bit Sparc hard IRQ support.
2 *
3 * Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef __SPARC64_HARDIRQ_H
7#define __SPARC64_HARDIRQ_H
8
9#include <asm/cpudata.h>
10
11#define __ARCH_IRQ_STAT
12#define local_softirq_pending() \
13 (local_cpu_data().__softirq_pending)
14
15void ack_bad_irq(unsigned int irq);
16
17#define HARDIRQ_BITS 8
18
19#endif /* !(__SPARC64_HARDIRQ_H) */
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 10e9dabc4c41..2254c09e53f9 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -1,76 +1 @@
+#include <asm-sparc/head.h>
1#ifndef _SPARC64_HEAD_H
2#define _SPARC64_HEAD_H
3
4#include <asm/pstate.h>
5
6 /* wrpr %g0, val, %gl */
7#define SET_GL(val) \
8 .word 0xa1902000 | val
9
10 /* rdpr %gl, %gN */
11#define GET_GL_GLOBAL(N) \
12 .word 0x81540000 | (N << 25)
13
14#define KERNBASE 0x400000
15
16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
17
18#define __CHEETAH_ID 0x003e0014
19#define __JALAPENO_ID 0x003e0016
20#define __SERRANO_ID 0x003e0022
21
22#define CHEETAH_MANUF 0x003e
23#define CHEETAH_IMPL 0x0014 /* Ultra-III */
24#define CHEETAH_PLUS_IMPL 0x0015 /* Ultra-III+ */
25#define JALAPENO_IMPL 0x0016 /* Ultra-IIIi */
26#define JAGUAR_IMPL 0x0018 /* Ultra-IV */
27#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
28#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
29
30#define BRANCH_IF_SUN4V(tmp1,label) \
31 sethi %hi(is_sun4v), %tmp1; \
32 lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \
33 brnz,pn %tmp1, label; \
34 nop
35
36#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
37 rdpr %ver, %tmp1; \
38 sethi %hi(__CHEETAH_ID), %tmp2; \
39 srlx %tmp1, 32, %tmp1; \
40 or %tmp2, %lo(__CHEETAH_ID), %tmp2;\
41 cmp %tmp1, %tmp2; \
42 be,pn %icc, label; \
43 nop;
44
45#define BRANCH_IF_JALAPENO(tmp1,tmp2,label) \
46 rdpr %ver, %tmp1; \
47 sethi %hi(__JALAPENO_ID), %tmp2; \
48 srlx %tmp1, 32, %tmp1; \
49 or %tmp2, %lo(__JALAPENO_ID), %tmp2;\
50 cmp %tmp1, %tmp2; \
51 be,pn %icc, label; \
52 nop;
53
54#define BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(tmp1,tmp2,label) \
55 rdpr %ver, %tmp1; \
56 srlx %tmp1, (32 + 16), %tmp2; \
57 cmp %tmp2, CHEETAH_MANUF; \
58 bne,pt %xcc, 99f; \
59 sllx %tmp1, 16, %tmp1; \
60 srlx %tmp1, (32 + 16), %tmp2; \
61 cmp %tmp2, CHEETAH_PLUS_IMPL; \
62 bgeu,pt %xcc, label; \
6399: nop;
64
65#define BRANCH_IF_ANY_CHEETAH(tmp1,tmp2,label) \
66 rdpr %ver, %tmp1; \
67 srlx %tmp1, (32 + 16), %tmp2; \
68 cmp %tmp2, CHEETAH_MANUF; \
69 bne,pt %xcc, 99f; \
70 sllx %tmp1, 16, %tmp1; \
71 srlx %tmp1, (32 + 16), %tmp2; \
72 cmp %tmp2, CHEETAH_IMPL; \
73 bgeu,pt %xcc, label; \
7499: nop;
75
76#endif /* !(_SPARC64_HEAD_H) */
diff --git a/include/asm-sparc64/ide.h b/include/asm-sparc64/ide.h
index 1282676da1cd..7125317a428d 100644
--- a/include/asm-sparc64/ide.h
+++ b/include/asm-sparc64/ide.h
@@ -1,118 +1 @@
+#include <asm-sparc/ide.h>
1/*
2 * ide.h: Ultra/PCI specific IDE glue.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
6 */
7
8#ifndef _SPARC64_IDE_H
9#define _SPARC64_IDE_H
10
11#ifdef __KERNEL__
12
13#include <asm/pgalloc.h>
14#include <asm/io.h>
15#include <asm/spitfire.h>
16#include <asm/cacheflush.h>
17#include <asm/page.h>
18
19#ifndef MAX_HWIFS
20# ifdef CONFIG_BLK_DEV_IDEPCI
21#define MAX_HWIFS 10
22# else
23#define MAX_HWIFS 2
24# endif
25#endif
26
27#define __ide_insl(data_reg, buffer, wcount) \
28 __ide_insw(data_reg, buffer, (wcount)<<1)
29#define __ide_outsl(data_reg, buffer, wcount) \
30 __ide_outsw(data_reg, buffer, (wcount)<<1)
31
32/* On sparc64, I/O ports and MMIO registers are accessed identically. */
33#define __ide_mm_insw __ide_insw
34#define __ide_mm_insl __ide_insl
35#define __ide_mm_outsw __ide_outsw
36#define __ide_mm_outsl __ide_outsl
37
38static inline unsigned int inw_be(void __iomem *addr)
39{
40 unsigned int ret;
41
42 __asm__ __volatile__("lduha [%1] %2, %0"
43 : "=r" (ret)
44 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
45
46 return ret;
47}
48
49static inline void __ide_insw(void __iomem *port, void *dst, u32 count)
50{
51#ifdef DCACHE_ALIASING_POSSIBLE
52 unsigned long end = (unsigned long)dst + (count << 1);
53#endif
54 u16 *ps = dst;
55 u32 *pi;
56
57 if(((u64)ps) & 0x2) {
58 *ps++ = inw_be(port);
59 count--;
60 }
61 pi = (u32 *)ps;
62 while(count >= 2) {
63 u32 w;
64
65 w = inw_be(port) << 16;
66 w |= inw_be(port);
67 *pi++ = w;
68 count -= 2;
69 }
70 ps = (u16 *)pi;
71 if(count)
72 *ps++ = inw_be(port);
73
74#ifdef DCACHE_ALIASING_POSSIBLE
75 __flush_dcache_range((unsigned long)dst, end);
76#endif
77}
78
79static inline void outw_be(unsigned short w, void __iomem *addr)
80{
81 __asm__ __volatile__("stha %0, [%1] %2"
82 : /* no outputs */
83 : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
84}
85
86static inline void __ide_outsw(void __iomem *port, void *src, u32 count)
87{
88#ifdef DCACHE_ALIASING_POSSIBLE
89 unsigned long end = (unsigned long)src + (count << 1);
90#endif
91 const u16 *ps = src;
92 const u32 *pi;
93
94 if(((u64)src) & 0x2) {
95 outw_be(*ps++, port);
96 count--;
97 }
98 pi = (const u32 *)ps;
99 while(count >= 2) {
100 u32 w;
101
102 w = *pi++;
103 outw_be((w >> 16), port);
104 outw_be(w, port);
105 count -= 2;
106 }
107 ps = (const u16 *)pi;
108 if(count)
109 outw_be(*ps, port);
110
111#ifdef DCACHE_ALIASING_POSSIBLE
112 __flush_dcache_range((unsigned long)src, end);
113#endif
114}
115
116#endif /* __KERNEL__ */
117
118#endif /* _SPARC64_IDE_H */
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index 3158960f3eb5..25ff258dfd33 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,511 +1 @@
+#include <asm-sparc/io.h>
1#ifndef __SPARC64_IO_H
2#define __SPARC64_IO_H
3
4#include <linux/kernel.h>
5#include <linux/compiler.h>
6#include <linux/types.h>
7
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10#include <asm/asi.h>
11
12/* PC crapola... */
13#define __SLOW_DOWN_IO do { } while (0)
14#define SLOW_DOWN_IO do { } while (0)
15
16/* BIO layer definitions. */
17extern unsigned long kern_base, kern_size;
18#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
19#define BIO_VMERGE_BOUNDARY 8192
20
21static inline u8 _inb(unsigned long addr)
22{
23 u8 ret;
24
25 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */"
26 : "=r" (ret)
27 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
28 : "memory");
29
30 return ret;
31}
32
33static inline u16 _inw(unsigned long addr)
34{
35 u16 ret;
36
37 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */"
38 : "=r" (ret)
39 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
40 : "memory");
41
42 return ret;
43}
44
45static inline u32 _inl(unsigned long addr)
46{
47 u32 ret;
48
49 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */"
50 : "=r" (ret)
51 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
52 : "memory");
53
54 return ret;
55}
56
57static inline void _outb(u8 b, unsigned long addr)
58{
59 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */"
60 : /* no outputs */
61 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
62 : "memory");
63}
64
65static inline void _outw(u16 w, unsigned long addr)
66{
67 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */"
68 : /* no outputs */
69 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
70 : "memory");
71}
72
73static inline void _outl(u32 l, unsigned long addr)
74{
75 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */"
76 : /* no outputs */
77 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
78 : "memory");
79}
80
81#define inb(__addr) (_inb((unsigned long)(__addr)))
82#define inw(__addr) (_inw((unsigned long)(__addr)))
83#define inl(__addr) (_inl((unsigned long)(__addr)))
84#define outb(__b, __addr) (_outb((u8)(__b), (unsigned long)(__addr)))
85#define outw(__w, __addr) (_outw((u16)(__w), (unsigned long)(__addr)))
86#define outl(__l, __addr) (_outl((u32)(__l), (unsigned long)(__addr)))
87
88#define inb_p(__addr) inb(__addr)
89#define outb_p(__b, __addr) outb(__b, __addr)
90#define inw_p(__addr) inw(__addr)
91#define outw_p(__w, __addr) outw(__w, __addr)
92#define inl_p(__addr) inl(__addr)
93#define outl_p(__l, __addr) outl(__l, __addr)
94
95extern void outsb(unsigned long, const void *, unsigned long);
96extern void outsw(unsigned long, const void *, unsigned long);
97extern void outsl(unsigned long, const void *, unsigned long);
98extern void insb(unsigned long, void *, unsigned long);
99extern void insw(unsigned long, void *, unsigned long);
100extern void insl(unsigned long, void *, unsigned long);
101
102static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
103{
104 insb((unsigned long __force)port, buf, count);
105}
106static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
107{
108 insw((unsigned long __force)port, buf, count);
109}
110
111static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
112{
113 insl((unsigned long __force)port, buf, count);
114}
115
116static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
117{
118 outsb((unsigned long __force)port, buf, count);
119}
120
121static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
122{
123 outsw((unsigned long __force)port, buf, count);
124}
125
126static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
127{
128 outsl((unsigned long __force)port, buf, count);
129}
130
131/* Memory functions, same as I/O accesses on Ultra. */
132static inline u8 _readb(const volatile void __iomem *addr)
133{ u8 ret;
134
135 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
136 : "=r" (ret)
137 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
138 : "memory");
139 return ret;
140}
141
142static inline u16 _readw(const volatile void __iomem *addr)
143{ u16 ret;
144
145 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
146 : "=r" (ret)
147 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
148 : "memory");
149
150 return ret;
151}
152
153static inline u32 _readl(const volatile void __iomem *addr)
154{ u32 ret;
155
156 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
157 : "=r" (ret)
158 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
159 : "memory");
160
161 return ret;
162}
163
164static inline u64 _readq(const volatile void __iomem *addr)
165{ u64 ret;
166
167 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
168 : "=r" (ret)
169 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
170 : "memory");
171
172 return ret;
173}
174
175static inline void _writeb(u8 b, volatile void __iomem *addr)
176{
177 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
178 : /* no outputs */
179 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
180 : "memory");
181}
182
183static inline void _writew(u16 w, volatile void __iomem *addr)
184{
185 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
186 : /* no outputs */
187 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
188 : "memory");
189}
190
191static inline void _writel(u32 l, volatile void __iomem *addr)
192{
193 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
194 : /* no outputs */
195 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
196 : "memory");
197}
198
199static inline void _writeq(u64 q, volatile void __iomem *addr)
200{
201 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
202 : /* no outputs */
203 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
204 : "memory");
205}
206
207#define readb(__addr) _readb(__addr)
208#define readw(__addr) _readw(__addr)
209#define readl(__addr) _readl(__addr)
210#define readq(__addr) _readq(__addr)
211#define readb_relaxed(__addr) _readb(__addr)
212#define readw_relaxed(__addr) _readw(__addr)
213#define readl_relaxed(__addr) _readl(__addr)
214#define readq_relaxed(__addr) _readq(__addr)
215#define writeb(__b, __addr) _writeb(__b, __addr)
216#define writew(__w, __addr) _writew(__w, __addr)
217#define writel(__l, __addr) _writel(__l, __addr)
218#define writeq(__q, __addr) _writeq(__q, __addr)
219
220/* Now versions without byte-swapping. */
221static inline u8 _raw_readb(unsigned long addr)
222{
223 u8 ret;
224
225 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
226 : "=r" (ret)
227 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
228
229 return ret;
230}
231
232static inline u16 _raw_readw(unsigned long addr)
233{
234 u16 ret;
235
236 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
237 : "=r" (ret)
238 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
239
240 return ret;
241}
242
243static inline u32 _raw_readl(unsigned long addr)
244{
245 u32 ret;
246
247 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
248 : "=r" (ret)
249 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
250
251 return ret;
252}
253
254static inline u64 _raw_readq(unsigned long addr)
255{
256 u64 ret;
257
258 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
259 : "=r" (ret)
260 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
261
262 return ret;
263}
264
265static inline void _raw_writeb(u8 b, unsigned long addr)
266{
267 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
268 : /* no outputs */
269 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
270}
271
272static inline void _raw_writew(u16 w, unsigned long addr)
273{
274 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
275 : /* no outputs */
276 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
277}
278
279static inline void _raw_writel(u32 l, unsigned long addr)
280{
281 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
282 : /* no outputs */
283 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
284}
285
286static inline void _raw_writeq(u64 q, unsigned long addr)
287{
288 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
289 : /* no outputs */
290 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
291}
292
293#define __raw_readb(__addr) (_raw_readb((unsigned long)(__addr)))
294#define __raw_readw(__addr) (_raw_readw((unsigned long)(__addr)))
295#define __raw_readl(__addr) (_raw_readl((unsigned long)(__addr)))
296#define __raw_readq(__addr) (_raw_readq((unsigned long)(__addr)))
297#define __raw_writeb(__b, __addr) (_raw_writeb((u8)(__b), (unsigned long)(__addr)))
298#define __raw_writew(__w, __addr) (_raw_writew((u16)(__w), (unsigned long)(__addr)))
299#define __raw_writel(__l, __addr) (_raw_writel((u32)(__l), (unsigned long)(__addr)))
300#define __raw_writeq(__q, __addr) (_raw_writeq((u64)(__q), (unsigned long)(__addr)))
301
302/* Valid I/O space regions can be anywhere, because each supported PCI bus
303 * can live in an arbitrary area of the physical address range.
304 */
305#define IO_SPACE_LIMIT 0xffffffffffffffffUL
306
307/* Now the SBUS variants; the only difference from PCI is that we do
308 * not use little-endian ASIs.
309 */
310static inline u8 _sbus_readb(const volatile void __iomem *addr)
311{
312 u8 ret;
313
314 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */"
315 : "=r" (ret)
316 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
317 : "memory");
318
319 return ret;
320}
321
322static inline u16 _sbus_readw(const volatile void __iomem *addr)
323{
324 u16 ret;
325
326 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */"
327 : "=r" (ret)
328 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
329 : "memory");
330
331 return ret;
332}
333
334static inline u32 _sbus_readl(const volatile void __iomem *addr)
335{
336 u32 ret;
337
338 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */"
339 : "=r" (ret)
340 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
341 : "memory");
342
343 return ret;
344}
345
346static inline u64 _sbus_readq(const volatile void __iomem *addr)
347{
348 u64 ret;
349
350 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */"
351 : "=r" (ret)
352 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
353 : "memory");
354
355 return ret;
356}
357
358static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
359{
360 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
361 : /* no outputs */
362 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
363 : "memory");
364}
365
366static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
367{
368 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
369 : /* no outputs */
370 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
371 : "memory");
372}
373
374static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
375{
376 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
377 : /* no outputs */
378 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
379 : "memory");
380}
381
382static inline void _sbus_writeq(u64 l, volatile void __iomem *addr)
383{
384 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
385 : /* no outputs */
386 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
387 : "memory");
388}
389
390#define sbus_readb(__addr) _sbus_readb(__addr)
391#define sbus_readw(__addr) _sbus_readw(__addr)
392#define sbus_readl(__addr) _sbus_readl(__addr)
393#define sbus_readq(__addr) _sbus_readq(__addr)
394#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
395#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
396#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
397#define sbus_writeq(__l, __addr) _sbus_writeq(__l, __addr)
398
399static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
400{
401 while(n--) {
402 sbus_writeb(c, dst);
403 dst++;
404 }
405}
406
407#define sbus_memset_io(d,c,sz) _sbus_memset_io(d,c,sz)
408
409static inline void
410_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
411{
412 volatile void __iomem *d = dst;
413
414 while (n--) {
415 writeb(c, d);
416 d++;
417 }
418}
419
420#define memset_io(d,c,sz) _memset_io(d,c,sz)
421
422static inline void
423_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
424{
425 char *d = dst;
426
427 while (n--) {
428 char tmp = readb(src);
429 *d++ = tmp;
430 src++;
431 }
432}
433
434#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
435
436static inline void
437_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
438{
439 const char *s = src;
440 volatile void __iomem *d = dst;
441
442 while (n--) {
443 char tmp = *s++;
444 writeb(tmp, d);
445 d++;
446 }
447}
448
449#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
450
451#define mmiowb()
452
453#ifdef __KERNEL__
454
455/* On sparc64 we have the whole physical IO address space accessible
456 * using physically addressed loads and stores, so this does nothing.
457 */
458static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
459{
460 return (void __iomem *)offset;
461}
462
463#define ioremap_nocache(X,Y) ioremap((X),(Y))
464
465static inline void iounmap(volatile void __iomem *addr)
466{
467}
468
469#define ioread8(X) readb(X)
470#define ioread16(X) readw(X)
471#define ioread32(X) readl(X)
472#define iowrite8(val,X) writeb(val,X)
473#define iowrite16(val,X) writew(val,X)
474#define iowrite32(val,X) writel(val,X)
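A minimal usage sketch (the base address and register offsets below are made up for illustration): because ioremap() is effectively a cast on sparc64, the readl()/writel() wrappers above turn directly into the physically addressed loads and stores defined earlier in this file.

static u32 example_read_status(unsigned long phys_base)
{
	void __iomem *regs = ioremap(phys_base, 0x100);	/* just a cast here */
	u32 status = readl(regs + 0x10);	/* lduwa ... ASI_PHYS_BYPASS_EC_E_L */

	writel(status | 0x1, regs + 0x14);	/* stwa ... ASI_PHYS_BYPASS_EC_E_L */
	iounmap(regs);				/* no-op on sparc64 */
	return status;
}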
475
476/* Create a virtual mapping cookie for an IO port range */
477extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
478extern void ioport_unmap(void __iomem *);
479
480/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
481struct pci_dev;
482extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
483extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
484
485/* Similarly for SBUS. */
486#define sbus_ioremap(__res, __offset, __size, __name) \
487({ unsigned long __ret; \
488 __ret = (__res)->start + (((__res)->flags & 0x1ffUL) << 32UL); \
489 __ret += (unsigned long) (__offset); \
490 if (! request_region((__ret), (__size), (__name))) \
491 __ret = 0UL; \
492 (void __iomem *) __ret; \
493})
494
495#define sbus_iounmap(__addr, __size) \
496 release_region((unsigned long)(__addr), (__size))
497
498/*
499 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
500 * access
501 */
502#define xlate_dev_mem_ptr(p) __va(p)
503
504/*
505 * Convert a virtual cached pointer to an uncached pointer
506 */
507#define xlate_dev_kmem_ptr(p) p
508
509#endif
510
511#endif /* !(__SPARC64_IO_H) */
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h
index d7b9afcba08b..76252bb85e97 100644
--- a/include/asm-sparc64/iommu.h
+++ b/include/asm-sparc64/iommu.h
@@ -1,62 +1 @@
#include <asm-sparc/iommu.h>
1/* iommu.h: Definitions for the sun5 IOMMU.
2 *
3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
4 */
5#ifndef _SPARC64_IOMMU_H
6#define _SPARC64_IOMMU_H
7
8/* The format of an iopte in the page tables. */
9#define IOPTE_VALID 0x8000000000000000UL
10#define IOPTE_64K 0x2000000000000000UL
11#define IOPTE_STBUF 0x1000000000000000UL
12#define IOPTE_INTRA 0x0800000000000000UL
13#define IOPTE_CONTEXT 0x07ff800000000000UL
14#define IOPTE_PAGE 0x00007fffffffe000UL
15#define IOPTE_CACHE 0x0000000000000010UL
16#define IOPTE_WRITE 0x0000000000000002UL
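A hedged sketch of how these masks compose into an entry (the helper name is made up; the real construction lives in the iommu code): a mapping is the aligned page address ORed with the attribute bits.

static inline unsigned long example_mk_iopte(unsigned long paddr)
{
	/* Valid, cacheable, writable translation for an aligned page. */
	return IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
	       (paddr & IOPTE_PAGE);
}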
17
18#define IOMMU_NUM_CTXS 4096
19
20struct iommu_arena {
21 unsigned long *map;
22 unsigned int hint;
23 unsigned int limit;
24};
25
26struct iommu {
27 spinlock_t lock;
28 struct iommu_arena arena;
29 void (*flush_all)(struct iommu *);
30 iopte_t *page_table;
31 u32 page_table_map_base;
32 unsigned long iommu_control;
33 unsigned long iommu_tsbbase;
34 unsigned long iommu_flush;
35 unsigned long iommu_flushinv;
36 unsigned long iommu_tags;
37 unsigned long iommu_ctxflush;
38 unsigned long write_complete_reg;
39 unsigned long dummy_page;
40 unsigned long dummy_page_pa;
41 unsigned long ctx_lowest_free;
42 DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
43 u32 dma_addr_mask;
44};
45
46struct strbuf {
47 int strbuf_enabled;
48 unsigned long strbuf_control;
49 unsigned long strbuf_pflush;
50 unsigned long strbuf_fsync;
51 unsigned long strbuf_ctxflush;
52 unsigned long strbuf_ctxmatch_base;
53 unsigned long strbuf_flushflag_pa;
54 volatile unsigned long *strbuf_flushflag;
55 volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
56};
57
58extern int iommu_table_init(struct iommu *iommu, int tsbsize,
59 u32 dma_offset, u32 dma_addr_mask,
60 int numa_node);
61
62#endif /* !(_SPARC64_IOMMU_H) */
diff --git a/include/asm-sparc64/ipcbuf.h b/include/asm-sparc64/ipcbuf.h
index 9c5bf1bc423f..41dfaf1149b5 100644
--- a/include/asm-sparc64/ipcbuf.h
+++ b/include/asm-sparc64/ipcbuf.h
@@ -1,28 +1 @@
#include <asm-sparc/ipcbuf.h>
1#ifndef _SPARC64_IPCBUF_H
2#define _SPARC64_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for sparc64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit seq
11 * - 2 miscellaneous 64-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid_t uid;
18 __kernel_gid_t gid;
19 __kernel_uid_t cuid;
20 __kernel_gid_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* _SPARC64_IPCBUF_H */
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 0bb9bf531745..b2102e65947c 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -1,93 +1 @@
#include <asm-sparc/irq.h>
1/* irq.h: IRQ registers on the 64-bit Sparc.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
5 */
6
7#ifndef _SPARC64_IRQ_H
8#define _SPARC64_IRQ_H
9
10#include <linux/linkage.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/interrupt.h>
14#include <asm/pil.h>
15#include <asm/ptrace.h>
16
17/* IMAP/ICLR register defines */
18#define IMAP_VALID 0x80000000UL /* IRQ Enabled */
19#define IMAP_TID_UPA 0x7c000000UL /* UPA TargetID */
20#define IMAP_TID_JBUS 0x7c000000UL /* JBUS TargetID */
21#define IMAP_TID_SHIFT 26
22#define IMAP_AID_SAFARI 0x7c000000UL /* Safari AgentID */
23#define IMAP_AID_SHIFT 26
24#define IMAP_NID_SAFARI 0x03e00000UL /* Safari NodeID */
25#define IMAP_NID_SHIFT 21
26#define IMAP_IGN 0x000007c0UL /* IRQ Group Number */
27#define IMAP_INO 0x0000003fUL /* IRQ Number */
28#define IMAP_INR 0x000007ffUL /* Full interrupt number*/
29
30#define ICLR_IDLE 0x00000000UL /* Idle state */
31#define ICLR_TRANSMIT 0x00000001UL /* Transmit state */
32#define ICLR_PENDING 0x00000003UL /* Pending state */
33
34/* The largest number of unique interrupt sources we support.
35 * If this ever needs to be larger than 255, you need to change
36 * the type of ino_bucket->virt_irq as appropriate.
37 *
38 * ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
39 */
40#define NR_IRQS 255
41
42extern void irq_install_pre_handler(int virt_irq,
43 void (*func)(unsigned int, void *, void *),
44 void *arg1, void *arg2);
45#define irq_canonicalize(irq) (irq)
46extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
47extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
48extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
49extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
50 unsigned int msi_devino_start,
51 unsigned int msi_devino_end);
52extern void sun4v_destroy_msi(unsigned int virt_irq);
53extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
54 unsigned int msi_devino_start,
55 unsigned int msi_devino_end,
56 unsigned long imap_base,
57 unsigned long iclr_base);
58extern void sun4u_destroy_msi(unsigned int virt_irq);
59extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
60
61extern unsigned char virt_irq_alloc(unsigned int dev_handle,
62 unsigned int dev_ino);
63#ifdef CONFIG_PCI_MSI
64extern void virt_irq_free(unsigned int virt_irq);
65#endif
66
67extern void __init init_IRQ(void);
68extern void fixup_irqs(void);
69
70static inline void set_softint(unsigned long bits)
71{
72 __asm__ __volatile__("wr %0, 0x0, %%set_softint"
73 : /* No outputs */
74 : "r" (bits));
75}
76
77static inline void clear_softint(unsigned long bits)
78{
79 __asm__ __volatile__("wr %0, 0x0, %%clear_softint"
80 : /* No outputs */
81 : "r" (bits));
82}
83
84static inline unsigned long get_softint(void)
85{
86 unsigned long retval;
87
88 __asm__ __volatile__("rd %%softint, %0"
89 : "=r" (retval));
90 return retval;
91}
92
93#endif
diff --git a/include/asm-sparc64/irqflags.h b/include/asm-sparc64/irqflags.h
index 024fc54d0682..27b091fc3fa0 100644
--- a/include/asm-sparc64/irqflags.h
+++ b/include/asm-sparc64/irqflags.h
@@ -1,89 +1 @@
#include <asm-sparc/irqflags.h>
1/*
2 * include/asm-sparc64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15static inline unsigned long __raw_local_save_flags(void)
16{
17 unsigned long flags;
18
19 __asm__ __volatile__(
20 "rdpr %%pil, %0"
21 : "=r" (flags)
22 );
23
24 return flags;
25}
26
27#define raw_local_save_flags(flags) \
28 do { (flags) = __raw_local_save_flags(); } while (0)
29
30static inline void raw_local_irq_restore(unsigned long flags)
31{
32 __asm__ __volatile__(
33 "wrpr %0, %%pil"
34 : /* no output */
35 : "r" (flags)
36 : "memory"
37 );
38}
39
40static inline void raw_local_irq_disable(void)
41{
42 __asm__ __volatile__(
43 "wrpr 15, %%pil"
44 : /* no outputs */
45 : /* no inputs */
46 : "memory"
47 );
48}
49
50static inline void raw_local_irq_enable(void)
51{
52 __asm__ __volatile__(
53 "wrpr 0, %%pil"
54 : /* no outputs */
55 : /* no inputs */
56 : "memory"
57 );
58}
59
60static inline int raw_irqs_disabled_flags(unsigned long flags)
61{
62 return (flags > 0);
63}
64
65static inline int raw_irqs_disabled(void)
66{
67 unsigned long flags = __raw_local_save_flags();
68
69 return raw_irqs_disabled_flags(flags);
70}
71
72/*
73 * For spinlocks, etc:
74 */
75static inline unsigned long __raw_local_irq_save(void)
76{
77 unsigned long flags = __raw_local_save_flags();
78
79 raw_local_irq_disable();
80
81 return flags;
82}
83
84#define raw_local_irq_save(flags) \
85 do { (flags) = __raw_local_irq_save(); } while (0)
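A small usage sketch (hypothetical function, shown only to make the pairing explicit): save-and-disable around a critical region, then restore the previous %pil.

static void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* rdpr %pil, then wrpr 15, %pil */
	/* ... work that must not be interrupted ... */
	raw_local_irq_restore(flags);	/* wrpr <saved pil>, %pil */
}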
86
87#endif /* (__ASSEMBLY__) */
88
89#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index f905b773235a..78cfd5d2749b 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -1,19 +1 @@
#include <asm-sparc/kdebug.h>
1#ifndef _SPARC64_KDEBUG_H
2#define _SPARC64_KDEBUG_H
3
4struct pt_regs;
5
6extern void bad_trap(struct pt_regs *, long);
7
8/* Grossly misnamed. */
9enum die_val {
10 DIE_OOPS = 1,
11 DIE_DEBUG, /* ta 0x70 */
12 DIE_DEBUG_2, /* ta 0x71 */
13 DIE_DIE,
14 DIE_TRAP,
15 DIE_TRAP_TL1,
16 DIE_CALL,
17};
18
19#endif
diff --git a/include/asm-sparc64/mc146818rtc.h b/include/asm-sparc64/mc146818rtc.h
index e9c0fcc25c6f..97842e6ed1c2 100644
--- a/include/asm-sparc64/mc146818rtc.h
+++ b/include/asm-sparc64/mc146818rtc.h
@@ -1,34 +1 @@
#include <asm-sparc/mc146818rtc.h>
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_SPARC64_MC146818RTC_H
5#define __ASM_SPARC64_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#ifdef CONFIG_PCI
11extern unsigned long ds1287_regs;
12#else
13#define ds1287_regs (0UL)
14#endif
15#define RTC_PORT(x) (ds1287_regs + (x))
16#define RTC_ALWAYS_BCD 0
17#endif
18
19/*
20 * The machines supported so far all access the RTC index register via
21 * an ISA port access, but the way to access the data register differs ...
22 */
23#define CMOS_READ(addr) ({ \
24outb_p((addr),RTC_PORT(0)); \
25inb_p(RTC_PORT(1)); \
26})
27#define CMOS_WRITE(val, addr) ({ \
28outb_p((addr),RTC_PORT(0)); \
29outb_p((val),RTC_PORT(1)); \
30})
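For illustration (register index 0 is the MC146818 seconds register), a read expands to an index-port write followed by a data-port read:

static inline unsigned char example_rtc_seconds(void)
{
	return CMOS_READ(0);	/* outb_p(0, RTC_PORT(0)); inb_p(RTC_PORT(1)); */
}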
31
32#define RTC_IRQ 8
33
34#endif /* __ASM_SPARC64_MC146818RTC_H */
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 8abc58f0f9d7..e677a64d8db1 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -1,127 +1 @@
#include <asm-sparc/mmu.h>
1#ifndef __MMU_H
2#define __MMU_H
3
4#include <linux/const.h>
5#include <asm/page.h>
6#include <asm/hypervisor.h>
7
8#define CTX_NR_BITS 13
9
10#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
11
12/* UltraSPARC-III+ and later have a feature whereby you can
13 * select what page size the various Data-TLB instances in the
14 * chip will use. In order to gracefully support this, we put the version
15 * field in a spot outside of the areas of the context register
16 * where this parameter is specified.
17 */
18#define CTX_VERSION_SHIFT 22
19#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
20
21#define CTX_PGSZ_8KB _AC(0x0,UL)
22#define CTX_PGSZ_64KB _AC(0x1,UL)
23#define CTX_PGSZ_512KB _AC(0x2,UL)
24#define CTX_PGSZ_4MB _AC(0x3,UL)
25#define CTX_PGSZ_BITS _AC(0x7,UL)
26#define CTX_PGSZ0_NUC_SHIFT 61
27#define CTX_PGSZ1_NUC_SHIFT 58
28#define CTX_PGSZ0_SHIFT 16
29#define CTX_PGSZ1_SHIFT 19
30#define CTX_PGSZ_MASK ((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \
31 (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT))
32
33#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
34#define CTX_PGSZ_BASE CTX_PGSZ_8KB
35#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
36#define CTX_PGSZ_BASE CTX_PGSZ_64KB
37#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
38#define CTX_PGSZ_BASE CTX_PGSZ_512KB
39#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
40#define CTX_PGSZ_BASE CTX_PGSZ_4MB
41#else
42#error No page size specified in kernel configuration
43#endif
44
45#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
46#define CTX_PGSZ_HUGE CTX_PGSZ_4MB
47#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
48#define CTX_PGSZ_HUGE CTX_PGSZ_512KB
49#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
50#define CTX_PGSZ_HUGE CTX_PGSZ_64KB
51#endif
52
53#define CTX_PGSZ_KERN CTX_PGSZ_4MB
54
55/* Thus, when running on UltraSPARC-III+ and later, we use the following
56 * PRIMARY_CONTEXT register values for the kernel context.
57 */
58#define CTX_CHEETAH_PLUS_NUC \
59 ((CTX_PGSZ_KERN << CTX_PGSZ0_NUC_SHIFT) | \
60 (CTX_PGSZ_BASE << CTX_PGSZ1_NUC_SHIFT))
61
62#define CTX_CHEETAH_PLUS_CTX0 \
63 ((CTX_PGSZ_KERN << CTX_PGSZ0_SHIFT) | \
64 (CTX_PGSZ_BASE << CTX_PGSZ1_SHIFT))
65
66/* If you want "the TLB context number" use CTX_NR_MASK. If you
67 * want "the bits I program into the context registers" use
68 * CTX_HW_MASK.
69 */
70#define CTX_NR_MASK TAG_CONTEXT_BITS
71#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
72
73#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
74#define CTX_VALID(__ctx) \
75 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
76#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
77#define CTX_NRBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_NR_MASK)
78
79#ifndef __ASSEMBLY__
80
81#define TSB_ENTRY_ALIGNMENT 16
82
83struct tsb {
84 unsigned long tag;
85 unsigned long pte;
86} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
87
88extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
89extern void tsb_flush(unsigned long ent, unsigned long tag);
90extern void tsb_init(struct tsb *tsb, unsigned long size);
91
92struct tsb_config {
93 struct tsb *tsb;
94 unsigned long tsb_rss_limit;
95 unsigned long tsb_nentries;
96 unsigned long tsb_reg_val;
97 unsigned long tsb_map_vaddr;
98 unsigned long tsb_map_pte;
99};
100
101#define MM_TSB_BASE 0
102
103#ifdef CONFIG_HUGETLB_PAGE
104#define MM_TSB_HUGE 1
105#define MM_NUM_TSBS 2
106#else
107#define MM_NUM_TSBS 1
108#endif
109
110typedef struct {
111 spinlock_t lock;
112 unsigned long sparc64_ctx_val;
113 unsigned long huge_pte_count;
114 struct tsb_config tsb_block[MM_NUM_TSBS];
115 struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
116} mm_context_t;
117
118#endif /* !__ASSEMBLY__ */
119
120#define TSB_CONFIG_TSB 0x00
121#define TSB_CONFIG_RSS_LIMIT 0x08
122#define TSB_CONFIG_NENTRIES 0x10
123#define TSB_CONFIG_REG_VAL 0x18
124#define TSB_CONFIG_MAP_VADDR 0x20
125#define TSB_CONFIG_MAP_PTE 0x28
126
127#endif /* __MMU_H */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 5693ab482606..877fee94bd4e 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,155 +1 @@
#include <asm-sparc/mmu_context.h>
1#ifndef __SPARC64_MMU_CONTEXT_H
2#define __SPARC64_MMU_CONTEXT_H
3
4/* Derived heavily from Linus's Alpha/AXP ASN code... */
5
6#ifndef __ASSEMBLY__
7
8#include <linux/spinlock.h>
9#include <asm/system.h>
10#include <asm/spitfire.h>
11#include <asm-generic/mm_hooks.h>
12
13static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14{
15}
16
17extern spinlock_t ctx_alloc_lock;
18extern unsigned long tlb_context_cache;
19extern unsigned long mmu_context_bmap[];
20
21extern void get_new_mmu_context(struct mm_struct *mm);
22#ifdef CONFIG_SMP
23extern void smp_new_mmu_context_version(void);
24#else
25#define smp_new_mmu_context_version() do { } while (0)
26#endif
27
28extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
29extern void destroy_context(struct mm_struct *mm);
30
31extern void __tsb_context_switch(unsigned long pgd_pa,
32 struct tsb_config *tsb_base,
33 struct tsb_config *tsb_huge,
34 unsigned long tsb_descr_pa);
35
36static inline void tsb_context_switch(struct mm_struct *mm)
37{
38 __tsb_context_switch(__pa(mm->pgd),
39 &mm->context.tsb_block[0],
40#ifdef CONFIG_HUGETLB_PAGE
41 (mm->context.tsb_block[1].tsb ?
42 &mm->context.tsb_block[1] :
43 NULL)
44#else
45 NULL
46#endif
47 , __pa(&mm->context.tsb_descr[0]));
48}
49
50extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
51#ifdef CONFIG_SMP
52extern void smp_tsb_sync(struct mm_struct *mm);
53#else
54#define smp_tsb_sync(__mm) do { } while (0)
55#endif
56
57/* Set MMU context in the actual hardware. */
58#define load_secondary_context(__mm) \
59 __asm__ __volatile__( \
60 "\n661: stxa %0, [%1] %2\n" \
61 " .section .sun4v_1insn_patch, \"ax\"\n" \
62 " .word 661b\n" \
63 " stxa %0, [%1] %3\n" \
64 " .previous\n" \
65 " flush %%g6\n" \
66 : /* No outputs */ \
67 : "r" (CTX_HWBITS((__mm)->context)), \
68 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
69
70extern void __flush_tlb_mm(unsigned long, unsigned long);
71
72/* Switch the current MM context. Interrupts are disabled. */
73static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
74{
75 unsigned long ctx_valid, flags;
76 int cpu;
77
78 if (unlikely(mm == &init_mm))
79 return;
80
81 spin_lock_irqsave(&mm->context.lock, flags);
82 ctx_valid = CTX_VALID(mm->context);
83 if (!ctx_valid)
84 get_new_mmu_context(mm);
85
86 /* We have to be extremely careful here or else we will miss
87 * a TSB grow if we switch back and forth between a kernel
88 * thread and an address space which has its TSB size increased
89 * on another processor.
90 *
91 * It is possible to play some games in order to optimize the
92 * switch, but the safest thing to do is to unconditionally
93 * perform the secondary context load and the TSB context switch.
94 *
95 * For reference the bad case is, for address space "A":
96 *
97 * CPU 0 CPU 1
98 * run address space A
99 * set cpu0's bits in cpu_vm_mask
100 * switch to kernel thread, borrow
101 * address space A via enter_lazy_tlb
102 * run address space A
103 * set cpu1's bit in cpu_vm_mask
104 * flush_tlb_pending()
105 * reset cpu_vm_mask to just cpu1
106 * TSB grow
107 * run address space A
108 * context was valid, so skip
109 * TSB context switch
110 *
111 * At that point cpu0 continues to use a stale TSB, the one from
112 * before the TSB grow performed on cpu1. cpu1 did not cross-call
113 * cpu0 to update its TSB because at that point the cpu_vm_mask
114 * only had cpu1 set in it.
115 */
116 load_secondary_context(mm);
117 tsb_context_switch(mm);
118
119 /* Any time a processor runs a context on an address space
120 * for the first time, we must flush that context out of the
121 * local TLB.
122 */
123 cpu = smp_processor_id();
124 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
125 cpu_set(cpu, mm->cpu_vm_mask);
126 __flush_tlb_mm(CTX_HWBITS(mm->context),
127 SECONDARY_CONTEXT);
128 }
129 spin_unlock_irqrestore(&mm->context.lock, flags);
130}
131
132#define deactivate_mm(tsk,mm) do { } while (0)
133
134/* Activate a new MM instance for the current task. */
135static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
136{
137 unsigned long flags;
138 int cpu;
139
140 spin_lock_irqsave(&mm->context.lock, flags);
141 if (!CTX_VALID(mm->context))
142 get_new_mmu_context(mm);
143 cpu = smp_processor_id();
144 if (!cpu_isset(cpu, mm->cpu_vm_mask))
145 cpu_set(cpu, mm->cpu_vm_mask);
146
147 load_secondary_context(mm);
148 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
149 tsb_context_switch(mm);
150 spin_unlock_irqrestore(&mm->context.lock, flags);
151}
152
153#endif /* !(__ASSEMBLY__) */
154
155#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc64/module.h b/include/asm-sparc64/module.h
index 3d77ba465783..a9606db55e4a 100644
--- a/include/asm-sparc64/module.h
+++ b/include/asm-sparc64/module.h
@@ -1,7 +1 @@
#include <asm-sparc/module.h>
1#ifndef _ASM_SPARC64_MODULE_H
2#define _ASM_SPARC64_MODULE_H
3struct mod_arch_specific { };
4#define Elf_Shdr Elf64_Shdr
5#define Elf_Sym Elf64_Sym
6#define Elf_Ehdr Elf64_Ehdr
7#endif /* _ASM_SPARC64_MODULE_H */
diff --git a/include/asm-sparc64/mostek.h b/include/asm-sparc64/mostek.h
index c5652de2ace2..95a752f7e875 100644
--- a/include/asm-sparc64/mostek.h
+++ b/include/asm-sparc64/mostek.h
@@ -1,143 +1 @@
#include <asm-sparc/mostek.h>
1/* mostek.h: Describes the various Mostek time of day clock registers.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
5 */
6
7#ifndef _SPARC64_MOSTEK_H
8#define _SPARC64_MOSTEK_H
9
10#include <asm/idprom.h>
11
12/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
13 *
14 * Data
15 * Address Function
16 * Bit 7 Bit 6 Bit 5 Bit 4 Bit 3 Bit 2 Bit 1 Bit 0
17 * 7ff - - - - - - - - Year 00-99
18 * 7fe 0 0 0 - - - - - Month 01-12
19 * 7fd 0 0 - - - - - - Date 01-31
20 * 7fc 0 FT 0 0 0 - - - Day 01-07
21 * 7fb KS 0 - - - - - - Hours 00-23
22 * 7fa 0 - - - - - - - Minutes 00-59
23 * 7f9 ST - - - - - - - Seconds 00-59
24 * 7f8 W R S - - - - - Control
25 *
26 * * ST is STOP BIT
27 * * W is WRITE BIT
28 * * R is READ BIT
29 * * S is SIGN BIT
30 * * FT is FREQ TEST BIT
31 * * KS is KICK START BIT
32 */
33
34/* The Mostek 48t02 real time clock and NVRAM chip. The registers
35 * other than the control register are in binary coded decimal. Some
36 * control bits also live outside the control register.
37 *
38 * We now deal with physical addresses for I/O to the chip. -DaveM
39 */
40static inline u8 mostek_read(void __iomem *addr)
41{
42 u8 ret;
43
44 __asm__ __volatile__("lduba [%1] %2, %0"
45 : "=r" (ret)
46 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
47 return ret;
48}
49
50static inline void mostek_write(void __iomem *addr, u8 val)
51{
52 __asm__ __volatile__("stba %0, [%1] %2"
53 : /* no outputs */
54 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
55}
56
57#define MOSTEK_EEPROM 0x0000UL
58#define MOSTEK_IDPROM 0x07d8UL
59#define MOSTEK_CREG 0x07f8UL
60#define MOSTEK_SEC 0x07f9UL
61#define MOSTEK_MIN 0x07faUL
62#define MOSTEK_HOUR 0x07fbUL
63#define MOSTEK_DOW 0x07fcUL
64#define MOSTEK_DOM 0x07fdUL
65#define MOSTEK_MONTH 0x07feUL
66#define MOSTEK_YEAR 0x07ffUL
67
68extern spinlock_t mostek_lock;
69extern void __iomem *mstk48t02_regs;
70
71/* Control register values. */
72#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
73#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
74#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
75
76/* Control bits that live in the other registers. */
77#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
78#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
79#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
80
81#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
82#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
83
84/* Masks that define how much space each value takes up. */
85#define MSTK_SEC_MASK 0x7f
86#define MSTK_MIN_MASK 0x7f
87#define MSTK_HOUR_MASK 0x3f
88#define MSTK_DOW_MASK 0x07
89#define MSTK_DOM_MASK 0x3f
90#define MSTK_MONTH_MASK 0x1f
91#define MSTK_YEAR_MASK 0xffU
92
93/* Binary coded decimal conversion macros. */
94#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
95#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
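A worked example of the conversion (values chosen for illustration):

/* MSTK_REGVAL_TO_DECIMAL(0x59) = (0x59 & 0x0F) + 10 * (0x59 >> 4)
 *                              = 9 + 10 * 5 = 59
 * MSTK_DECIMAL_TO_REGVAL(59)   = ((59 / 10) << 4) + (59 % 10) = 0x59
 */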
96
97/* Generic register set and get macros for internal use. */
98#define MSTK_GET(regs,name) \
99 (MSTK_REGVAL_TO_DECIMAL(mostek_read(regs + MOSTEK_ ## name) & MSTK_ ## name ## _MASK))
100#define MSTK_SET(regs,name,value) \
101do { u8 __val = mostek_read(regs + MOSTEK_ ## name); \
102 __val &= ~(MSTK_ ## name ## _MASK); \
103 __val |= (MSTK_DECIMAL_TO_REGVAL(value) & \
104 (MSTK_ ## name ## _MASK)); \
105 mostek_write(regs + MOSTEK_ ## name, __val); \
106} while(0)
107
108/* Macros to make register access easier on our fingers. These give you
109 * the decimal value of the register requested if applicable. You pass
110 * a pointer to a 'struct mostek48t02'.
111 */
112#define MSTK_REG_CREG(regs) (mostek_read((regs) + MOSTEK_CREG))
113#define MSTK_REG_SEC(regs) MSTK_GET(regs,SEC)
114#define MSTK_REG_MIN(regs) MSTK_GET(regs,MIN)
115#define MSTK_REG_HOUR(regs) MSTK_GET(regs,HOUR)
116#define MSTK_REG_DOW(regs) MSTK_GET(regs,DOW)
117#define MSTK_REG_DOM(regs) MSTK_GET(regs,DOM)
118#define MSTK_REG_MONTH(regs) MSTK_GET(regs,MONTH)
119#define MSTK_REG_YEAR(regs) MSTK_GET(regs,YEAR)
120
121#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,SEC,value)
122#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,MIN,value)
123#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,HOUR,value)
124#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,DOW,value)
125#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,DOM,value)
126#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,MONTH,value)
127#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,YEAR,value)
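A hedged sketch of the intended access pattern (the helper itself is hypothetical): freeze updates with MSTK_CREG_READ while holding mostek_lock, read the BCD registers through the macros above, then unfreeze.

static void example_read_mostek(void __iomem *regs, int *sec, int *min, int *hour)
{
	unsigned long flags;
	u8 creg;

	spin_lock_irqsave(&mostek_lock, flags);
	creg = mostek_read(regs + MOSTEK_CREG);
	mostek_write(regs + MOSTEK_CREG, creg | MSTK_CREG_READ);

	*sec  = MSTK_REG_SEC(regs);
	*min  = MSTK_REG_MIN(regs);
	*hour = MSTK_REG_HOUR(regs);

	mostek_write(regs + MOSTEK_CREG, creg & ~MSTK_CREG_READ);
	spin_unlock_irqrestore(&mostek_lock, flags);
}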
128
129
130/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has the
131 * same (basically) layout as the 48t02 chip except for the extra
132 * NVRAM on board (8 KB against the 48t02's 2 KB).
133 */
134#define MOSTEK_48T08_OFFSET 0x0000UL /* Lower NVRAM portions */
135#define MOSTEK_48T08_48T02 0x1800UL /* Offset to 48T02 chip */
136
137/* SUN5 systems usually have 48t59 model clock chipsets. But we keep the older
138 * clock chip definitions around just in case.
139 */
140#define MOSTEK_48T59_OFFSET 0x0000UL /* Lower NVRAM portions */
141#define MOSTEK_48T59_48T02 0x1800UL /* Offset to 48T02 chip */
142
143#endif /* !(_SPARC64_MOSTEK_H) */
diff --git a/include/asm-sparc64/namei.h b/include/asm-sparc64/namei.h
index cbc1b4c06891..1344a910ba2f 100644
--- a/include/asm-sparc64/namei.h
+++ b/include/asm-sparc64/namei.h
@@ -1,13 +1 @@
#include <asm-sparc/namei.h>
1/*
2 * linux/include/asm-sparc64/namei.h
3 *
4 * Routines to handle famous /usr/gnemul/s*.
5 * Included from linux/fs/namei.c
6 */
7
8#ifndef __SPARC64_NAMEI_H
9#define __SPARC64_NAMEI_H
10
11#define __emul_prefix() NULL
12
13#endif /* __SPARC64_NAMEI_H */
diff --git a/include/asm-sparc64/of_platform.h b/include/asm-sparc64/of_platform.h
index 78aa032b674c..f7c427b8bc61 100644
--- a/include/asm-sparc64/of_platform.h
+++ b/include/asm-sparc64/of_platform.h
@@ -1,25 +1 @@
#include <asm-sparc/of_platform.h>
1#ifndef _ASM_SPARC64_OF_PLATFORM_H
2#define _ASM_SPARC64_OF_PLATFORM_H
3/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 * Modified for Sparc by merging parts of asm-sparc/of_device.h
7 * by Stephen Rothwell
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16/* This is just here during the transition */
17#include <linux/of_platform.h>
18
19extern struct bus_type isa_bus_type;
20extern struct bus_type ebus_bus_type;
21extern struct bus_type sbus_bus_type;
22
23#define of_bus_type of_platform_bus_type /* for compatibility */
24
25#endif /* _ASM_SPARC64_OF_PLATFORM_H */
diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h
index b69e4a8c9170..acf4b234fae3 100644
--- a/include/asm-sparc64/openprom.h
+++ b/include/asm-sparc64/openprom.h
@@ -1,280 +1 @@
#include <asm-sparc/openprom.h>
1#ifndef __SPARC64_OPENPROM_H
2#define __SPARC64_OPENPROM_H
3
4/* openprom.h: Prom structures and defines for access to the OPENBOOT
5 * prom routines and data areas.
6 *
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10#ifndef __ASSEMBLY__
11/* V0 prom device operations. */
12struct linux_dev_v0_funcs {
13 int (*v0_devopen)(char *device_str);
14 int (*v0_devclose)(int dev_desc);
15 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
16 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
17 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
18 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
19 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
20 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
21 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
22};
23
24/* V2 and later prom device operations. */
25struct linux_dev_v2_funcs {
26 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
27 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
28 void (*v2_dumb_mem_free)(char *va, unsigned sz);
29
30 /* To map devices into virtual I/O space. */
31 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
32 void (*v2_dumb_munmap)(char *virta, unsigned size);
33
34 int (*v2_dev_open)(char *devpath);
35 void (*v2_dev_close)(int d);
36 int (*v2_dev_read)(int d, char *buf, int nbytes);
37 int (*v2_dev_write)(int d, char *buf, int nbytes);
38 int (*v2_dev_seek)(int d, int hi, int lo);
39
40 /* Never issued (multistage load support) */
41 void (*v2_wheee2)(void);
42 void (*v2_wheee3)(void);
43};
44
45struct linux_mlist_v0 {
46 struct linux_mlist_v0 *theres_more;
47 unsigned start_adr;
48 unsigned num_bytes;
49};
50
51struct linux_mem_v0 {
52 struct linux_mlist_v0 **v0_totphys;
53 struct linux_mlist_v0 **v0_prommap;
54 struct linux_mlist_v0 **v0_available; /* What we can use */
55};
56
57/* Arguments sent to the kernel from the boot prompt. */
58struct linux_arguments_v0 {
59 char *argv[8];
60 char args[100];
61 char boot_dev[2];
62 int boot_dev_ctrl;
63 int boot_dev_unit;
64 int dev_partition;
65 char *kernel_file_name;
66 void *aieee1; /* XXX */
67};
68
69/* V2 and up boot things. */
70struct linux_bootargs_v2 {
71 char **bootpath;
72 char **bootargs;
73 int *fd_stdin;
74 int *fd_stdout;
75};
76
77/* The top level PROM vector. */
78struct linux_romvec {
79 /* Version numbers. */
80 unsigned int pv_magic_cookie;
81 unsigned int pv_romvers;
82 unsigned int pv_plugin_revision;
83 unsigned int pv_printrev;
84
85 /* Version 0 memory descriptors. */
86 struct linux_mem_v0 pv_v0mem;
87
88 /* Node operations. */
89 struct linux_nodeops *pv_nodeops;
90
91 char **pv_bootstr;
92 struct linux_dev_v0_funcs pv_v0devops;
93
94 char *pv_stdin;
95 char *pv_stdout;
96#define PROMDEV_KBD 0 /* input from keyboard */
97#define PROMDEV_SCREEN 0 /* output to screen */
98#define PROMDEV_TTYA 1 /* in/out to ttya */
99#define PROMDEV_TTYB 2 /* in/out to ttyb */
100
101 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
102 int (*pv_getchar)(void);
103 void (*pv_putchar)(int ch);
104
105 /* Non-blocking variants. */
106 int (*pv_nbgetchar)(void);
107 int (*pv_nbputchar)(int ch);
108
109 void (*pv_putstr)(char *str, int len);
110
111 /* Miscellany. */
112 void (*pv_reboot)(char *bootstr);
113 void (*pv_printf)(__const__ char *fmt, ...);
114 void (*pv_abort)(void);
115 __volatile__ int *pv_ticks;
116 void (*pv_halt)(void);
117 void (**pv_synchook)(void);
118
119 /* Evaluate a forth string; note the different proto for V0 and V2->up. */
120 union {
121 void (*v0_eval)(int len, char *str);
122 void (*v2_eval)(char *str);
123 } pv_fortheval;
124
125 struct linux_arguments_v0 **pv_v0bootargs;
126
127 /* Get ether address. */
128 unsigned int (*pv_enaddr)(int d, char *enaddr);
129
130 struct linux_bootargs_v2 pv_v2bootargs;
131 struct linux_dev_v2_funcs pv_v2devops;
132
133 int filler[15];
134
135 /* This one is sun4c/sun4 only. */
136 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
137
138 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
139 * No joke. Calling these when there is only one cpu probably
140 * crashes the machine, have to test this. :-)
141 */
142
143 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
144 * 'thiscontext' executing at address 'prog_counter'
145 */
146 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
147 int thiscontext, char *prog_counter);
148
149 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
150 * until a resume cpu call is made.
151 */
152 int (*v3_cpustop)(unsigned int whichcpu);
153
154 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
155 * resume cpu call is made.
156 */
157 int (*v3_cpuidle)(unsigned int whichcpu);
158
159 /* v3_cpuresume() will resume processor 'whichcpu' executing
160 * starting with whatever 'pc' and 'npc' were left at the
161 * last 'idle' or 'stop' call.
162 */
163 int (*v3_cpuresume)(unsigned int whichcpu);
164};
165
166/* Routines for traversing the prom device tree. */
167struct linux_nodeops {
168 int (*no_nextnode)(int node);
169 int (*no_child)(int node);
170 int (*no_proplen)(int node, char *name);
171 int (*no_getprop)(int node, char *name, char *val);
172 int (*no_setprop)(int node, char *name, char *val, int len);
173 char * (*no_nextprop)(int node, char *name);
174};
175
176/* More fun PROM structures for device probing. */
177#define PROMREG_MAX 24
178#define PROMVADDR_MAX 16
179#define PROMINTR_MAX 32
180
181struct linux_prom_registers {
182 unsigned which_io; /* hi part of physical address */
183 unsigned phys_addr; /* The physical address of this register */
184 int reg_size; /* How many bytes does this register take up? */
185};
186
187struct linux_prom64_registers {
188 unsigned long phys_addr;
189 unsigned long reg_size;
190};
191
192struct linux_prom_irqs {
193 int pri; /* IRQ priority */
194 int vector; /* This is foobar, what does it do? */
195};
196
197/* Element of the "ranges" vector */
198struct linux_prom_ranges {
199 unsigned int ot_child_space;
200 unsigned int ot_child_base; /* Bus feels this */
201 unsigned int ot_parent_space;
202 unsigned int ot_parent_base; /* CPU looks from here */
203 unsigned int or_size;
204};
205
206struct linux_prom64_ranges {
207 unsigned long ot_child_base; /* Bus feels this */
208 unsigned long ot_parent_base; /* CPU looks from here */
209 unsigned long or_size;
210};
211
212/* Ranges and reg properties are a bit different for PCI. */
213struct linux_prom_pci_registers {
214 unsigned int phys_hi;
215 unsigned int phys_mid;
216 unsigned int phys_lo;
217
218 unsigned int size_hi;
219 unsigned int size_lo;
220};
221
222struct linux_prom_pci_ranges {
223 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
224 unsigned int child_phys_mid;
225 unsigned int child_phys_lo;
226
227 unsigned int parent_phys_hi;
228 unsigned int parent_phys_lo;
229
230 unsigned int size_hi;
231 unsigned int size_lo;
232};
233
234struct linux_prom_pci_intmap {
235 unsigned int phys_hi;
236 unsigned int phys_mid;
237 unsigned int phys_lo;
238
239 unsigned int interrupt;
240
241 int cnode;
242 unsigned int cinterrupt;
243};
244
245struct linux_prom_pci_intmask {
246 unsigned int phys_hi;
247 unsigned int phys_mid;
248 unsigned int phys_lo;
249 unsigned int interrupt;
250};
251
252struct linux_prom_ebus_ranges {
253 unsigned int child_phys_hi;
254 unsigned int child_phys_lo;
255
256 unsigned int parent_phys_hi;
257 unsigned int parent_phys_mid;
258 unsigned int parent_phys_lo;
259
260 unsigned int size;
261};
262
263struct linux_prom_ebus_intmap {
264 unsigned int phys_hi;
265 unsigned int phys_lo;
266
267 unsigned int interrupt;
268
269 int cnode;
270 unsigned int cinterrupt;
271};
272
273struct linux_prom_ebus_intmask {
274 unsigned int phys_hi;
275 unsigned int phys_lo;
276 unsigned int interrupt;
277};
278#endif /* !(__ASSEMBLY__) */
279
280#endif /* !(__SPARC64_OPENPROM_H) */
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index 55c5bb27e4da..d93e44e63510 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -1,322 +1 @@
#include <asm-sparc/oplib.h>
1/* oplib.h: Describes the interface and available routines in the
2 * Linux Prom library.
3 *
4 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#ifndef __SPARC64_OPLIB_H
9#define __SPARC64_OPLIB_H
10
11#include <asm/openprom.h>
12
13/* OBP version string. */
14extern char prom_version[];
15
16/* Root node of the prom device tree, this stays constant after
17 * initialization is complete.
18 */
19extern int prom_root_node;
20
21/* PROM stdin and stdout */
22extern int prom_stdin, prom_stdout;
23
24/* /chosen node of the prom device tree, this stays constant after
25 * initialization is complete.
26 */
27extern int prom_chosen_node;
28
29/* Helper values and strings in arch/sparc64/kernel/head.S */
30extern const char prom_peer_name[];
31extern const char prom_compatible_name[];
32extern const char prom_root_compatible[];
33extern const char prom_cpu_compatible[];
34extern const char prom_finddev_name[];
35extern const char prom_chosen_path[];
36extern const char prom_cpu_path[];
37extern const char prom_getprop_name[];
38extern const char prom_mmu_name[];
39extern const char prom_callmethod_name[];
40extern const char prom_translate_name[];
41extern const char prom_map_name[];
42extern const char prom_unmap_name[];
43extern int prom_mmu_ihandle_cache;
44extern unsigned int prom_boot_mapped_pc;
45extern unsigned int prom_boot_mapping_mode;
46extern unsigned long prom_boot_mapping_phys_high, prom_boot_mapping_phys_low;
47
48struct linux_mlist_p1275 {
49 struct linux_mlist_p1275 *theres_more;
50 unsigned long start_adr;
51 unsigned long num_bytes;
52};
53
54struct linux_mem_p1275 {
55 struct linux_mlist_p1275 **p1275_totphys;
56 struct linux_mlist_p1275 **p1275_prommap;
57 struct linux_mlist_p1275 **p1275_available; /* What we can use */
58};
59
60/* The functions... */
61
62/* You must call prom_init() before using any of the library services,
63 * preferably as early as possible. Pass it the CIF handler and stack.
64 */
65extern void prom_init(void *cif_handler, void *cif_stack);
66
67/* Boot argument acquisition, returns the boot command line string. */
68extern char *prom_getbootargs(void);
69
70/* Device utilities. */
71
72/* Device operations. */
73
74/* Open the device described by the passed string. Note that the format
75 * of the string is different on V0 vs. V2->higher proms. The caller must
76 * know what he/she is doing! Returns the device descriptor, an int.
77 */
78extern int prom_devopen(const char *device_string);
79
80/* Close a previously opened device described by the passed integer
81 * descriptor.
82 */
83extern int prom_devclose(int device_handle);
84
85/* Do a seek operation on the device described by the passed integer
86 * descriptor.
87 */
88extern void prom_seek(int device_handle, unsigned int seek_hival,
89 unsigned int seek_lowval);
90
91/* Miscellaneous routines, don't really fit in any category per se. */
92
93/* Reboot the machine with the command line passed. */
94extern void prom_reboot(const char *boot_command);
95
96/* Evaluate the forth string passed. */
97extern void prom_feval(const char *forth_string);
98
99/* Enter the prom, with possibility of continuation with the 'go'
100 * command in newer proms.
101 */
102extern void prom_cmdline(void);
103
104/* Enter the prom, with no chance of continuation for the stand-alone
105 * which calls this.
106 */
107extern void prom_halt(void) __attribute__ ((noreturn));
108
109/* Halt and power-off the machine. */
110extern void prom_halt_power_off(void) __attribute__ ((noreturn));
111
112/* Set the PROM 'sync' callback function to the passed function pointer.
113 * When the user gives the 'sync' command at the prom prompt while the
114 * kernel is still active, the prom will call this routine.
115 *
116 */
117typedef int (*callback_func_t)(long *cmd);
118extern void prom_setcallback(callback_func_t func_ptr);
119
120/* Acquire the IDPROM of the root node in the prom device tree. This
121 * gets passed a buffer where you would like it stuffed. The return value
122 * is the format type of this idprom or 0xff on error.
123 */
124extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
125
126/* Character operations to/from the console.... */
127
128/* Non-blocking get character from console. */
129extern int prom_nbgetchar(void);
130
131/* Non-blocking put character to console. */
132extern int prom_nbputchar(char character);
133
134/* Blocking get character from console. */
135extern char prom_getchar(void);
136
137/* Blocking put character to console. */
138extern void prom_putchar(char character);
139
140/* Prom's internal routines, don't use in kernel/boot code. */
141extern void prom_printf(const char *fmt, ...);
142extern void prom_write(const char *buf, unsigned int len);
143
144/* Multiprocessor operations... */
145#ifdef CONFIG_SMP
146/* Start the CPU with the given device tree node at the passed program
147 * counter with the given arg passed in via register %o0.
148 */
149extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
150
151/* Start the CPU with the given cpu ID at the passed program
152 * counter with the given arg passed in via register %o0.
153 */
154extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
155
156/* Stop the CPU with the given cpu ID. */
157extern void prom_stopcpu_cpuid(int cpuid);
158
159/* Stop the current CPU. */
160extern void prom_stopself(void);
161
162/* Idle the current CPU. */
163extern void prom_idleself(void);
164
165/* Resume the CPU with the passed device tree node. */
166extern void prom_resumecpu(int cpunode);
167#endif
168
169/* Power management interfaces. */
170
171/* Put the current CPU to sleep. */
172extern void prom_sleepself(void);
173
174/* Put the entire system to sleep. */
175extern int prom_sleepsystem(void);
176
177/* Initiate a wakeup event. */
178extern int prom_wakeupsystem(void);
179
180/* MMU and memory related OBP interfaces. */
181
182/* Get unique string identifying SIMM at given physical address. */
183extern int prom_getunumber(int syndrome_code,
184 unsigned long phys_addr,
185 char *buf, int buflen);
186
187/* Retain physical memory to the caller across soft resets. */
188extern unsigned long prom_retain(const char *name,
189 unsigned long pa_low, unsigned long pa_high,
190 long size, long align);
191
192/* Load explicit I/D TLB entries into the calling processor. */
193extern long prom_itlb_load(unsigned long index,
194 unsigned long tte_data,
195 unsigned long vaddr);
196
197extern long prom_dtlb_load(unsigned long index,
198 unsigned long tte_data,
199 unsigned long vaddr);
200
201/* Map/Unmap client program address ranges. First the format of
202 * the mapping mode argument.
203 */
204#define PROM_MAP_WRITE 0x0001 /* Writable */
205#define PROM_MAP_READ 0x0002 /* Readable - sw */
206#define PROM_MAP_EXEC 0x0004 /* Executable - sw */
207#define PROM_MAP_LOCKED 0x0010 /* Locked, use i/dtlb load calls for this instead */
208#define PROM_MAP_CACHED 0x0020 /* Cacheable in both L1 and L2 caches */
209#define PROM_MAP_SE 0x0040 /* Side-Effects */
210#define PROM_MAP_GLOB 0x0080 /* Global */
211#define PROM_MAP_IE 0x0100 /* Invert-Endianness */
212#define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
213
214extern int prom_map(int mode, unsigned long size,
215 unsigned long vaddr, unsigned long paddr);
216extern void prom_unmap(unsigned long size, unsigned long vaddr);
217
218
219/* PROM device tree traversal functions... */
220
221#ifdef PROMLIB_INTERNAL
222
223/* Internal version of prom_getchild. */
224extern int __prom_getchild(int parent_node);
225
226/* Internal version of prom_getsibling. */
227extern int __prom_getsibling(int node);
228
229#endif
230
231/* Get the child node of the given node, or zero if no child exists. */
232extern int prom_getchild(int parent_node);
233
234/* Get the next sibling node of the given node, or zero if no further
235 * siblings exist.
236 */
237extern int prom_getsibling(int node);
238
239/* Get the length, at the passed node, of the given property type.
240 * Returns -1 on error (ie. no such property at this node).
241 */
242extern int prom_getproplen(int thisnode, const char *property);
243
244/* Fetch the requested property using the given buffer. Returns
245 * the number of bytes the prom put into your buffer or -1 on error.
246 */
247extern int prom_getproperty(int thisnode, const char *property,
248 char *prop_buffer, int propbuf_size);
249
250/* Acquire an integer property. */
251extern int prom_getint(int node, const char *property);
252
253/* Acquire an integer property, with a default value. */
254extern int prom_getintdefault(int node, const char *property, int defval);
255
256/* Acquire a boolean property, 0=FALSE 1=TRUE. */
257extern int prom_getbool(int node, const char *prop);
258
259/* Acquire a string property, null string on error. */
260extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
261
262/* Does the passed node have the given "name"? YES=1 NO=0 */
263extern int prom_nodematch(int thisnode, const char *name);
264
265/* Search all siblings starting at the passed node for "name" matching
266 * the given string. Returns the node on success, zero on failure.
267 */
268extern int prom_searchsiblings(int node_start, const char *name);
269
270/* Return the first property type, as a string, for the given node.
271 * Returns a null string on error. Buffer should be at least 32B long.
272 */
273extern char *prom_firstprop(int node, char *buffer);
274
275/* Returns the next property after the passed property for the given
276 * node. Returns null string on failure. Buffer should be at least 32B long.
277 */
278extern char *prom_nextprop(int node, const char *prev_property, char *buffer);
279
280/* Returns 1 if the specified node has the given property. */
281extern int prom_node_has_property(int node, const char *property);
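A short usage sketch (node and property names are examples only): walk from the root node to a named child and pull an integer property out of it.

static int example_clock_frequency(void)
{
	int node = prom_getchild(prom_root_node);

	node = prom_searchsiblings(node, "counter-timer");
	if (node == 0)
		return -1;

	return prom_getintdefault(node, "clock-frequency", 0);
}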
282
283/* Returns phandle of the path specified */
284extern int prom_finddevice(const char *name);
285
286/* Set the indicated property at the given node with the passed value.
287 * Returns the number of bytes of your value that the prom took.
288 */
289extern int prom_setprop(int node, const char *prop_name, char *prop_value,
290 int value_size);
291
292extern int prom_pathtoinode(const char *path);
293extern int prom_inst2pkg(int);
294extern int prom_service_exists(const char *service_name);
295extern void prom_sun4v_guest_soft_state(void);
296
297extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
298
299/* Client interface level routines. */
300extern long p1275_cmd(const char *, long, ...);
301
302#if 0
303#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
304#else
305#define P1275_SIZE(x) x
306#endif
307
308/* We support at most 16 input and 1 output argument */
309#define P1275_ARG_NUMBER 0
310#define P1275_ARG_IN_STRING 1
311#define P1275_ARG_OUT_BUF 2
312#define P1275_ARG_OUT_32B 3
313#define P1275_ARG_IN_FUNCTION 4
314#define P1275_ARG_IN_BUF 5
315#define P1275_ARG_IN_64B 6
316
317#define P1275_IN(x) ((x) & 0xf)
318#define P1275_OUT(x) (((x) << 4) & 0xf0)
319#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
320#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
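As a sketch of how these encodings are meant to be combined (the argument indices and service name here are illustrative, not a definitive call): the second p1275_cmd() argument packs the in/out cell counts plus per-argument type hints.

static long example_getprop(int node, const char *prop, char *buf, int size)
{
	return p1275_cmd("getprop",
			 P1275_ARG(1, P1275_ARG_IN_STRING) |
			 P1275_ARG(2, P1275_ARG_OUT_BUF) |
			 P1275_INOUT(4, 1),
			 node, prop, buf, P1275_SIZE(size));
}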
321
322#endif /* !(__SPARC64_OPLIB_H) */
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 93f0881b766e..f46c1fb53028 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -1,142 +1 @@
#include <asm-sparc/page.h>
1#ifndef _SPARC64_PAGE_H
2#define _SPARC64_PAGE_H
3
4#include <linux/const.h>
5
6#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
7#define PAGE_SHIFT 13
8#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
9#define PAGE_SHIFT 16
10#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
11#define PAGE_SHIFT 19
12#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
13#define PAGE_SHIFT 22
14#else
15#error No page size specified in kernel configuration
16#endif
17
18#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
19#define PAGE_MASK (~(PAGE_SIZE-1))
20
21/* Flushing for D-cache alias handling is only needed if
22 * the page size is smaller than 16K.
23 */
24#if PAGE_SHIFT < 14
25#define DCACHE_ALIASING_POSSIBLE
26#endif
27
28#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
29#define HPAGE_SHIFT 22
30#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
31#define HPAGE_SHIFT 19
32#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
33#define HPAGE_SHIFT 16
34#endif
35
36#ifdef CONFIG_HUGETLB_PAGE
37#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
38#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
39#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
40#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
41#endif
42
43#ifndef __ASSEMBLY__
44
45extern void _clear_page(void *page);
46#define clear_page(X) _clear_page((void *)(X))
47struct page;
48extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
49#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
50extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
51
52/* Unlike sparc32, sparc64's parameter passing API is more
53 * sane in that structures which are small enough are passed
54 * in registers instead of on the stack. Thus, setting
55 * STRICT_MM_TYPECHECKS does not generate worse code so
56 * let's enable it to get the type checking.
57 */
58
59#define STRICT_MM_TYPECHECKS
60
61#ifdef STRICT_MM_TYPECHECKS
62/* These are used to make use of C type-checking.. */
63typedef struct { unsigned long pte; } pte_t;
64typedef struct { unsigned long iopte; } iopte_t;
65typedef struct { unsigned int pmd; } pmd_t;
66typedef struct { unsigned int pgd; } pgd_t;
67typedef struct { unsigned long pgprot; } pgprot_t;
68
69#define pte_val(x) ((x).pte)
70#define iopte_val(x) ((x).iopte)
71#define pmd_val(x) ((x).pmd)
72#define pgd_val(x) ((x).pgd)
73#define pgprot_val(x) ((x).pgprot)
74
75#define __pte(x) ((pte_t) { (x) } )
76#define __iopte(x) ((iopte_t) { (x) } )
77#define __pmd(x) ((pmd_t) { (x) } )
78#define __pgd(x) ((pgd_t) { (x) } )
79#define __pgprot(x) ((pgprot_t) { (x) } )
80
81#else
82/* .. while these make it easier on the compiler */
83typedef unsigned long pte_t;
84typedef unsigned long iopte_t;
85typedef unsigned int pmd_t;
86typedef unsigned int pgd_t;
87typedef unsigned long pgprot_t;
88
89#define pte_val(x) (x)
90#define iopte_val(x) (x)
91#define pmd_val(x) (x)
92#define pgd_val(x) (x)
93#define pgprot_val(x) (x)
94
95#define __pte(x) (x)
96#define __iopte(x) (x)
97#define __pmd(x) (x)
98#define __pgd(x) (x)
99#define __pgprot(x) (x)
100
101#endif /* (STRICT_MM_TYPECHECKS) */
102
103typedef struct page *pgtable_t;
104
105#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
106 (_AC(0x0000000070000000,UL)) : \
107 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
108
109#include <asm-generic/memory_model.h>
110
111#endif /* !(__ASSEMBLY__) */
112
113/* to align the pointer to the (next) page boundary */
114#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
115
116/* We used to stick this into a hard-coded global register (%g4)
117 * but that does not make sense anymore.
118 */
119#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL)
120
121#ifndef __ASSEMBLY__
122
123#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
124#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
125
126#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
127
128#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
129
130#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
131
132#define virt_to_phys __pa
133#define phys_to_virt __va
134
135#endif /* !(__ASSEMBLY__) */
136
137#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
138 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
139
140#include <asm-generic/page.h>
141
142#endif /* _SPARC64_PAGE_H */
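As a quick illustration (not part of the patch) of how the conversion macros above fit together, assuming the default CONFIG_SPARC64_PAGE_SIZE_8KB configuration:

    /* PAGE_SHIFT = 13, PAGE_SIZE = 0x2000, PAGE_OFFSET = 0xFFFFF80000000000 */
    unsigned long va   = PAGE_OFFSET + 0x2000;   /* kernel linear address    */
    unsigned long pa   = __pa(va);               /* == 0x2000 (physical)     */
    void *back         = __va(pa);               /* == (void *) va again     */
    unsigned long next = PAGE_ALIGN(va + 1);     /* rounds up to va + 0x2000 */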
diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h
index f59f2571295b..da54c4d1f39c 100644
--- a/include/asm-sparc64/pci.h
+++ b/include/asm-sparc64/pci.h
@@ -1,209 +1 @@
+#include <asm-sparc/pci.h>
1#ifndef __SPARC64_PCI_H
2#define __SPARC64_PCI_H
3
4#ifdef __KERNEL__
5
6#include <linux/dma-mapping.h>
7
8/* Can be used to override the logic in pci_scan_bus for skipping
9 * already-configured bus numbers - to be used for buggy BIOSes
10 * or architectures with incomplete PCI setup by the loader.
11 */
12#define pcibios_assign_all_busses() 0
13#define pcibios_scan_all_fns(a, b) 0
14
15#define PCIBIOS_MIN_IO 0UL
16#define PCIBIOS_MIN_MEM 0UL
17
18#define PCI_IRQ_NONE 0xffffffff
19
20#define PCI_CACHE_LINE_BYTES 64
21
22static inline void pcibios_set_master(struct pci_dev *dev)
23{
24 /* No special bus mastering setup handling */
25}
26
27static inline void pcibios_penalize_isa_irq(int irq, int active)
28{
29 /* We don't do dynamic PCI IRQ allocation */
30}
31
32/* The PCI address space does not equal the physical memory
33 * address space. The networking and block device layers use
34 * this boolean for bounce buffer decisions.
35 */
36#define PCI_DMA_BUS_IS_PHYS (0)
37
38static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
39 dma_addr_t *dma_handle)
40{
41 return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
42}
43
44static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
45 void *vaddr, dma_addr_t dma_handle)
46{
47 return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
48}
49
50static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
51 size_t size, int direction)
52{
53 return dma_map_single(&pdev->dev, ptr, size,
54 (enum dma_data_direction) direction);
55}
56
57static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
58 size_t size, int direction)
59{
60 dma_unmap_single(&pdev->dev, dma_addr, size,
61 (enum dma_data_direction) direction);
62}
63
64#define pci_map_page(dev, page, off, size, dir) \
65 pci_map_single(dev, (page_address(page) + (off)), size, dir)
66#define pci_unmap_page(dev,addr,sz,dir) \
67 pci_unmap_single(dev,addr,sz,dir)
68
69/* pci_unmap_{single,page} is not a nop, thus... */
70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
71 dma_addr_t ADDR_NAME;
72#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
73 __u32 LEN_NAME;
74#define pci_unmap_addr(PTR, ADDR_NAME) \
75 ((PTR)->ADDR_NAME)
76#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
77 (((PTR)->ADDR_NAME) = (VAL))
78#define pci_unmap_len(PTR, LEN_NAME) \
79 ((PTR)->LEN_NAME)
80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
81 (((PTR)->LEN_NAME) = (VAL))
82
83static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
84 int nents, int direction)
85{
86 return dma_map_sg(&pdev->dev, sg, nents,
87 (enum dma_data_direction) direction);
88}
89
90static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
91 int nents, int direction)
92{
93 dma_unmap_sg(&pdev->dev, sg, nents,
94 (enum dma_data_direction) direction);
95}
96
97static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
98 dma_addr_t dma_handle,
99 size_t size, int direction)
100{
101 dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
102 (enum dma_data_direction) direction);
103}
104
105static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
106 dma_addr_t dma_handle,
107 size_t size, int direction)
108{
109 /* No flushing needed to sync cpu writes to the device. */
110}
111
112static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
113 struct scatterlist *sg,
114 int nents, int direction)
115{
116 dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
117 (enum dma_data_direction) direction);
118}
119
120static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
121 struct scatterlist *sg,
122 int nelems, int direction)
123{
124 /* No flushing needed to sync cpu writes to the device. */
125}
126
127/* Return whether the given PCI device DMA address mask can
128 * be supported properly. For example, if your device can
129 * only drive the low 24-bits during PCI bus mastering, then
130 * you would pass 0x00ffffff as the mask to this function.
131 */
132extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
133
134/* PCI IOMMU mapping bypass support. */
135
136/* PCI 64-bit addressing works for all slots on all controller
137 * types on sparc64. However, it requires that the device
138 * can drive enough of the 64 bits.
139 */
140#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
141#define PCI64_ADDR_BASE 0xfffc000000000000UL
142
143static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
144{
145 return dma_mapping_error(dma_addr);
146}
147
148#ifdef CONFIG_PCI
149static inline void pci_dma_burst_advice(struct pci_dev *pdev,
150 enum pci_dma_burst_strategy *strat,
151 unsigned long *strategy_parameter)
152{
153 unsigned long cacheline_size;
154 u8 byte;
155
156 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
157 if (byte == 0)
158 cacheline_size = 1024;
159 else
160 cacheline_size = (int) byte * 4;
161
162 *strat = PCI_DMA_BURST_BOUNDARY;
163 *strategy_parameter = cacheline_size;
164}
165#endif
166
167/* Return the index of the PCI controller for device PDEV. */
168
169extern int pci_domain_nr(struct pci_bus *bus);
170static inline int pci_proc_domain(struct pci_bus *bus)
171{
172 return 1;
173}
174
175/* Platform support for /proc/bus/pci/X/Y mmap()s. */
176
177#define HAVE_PCI_MMAP
178#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
179#define get_pci_unmapped_area get_fb_unmapped_area
180
181extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
182 enum pci_mmap_state mmap_state,
183 int write_combine);
184
185extern void
186pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
187 struct resource *res);
188
189extern void
190pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
191 struct pci_bus_region *region);
192
193extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);
194
195static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
196{
197 return PCI_IRQ_NONE;
198}
199
200struct device_node;
201extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
202
203#define HAVE_ARCH_PCI_RESOURCE_TO_USER
204extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
205 const struct resource *rsrc,
206 resource_size_t *start, resource_size_t *end);
207#endif /* __KERNEL__ */
208
209#endif /* __SPARC64_PCI_H */
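The DECLARE_PCI_UNMAP_* helpers above are meant to be embedded in driver-private descriptors so the unmap parameters survive until teardown. A hedged sketch (the structure and function names are hypothetical, not from this patch):

    struct rx_desc {                        /* hypothetical driver descriptor */
            struct sk_buff *skb;
            DECLARE_PCI_UNMAP_ADDR(mapping) /* expands to: dma_addr_t mapping; */
            DECLARE_PCI_UNMAP_LEN(len)      /* expands to: __u32 len;          */
    };

    static void rx_desc_map(struct pci_dev *pdev, struct rx_desc *d,
                            void *buf, size_t size)
    {
            dma_addr_t dma = pci_map_single(pdev, buf, size, PCI_DMA_FROMDEVICE);

            pci_unmap_addr_set(d, mapping, dma);
            pci_unmap_len_set(d, len, size);
            /* later: pci_unmap_single(pdev, pci_unmap_addr(d, mapping),
             *                         pci_unmap_len(d, len), PCI_DMA_FROMDEVICE);
             */
    }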
diff --git a/include/asm-sparc64/percpu.h b/include/asm-sparc64/percpu.h
index bee64593023e..292729bb350f 100644
--- a/include/asm-sparc64/percpu.h
+++ b/include/asm-sparc64/percpu.h
@@ -1,28 +1 @@
+#include <asm-sparc/percpu.h>
1#ifndef __ARCH_SPARC64_PERCPU__
2#define __ARCH_SPARC64_PERCPU__
3
4#include <linux/compiler.h>
5
6register unsigned long __local_per_cpu_offset asm("g5");
7
8#ifdef CONFIG_SMP
9
10extern void real_setup_per_cpu_areas(void);
11
12extern unsigned long __per_cpu_base;
13extern unsigned long __per_cpu_shift;
14#define __per_cpu_offset(__cpu) \
15 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
16#define per_cpu_offset(x) (__per_cpu_offset(x))
17
18#define __my_cpu_offset __local_per_cpu_offset
19
20#else /* ! SMP */
21
22#define real_setup_per_cpu_areas() do { } while (0)
23
24#endif /* SMP */
25
26#include <asm-generic/percpu.h>
27
28#endif /* __ARCH_SPARC64_PERCPU__ */
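The offset scheme above is simple enough to show with a two-line sketch (illustrative only; in real code the generic per_cpu() machinery in asm-generic/percpu.h performs this addition, and %g5 holds the local CPU's offset):

    /* Address of CPU n's copy of a per-cpu object, in an SMP build. */
    static unsigned long percpu_copy_addr(unsigned long obj, int cpu)
    {
            return obj + __per_cpu_offset(cpu); /* __per_cpu_base + (cpu << __per_cpu_shift) */
    }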
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index 326de104d014..bec31641011c 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -1,81 +1 @@
+#include <asm-sparc/pgalloc.h>
1#ifndef _SPARC64_PGALLOC_H
2#define _SPARC64_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/mm.h>
7#include <linux/slab.h>
8#include <linux/quicklist.h>
9
10#include <asm/spitfire.h>
11#include <asm/cpudata.h>
12#include <asm/cacheflush.h>
13#include <asm/page.h>
14
15/* Page table allocation/freeing. */
16
17static inline pgd_t *pgd_alloc(struct mm_struct *mm)
18{
19 return quicklist_alloc(0, GFP_KERNEL, NULL);
20}
21
22static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23{
24 quicklist_free(0, NULL, pgd);
25}
26
27#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
28
29static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
30{
31 return quicklist_alloc(0, GFP_KERNEL, NULL);
32}
33
34static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
35{
36 quicklist_free(0, NULL, pmd);
37}
38
39static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
40 unsigned long address)
41{
42 return quicklist_alloc(0, GFP_KERNEL, NULL);
43}
44
45static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
46 unsigned long address)
47{
48 struct page *page;
49 void *pg;
50
51 pg = quicklist_alloc(0, GFP_KERNEL, NULL);
52 if (!pg)
53 return NULL;
54 page = virt_to_page(pg);
55 pgtable_page_ctor(page);
56 return page;
57}
58
59static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
60{
61 quicklist_free(0, NULL, pte);
62}
63
64static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
65{
66 pgtable_page_dtor(ptepage);
67 quicklist_free_page(0, NULL, ptepage);
68}
69
70
71#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
72#define pmd_populate(MM,PMD,PTE_PAGE) \
73 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
74#define pmd_pgtable(pmd) pmd_page(pmd)
75
76static inline void check_pgt_cache(void)
77{
78 quicklist_trim(0, NULL, 25, 16);
79}
80
81#endif /* _SPARC64_PGALLOC_H */
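A minimal sketch (not in the patch) of the allocation flow these quicklist-backed helpers serve; the page-table locking and the repopulation race check done by mm/memory.c are omitted, and the function name is made up:

    static int map_one_pte_page(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
    {
            pgtable_t new = pte_alloc_one(mm, addr); /* quicklist page + pgtable_page_ctor() */

            if (!new)
                    return -ENOMEM;
            pmd_populate(mm, pmd, new);              /* pmd_set() on page_address(new) */
            return 0;
    }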
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index b87017747b5d..9decbd99aeff 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -1,781 +1 @@
+#include <asm-sparc/pgtable.h>
1/*
2 * pgtable.h: SpitFire page table operations.
3 *
4 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#ifndef _SPARC64_PGTABLE_H
9#define _SPARC64_PGTABLE_H
10
11/* This file contains the functions and defines necessary to modify and use
12 * the SpitFire page tables.
13 */
14
15#include <asm-generic/pgtable-nopud.h>
16
17#include <linux/compiler.h>
18#include <linux/const.h>
19#include <asm/types.h>
20#include <asm/spitfire.h>
21#include <asm/asi.h>
22#include <asm/system.h>
23#include <asm/page.h>
24#include <asm/processor.h>
25
26/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
27 * The page copy blockops can use 0x6000000 to 0x8000000.
28 * The TSB is mapped in the 0x8000000 to 0xa000000 range.
29 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
30 * The vmalloc area spans 0x100000000 to 0x200000000.
31 * Since modules need to be in the lowest 32-bits of the address space,
32 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
33 * There is a single static kernel PMD which maps from 0x0 to address
34 * 0x400000000.
35 */
36#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
37#define TSBMAP_BASE _AC(0x0000000008000000,UL)
38#define MODULES_VADDR _AC(0x0000000010000000,UL)
39#define MODULES_LEN _AC(0x00000000e0000000,UL)
40#define MODULES_END _AC(0x00000000f0000000,UL)
41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
43#define VMALLOC_START _AC(0x0000000100000000,UL)
44#define VMALLOC_END _AC(0x0000000200000000,UL)
45#define VMEMMAP_BASE _AC(0x0000000200000000,UL)
46
47#define vmemmap ((struct page *)VMEMMAP_BASE)
48
49/* XXX All of this needs to be rethought so we can take advantage
50 * XXX of cheetah's full 64-bit virtual address space, i.e. no more hole
51 * XXX in the middle like on spitfire. -DaveM
52 */
53/*
54 * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
55 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
56 * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
57 * table is a single page long). The next higher PMD_BITS determine pmd#
58 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
59 * since the pmd entries are 4 bytes, and each pmd page is a single page
60 * long). Finally, the higher few bits determine pgde#.
61 */
62
63/* PMD_SHIFT determines the size of the area a second-level page
64 * table can map
65 */
66#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
67#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
68#define PMD_MASK (~(PMD_SIZE-1))
69#define PMD_BITS (PAGE_SHIFT - 2)
70
71/* PGDIR_SHIFT determines what a third-level page table entry can map */
72#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
73#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
74#define PGDIR_MASK (~(PGDIR_SIZE-1))
75#define PGDIR_BITS (PAGE_SHIFT - 2)
76
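/* Worked example (not in the original header), assuming 8K pages
 * (PAGE_SHIFT == 13):
 *   PMD_SHIFT   = 13 + (13 - 3) = 23  -> one pmd entry maps 8MB
 *   PMD_BITS    = 13 - 2        = 11
 *   PGDIR_SHIFT = 13 + 10 + 11  = 34  -> one pgd entry maps 16GB
 * which matches PTRS_PER_PTE = 1024 and PTRS_PER_PMD = PTRS_PER_PGD = 2048
 * defined just below.
 */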
77#ifndef __ASSEMBLY__
78
79#include <linux/sched.h>
80
81/* Entries per page directory level. */
82#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
83#define PTRS_PER_PMD (1UL << PMD_BITS)
84#define PTRS_PER_PGD (1UL << PGDIR_BITS)
85
86/* Kernel has a separate 44-bit address space. */
87#define FIRST_USER_ADDRESS 0
88
89#define pte_ERROR(e) __builtin_trap()
90#define pmd_ERROR(e) __builtin_trap()
91#define pgd_ERROR(e) __builtin_trap()
92
93#endif /* !(__ASSEMBLY__) */
94
95/* PTE bits which are the same in SUN4U and SUN4V format. */
96#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
97#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date */
98
99/* SUN4U pte bits... */
100#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
101#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
102#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
103#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
104#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
105#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
106#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
107#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
108#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
109#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
110#define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */
111#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
112#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
113#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
114#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
115#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
116#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
117#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
118#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
119#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
120#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
121#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
122#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
123#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
124#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
125#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
126#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
127#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */
128
129/* SUN4V pte bits... */
130#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
131#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
132#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
133#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
134#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
135#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
136#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
137#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
138#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
139#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
140#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
141#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
142#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
143#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
144#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
145#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
146#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
147#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
148#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
149#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
150#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
151#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
152#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
153#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
154#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
155#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
156#define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */
157
158#if PAGE_SHIFT == 13
159#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
160#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
161#elif PAGE_SHIFT == 16
162#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
163#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
164#elif PAGE_SHIFT == 19
165#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
166#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
167#elif PAGE_SHIFT == 22
168#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
169#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
170#else
171#error Wrong PAGE_SHIFT specified
172#endif
173
174#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
175#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
176#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
177#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
178#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
179#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
180#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
181#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
182#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
183#endif
184
185/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
186#define __P000 __pgprot(0)
187#define __P001 __pgprot(0)
188#define __P010 __pgprot(0)
189#define __P011 __pgprot(0)
190#define __P100 __pgprot(0)
191#define __P101 __pgprot(0)
192#define __P110 __pgprot(0)
193#define __P111 __pgprot(0)
194
195#define __S000 __pgprot(0)
196#define __S001 __pgprot(0)
197#define __S010 __pgprot(0)
198#define __S011 __pgprot(0)
199#define __S100 __pgprot(0)
200#define __S101 __pgprot(0)
201#define __S110 __pgprot(0)
202#define __S111 __pgprot(0)
203
204#ifndef __ASSEMBLY__
205
206extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
207
208extern unsigned long pte_sz_bits(unsigned long size);
209
210extern pgprot_t PAGE_KERNEL;
211extern pgprot_t PAGE_KERNEL_LOCKED;
212extern pgprot_t PAGE_COPY;
213extern pgprot_t PAGE_SHARED;
214
215/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
216extern unsigned long _PAGE_IE;
217extern unsigned long _PAGE_E;
218extern unsigned long _PAGE_CACHE;
219
220extern unsigned long pg_iobits;
221extern unsigned long _PAGE_ALL_SZ_BITS;
222extern unsigned long _PAGE_SZBITS;
223
224extern struct page *mem_map_zero;
225#define ZERO_PAGE(vaddr) (mem_map_zero)
226
227/* PFNs are real physical page numbers. However, mem_map only begins to record
228 * per-page information starting at pfn_base. This is to handle systems where
229 * the first physical page in the machine is at some huge physical address,
230 * such as 4GB. This is common on a partitioned E10000, for example.
231 */
232static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
233{
234 unsigned long paddr = pfn << PAGE_SHIFT;
235 unsigned long sz_bits;
236
237 sz_bits = 0UL;
238 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
239 __asm__ __volatile__(
240 "\n661: sethi %%uhi(%1), %0\n"
241 " sllx %0, 32, %0\n"
242 " .section .sun4v_2insn_patch, \"ax\"\n"
243 " .word 661b\n"
244 " mov %2, %0\n"
245 " nop\n"
246 " .previous\n"
247 : "=r" (sz_bits)
248 : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
249 }
250 return __pte(paddr | sz_bits | pgprot_val(prot));
251}
252#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
253
254/* This one can be done with two shifts. */
255static inline unsigned long pte_pfn(pte_t pte)
256{
257 unsigned long ret;
258
259 __asm__ __volatile__(
260 "\n661: sllx %1, %2, %0\n"
261 " srlx %0, %3, %0\n"
262 " .section .sun4v_2insn_patch, \"ax\"\n"
263 " .word 661b\n"
264 " sllx %1, %4, %0\n"
265 " srlx %0, %5, %0\n"
266 " .previous\n"
267 : "=r" (ret)
268 : "r" (pte_val(pte)),
269 "i" (21), "i" (21 + PAGE_SHIFT),
270 "i" (8), "i" (8 + PAGE_SHIFT));
271
272 return ret;
273}
274#define pte_page(x) pfn_to_page(pte_pfn(x))
275
276static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
277{
278 unsigned long mask, tmp;
279
280 /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
281 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
282 *
283 * Even if we use negation tricks, the result is still a
284 * 6-instruction sequence, so don't try to play fancy and just
285 * do the most straightforward implementation.
286 *
287 * Note: We encode this into 3 sun4v 2-insn patch sequences.
288 */
289
290 __asm__ __volatile__(
291 "\n661: sethi %%uhi(%2), %1\n"
292 " sethi %%hi(%2), %0\n"
293 "\n662: or %1, %%ulo(%2), %1\n"
294 " or %0, %%lo(%2), %0\n"
295 "\n663: sllx %1, 32, %1\n"
296 " or %0, %1, %0\n"
297 " .section .sun4v_2insn_patch, \"ax\"\n"
298 " .word 661b\n"
299 " sethi %%uhi(%3), %1\n"
300 " sethi %%hi(%3), %0\n"
301 " .word 662b\n"
302 " or %1, %%ulo(%3), %1\n"
303 " or %0, %%lo(%3), %0\n"
304 " .word 663b\n"
305 " sllx %1, 32, %1\n"
306 " or %0, %1, %0\n"
307 " .previous\n"
308 : "=r" (mask), "=r" (tmp)
309 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
310 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
311 _PAGE_SZBITS_4U),
312 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
313 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
314 _PAGE_SZBITS_4V));
315
316 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
317}
318
319static inline pte_t pgoff_to_pte(unsigned long off)
320{
321 off <<= PAGE_SHIFT;
322
323 __asm__ __volatile__(
324 "\n661: or %0, %2, %0\n"
325 " .section .sun4v_1insn_patch, \"ax\"\n"
326 " .word 661b\n"
327 " or %0, %3, %0\n"
328 " .previous\n"
329 : "=r" (off)
330 : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
331
332 return __pte(off);
333}
334
335static inline pgprot_t pgprot_noncached(pgprot_t prot)
336{
337 unsigned long val = pgprot_val(prot);
338
339 __asm__ __volatile__(
340 "\n661: andn %0, %2, %0\n"
341 " or %0, %3, %0\n"
342 " .section .sun4v_2insn_patch, \"ax\"\n"
343 " .word 661b\n"
344 " andn %0, %4, %0\n"
345 " or %0, %5, %0\n"
346 " .previous\n"
347 : "=r" (val)
348 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
349 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
350
351 return __pgprot(val);
352}
353/* Various pieces of code check for platform support by ifdef testing
354 * on "pgprot_noncached". That's broken and should be fixed, but for
355 * now...
356 */
357#define pgprot_noncached pgprot_noncached
358
359#ifdef CONFIG_HUGETLB_PAGE
360static inline pte_t pte_mkhuge(pte_t pte)
361{
362 unsigned long mask;
363
364 __asm__ __volatile__(
365 "\n661: sethi %%uhi(%1), %0\n"
366 " sllx %0, 32, %0\n"
367 " .section .sun4v_2insn_patch, \"ax\"\n"
368 " .word 661b\n"
369 " mov %2, %0\n"
370 " nop\n"
371 " .previous\n"
372 : "=r" (mask)
373 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
374
375 return __pte(pte_val(pte) | mask);
376}
377#endif
378
379static inline pte_t pte_mkdirty(pte_t pte)
380{
381 unsigned long val = pte_val(pte), tmp;
382
383 __asm__ __volatile__(
384 "\n661: or %0, %3, %0\n"
385 " nop\n"
386 "\n662: nop\n"
387 " nop\n"
388 " .section .sun4v_2insn_patch, \"ax\"\n"
389 " .word 661b\n"
390 " sethi %%uhi(%4), %1\n"
391 " sllx %1, 32, %1\n"
392 " .word 662b\n"
393 " or %1, %%lo(%4), %1\n"
394 " or %0, %1, %0\n"
395 " .previous\n"
396 : "=r" (val), "=r" (tmp)
397 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
398 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
399
400 return __pte(val);
401}
402
403static inline pte_t pte_mkclean(pte_t pte)
404{
405 unsigned long val = pte_val(pte), tmp;
406
407 __asm__ __volatile__(
408 "\n661: andn %0, %3, %0\n"
409 " nop\n"
410 "\n662: nop\n"
411 " nop\n"
412 " .section .sun4v_2insn_patch, \"ax\"\n"
413 " .word 661b\n"
414 " sethi %%uhi(%4), %1\n"
415 " sllx %1, 32, %1\n"
416 " .word 662b\n"
417 " or %1, %%lo(%4), %1\n"
418 " andn %0, %1, %0\n"
419 " .previous\n"
420 : "=r" (val), "=r" (tmp)
421 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
422 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
423
424 return __pte(val);
425}
426
427static inline pte_t pte_mkwrite(pte_t pte)
428{
429 unsigned long val = pte_val(pte), mask;
430
431 __asm__ __volatile__(
432 "\n661: mov %1, %0\n"
433 " nop\n"
434 " .section .sun4v_2insn_patch, \"ax\"\n"
435 " .word 661b\n"
436 " sethi %%uhi(%2), %0\n"
437 " sllx %0, 32, %0\n"
438 " .previous\n"
439 : "=r" (mask)
440 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
441
442 return __pte(val | mask);
443}
444
445static inline pte_t pte_wrprotect(pte_t pte)
446{
447 unsigned long val = pte_val(pte), tmp;
448
449 __asm__ __volatile__(
450 "\n661: andn %0, %3, %0\n"
451 " nop\n"
452 "\n662: nop\n"
453 " nop\n"
454 " .section .sun4v_2insn_patch, \"ax\"\n"
455 " .word 661b\n"
456 " sethi %%uhi(%4), %1\n"
457 " sllx %1, 32, %1\n"
458 " .word 662b\n"
459 " or %1, %%lo(%4), %1\n"
460 " andn %0, %1, %0\n"
461 " .previous\n"
462 : "=r" (val), "=r" (tmp)
463 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
464 "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
465
466 return __pte(val);
467}
468
469static inline pte_t pte_mkold(pte_t pte)
470{
471 unsigned long mask;
472
473 __asm__ __volatile__(
474 "\n661: mov %1, %0\n"
475 " nop\n"
476 " .section .sun4v_2insn_patch, \"ax\"\n"
477 " .word 661b\n"
478 " sethi %%uhi(%2), %0\n"
479 " sllx %0, 32, %0\n"
480 " .previous\n"
481 : "=r" (mask)
482 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
483
484 mask |= _PAGE_R;
485
486 return __pte(pte_val(pte) & ~mask);
487}
488
489static inline pte_t pte_mkyoung(pte_t pte)
490{
491 unsigned long mask;
492
493 __asm__ __volatile__(
494 "\n661: mov %1, %0\n"
495 " nop\n"
496 " .section .sun4v_2insn_patch, \"ax\"\n"
497 " .word 661b\n"
498 " sethi %%uhi(%2), %0\n"
499 " sllx %0, 32, %0\n"
500 " .previous\n"
501 : "=r" (mask)
502 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
503
504 mask |= _PAGE_R;
505
506 return __pte(pte_val(pte) | mask);
507}
508
509static inline pte_t pte_mkspecial(pte_t pte)
510{
511 return pte;
512}
513
514static inline unsigned long pte_young(pte_t pte)
515{
516 unsigned long mask;
517
518 __asm__ __volatile__(
519 "\n661: mov %1, %0\n"
520 " nop\n"
521 " .section .sun4v_2insn_patch, \"ax\"\n"
522 " .word 661b\n"
523 " sethi %%uhi(%2), %0\n"
524 " sllx %0, 32, %0\n"
525 " .previous\n"
526 : "=r" (mask)
527 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
528
529 return (pte_val(pte) & mask);
530}
531
532static inline unsigned long pte_dirty(pte_t pte)
533{
534 unsigned long mask;
535
536 __asm__ __volatile__(
537 "\n661: mov %1, %0\n"
538 " nop\n"
539 " .section .sun4v_2insn_patch, \"ax\"\n"
540 " .word 661b\n"
541 " sethi %%uhi(%2), %0\n"
542 " sllx %0, 32, %0\n"
543 " .previous\n"
544 : "=r" (mask)
545 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
546
547 return (pte_val(pte) & mask);
548}
549
550static inline unsigned long pte_write(pte_t pte)
551{
552 unsigned long mask;
553
554 __asm__ __volatile__(
555 "\n661: mov %1, %0\n"
556 " nop\n"
557 " .section .sun4v_2insn_patch, \"ax\"\n"
558 " .word 661b\n"
559 " sethi %%uhi(%2), %0\n"
560 " sllx %0, 32, %0\n"
561 " .previous\n"
562 : "=r" (mask)
563 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
564
565 return (pte_val(pte) & mask);
566}
567
568static inline unsigned long pte_exec(pte_t pte)
569{
570 unsigned long mask;
571
572 __asm__ __volatile__(
573 "\n661: sethi %%hi(%1), %0\n"
574 " .section .sun4v_1insn_patch, \"ax\"\n"
575 " .word 661b\n"
576 " mov %2, %0\n"
577 " .previous\n"
578 : "=r" (mask)
579 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
580
581 return (pte_val(pte) & mask);
582}
583
584static inline unsigned long pte_file(pte_t pte)
585{
586 unsigned long val = pte_val(pte);
587
588 __asm__ __volatile__(
589 "\n661: and %0, %2, %0\n"
590 " .section .sun4v_1insn_patch, \"ax\"\n"
591 " .word 661b\n"
592 " and %0, %3, %0\n"
593 " .previous\n"
594 : "=r" (val)
595 : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
596
597 return val;
598}
599
600static inline unsigned long pte_present(pte_t pte)
601{
602 unsigned long val = pte_val(pte);
603
604 __asm__ __volatile__(
605 "\n661: and %0, %2, %0\n"
606 " .section .sun4v_1insn_patch, \"ax\"\n"
607 " .word 661b\n"
608 " and %0, %3, %0\n"
609 " .previous\n"
610 : "=r" (val)
611 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
612
613 return val;
614}
615
616static inline int pte_special(pte_t pte)
617{
618 return 0;
619}
620
621#define pmd_set(pmdp, ptep) \
622 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
623#define pud_set(pudp, pmdp) \
624 (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
625#define __pmd_page(pmd) \
626 ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
627#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
628#define pud_page_vaddr(pud) \
629 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
630#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
631#define pmd_none(pmd) (!pmd_val(pmd))
632#define pmd_bad(pmd) (0)
633#define pmd_present(pmd) (pmd_val(pmd) != 0U)
634#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
635#define pud_none(pud) (!pud_val(pud))
636#define pud_bad(pud) (0)
637#define pud_present(pud) (pud_val(pud) != 0U)
638#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
639
640/* Same in both SUN4V and SUN4U. */
641#define pte_none(pte) (!pte_val(pte))
642
643/* to find an entry in a page-table-directory. */
644#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
645#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
646
647/* to find an entry in a kernel page-table-directory */
648#define pgd_offset_k(address) pgd_offset(&init_mm, address)
649
650/* Find an entry in the second-level page table.. */
651#define pmd_offset(pudp, address) \
652 ((pmd_t *) pud_page_vaddr(*(pudp)) + \
653 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
654
655/* Find an entry in the third-level page table.. */
656#define pte_index(dir, address) \
657 ((pte_t *) __pmd_page(*(dir)) + \
658 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
659#define pte_offset_kernel pte_index
660#define pte_offset_map pte_index
661#define pte_offset_map_nested pte_index
662#define pte_unmap(pte) do { } while (0)
663#define pte_unmap_nested(pte) do { } while (0)
664
665/* Actual page table PTE updates. */
666extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
667
668static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
669{
670 pte_t orig = *ptep;
671
672 *ptep = pte;
673
674 /* It is more efficient to let flush_tlb_kernel_range()
675 * handle init_mm tlb flushes.
676 *
677 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
678 * and SUN4V pte layout, so this inline test is fine.
679 */
680 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
681 tlb_batch_add(mm, addr, ptep, orig);
682}
683
684#define pte_clear(mm,addr,ptep) \
685 set_pte_at((mm), (addr), (ptep), __pte(0UL))
686
687#ifdef DCACHE_ALIASING_POSSIBLE
688#define __HAVE_ARCH_MOVE_PTE
689#define move_pte(pte, prot, old_addr, new_addr) \
690({ \
691 pte_t newpte = (pte); \
692 if (tlb_type != hypervisor && pte_present(pte)) { \
693 unsigned long this_pfn = pte_pfn(pte); \
694 \
695 if (pfn_valid(this_pfn) && \
696 (((old_addr) ^ (new_addr)) & (1 << 13))) \
697 flush_dcache_page_all(current->mm, \
698 pfn_to_page(this_pfn)); \
699 } \
700 newpte; \
701})
702#endif
703
704extern pgd_t swapper_pg_dir[2048];
705extern pmd_t swapper_low_pmd_dir[2048];
706
707extern void paging_init(void);
708extern unsigned long find_ecache_flush_span(unsigned long size);
709
710/* These do nothing with the way I have things set up. */
711#define mmu_lockarea(vaddr, len) (vaddr)
712#define mmu_unlockarea(vaddr, len) do { } while(0)
713
714struct vm_area_struct;
715extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
716
717/* Encode and de-code a swap entry */
718#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
719#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
720#define __swp_entry(type, offset) \
721 ( (swp_entry_t) \
722 { \
723 (((long)(type) << PAGE_SHIFT) | \
724 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
725 } )
726#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
727#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
728
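/* Layout note (added for illustration, assuming 8K pages): __swp_entry()
 * leaves the low PAGE_SHIFT bits clear (so the present bits are never set),
 * stores the 8-bit swap type in bits 20:13 and the offset from bit 21 up;
 * __swp_type() and __swp_offset() simply undo those shifts.
 */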
729/* File offset in PTE support. */
730extern unsigned long pte_file(pte_t);
731#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
732extern pte_t pgoff_to_pte(unsigned long);
733#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
734
735extern unsigned long *sparc64_valid_addr_bitmap;
736
737/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
738#define kern_addr_valid(addr) \
739 (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
740
741extern int page_in_phys_avail(unsigned long paddr);
742
743extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
744 unsigned long pfn,
745 unsigned long size, pgprot_t prot);
746
747/*
748 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
749 * its high 4 bits. These macros/functions put it there or get it from there.
750 */
751#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
752#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
753#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
754
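/* Example (illustrative only): with BITS_PER_LONG == 64 an <iospace> of 2UL
 * lands in bits 63:60, so MK_IOSPACE_PFN(2UL, pfn) == pfn | (2UL << 60),
 * GET_IOSPACE() recovers the 2 and GET_PFN() masks it back off.
 */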
755#include <asm-generic/pgtable.h>
756
757/* We provide our own get_unmapped_area to cope with VA holes and
758 * SHM area cache aliasing for userland.
759 */
760#define HAVE_ARCH_UNMAPPED_AREA
761#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
762
763/* We provide a special get_unmapped_area for framebuffer mmaps to try to use
764 * the largest alignment possible such that larger PTEs can be used.
765 */
766extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
767 unsigned long, unsigned long,
768 unsigned long);
769#define HAVE_ARCH_FB_UNMAPPED_AREA
770
771extern void pgtable_cache_init(void);
772extern void sun4v_register_fault_status(void);
773extern void sun4v_ktsb_register(void);
774extern void __init cheetah_ecache_flush_init(void);
775extern void sun4v_patch_tlb_handlers(void);
776
777extern unsigned long cmdline_memory_size;
778
779#endif /* !(__ASSEMBLY__) */
780
781#endif /* !(_SPARC64_PGTABLE_H) */
diff --git a/include/asm-sparc64/posix_types.h b/include/asm-sparc64/posix_types.h
index 4eaaa0196636..8cee99200232 100644
--- a/include/asm-sparc64/posix_types.h
+++ b/include/asm-sparc64/posix_types.h
@@ -1,122 +1 @@
+#include <asm-sparc/posix_types.h>
1#ifndef __ARCH_SPARC64_POSIX_TYPES_H
2#define __ARCH_SPARC64_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_size_t;
11typedef long __kernel_ssize_t;
12typedef long __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_clock_t;
15typedef int __kernel_pid_t;
16typedef int __kernel_ipc_pid_t;
17typedef unsigned int __kernel_uid_t;
18typedef unsigned int __kernel_gid_t;
19typedef unsigned long __kernel_ino_t;
20typedef unsigned int __kernel_mode_t;
21typedef unsigned short __kernel_umode_t;
22typedef unsigned int __kernel_nlink_t;
23typedef int __kernel_daddr_t;
24typedef long __kernel_off_t;
25typedef char * __kernel_caddr_t;
26typedef unsigned short __kernel_uid16_t;
27typedef unsigned short __kernel_gid16_t;
28typedef int __kernel_clockid_t;
29typedef int __kernel_timer_t;
30
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef __kernel_uid_t __kernel_uid32_t;
34typedef __kernel_gid_t __kernel_gid32_t;
35
36typedef unsigned int __kernel_old_dev_t;
37
38/* Note this piece of asymmetry from the v9 ABI. */
39typedef int __kernel_suseconds_t;
40
41#ifdef __GNUC__
42typedef long long __kernel_loff_t;
43#endif
44
45typedef struct {
46 int val[2];
47} __kernel_fsid_t;
48
49#if defined(__KERNEL__)
50
51#undef __FD_SET
52static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
53{
54 unsigned long _tmp = fd / __NFDBITS;
55 unsigned long _rem = fd % __NFDBITS;
56 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
57}
58
59#undef __FD_CLR
60static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
61{
62 unsigned long _tmp = fd / __NFDBITS;
63 unsigned long _rem = fd % __NFDBITS;
64 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
65}
66
67#undef __FD_ISSET
68static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
69{
70 unsigned long _tmp = fd / __NFDBITS;
71 unsigned long _rem = fd % __NFDBITS;
72 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
73}
74
75/*
76 * This will unroll the loop for the normal constant cases (8 or 32 longs,
77 * for 256 and 1024-bit fd_sets respectively)
78 */
79#undef __FD_ZERO
80static inline void __FD_ZERO(__kernel_fd_set *p)
81{
82 unsigned long *tmp = p->fds_bits;
83 int i;
84
85 if (__builtin_constant_p(__FDSET_LONGS)) {
86 switch (__FDSET_LONGS) {
87 case 32:
88 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
89 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
90 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
91 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
92 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
93 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
94 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
95 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
96 return;
97 case 16:
98 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
99 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
100 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
101 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
102 return;
103 case 8:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
106 return;
107 case 4:
108 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
109 return;
110 }
111 }
112 i = __FDSET_LONGS;
113 while (i) {
114 i--;
115 *tmp = 0;
116 tmp++;
117 }
118}
119
120#endif /* defined(__KERNEL__) */
121
122#endif /* !(__ARCH_SPARC64_POSIX_TYPES_H) */
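A tiny usage sketch (not part of the patch) showing the word/bit split the helpers above compute; __NFDBITS is 64 here since sparc64 longs are 64 bits wide:

    static void fd_set_sketch(void)
    {
            __kernel_fd_set set;

            __FD_ZERO(&set);
            __FD_SET(70, &set);   /* 70 / 64 -> fds_bits[1], 70 % 64 -> bit 6 */
    }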
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 26b4e5255761..21de6cc182eb 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -1,237 +1 @@
+#include <asm-sparc/processor.h>
1/*
2 * include/asm-sparc64/processor.h
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef __ASM_SPARC64_PROCESSOR_H
8#define __ASM_SPARC64_PROCESSOR_H
9
10/*
11 * Sparc64 implementation of macro that returns current
12 * instruction pointer ("program counter").
13 */
14#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
15
16#include <asm/asi.h>
17#include <asm/pstate.h>
18#include <asm/ptrace.h>
19#include <asm/page.h>
20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/*
26 * User lives in his very own context, and cannot reference us. Note
27 * that TASK_SIZE is a misnomer; it really gives the maximum user virtual
28 * address that the kernel will allocate out.
29 *
30 * XXX No longer using virtual page tables, kill this upper limit...
31 */
32#define VA_BITS 44
33#ifndef __ASSEMBLY__
34#define VPTE_SIZE (1UL << (VA_BITS - PAGE_SHIFT + 3))
35#else
36#define VPTE_SIZE (1 << (VA_BITS - PAGE_SHIFT + 3))
37#endif
38
39#define TASK_SIZE ((unsigned long)-VPTE_SIZE)
40#define TASK_SIZE_OF(tsk) \
41 (test_tsk_thread_flag(tsk,TIF_32BIT) ? \
42 (1UL << 32UL) : TASK_SIZE)
43#ifdef __KERNEL__
44
45#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
46#define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL))
47
48#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
49 STACK_TOP32 : STACK_TOP64)
50
51#define STACK_TOP_MAX STACK_TOP64
52
53#endif
54
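/* Illustrative values (not in the original header), assuming 8K pages
 * (PAGE_SHIFT == 13):
 *   VPTE_SIZE   = 1UL << (44 - 13 + 3) = 16GB
 *   TASK_SIZE   = -VPTE_SIZE           = 0xfffffffc00000000
 *   STACK_TOP32 = 4GB - 8KB            = 0x00000000ffffe000
 *   STACK_TOP64 = 8TB - 4GB            = 0x000007ff00000000
 */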
55#ifndef __ASSEMBLY__
56
57typedef struct {
58 unsigned char seg;
59} mm_segment_t;
60
61/* The Sparc processor specific thread struct. */
62/* XXX This should die, everything can go into thread_info now. */
63struct thread_struct {
64#ifdef CONFIG_DEBUG_SPINLOCK
65 /* How many spinlocks held by this thread.
66 * Used with spin lock debugging to catch tasks
67 * sleeping illegally with locks held.
68 */
69 int smp_lock_count;
70 unsigned int smp_lock_pc;
71#else
72 int dummy; /* f'in gcc bug... */
73#endif
74};
75
76#endif /* !(__ASSEMBLY__) */
77
78#ifndef CONFIG_DEBUG_SPINLOCK
79#define INIT_THREAD { \
80 0, \
81}
82#else /* CONFIG_DEBUG_SPINLOCK */
83#define INIT_THREAD { \
84/* smp_lock_count, smp_lock_pc, */ \
85 0, 0, \
86}
87#endif /* !(CONFIG_DEBUG_SPINLOCK) */
88
89#ifndef __ASSEMBLY__
90
91#include <linux/types.h>
92
93/* Return saved PC of a blocked thread. */
94struct task_struct;
95extern unsigned long thread_saved_pc(struct task_struct *);
96
97/* On uniprocessor, processes see TSO semantics even in RMO */
98#ifdef CONFIG_SMP
99#define TSTATE_INITIAL_MM TSTATE_TSO
100#else
101#define TSTATE_INITIAL_MM TSTATE_RMO
102#endif
103
104/* Do necessary setup to start up a newly executed thread. */
105#define start_thread(regs, pc, sp) \
106do { \
107 unsigned long __asi = ASI_PNF; \
108 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
109 regs->tpc = ((pc & (~3)) - 4); \
110 regs->tnpc = regs->tpc + 4; \
111 regs->y = 0; \
112 set_thread_wstate(1 << 3); \
113 if (current_thread_info()->utraps) { \
114 if (*(current_thread_info()->utraps) < 2) \
115 kfree(current_thread_info()->utraps); \
116 else \
117 (*(current_thread_info()->utraps))--; \
118 current_thread_info()->utraps = NULL; \
119 } \
120 __asm__ __volatile__( \
121 "stx %%g0, [%0 + %2 + 0x00]\n\t" \
122 "stx %%g0, [%0 + %2 + 0x08]\n\t" \
123 "stx %%g0, [%0 + %2 + 0x10]\n\t" \
124 "stx %%g0, [%0 + %2 + 0x18]\n\t" \
125 "stx %%g0, [%0 + %2 + 0x20]\n\t" \
126 "stx %%g0, [%0 + %2 + 0x28]\n\t" \
127 "stx %%g0, [%0 + %2 + 0x30]\n\t" \
128 "stx %%g0, [%0 + %2 + 0x38]\n\t" \
129 "stx %%g0, [%0 + %2 + 0x40]\n\t" \
130 "stx %%g0, [%0 + %2 + 0x48]\n\t" \
131 "stx %%g0, [%0 + %2 + 0x50]\n\t" \
132 "stx %%g0, [%0 + %2 + 0x58]\n\t" \
133 "stx %%g0, [%0 + %2 + 0x60]\n\t" \
134 "stx %%g0, [%0 + %2 + 0x68]\n\t" \
135 "stx %1, [%0 + %2 + 0x70]\n\t" \
136 "stx %%g0, [%0 + %2 + 0x78]\n\t" \
137 "wrpr %%g0, (1 << 3), %%wstate\n\t" \
138 : \
139 : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
140 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
141} while (0)
142
143#define start_thread32(regs, pc, sp) \
144do { \
145 unsigned long __asi = ASI_PNF; \
146 pc &= 0x00000000ffffffffUL; \
147 sp &= 0x00000000ffffffffUL; \
148 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
149 regs->tpc = ((pc & (~3)) - 4); \
150 regs->tnpc = regs->tpc + 4; \
151 regs->y = 0; \
152 set_thread_wstate(2 << 3); \
153 if (current_thread_info()->utraps) { \
154 if (*(current_thread_info()->utraps) < 2) \
155 kfree(current_thread_info()->utraps); \
156 else \
157 (*(current_thread_info()->utraps))--; \
158 current_thread_info()->utraps = NULL; \
159 } \
160 __asm__ __volatile__( \
161 "stx %%g0, [%0 + %2 + 0x00]\n\t" \
162 "stx %%g0, [%0 + %2 + 0x08]\n\t" \
163 "stx %%g0, [%0 + %2 + 0x10]\n\t" \
164 "stx %%g0, [%0 + %2 + 0x18]\n\t" \
165 "stx %%g0, [%0 + %2 + 0x20]\n\t" \
166 "stx %%g0, [%0 + %2 + 0x28]\n\t" \
167 "stx %%g0, [%0 + %2 + 0x30]\n\t" \
168 "stx %%g0, [%0 + %2 + 0x38]\n\t" \
169 "stx %%g0, [%0 + %2 + 0x40]\n\t" \
170 "stx %%g0, [%0 + %2 + 0x48]\n\t" \
171 "stx %%g0, [%0 + %2 + 0x50]\n\t" \
172 "stx %%g0, [%0 + %2 + 0x58]\n\t" \
173 "stx %%g0, [%0 + %2 + 0x60]\n\t" \
174 "stx %%g0, [%0 + %2 + 0x68]\n\t" \
175 "stx %1, [%0 + %2 + 0x70]\n\t" \
176 "stx %%g0, [%0 + %2 + 0x78]\n\t" \
177 "wrpr %%g0, (2 << 3), %%wstate\n\t" \
178 : \
179 : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
180 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
181} while (0)
182
183/* Free all resources held by a thread. */
184#define release_thread(tsk) do { } while (0)
185
186/* Prepare to copy thread state - unlazy all lazy status */
187#define prepare_to_copy(tsk) do { } while (0)
188
189extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
190
191extern unsigned long get_wchan(struct task_struct *task);
192
193#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
194#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
195#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
196
197#define cpu_relax() barrier()
198
199/* Prefetch support. This is tuned for UltraSPARC-III and later.
200 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
201 * a shallower prefetch queue than later chips.
202 */
203#define ARCH_HAS_PREFETCH
204#define ARCH_HAS_PREFETCHW
205#define ARCH_HAS_SPINLOCK_PREFETCH
206
207static inline void prefetch(const void *x)
208{
209 /* We do not use the read prefetch mnemonic because that
210 * prefetches into the prefetch-cache which is only accessible
211 * by floating point operations in UltraSPARC-III and later.
212 * By contrast, "#one_write" prefetches into the L2 cache
213 * in shared state.
214 */
215 __asm__ __volatile__("prefetch [%0], #one_write"
216 : /* no outputs */
217 : "r" (x));
218}
219
220static inline void prefetchw(const void *x)
221{
222 /* The optimal prefetch to use for writes is
223 * "#n_writes". This brings the cacheline into the
224 * L2 cache in "owned" state.
225 */
226 __asm__ __volatile__("prefetch [%0], #n_writes"
227 : /* no outputs */
228 : "r" (x));
229}
230
231#define spin_lock_prefetch(x) prefetchw(x)
232
233#define HAVE_ARCH_PICK_MMAP_LAYOUT
234
235#endif /* !(__ASSEMBLY__) */
236
237#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
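To make the prefetch helpers above concrete, a hedged example (the list type and function are hypothetical; the pattern mirrors how list walkers in the kernel prefetch the next element while processing the current one):

    struct node {                        /* hypothetical singly linked list */
            struct node *next;
            unsigned long payload;
    };

    static unsigned long sum_nodes(struct node *p)
    {
            unsigned long sum = 0;

            while (p) {
                    prefetch(p->next);   /* #one_write pulls the next node toward L2;
                                          * prefetch never faults, so the NULL next
                                          * pointer at the tail is harmless here.   */
                    sum += p->payload;
                    p = p->next;
            }
            return sum;
    }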
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
index b163da79bb6d..1a55b9fb3b0c 100644
--- a/include/asm-sparc64/ptrace.h
+++ b/include/asm-sparc64/ptrace.h
@@ -1,346 +1 @@
+#include <asm-sparc/ptrace.h>
1#ifndef _SPARC64_PTRACE_H
2#define _SPARC64_PTRACE_H
3
4#include <asm/pstate.h>
5
6/* This struct defines the way the registers are stored on the
7 * stack during a system call and basically all traps.
8 */
9
10/* This magic value must have the low 9 bits clear,
11 * as that is where we encode the %tt value, see below.
12 */
13#define PT_REGS_MAGIC 0x57ac6c00
14
15#ifndef __ASSEMBLY__
16
17#include <linux/types.h>
18
19struct pt_regs {
20 unsigned long u_regs[16]; /* globals and ins */
21 unsigned long tstate;
22 unsigned long tpc;
23 unsigned long tnpc;
24 unsigned int y;
25
26 /* We encode a magic number, PT_REGS_MAGIC, along
27 * with the %tt (trap type) register value at trap
28 * entry time. The magic number allows us to identify
29 * accurately a trap stack frame in the stack
30 * unwinder, and the %tt value allows us to test
31 * things like "in a system call" etc. for an arbitrary
32 * process.
33 *
34 * The PT_REGS_MAGIC is chosen such that it can be
35 * loaded completely using just a sethi instruction.
36 */
37 unsigned int magic;
38};
39
40static inline int pt_regs_trap_type(struct pt_regs *regs)
41{
42 return regs->magic & 0x1ff;
43}
44
45static inline bool pt_regs_is_syscall(struct pt_regs *regs)
46{
47 return (regs->tstate & TSTATE_SYSCALL);
48}
49
50static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
51{
52 return (regs->tstate &= ~TSTATE_SYSCALL);
53}
54
55struct pt_regs32 {
56 unsigned int psr;
57 unsigned int pc;
58 unsigned int npc;
59 unsigned int y;
60 unsigned int u_regs[16]; /* globals and ins */
61};
62
63#define UREG_G0 0
64#define UREG_G1 1
65#define UREG_G2 2
66#define UREG_G3 3
67#define UREG_G4 4
68#define UREG_G5 5
69#define UREG_G6 6
70#define UREG_G7 7
71#define UREG_I0 8
72#define UREG_I1 9
73#define UREG_I2 10
74#define UREG_I3 11
75#define UREG_I4 12
76#define UREG_I5 13
77#define UREG_I6 14
78#define UREG_I7 15
79#define UREG_FP UREG_I6
80#define UREG_RETPC UREG_I7
81
82/* A V9 register window */
83struct reg_window {
84 unsigned long locals[8];
85 unsigned long ins[8];
86};
87
88/* A 32-bit register window. */
89struct reg_window32 {
90 unsigned int locals[8];
91 unsigned int ins[8];
92};
93
94/* A V9 Sparc stack frame */
95struct sparc_stackf {
96 unsigned long locals[8];
97 unsigned long ins[6];
98 struct sparc_stackf *fp;
99 unsigned long callers_pc;
100 char *structptr;
101 unsigned long xargs[6];
102 unsigned long xxargs[1];
103};
104
105/* A 32-bit Sparc stack frame */
106struct sparc_stackf32 {
107 unsigned int locals[8];
108 unsigned int ins[6];
109 unsigned int fp;
110 unsigned int callers_pc;
111 unsigned int structptr;
112 unsigned int xargs[6];
113 unsigned int xxargs[1];
114};
115
116struct sparc_trapf {
117 unsigned long locals[8];
118 unsigned long ins[8];
119 unsigned long _unused;
120 struct pt_regs *regs;
121};
122
123#define TRACEREG_SZ sizeof(struct pt_regs)
124#define STACKFRAME_SZ sizeof(struct sparc_stackf)
125
126#define TRACEREG32_SZ sizeof(struct pt_regs32)
127#define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
128
129#ifdef __KERNEL__
130
131struct global_reg_snapshot {
132 unsigned long tstate;
133 unsigned long tpc;
134 unsigned long tnpc;
135 unsigned long o7;
136 unsigned long i7;
137 struct thread_info *thread;
138 unsigned long pad1;
139 unsigned long pad2;
140};
141
142#define __ARCH_WANT_COMPAT_SYS_PTRACE
143
144#define force_successful_syscall_return() \
145do { current_thread_info()->syscall_noerror = 1; \
146} while (0)
147#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
148#define instruction_pointer(regs) ((regs)->tpc)
149#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
150#ifdef CONFIG_SMP
151extern unsigned long profile_pc(struct pt_regs *);
152#else
153#define profile_pc(regs) instruction_pointer(regs)
154#endif
155extern void show_regs(struct pt_regs *);
156extern void __show_regs(struct pt_regs *);
157#endif
158
159#else /* __ASSEMBLY__ */
160/* For assembly code. */
161#define TRACEREG_SZ 0xa0
162#define STACKFRAME_SZ 0xc0
163
164#define TRACEREG32_SZ 0x50
165#define STACKFRAME32_SZ 0x60
166#endif
167
168#ifdef __KERNEL__
169#define STACK_BIAS 2047
170#endif
171
172/* These are for pt_regs. */
173#define PT_V9_G0 0x00
174#define PT_V9_G1 0x08
175#define PT_V9_G2 0x10
176#define PT_V9_G3 0x18
177#define PT_V9_G4 0x20
178#define PT_V9_G5 0x28
179#define PT_V9_G6 0x30
180#define PT_V9_G7 0x38
181#define PT_V9_I0 0x40
182#define PT_V9_I1 0x48
183#define PT_V9_I2 0x50
184#define PT_V9_I3 0x58
185#define PT_V9_I4 0x60
186#define PT_V9_I5 0x68
187#define PT_V9_I6 0x70
188#define PT_V9_FP PT_V9_I6
189#define PT_V9_I7 0x78
190#define PT_V9_TSTATE 0x80
191#define PT_V9_TPC 0x88
192#define PT_V9_TNPC 0x90
193#define PT_V9_Y 0x98
194#define PT_V9_MAGIC 0x9c
195#define PT_TSTATE PT_V9_TSTATE
196#define PT_TPC PT_V9_TPC
197#define PT_TNPC PT_V9_TNPC
198
199/* These for pt_regs32. */
200#define PT_PSR 0x0
201#define PT_PC 0x4
202#define PT_NPC 0x8
203#define PT_Y 0xc
204#define PT_G0 0x10
205#define PT_WIM PT_G0
206#define PT_G1 0x14
207#define PT_G2 0x18
208#define PT_G3 0x1c
209#define PT_G4 0x20
210#define PT_G5 0x24
211#define PT_G6 0x28
212#define PT_G7 0x2c
213#define PT_I0 0x30
214#define PT_I1 0x34
215#define PT_I2 0x38
216#define PT_I3 0x3c
217#define PT_I4 0x40
218#define PT_I5 0x44
219#define PT_I6 0x48
220#define PT_FP PT_I6
221#define PT_I7 0x4c
222
223/* Reg_window offsets */
224#define RW_V9_L0 0x00
225#define RW_V9_L1 0x08
226#define RW_V9_L2 0x10
227#define RW_V9_L3 0x18
228#define RW_V9_L4 0x20
229#define RW_V9_L5 0x28
230#define RW_V9_L6 0x30
231#define RW_V9_L7 0x38
232#define RW_V9_I0 0x40
233#define RW_V9_I1 0x48
234#define RW_V9_I2 0x50
235#define RW_V9_I3 0x58
236#define RW_V9_I4 0x60
237#define RW_V9_I5 0x68
238#define RW_V9_I6 0x70
239#define RW_V9_I7 0x78
240
241#define RW_L0 0x00
242#define RW_L1 0x04
243#define RW_L2 0x08
244#define RW_L3 0x0c
245#define RW_L4 0x10
246#define RW_L5 0x14
247#define RW_L6 0x18
248#define RW_L7 0x1c
249#define RW_I0 0x20
250#define RW_I1 0x24
251#define RW_I2 0x28
252#define RW_I3 0x2c
253#define RW_I4 0x30
254#define RW_I5 0x34
255#define RW_I6 0x38
256#define RW_I7 0x3c
257
258/* Stack_frame offsets */
259#define SF_V9_L0 0x00
260#define SF_V9_L1 0x08
261#define SF_V9_L2 0x10
262#define SF_V9_L3 0x18
263#define SF_V9_L4 0x20
264#define SF_V9_L5 0x28
265#define SF_V9_L6 0x30
266#define SF_V9_L7 0x38
267#define SF_V9_I0 0x40
268#define SF_V9_I1 0x48
269#define SF_V9_I2 0x50
270#define SF_V9_I3 0x58
271#define SF_V9_I4 0x60
272#define SF_V9_I5 0x68
273#define SF_V9_FP 0x70
274#define SF_V9_PC 0x78
275#define SF_V9_RETP 0x80
276#define SF_V9_XARG0 0x88
277#define SF_V9_XARG1 0x90
278#define SF_V9_XARG2 0x98
279#define SF_V9_XARG3 0xa0
280#define SF_V9_XARG4 0xa8
281#define SF_V9_XARG5 0xb0
282#define SF_V9_XXARG 0xb8
283
284#define SF_L0 0x00
285#define SF_L1 0x04
286#define SF_L2 0x08
287#define SF_L3 0x0c
288#define SF_L4 0x10
289#define SF_L5 0x14
290#define SF_L6 0x18
291#define SF_L7 0x1c
292#define SF_I0 0x20
293#define SF_I1 0x24
294#define SF_I2 0x28
295#define SF_I3 0x2c
296#define SF_I4 0x30
297#define SF_I5 0x34
298#define SF_FP 0x38
299#define SF_PC 0x3c
300#define SF_RETP 0x40
301#define SF_XARG0 0x44
302#define SF_XARG1 0x48
303#define SF_XARG2 0x4c
304#define SF_XARG3 0x50
305#define SF_XARG4 0x54
306#define SF_XARG5 0x58
307#define SF_XXARG 0x5c
308
309#ifdef __KERNEL__
310
311/* global_reg_snapshot offsets */
312#define GR_SNAP_TSTATE 0x00
313#define GR_SNAP_TPC 0x08
314#define GR_SNAP_TNPC 0x10
315#define GR_SNAP_O7 0x18
316#define GR_SNAP_I7 0x20
317#define GR_SNAP_THREAD 0x28
318#define GR_SNAP_PAD1 0x30
319#define GR_SNAP_PAD2 0x38
320
321#endif /* __KERNEL__ */
322
323/* Stuff for the ptrace system call */
324#define PTRACE_SPARC_DETACH 11
325#define PTRACE_GETREGS 12
326#define PTRACE_SETREGS 13
327#define PTRACE_GETFPREGS 14
328#define PTRACE_SETFPREGS 15
329#define PTRACE_READDATA 16
330#define PTRACE_WRITEDATA 17
331#define PTRACE_READTEXT 18
332#define PTRACE_WRITETEXT 19
333#define PTRACE_GETFPAREGS 20
334#define PTRACE_SETFPAREGS 21
335
336/* These are for debugging 64-bit processes, either from a 32-bit or 64-bit
337 * parent. Thus their complements are for debugging 32-bit processes only.
338 */
339
340#define PTRACE_GETREGS64 22
341#define PTRACE_SETREGS64 23
342/* PTRACE_SYSCALL is 24 */
343#define PTRACE_GETFPREGS64 25
344#define PTRACE_SETFPREGS64 26
345
346#endif /* !(_SPARC64_PTRACE_H) */
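
The PT_V9_* values above are the byte offsets assembly code uses to index the 64-bit pt_regs: sixteen 8-byte registers (%g0-%g7, %i0-%i7) followed by tstate, tpc, tnpc and two 32-bit words. A standalone sketch (an illustrative stand-in, not the kernel's pt_regs definition) reproduces the same offsets with offsetof on an LP64 host:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the layout implied by the PT_V9_* offsets
 * above; not the kernel's actual pt_regs definition. */
struct pt_regs_sketch {
        unsigned long u_regs[16];       /* 0x00..0x78: %g0-%g7, %i0-%i7 */
        unsigned long tstate;           /* 0x80: PT_V9_TSTATE           */
        unsigned long tpc;              /* 0x88: PT_V9_TPC              */
        unsigned long tnpc;             /* 0x90: PT_V9_TNPC             */
        unsigned int y;                 /* 0x98: PT_V9_Y                */
        unsigned int magic;             /* 0x9c: PT_V9_MAGIC            */
};

int main(void)
{
        printf("tstate at 0x%zx, y at 0x%zx, magic at 0x%zx\n",
               offsetof(struct pt_regs_sketch, tstate),
               offsetof(struct pt_regs_sketch, y),
               offsetof(struct pt_regs_sketch, magic));
        return 0;
}

On an LP64 host this prints 0x80, 0x98 and 0x9c, matching the constants, which is also why PT_V9_FP can simply alias PT_V9_I6: %i6 is the frame pointer.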
diff --git a/include/asm-sparc64/reg.h b/include/asm-sparc64/reg.h
index 77aa4804a60d..495bab27da07 100644
--- a/include/asm-sparc64/reg.h
+++ b/include/asm-sparc64/reg.h
@@ -1,56 +1 @@
+#include <asm-sparc/reg.h>
1/*
2 * linux/asm-sparc64/reg.h
3 * Layout of the registers as expected by gdb on the Sparc
4 * we should replace the user.h definitions with those in
5 * this file, we don't even use the other
6 * -miguel
7 *
8 * The names of the structures, constants and aliases in this file
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC64_REG_H
15#define __SPARC64_REG_H
16
17struct regs {
18 unsigned long r_g1;
19 unsigned long r_g2;
20 unsigned long r_g3;
21 unsigned long r_g4;
22 unsigned long r_g5;
23 unsigned long r_g6;
24 unsigned long r_g7;
25 unsigned long r_o0;
26 unsigned long r_o1;
27 unsigned long r_o2;
28 unsigned long r_o3;
29 unsigned long r_o4;
30 unsigned long r_o5;
31 unsigned long r_o6;
32 unsigned long r_o7;
33 unsigned long __pad;
34 unsigned long r_tstate;
35 unsigned long r_tpc;
36 unsigned long r_tnpc;
37 unsigned int r_y;
38 unsigned int r_fprs;
39};
40
41#define FPU_REGS_TYPE unsigned int
42#define FPU_FSR_TYPE unsigned long
43
44struct fp_status {
45 unsigned long fpu_fr[32];
46 unsigned long Fpu_fsr;
47};
48
49struct fpu {
50 struct fp_status f_fpstatus;
51};
52
53#define fpu_regs f_fpstatus.fpu_fr
54#define fpu_fsr f_fpstatus.Fpu_fsr
55
56#endif /* __SPARC64_REG_H */
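
The fpu_regs and fpu_fsr defines above exist so code can keep using the SunOS/gdb field names while the data actually lives inside f_fpstatus. A minimal standalone sketch (definitions copied locally from the struct above) shows the expansion:

#include <stdio.h>

/* Local copies of the definitions above, so the example stands alone. */
struct fp_status {
        unsigned long fpu_fr[32];
        unsigned long Fpu_fsr;
};
struct fpu {
        struct fp_status f_fpstatus;
};
#define fpu_regs        f_fpstatus.fpu_fr
#define fpu_fsr         f_fpstatus.Fpu_fsr

int main(void)
{
        struct fpu f;

        f.fpu_regs[0] = 0x3ff0000000000000UL;   /* expands to f.f_fpstatus.fpu_fr[0] */
        f.fpu_fsr = 0;                          /* expands to f.f_fpstatus.Fpu_fsr   */
        printf("f0 raw bits: 0x%lx, fsr: 0x%lx\n", f.fpu_regs[0], f.fpu_fsr);
        return 0;
}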
diff --git a/include/asm-sparc64/sbus.h b/include/asm-sparc64/sbus.h
index 24a04a55cf85..0cab0e89b874 100644
--- a/include/asm-sparc64/sbus.h
+++ b/include/asm-sparc64/sbus.h
@@ -1,190 +1 @@
+#include <asm-sparc/sbus.h>
1/* sbus.h: Defines for the Sun SBus.
2 *
3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SBUS_H
7#define _SPARC64_SBUS_H
8
9#include <linux/dma-mapping.h>
10#include <linux/ioport.h>
11
12#include <asm/oplib.h>
13#include <asm/prom.h>
14#include <asm/of_device.h>
15#include <asm/iommu.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0x00000000
26#define SBUS_OFF_MASK 0x0fffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Tree of SBUS devices */
72 struct sbus_bus *next; /* Next SBUS in system */
73 int prom_node; /* OBP node of SBUS */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int portid;
81};
82#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
83
84extern struct sbus_bus *sbus_root;
85
86/* Device probing routines could find these handy */
87#define for_each_sbus(bus) \
88 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
89
90#define for_each_sbusdev(device, bus) \
91 for((device) = (bus)->devices; (device); (device)=(device)->next)
92
93#define for_all_sbusdev(device, bus) \
94 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
95 for ((device) = (bus)->devices; (device); (device) = (device)->next)
96
97/* Driver DVMA interfaces. */
98#define sbus_can_dma_64bit(sdev) (1)
99#define sbus_can_burst64(sdev) (1)
100extern void sbus_set_sbus64(struct sbus_dev *, int);
101extern void sbus_fill_device_irq(struct sbus_dev *);
102
103static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size,
104 dma_addr_t *dma_handle)
105{
106 return dma_alloc_coherent(&sdev->ofdev.dev, size,
107 dma_handle, GFP_ATOMIC);
108}
109
110static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
111 void *vaddr, dma_addr_t dma_handle)
112{
113 return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
114}
115
116#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
117#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
118#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
119#define SBUS_DMA_NONE DMA_NONE
120
121/* All the rest use streaming mode mappings. */
122static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
123 size_t size, int direction)
124{
125 return dma_map_single(&sdev->ofdev.dev, ptr, size,
126 (enum dma_data_direction) direction);
127}
128
129static inline void sbus_unmap_single(struct sbus_dev *sdev,
130 dma_addr_t dma_addr, size_t size,
131 int direction)
132{
133 dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
134 (enum dma_data_direction) direction);
135}
136
137static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
138 int nents, int direction)
139{
140 return dma_map_sg(&sdev->ofdev.dev, sg, nents,
141 (enum dma_data_direction) direction);
142}
143
144static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
145 int nents, int direction)
146{
147 dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
148 (enum dma_data_direction) direction);
149}
150
151/* Finally, allow explicit synchronization of streamable mappings. */
152static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
153 dma_addr_t dma_handle,
154 size_t size, int direction)
155{
156 dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
157 (enum dma_data_direction) direction);
158}
159#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
160
161static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
162 dma_addr_t dma_handle,
163 size_t size, int direction)
164{
165 /* No flushing needed to sync cpu writes to the device. */
166}
167
168static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
169 struct scatterlist *sg,
170 int nents, int direction)
171{
172 dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
173 (enum dma_data_direction) direction);
174}
175#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
176
177static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
178 struct scatterlist *sg,
179 int nents, int direction)
180{
181 /* No flushing needed to sync cpu writes to the device. */
182}
183
184extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
185extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
186extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
187extern int sbus_arch_preinit(void);
188extern void sbus_arch_postinit(void);
189
190#endif /* !(_SPARC64_SBUS_H) */
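
The DVMA helpers above are thin wrappers that forward to the generic DMA API via the of_device embedded in each sbus_dev, so driver code can stay in SBus terms. A hedged sketch of how a hypothetical driver routine would use them (the function name and buffer are invented for illustration, and error handling is omitted):

/* Hypothetical driver fragment using the wrappers defined above;
 * illustration only, error handling omitted. */
static int mydev_send(struct sbus_dev *sdev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = sbus_map_single(sdev, buf, len, SBUS_DMA_TODEVICE);

        /* ... program the device with 'handle', wait for completion ... */

        sbus_unmap_single(sdev, handle, len, SBUS_DMA_TODEVICE);
        return 0;
}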
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 81bd058f9382..b7fef95953ca 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -1,27 +1 @@
+#include <asm-sparc/scatterlist.h>
1#ifndef _SPARC64_SCATTERLIST_H
2#define _SPARC64_SCATTERLIST_H
3
4#include <asm/page.h>
5#include <asm/types.h>
6
7struct scatterlist {
8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
12 unsigned int offset;
13
14 unsigned int length;
15
16 dma_addr_t dma_address;
17 __u32 dma_length;
18};
19
20#define sg_dma_address(sg) ((sg)->dma_address)
21#define sg_dma_len(sg) ((sg)->dma_length)
22
23#define ISA_DMA_THRESHOLD (~0UL)
24
25#define ARCH_HAS_SG_CHAIN
26
27#endif /* !(_SPARC64_SCATTERLIST_H) */
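
sg_dma_address() and sg_dma_len() are the accessors a driver uses on each entry after a successful dma_map_sg()/sbus_map_sg(); the mapped count and lengths may differ from the originals because the IOMMU can coalesce entries. A sketch for a flat, unchained table (desc_write() is an invented stand-in for programming a device descriptor):

/* Sketch only: walk a mapped, unchained scatterlist table and hand each
 * bus address/length pair to an invented desc_write() helper. */
static void load_descriptors(struct scatterlist *sg, int mapped_nents)
{
        int i;

        for (i = 0; i < mapped_nents; i++)
                desc_write(i, sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
}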
diff --git a/include/asm-sparc64/sections.h b/include/asm-sparc64/sections.h
index 3f4b9fdc28d0..721496f8b2be 100644
--- a/include/asm-sparc64/sections.h
+++ b/include/asm-sparc64/sections.h
@@ -1,9 +1 @@
+#include <asm-sparc/sections.h>
1#ifndef _SPARC64_SECTIONS_H
2#define _SPARC64_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7extern char _start[];
8
9#endif
diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h
index c9331b02d9c8..7bbc4fecdc7d 100644
--- a/include/asm-sparc64/sfp-machine.h
+++ b/include/asm-sparc64/sfp-machine.h
@@ -1,93 +1 @@
+#include <asm-sparc/sfp-machine.h>
1/* Machine-dependent software floating-point definitions.
2 Sparc64 kernel version.
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz) and
7 David S. Miller (davem@redhat.com).
8
9 The GNU C Library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Library General Public License as
11 published by the Free Software Foundation; either version 2 of the
12 License, or (at your option) any later version.
13
14 The GNU C Library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Library General Public License for more details.
18
19 You should have received a copy of the GNU Library General Public
20 License along with the GNU C Library; see the file COPYING.LIB. If
21 not, write to the Free Software Foundation, Inc.,
22 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23
24#ifndef _SFP_MACHINE_H
25#define _SFP_MACHINE_H
26
27#define _FP_W_TYPE_SIZE 64
28#define _FP_W_TYPE unsigned long
29#define _FP_WS_TYPE signed long
30#define _FP_I_TYPE long
31
32#define _FP_MUL_MEAT_S(R,X,Y) \
33 _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
34#define _FP_MUL_MEAT_D(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_Q(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
38
39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
41#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
42
43#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
44#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1)
45#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1
46#define _FP_NANSIGN_S 0
47#define _FP_NANSIGN_D 0
48#define _FP_NANSIGN_Q 0
49
50#define _FP_KEEPNANFRACP 1
51
52/* If one NaN is signaling and the other is not,
53 * we choose that one, otherwise we choose X.
54 */
55/* For _Qp_* and _Q_*, this should prefer X, for
56 * CPU instruction emulation this should prefer Y.
57 * (see SPARCv9 B.2.2 section).
58 */
59#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
60 do { \
61 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
62 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
63 { \
64 R##_s = X##_s; \
65 _FP_FRAC_COPY_##wc(R,X); \
66 } \
67 else \
68 { \
69 R##_s = Y##_s; \
70 _FP_FRAC_COPY_##wc(R,Y); \
71 } \
72 R##_c = FP_CLS_NAN; \
73 } while (0)
74
75/* Obtain the current rounding mode. */
76#ifndef FP_ROUNDMODE
77#define FP_ROUNDMODE ((current_thread_info()->xfsr[0] >> 30) & 0x3)
78#endif
79
80/* Exception flags. */
81#define FP_EX_INVALID (1 << 4)
82#define FP_EX_OVERFLOW (1 << 3)
83#define FP_EX_UNDERFLOW (1 << 2)
84#define FP_EX_DIVZERO (1 << 1)
85#define FP_EX_INEXACT (1 << 0)
86
87#define FP_HANDLE_EXCEPTIONS return _fex
88
89#define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex)
90
91#define FP_TRAPPING_EXCEPTIONS ((current_thread_info()->xfsr[0] >> 23) & 0x1f)
92
93#endif
diff --git a/include/asm-sparc64/shmparam.h b/include/asm-sparc64/shmparam.h
index 1ed0d6701a9b..5fa3a9b05e7f 100644
--- a/include/asm-sparc64/shmparam.h
+++ b/include/asm-sparc64/shmparam.h
@@ -1,10 +1 @@
+#include <asm-sparc/shmparam.h>
1#ifndef _ASMSPARC64_SHMPARAM_H
2#define _ASMSPARC64_SHMPARAM_H
3
4#include <asm/spitfire.h>
5
6#define __ARCH_FORCE_SHMLBA 1
7/* attach addr a multiple of this */
8#define SHMLBA ((PAGE_SIZE > L1DCACHE_SIZE) ? PAGE_SIZE : L1DCACHE_SIZE)
9
10#endif /* _ASMSPARC64_SHMPARAM_H */
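
Forcing SHMLBA up to the larger of the page size and the L1 D-cache size keeps every mapping of a shared segment congruent modulo the D-cache size, so the virtually indexed D-cache never sees conflicting aliases. A standalone sketch of the resulting alignment arithmetic (the 8 KB page and 16 KB D-cache sizes below are assumed purely for illustration):

#include <stdio.h>

/* Standalone illustration of the SHMLBA rule above; the page and
 * D-cache sizes are assumed values, not taken from a real CPU. */
#define EX_PAGE_SIZE    8192UL
#define EX_L1DCACHE     16384UL
#define EX_SHMLBA       ((EX_PAGE_SIZE > EX_L1DCACHE) ? EX_PAGE_SIZE : EX_L1DCACHE)

int main(void)
{
        unsigned long hint = 0x70001234UL;
        unsigned long attach = (hint + EX_SHMLBA - 1) & ~(EX_SHMLBA - 1);

        printf("attach hint 0x%lx rounds up to 0x%lx (SHMLBA %lu)\n",
               hint, attach, EX_SHMLBA);
        return 0;
}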
diff --git a/include/asm-sparc64/sigcontext.h b/include/asm-sparc64/sigcontext.h
index 1c868d680cfc..5b16dcce44f2 100644
--- a/include/asm-sparc64/sigcontext.h
+++ b/include/asm-sparc64/sigcontext.h
@@ -1,87 +1 @@
+#include <asm-sparc/sigcontext.h>
1#ifndef __SPARC64_SIGCONTEXT_H
2#define __SPARC64_SIGCONTEXT_H
3
4#ifdef __KERNEL__
5#include <asm/ptrace.h>
6#endif
7
8#ifndef __ASSEMBLY__
9
10#ifdef __KERNEL__
11
12#define __SUNOS_MAXWIN 31
13
14/* This is what SunOS does, so shall I unless we use new 32bit signals or rt signals. */
15struct sigcontext32 {
16 int sigc_onstack; /* state to restore */
17 int sigc_mask; /* sigmask to restore */
18 int sigc_sp; /* stack pointer */
19 int sigc_pc; /* program counter */
20 int sigc_npc; /* next program counter */
21 int sigc_psr; /* for condition codes etc */
22 int sigc_g1; /* User uses these two registers */
23 int sigc_o0; /* within the trampoline code. */
24
25 /* Now comes information regarding the user's window set
26 * at the time of the signal.
27 */
28 int sigc_oswins; /* outstanding windows */
29
30 /* stack ptrs for each regwin buf */
31 unsigned sigc_spbuf[__SUNOS_MAXWIN];
32
33 /* Windows to restore after signal */
34 struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN];
35};
36
37#endif
38
39#ifdef __KERNEL__
40
41/* This is what we use for 32bit new non-rt signals. */
42
43typedef struct {
44 struct {
45 unsigned int psr;
46 unsigned int pc;
47 unsigned int npc;
48 unsigned int y;
49 unsigned int u_regs[16]; /* globals and ins */
50 } si_regs;
51 int si_mask;
52} __siginfo32_t;
53
54#endif
55
56typedef struct {
57 unsigned int si_float_regs [64];
58 unsigned long si_fsr;
59 unsigned long si_gsr;
60 unsigned long si_fprs;
61} __siginfo_fpu_t;
62
63/* This is something SunOS doesn't have, so we have to define it
64 ourselves and do it properly. */
65struct sigcontext {
66 /* The size of this array has to match SI_MAX_SIZE from siginfo.h */
67 char sigc_info[128];
68 struct {
69 unsigned long u_regs[16]; /* globals and ins */
70 unsigned long tstate;
71 unsigned long tpc;
72 unsigned long tnpc;
73 unsigned int y;
74 unsigned int fprs;
75 } sigc_regs;
76 __siginfo_fpu_t * sigc_fpu_save;
77 struct {
78 void * ss_sp;
79 int ss_flags;
80 unsigned long ss_size;
81 } sigc_stack;
82 unsigned long sigc_mask;
83};
84
85#endif /* !(__ASSEMBLY__) */
86
87#endif /* !(__SPARC64_SIGCONTEXT_H) */
diff --git a/include/asm-sparc64/siginfo.h b/include/asm-sparc64/siginfo.h
index c96e6c30f8b0..8ffd6ebabc7a 100644
--- a/include/asm-sparc64/siginfo.h
+++ b/include/asm-sparc64/siginfo.h
@@ -1,32 +1 @@
+#include <asm-sparc/siginfo.h>
1#ifndef _SPARC64_SIGINFO_H
2#define _SPARC64_SIGINFO_H
3
4#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
5
6#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
7#define __ARCH_SI_TRAPNO
8#define __ARCH_SI_BAND_T int
9
10#include <asm-generic/siginfo.h>
11
12#ifdef __KERNEL__
13
14#include <linux/compat.h>
15
16#ifdef CONFIG_COMPAT
17
18struct compat_siginfo;
19
20#endif /* CONFIG_COMPAT */
21
22#endif /* __KERNEL__ */
23
24#define SI_NOINFO 32767 /* no information in siginfo_t */
25
26/*
27 * SIGEMT si_codes
28 */
29#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */
30#define NSIGEMT 1
31
32#endif
diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h
index 2a7c7934ac0a..79705e5d49c3 100644
--- a/include/asm-sparc64/signal.h
+++ b/include/asm-sparc64/signal.h
@@ -1,194 +1 @@
+#include <asm-sparc/signal.h>
1#ifndef _ASMSPARC64_SIGNAL_H
2#define _ASMSPARC64_SIGNAL_H
3
4#include <asm/sigcontext.h>
5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__
8#include <linux/personality.h>
9#include <linux/types.h>
10#endif
11#endif
12
13/* On the Sparc the signal handlers get passed a 'sub-signal' code
14 * for certain signal types, which we document here.
15 */
16#define SIGHUP 1
17#define SIGINT 2
18#define SIGQUIT 3
19#define SIGILL 4
20#define SUBSIG_STACK 0
21#define SUBSIG_ILLINST 2
22#define SUBSIG_PRIVINST 3
23#define SUBSIG_BADTRAP(t) (0x80 + (t))
24
25#define SIGTRAP 5
26#define SIGABRT 6
27#define SIGIOT 6
28
29#define SIGEMT 7
30#define SUBSIG_TAG 10
31
32#define SIGFPE 8
33#define SUBSIG_FPDISABLED 0x400
34#define SUBSIG_FPERROR 0x404
35#define SUBSIG_FPINTOVFL 0x001
36#define SUBSIG_FPSTSIG 0x002
37#define SUBSIG_IDIVZERO 0x014
38#define SUBSIG_FPINEXACT 0x0c4
39#define SUBSIG_FPDIVZERO 0x0c8
40#define SUBSIG_FPUNFLOW 0x0cc
41#define SUBSIG_FPOPERROR 0x0d0
42#define SUBSIG_FPOVFLOW 0x0d4
43
44#define SIGKILL 9
45#define SIGBUS 10
46#define SUBSIG_BUSTIMEOUT 1
47#define SUBSIG_ALIGNMENT 2
48#define SUBSIG_MISCERROR 5
49
50#define SIGSEGV 11
51#define SUBSIG_NOMAPPING 3
52#define SUBSIG_PROTECTION 4
53#define SUBSIG_SEGERROR 5
54
55#define SIGSYS 12
56
57#define SIGPIPE 13
58#define SIGALRM 14
59#define SIGTERM 15
60#define SIGURG 16
61
62/* SunOS values which deviate from the Linux/i386 ones */
63#define SIGSTOP 17
64#define SIGTSTP 18
65#define SIGCONT 19
66#define SIGCHLD 20
67#define SIGTTIN 21
68#define SIGTTOU 22
69#define SIGIO 23
70#define SIGPOLL SIGIO /* SysV name for SIGIO */
71#define SIGXCPU 24
72#define SIGXFSZ 25
73#define SIGVTALRM 26
74#define SIGPROF 27
75#define SIGWINCH 28
76#define SIGLOST 29
77#define SIGPWR SIGLOST
78#define SIGUSR1 30
79#define SIGUSR2 31
80
81/* Most things should be clean enough to redefine this at will, if care
82 is taken to make libc match. */
83
84#define __OLD_NSIG 32
85#define __NEW_NSIG 64
86#define _NSIG_BPW 64
87#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
88
89#define SIGRTMIN 32
90#define SIGRTMAX __NEW_NSIG
91
92#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
93#define _NSIG __NEW_NSIG
94#define __new_sigset_t sigset_t
95#define __new_sigaction sigaction
96#define __new_sigaction32 sigaction32
97#define __old_sigset_t old_sigset_t
98#define __old_sigaction old_sigaction
99#define __old_sigaction32 old_sigaction32
100#else
101#define _NSIG __OLD_NSIG
102#define NSIG _NSIG
103#define __old_sigset_t sigset_t
104#define __old_sigaction sigaction
105#define __old_sigaction32 sigaction32
106#endif
107
108#ifndef __ASSEMBLY__
109
110typedef unsigned long __old_sigset_t; /* at least 32 bits */
111
112typedef struct {
113 unsigned long sig[_NSIG_WORDS];
114} __new_sigset_t;
115
116/* A SunOS sigstack */
117struct sigstack {
118 /* XXX 32-bit pointers pinhead XXX */
119 char *the_stack;
120 int cur_status;
121};
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145
146#define SIG_BLOCK 0x01 /* for blocking signals */
147#define SIG_UNBLOCK 0x02 /* for unblocking signals */
148#define SIG_SETMASK 0x04 /* for setting the signal mask */
149
150/*
151 * sigaltstack controls
152 */
153#define SS_ONSTACK 1
154#define SS_DISABLE 2
155
156#define MINSIGSTKSZ 4096
157#define SIGSTKSZ 16384
158
159#include <asm-generic/signal.h>
160
161struct __new_sigaction {
162 __sighandler_t sa_handler;
163 unsigned long sa_flags;
164 __sigrestore_t sa_restorer; /* not used by Linux/SPARC yet */
165 __new_sigset_t sa_mask;
166};
167
168struct __old_sigaction {
169 __sighandler_t sa_handler;
170 __old_sigset_t sa_mask;
171 unsigned long sa_flags;
172 void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
173};
174
175typedef struct sigaltstack {
176 void __user *ss_sp;
177 int ss_flags;
178 size_t ss_size;
179} stack_t;
180
181#ifdef __KERNEL__
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188#define ptrace_signal_deliver(regs, cookie) do { } while (0)
189
190#endif /* !(__KERNEL__) */
191
192#endif /* !(__ASSEMBLY__) */
193
194#endif /* !(_ASMSPARC64_SIGNAL_H) */
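
With __NEW_NSIG at 64 and _NSIG_BPW at 64, _NSIG_WORDS is 1, so the whole new-style mask is a single 64-bit word. A standalone sketch of the conventional signal-number-to-bit mapping such a layout implies (constants copied locally; signal numbers are 1-based, so SIGBUS, number 10 in the table above, lands on bit 9):

#include <stdio.h>

/* Local copies of the sizing constants above, for a standalone example. */
#define EX_NSIG         64
#define EX_NSIG_BPW     64
#define EX_NSIG_WORDS   (EX_NSIG / EX_NSIG_BPW)

struct ex_sigset {
        unsigned long sig[EX_NSIG_WORDS];
};

/* Conventional mapping: 1-based signal number -> word index and bit. */
static void ex_sigaddset(struct ex_sigset *set, int signo)
{
        unsigned long bit = signo - 1;

        set->sig[bit / EX_NSIG_BPW] |= 1UL << (bit % EX_NSIG_BPW);
}

int main(void)
{
        struct ex_sigset set = { { 0 } };

        ex_sigaddset(&set, 10);                         /* SIGBUS above  */
        printf("mask word 0 = 0x%lx\n", set.sig[0]);    /* prints 0x200  */
        return 0;
}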
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index cd0311b2e19d..5095a2cbea52 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -1,64 +1 @@
+#include <asm-sparc/smp.h>
1/* smp.h: Sparc64 specific SMP stuff.
2 *
3 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SMP_H
7#define _SPARC64_SMP_H
8
9#include <linux/threads.h>
10#include <asm/asi.h>
11#include <asm/starfire.h>
12#include <asm/spitfire.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/cpumask.h>
17#include <linux/cache.h>
18
19#endif /* !(__ASSEMBLY__) */
20
21#ifdef CONFIG_SMP
22
23#ifndef __ASSEMBLY__
24
25/*
26 * Private routines/data
27 */
28
29#include <linux/bitops.h>
30#include <asm/atomic.h>
31#include <asm/percpu.h>
32
33DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
34extern cpumask_t cpu_core_map[NR_CPUS];
35extern int sparc64_multi_core;
36
37/*
38 * General functions that each host system must provide.
39 */
40
41extern int hard_smp_processor_id(void);
42#define raw_smp_processor_id() (current_thread_info()->cpu)
43
44extern void smp_fill_in_sib_core_maps(void);
45extern void cpu_play_dead(void);
46
47extern void smp_fetch_global_regs(void);
48
49#ifdef CONFIG_HOTPLUG_CPU
50extern int __cpu_disable(void);
51extern void __cpu_die(unsigned int cpu);
52#endif
53
54#endif /* !(__ASSEMBLY__) */
55
56#else
57
58#define hard_smp_processor_id() 0
59#define smp_fill_in_sib_core_maps() do { } while (0)
60#define smp_fetch_global_regs() do { } while (0)
61
62#endif /* !(CONFIG_SMP) */
63
64#endif /* !(_SPARC64_SMP_H) */
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index 0006fe9f8c7a..0115b8156eb8 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -1,250 +1 @@
+#include <asm-sparc/spinlock.h>
1/* spinlock.h: 64-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC64_SPINLOCK_H
7#define __SPARC64_SPINLOCK_H
8
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13/* To get debugging spinlocks which detect and catch
14 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
15 * and rebuild your kernel.
16 */
17
18/* All of these locking primitives are expected to work properly
19 * even in an RMO memory model, which currently is what the kernel
20 * runs in.
21 *
22 * There is another issue. Because we play games to save cycles
23 * in the non-contention case, we need to be extra careful about
24 * branch targets into the "spinning" code. They live in their
25 * own section, but the newer V9 branches have a shorter range
26 * than the traditional 32-bit sparc branch variants. The rule
27 * is that the branches that go into and out of the spinner sections
28 * must be pre-V9 branches.
29 */
30
31#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
32
33#define __raw_spin_unlock_wait(lp) \
34 do { rmb(); \
35 } while((lp)->lock)
36
37static inline void __raw_spin_lock(raw_spinlock_t *lock)
38{
39 unsigned long tmp;
40
41 __asm__ __volatile__(
42"1: ldstub [%1], %0\n"
43" membar #StoreLoad | #StoreStore\n"
44" brnz,pn %0, 2f\n"
45" nop\n"
46" .subsection 2\n"
47"2: ldub [%1], %0\n"
48" membar #LoadLoad\n"
49" brnz,pt %0, 2b\n"
50" nop\n"
51" ba,a,pt %%xcc, 1b\n"
52" .previous"
53 : "=&r" (tmp)
54 : "r" (lock)
55 : "memory");
56}
57
58static inline int __raw_spin_trylock(raw_spinlock_t *lock)
59{
60 unsigned long result;
61
62 __asm__ __volatile__(
63" ldstub [%1], %0\n"
64" membar #StoreLoad | #StoreStore"
65 : "=r" (result)
66 : "r" (lock)
67 : "memory");
68
69 return (result == 0UL);
70}
71
72static inline void __raw_spin_unlock(raw_spinlock_t *lock)
73{
74 __asm__ __volatile__(
75" membar #StoreStore | #LoadStore\n"
76" stb %%g0, [%0]"
77 : /* No outputs */
78 : "r" (lock)
79 : "memory");
80}
81
82static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
83{
84 unsigned long tmp1, tmp2;
85
86 __asm__ __volatile__(
87"1: ldstub [%2], %0\n"
88" membar #StoreLoad | #StoreStore\n"
89" brnz,pn %0, 2f\n"
90" nop\n"
91" .subsection 2\n"
92"2: rdpr %%pil, %1\n"
93" wrpr %3, %%pil\n"
94"3: ldub [%2], %0\n"
95" membar #LoadLoad\n"
96" brnz,pt %0, 3b\n"
97" nop\n"
98" ba,pt %%xcc, 1b\n"
99" wrpr %1, %%pil\n"
100" .previous"
101 : "=&r" (tmp1), "=&r" (tmp2)
102 : "r"(lock), "r"(flags)
103 : "memory");
104}
105
106/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
107
108static void inline __read_lock(raw_rwlock_t *lock)
109{
110 unsigned long tmp1, tmp2;
111
112 __asm__ __volatile__ (
113"1: ldsw [%2], %0\n"
114" brlz,pn %0, 2f\n"
115"4: add %0, 1, %1\n"
116" cas [%2], %0, %1\n"
117" cmp %0, %1\n"
118" membar #StoreLoad | #StoreStore\n"
119" bne,pn %%icc, 1b\n"
120" nop\n"
121" .subsection 2\n"
122"2: ldsw [%2], %0\n"
123" membar #LoadLoad\n"
124" brlz,pt %0, 2b\n"
125" nop\n"
126" ba,a,pt %%xcc, 4b\n"
127" .previous"
128 : "=&r" (tmp1), "=&r" (tmp2)
129 : "r" (lock)
130 : "memory");
131}
132
133static int inline __read_trylock(raw_rwlock_t *lock)
134{
135 int tmp1, tmp2;
136
137 __asm__ __volatile__ (
138"1: ldsw [%2], %0\n"
139" brlz,a,pn %0, 2f\n"
140" mov 0, %0\n"
141" add %0, 1, %1\n"
142" cas [%2], %0, %1\n"
143" cmp %0, %1\n"
144" membar #StoreLoad | #StoreStore\n"
145" bne,pn %%icc, 1b\n"
146" mov 1, %0\n"
147"2:"
148 : "=&r" (tmp1), "=&r" (tmp2)
149 : "r" (lock)
150 : "memory");
151
152 return tmp1;
153}
154
155static void inline __read_unlock(raw_rwlock_t *lock)
156{
157 unsigned long tmp1, tmp2;
158
159 __asm__ __volatile__(
160" membar #StoreLoad | #LoadLoad\n"
161"1: lduw [%2], %0\n"
162" sub %0, 1, %1\n"
163" cas [%2], %0, %1\n"
164" cmp %0, %1\n"
165" bne,pn %%xcc, 1b\n"
166" nop"
167 : "=&r" (tmp1), "=&r" (tmp2)
168 : "r" (lock)
169 : "memory");
170}
171
172static void inline __write_lock(raw_rwlock_t *lock)
173{
174 unsigned long mask, tmp1, tmp2;
175
176 mask = 0x80000000UL;
177
178 __asm__ __volatile__(
179"1: lduw [%2], %0\n"
180" brnz,pn %0, 2f\n"
181"4: or %0, %3, %1\n"
182" cas [%2], %0, %1\n"
183" cmp %0, %1\n"
184" membar #StoreLoad | #StoreStore\n"
185" bne,pn %%icc, 1b\n"
186" nop\n"
187" .subsection 2\n"
188"2: lduw [%2], %0\n"
189" membar #LoadLoad\n"
190" brnz,pt %0, 2b\n"
191" nop\n"
192" ba,a,pt %%xcc, 4b\n"
193" .previous"
194 : "=&r" (tmp1), "=&r" (tmp2)
195 : "r" (lock), "r" (mask)
196 : "memory");
197}
198
199static void inline __write_unlock(raw_rwlock_t *lock)
200{
201 __asm__ __volatile__(
202" membar #LoadStore | #StoreStore\n"
203" stw %%g0, [%0]"
204 : /* no outputs */
205 : "r" (lock)
206 : "memory");
207}
208
209static int inline __write_trylock(raw_rwlock_t *lock)
210{
211 unsigned long mask, tmp1, tmp2, result;
212
213 mask = 0x80000000UL;
214
215 __asm__ __volatile__(
216" mov 0, %2\n"
217"1: lduw [%3], %0\n"
218" brnz,pn %0, 2f\n"
219" or %0, %4, %1\n"
220" cas [%3], %0, %1\n"
221" cmp %0, %1\n"
222" membar #StoreLoad | #StoreStore\n"
223" bne,pn %%icc, 1b\n"
224" nop\n"
225" mov 1, %2\n"
226"2:"
227 : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
228 : "r" (lock), "r" (mask)
229 : "memory");
230
231 return result;
232}
233
234#define __raw_read_lock(p) __read_lock(p)
235#define __raw_read_trylock(p) __read_trylock(p)
236#define __raw_read_unlock(p) __read_unlock(p)
237#define __raw_write_lock(p) __write_lock(p)
238#define __raw_write_unlock(p) __write_unlock(p)
239#define __raw_write_trylock(p) __write_trylock(p)
240
241#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
242#define __raw_write_can_lock(rw) (!(rw)->lock)
243
244#define _raw_spin_relax(lock) cpu_relax()
245#define _raw_read_relax(lock) cpu_relax()
246#define _raw_write_relax(lock) cpu_relax()
247
248#endif /* !(__ASSEMBLY__) */
249
250#endif /* !(__SPARC64_SPINLOCK_H) */
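
The lock word here is a single byte: ldstub atomically fetches it and stores 0xff, so a returned zero means the caller now owns the lock, and the slow path spins on plain loads (ldub) until the byte clears before retrying. A portable sketch of that shape using GCC's __sync builtins in place of the sparc64 assembly (illustration only, without the memory-barrier and branch-placement subtleties above; the builtin stores 1 where ldstub stores 0xff):

/* Portable sketch of the ldstub-style byte lock above, using GCC
 * __sync builtins instead of sparc64 inline assembly. */
typedef struct {
        volatile unsigned char lock;
} sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *lp)
{
        while (__sync_lock_test_and_set(&lp->lock, 1)) {        /* ~ ldstub    */
                while (lp->lock)                                /* ~ ldub spin */
                        ;
        }
}

static int sketch_spin_trylock(sketch_spinlock_t *lp)
{
        return __sync_lock_test_and_set(&lp->lock, 1) == 0;
}

static void sketch_spin_unlock(sketch_spinlock_t *lp)
{
        __sync_lock_release(&lp->lock);                         /* ~ stb %g0   */
}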
diff --git a/include/asm-sparc64/stat.h b/include/asm-sparc64/stat.h
index 9650fdea847f..b108a866256b 100644
--- a/include/asm-sparc64/stat.h
+++ b/include/asm-sparc64/stat.h
@@ -1,47 +1 @@
+#include <asm-sparc/stat.h>
1#ifndef _SPARC64_STAT_H
2#define _SPARC64_STAT_H
3
4#include <linux/types.h>
5
6struct stat {
7 unsigned st_dev;
8 ino_t st_ino;
9 mode_t st_mode;
10 short st_nlink;
11 uid_t st_uid;
12 gid_t st_gid;
13 unsigned st_rdev;
14 off_t st_size;
15 time_t st_atime;
16 time_t st_mtime;
17 time_t st_ctime;
18 off_t st_blksize;
19 off_t st_blocks;
20 unsigned long __unused4[2];
21};
22
23struct stat64 {
24 unsigned long st_dev;
25 unsigned long st_ino;
26 unsigned long st_nlink;
27
28 unsigned int st_mode;
29 unsigned int st_uid;
30 unsigned int st_gid;
31 unsigned int __pad0;
32
33 unsigned long st_rdev;
34 long st_size;
35 long st_blksize;
36 long st_blocks;
37
38 unsigned long st_atime;
39 unsigned long st_atime_nsec;
40 unsigned long st_mtime;
41 unsigned long st_mtime_nsec;
42 unsigned long st_ctime;
43 unsigned long st_ctime_nsec;
44 long __unused[3];
45};
46
47#endif
diff --git a/include/asm-sparc64/statfs.h b/include/asm-sparc64/statfs.h
index 79b3c890a5fa..5503d6a4c67e 100644
--- a/include/asm-sparc64/statfs.h
+++ b/include/asm-sparc64/statfs.h
@@ -1,54 +1 @@
+#include <asm-sparc/statfs.h>
1#ifndef _SPARC64_STATFS_H
2#define _SPARC64_STATFS_H
3
4#ifndef __KERNEL_STRICT_NAMES
5
6#include <linux/types.h>
7
8typedef __kernel_fsid_t fsid_t;
9
10#endif
11
12struct statfs {
13 long f_type;
14 long f_bsize;
15 long f_blocks;
16 long f_bfree;
17 long f_bavail;
18 long f_files;
19 long f_ffree;
20 __kernel_fsid_t f_fsid;
21 long f_namelen;
22 long f_frsize;
23 long f_spare[5];
24};
25
26struct statfs64 {
27 long f_type;
28 long f_bsize;
29 long f_blocks;
30 long f_bfree;
31 long f_bavail;
32 long f_files;
33 long f_ffree;
34 __kernel_fsid_t f_fsid;
35 long f_namelen;
36 long f_frsize;
37 long f_spare[5];
38};
39
40struct compat_statfs64 {
41 __u32 f_type;
42 __u32 f_bsize;
43 __u64 f_blocks;
44 __u64 f_bfree;
45 __u64 f_bavail;
46 __u64 f_files;
47 __u64 f_ffree;
48 __kernel_fsid_t f_fsid;
49 __u32 f_namelen;
50 __u32 f_frsize;
51 __u32 f_spare[5];
52};
53
54#endif
diff --git a/include/asm-sparc64/string.h b/include/asm-sparc64/string.h
index 43161f2d17eb..5018cd8b6ad0 100644
--- a/include/asm-sparc64/string.h
+++ b/include/asm-sparc64/string.h
@@ -1,83 +1 @@
+#include <asm-sparc/string.h>
1/*
2 * string.h: External definitions for optimized assembly string
3 * routines for the Linux Kernel.
4 *
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997,1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#ifndef __SPARC64_STRING_H__
10#define __SPARC64_STRING_H__
11
12/* Really, userland/ksyms should not see any of this stuff. */
13
14#ifdef __KERNEL__
15
16#include <asm/asi.h>
17
18extern void *__memset(void *,int,__kernel_size_t);
19
20#ifndef EXPORT_SYMTAB_STROPS
21
22/* First the mem*() things. */
23#define __HAVE_ARCH_MEMMOVE
24extern void *memmove(void *, const void *, __kernel_size_t);
25
26#define __HAVE_ARCH_MEMCPY
27extern void *memcpy(void *, const void *, __kernel_size_t);
28
29#define __HAVE_ARCH_MEMSET
30extern void *__builtin_memset(void *,int,__kernel_size_t);
31
32static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
33{
34 extern __kernel_size_t __bzero(void *, __kernel_size_t);
35
36 if (!c) {
37 __bzero(s, count);
38 return s;
39 } else
40 return __memset(s, c, count);
41}
42
43#undef memset
44#define memset(s, c, count) \
45((__builtin_constant_p(count) && (count) <= 32) ? \
46 __builtin_memset((s), (c), (count)) : \
47 (__builtin_constant_p(c) ? \
48 __constant_memset((s), (c), (count)) : \
49 __memset((s), (c), (count))))
50
51#define __HAVE_ARCH_MEMSCAN
52
53#undef memscan
54#define memscan(__arg0, __char, __arg2) \
55({ \
56 extern void *__memscan_zero(void *, size_t); \
57 extern void *__memscan_generic(void *, int, size_t); \
58 void *__retval, *__addr = (__arg0); \
59 size_t __size = (__arg2); \
60 \
61 if(__builtin_constant_p(__char) && !(__char)) \
62 __retval = __memscan_zero(__addr, __size); \
63 else \
64 __retval = __memscan_generic(__addr, (__char), __size); \
65 \
66 __retval; \
67})
68
69#define __HAVE_ARCH_MEMCMP
70extern int memcmp(const void *,const void *,__kernel_size_t);
71
72/* Now the str*() stuff... */
73#define __HAVE_ARCH_STRLEN
74extern __kernel_size_t strlen(const char *);
75
76#define __HAVE_ARCH_STRNCMP
77extern int strncmp(const char *, const char *, __kernel_size_t);
78
79#endif /* !EXPORT_SYMTAB_STROPS */
80
81#endif /* __KERNEL__ */
82
83#endif /* !(__SPARC64_STRING_H__) */
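
The memset() macro above picks an implementation at compile time: tiny constant-length fills go to the compiler builtin, a constant zero fill byte goes through __constant_memset() to __bzero(), and everything else falls back to __memset(). A short sketch of which helper each call resolves to ('len' and 'fill' stand for ordinary runtime variables):

/* Sketch: how the memset() macro above dispatches; len and fill are
 * runtime variables, so they are not compile-time constants. */
static void memset_dispatch_example(char *buf, size_t len, int fill)
{
        memset(buf, 0, 16);     /* constant count <= 32: __builtin_memset()                */
        memset(buf, 0, len);    /* constant c == 0:      __constant_memset() -> __bzero()  */
        memset(buf, fill, len); /* non-constant c:       __memset()                        */
}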
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 6897ac31be41..be2603c2e527 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -1,355 +1 @@
+#include <asm-sparc/system.h>
1#ifndef __SPARC64_SYSTEM_H
2#define __SPARC64_SYSTEM_H
3
4#include <asm/ptrace.h>
5#include <asm/processor.h>
6#include <asm/visasm.h>
7
8#ifndef __ASSEMBLY__
9
10#include <linux/irqflags.h>
11#include <asm-generic/cmpxchg-local.h>
12
13/*
14 * Sparc (general) CPU types
15 */
16enum sparc_cpu {
17 sun4 = 0x00,
18 sun4c = 0x01,
19 sun4m = 0x02,
20 sun4d = 0x03,
21 sun4e = 0x04,
22 sun4u = 0x05, /* V8 ploos ploos */
23 sun_unknown = 0x06,
24 ap1000 = 0x07, /* almost a sun4m */
25};
26
27#define sparc_cpu_model sun4u
28
29/* This cannot ever be a sun4c nor sun4 :) That's just history. */
30#define ARCH_SUN4C_SUN4 0
31#define ARCH_SUN4 0
32
33extern char reboot_command[];
34
35/* These are here in an effort to more fully work around Spitfire Errata
36 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
37 * branch, the chip can stop executing instructions until a trap occurs.
38 * Therefore, if interrupts are disabled, the chip can hang forever.
39 *
40 * It used to be believed that the memory barrier had to be right in the
41 * delay slot, but a case has been traced recently wherein the memory barrier
42 * was one instruction after the branch delay slot and the chip still hung.
43 * The offending sequence was the following in sym_wakeup_done() of the
44 * sym53c8xx_2 driver:
45 *
46 * call sym_ccb_from_dsa, 0
47 * movge %icc, 0, %l0
48 * brz,pn %o0, .LL1303
49 * mov %o0, %l2
50 * membar #LoadLoad
51 *
52 * The branch has to be mispredicted for the bug to occur. Therefore, we put
53 * the memory barrier explicitly into a "branch always, predicted taken"
54 * delay slot to avoid the problem case.
55 */
56#define membar_safe(type) \
57do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
58 " membar " type "\n" \
59 "1:\n" \
60 : : : "memory"); \
61} while (0)
62
63#define mb() \
64 membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
65#define rmb() \
66 membar_safe("#LoadLoad")
67#define wmb() \
68 membar_safe("#StoreStore")
69#define membar_storeload() \
70 membar_safe("#StoreLoad")
71#define membar_storeload_storestore() \
72 membar_safe("#StoreLoad | #StoreStore")
73#define membar_storeload_loadload() \
74 membar_safe("#StoreLoad | #LoadLoad")
75#define membar_storestore_loadstore() \
76 membar_safe("#StoreStore | #LoadStore")
77
78#endif
79
80#define nop() __asm__ __volatile__ ("nop")
81
82#define read_barrier_depends() do { } while(0)
83#define set_mb(__var, __value) \
84 do { __var = __value; membar_storeload_storestore(); } while(0)
85
86#ifdef CONFIG_SMP
87#define smp_mb() mb()
88#define smp_rmb() rmb()
89#define smp_wmb() wmb()
90#define smp_read_barrier_depends() read_barrier_depends()
91#else
92#define smp_mb() __asm__ __volatile__("":::"memory")
93#define smp_rmb() __asm__ __volatile__("":::"memory")
94#define smp_wmb() __asm__ __volatile__("":::"memory")
95#define smp_read_barrier_depends() do { } while(0)
96#endif
97
98#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
99
100#define flushw_all() __asm__ __volatile__("flushw")
101
102/* Performance counter register access. */
103#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
104#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
105#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
106
107/* Blackbird errata workaround. See commentary in
108 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
109 * for more information.
110 */
111#define reset_pic() \
112 __asm__ __volatile__("ba,pt %xcc, 99f\n\t" \
113 ".align 64\n" \
114 "99:wr %g0, 0x0, %pic\n\t" \
115 "rd %pic, %g0")
116
117#ifndef __ASSEMBLY__
118
119extern void sun_do_break(void);
120extern int stop_a_enabled;
121
122extern void fault_in_user_windows(void);
123extern void synchronize_user_stack(void);
124
125extern void __flushw_user(void);
126#define flushw_user() __flushw_user()
127
128#define flush_user_windows flushw_user
129#define flush_register_windows flushw_all
130
131/* Don't hold the runqueue lock over context switch */
132#define __ARCH_WANT_UNLOCKED_CTXSW
133#define prepare_arch_switch(next) \
134do { \
135 flushw_all(); \
136} while (0)
137
138 /* See what happens when you design the chip correctly?
139 *
140 * We tell gcc we clobber all non-fixed-usage registers except
141 * for l0/l1. It will use one for 'next' and the other to hold
142 * the output value of 'last'. 'next' is not referenced again
143 * past the invocation of switch_to in the scheduler, so we need
144 * not preserve its value. Hairy, but it lets us remove 2 loads
145 * and 2 stores in this critical code path. -DaveM
146 */
147#define switch_to(prev, next, last) \
148do { if (test_thread_flag(TIF_PERFCTR)) { \
149 unsigned long __tmp; \
150 read_pcr(__tmp); \
151 current_thread_info()->pcr_reg = __tmp; \
152 read_pic(__tmp); \
153 current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
154 current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
155 } \
156 flush_tlb_pending(); \
157 save_and_clear_fpu(); \
158 /* If you are tempted to conditionalize the following */ \
159 /* so that ASI is only written if it changes, think again. */ \
160 __asm__ __volatile__("wr %%g0, %0, %%asi" \
161 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
162 trap_block[current_thread_info()->cpu].thread = \
163 task_thread_info(next); \
164 __asm__ __volatile__( \
165 "mov %%g4, %%g7\n\t" \
166 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
167 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
168 "rdpr %%wstate, %%o5\n\t" \
169 "stx %%o6, [%%g6 + %6]\n\t" \
170 "stb %%o5, [%%g6 + %5]\n\t" \
171 "rdpr %%cwp, %%o5\n\t" \
172 "stb %%o5, [%%g6 + %8]\n\t" \
173 "mov %4, %%g6\n\t" \
174 "ldub [%4 + %8], %%g1\n\t" \
175 "wrpr %%g1, %%cwp\n\t" \
176 "ldx [%%g6 + %6], %%o6\n\t" \
177 "ldub [%%g6 + %5], %%o5\n\t" \
178 "ldub [%%g6 + %7], %%o7\n\t" \
179 "wrpr %%o5, 0x0, %%wstate\n\t" \
180 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
181 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
182 "ldx [%%g6 + %9], %%g4\n\t" \
183 "brz,pt %%o7, switch_to_pc\n\t" \
184 " mov %%g7, %0\n\t" \
185 "sethi %%hi(ret_from_syscall), %%g1\n\t" \
186 "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
187 " nop\n\t" \
188 ".globl switch_to_pc\n\t" \
189 "switch_to_pc:\n\t" \
190 : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
191 "=r" (__local_per_cpu_offset) \
192 : "0" (task_thread_info(next)), \
193 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
194 "i" (TI_CWP), "i" (TI_TASK) \
195 : "cc", \
196 "g1", "g2", "g3", "g7", \
197 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
198 "i0", "i1", "i2", "i3", "i4", "i5", \
199 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
200 /* If you fuck with this, update ret_from_syscall code too. */ \
201 if (test_thread_flag(TIF_PERFCTR)) { \
202 write_pcr(current_thread_info()->pcr_reg); \
203 reset_pic(); \
204 } \
205} while(0)
206
207static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
208{
209 unsigned long tmp1, tmp2;
210
211 __asm__ __volatile__(
212" membar #StoreLoad | #LoadLoad\n"
213" mov %0, %1\n"
214"1: lduw [%4], %2\n"
215" cas [%4], %2, %0\n"
216" cmp %2, %0\n"
217" bne,a,pn %%icc, 1b\n"
218" mov %1, %0\n"
219" membar #StoreLoad | #StoreStore\n"
220 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
221 : "0" (val), "r" (m)
222 : "cc", "memory");
223 return val;
224}
225
226static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
227{
228 unsigned long tmp1, tmp2;
229
230 __asm__ __volatile__(
231" membar #StoreLoad | #LoadLoad\n"
232" mov %0, %1\n"
233"1: ldx [%4], %2\n"
234" casx [%4], %2, %0\n"
235" cmp %2, %0\n"
236" bne,a,pn %%xcc, 1b\n"
237" mov %1, %0\n"
238" membar #StoreLoad | #StoreStore\n"
239 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
240 : "0" (val), "r" (m)
241 : "cc", "memory");
242 return val;
243}
244
245#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
246
247extern void __xchg_called_with_bad_pointer(void);
248
249static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
250 int size)
251{
252 switch (size) {
253 case 4:
254 return xchg32(ptr, x);
255 case 8:
256 return xchg64(ptr, x);
257 };
258 __xchg_called_with_bad_pointer();
259 return x;
260}
261
262extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
263
264/*
265 * Atomic compare and exchange. Compare OLD with MEM, if identical,
266 * store NEW in MEM. Return the initial value in MEM. Success is
267 * indicated by comparing RETURN with OLD.
268 */
269
270#define __HAVE_ARCH_CMPXCHG 1
271
272static inline unsigned long
273__cmpxchg_u32(volatile int *m, int old, int new)
274{
275 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
276 "cas [%2], %3, %0\n\t"
277 "membar #StoreLoad | #StoreStore"
278 : "=&r" (new)
279 : "0" (new), "r" (m), "r" (old)
280 : "memory");
281
282 return new;
283}
284
285static inline unsigned long
286__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
287{
288 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
289 "casx [%2], %3, %0\n\t"
290 "membar #StoreLoad | #StoreStore"
291 : "=&r" (new)
292 : "0" (new), "r" (m), "r" (old)
293 : "memory");
294
295 return new;
296}
297
298/* This function doesn't exist, so you'll get a linker error
299 if something tries to do an invalid cmpxchg(). */
300extern void __cmpxchg_called_with_bad_pointer(void);
301
302static inline unsigned long
303__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
304{
305 switch (size) {
306 case 4:
307 return __cmpxchg_u32(ptr, old, new);
308 case 8:
309 return __cmpxchg_u64(ptr, old, new);
310 }
311 __cmpxchg_called_with_bad_pointer();
312 return old;
313}
314
315#define cmpxchg(ptr,o,n) \
316 ({ \
317 __typeof__(*(ptr)) _o_ = (o); \
318 __typeof__(*(ptr)) _n_ = (n); \
319 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
320 (unsigned long)_n_, sizeof(*(ptr))); \
321 })
322
323/*
324 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
325 * them available.
326 */
327
328static inline unsigned long __cmpxchg_local(volatile void *ptr,
329 unsigned long old,
330 unsigned long new, int size)
331{
332 switch (size) {
333 case 4:
334 case 8: return __cmpxchg(ptr, old, new, size);
335 default:
336 return __cmpxchg_local_generic(ptr, old, new, size);
337 }
338
339 return old;
340}
341
342#define cmpxchg_local(ptr, o, n) \
343 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
344 (unsigned long)(n), sizeof(*(ptr))))
345#define cmpxchg64_local(ptr, o, n) \
346 ({ \
347 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
348 cmpxchg_local((ptr), (o), (n)); \
349 })
350
351#endif /* !(__ASSEMBLY__) */
352
353#define arch_align_stack(x) (x)
354
355#endif /* !(__SPARC64_SYSTEM_H) */
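
cmpxchg() above returns whatever was in memory before the store attempt, so callers compare the return value against the old value they computed from: the standard pattern is a read, modify, compare-and-swap loop that retries whenever another CPU changed the word in between. A sketch using the macro defined above (kernel context assumed):

/* Sketch of the usual cmpxchg() retry loop: lock-free add on a
 * 64-bit counter, using the cmpxchg() macro defined above. */
static unsigned long counter_add(unsigned long *counter, unsigned long delta)
{
        unsigned long old, new;

        do {
                old = *counter;
                new = old + delta;
        } while (cmpxchg(counter, old, new) != old);

        return new;
}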
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index e5873e385306..92bed7913395 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -1,277 +1 @@
+#include <asm-sparc/thread_info.h>
1/* thread_info.h: sparc64 low-level thread information
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 */
5
6#ifndef _ASM_THREAD_INFO_H
7#define _ASM_THREAD_INFO_H
8
9#ifdef __KERNEL__
10
11#define NSWINS 7
12
13#define TI_FLAG_BYTE_FAULT_CODE 0
14#define TI_FLAG_FAULT_CODE_SHIFT 56
15#define TI_FLAG_BYTE_WSTATE 1
16#define TI_FLAG_WSTATE_SHIFT 48
17#define TI_FLAG_BYTE_CWP 2
18#define TI_FLAG_CWP_SHIFT 40
19#define TI_FLAG_BYTE_CURRENT_DS 3
20#define TI_FLAG_CURRENT_DS_SHIFT 32
21#define TI_FLAG_BYTE_FPDEPTH 4
22#define TI_FLAG_FPDEPTH_SHIFT 24
23#define TI_FLAG_BYTE_WSAVED 5
24#define TI_FLAG_WSAVED_SHIFT 16
25
26#include <asm/page.h>
27
28#ifndef __ASSEMBLY__
29
30#include <asm/ptrace.h>
31#include <asm/types.h>
32
33struct task_struct;
34struct exec_domain;
35
36struct thread_info {
37 /* D$ line 1 */
38 struct task_struct *task;
39 unsigned long flags;
40 __u8 fpsaved[7];
41 __u8 status;
42 unsigned long ksp;
43
44 /* D$ line 2 */
45 unsigned long fault_address;
46 struct pt_regs *kregs;
47 struct exec_domain *exec_domain;
48 int preempt_count; /* 0 => preemptable, <0 => BUG */
49 __u8 new_child;
50 __u8 syscall_noerror;
51 __u16 cpu;
52
53 unsigned long *utraps;
54
55 struct reg_window reg_window[NSWINS];
56 unsigned long rwbuf_stkptrs[NSWINS];
57
58 unsigned long gsr[7];
59 unsigned long xfsr[7];
60
61 __u64 __user *user_cntd0;
62 __u64 __user *user_cntd1;
63 __u64 kernel_cntd0, kernel_cntd1;
64 __u64 pcr_reg;
65
66 struct restart_block restart_block;
67
68 struct pt_regs *kern_una_regs;
69 unsigned int kern_una_insn;
70
71 unsigned long fpregs[0] __attribute__ ((aligned(64)));
72};
73
74#endif /* !(__ASSEMBLY__) */
75
76/* offsets into the thread_info struct for assembly code access */
77#define TI_TASK 0x00000000
78#define TI_FLAGS 0x00000008
79#define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE)
80#define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE)
81#define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP)
82#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
83#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
84#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED)
85#define TI_FPSAVED 0x00000010
86#define TI_KSP 0x00000018
87#define TI_FAULT_ADDR 0x00000020
88#define TI_KREGS 0x00000028
89#define TI_EXEC_DOMAIN 0x00000030
90#define TI_PRE_COUNT 0x00000038
91#define TI_NEW_CHILD 0x0000003c
92#define TI_SYS_NOERROR 0x0000003d
93#define TI_CPU 0x0000003e
94#define TI_UTRAPS 0x00000040
95#define TI_REG_WINDOW 0x00000048
96#define TI_RWIN_SPTRS 0x000003c8
97#define TI_GSR 0x00000400
98#define TI_XFSR 0x00000438
99#define TI_USER_CNTD0 0x00000470
100#define TI_USER_CNTD1 0x00000478
101#define TI_KERN_CNTD0 0x00000480
102#define TI_KERN_CNTD1 0x00000488
103#define TI_PCR 0x00000490
104#define TI_RESTART_BLOCK 0x00000498
105#define TI_KUNA_REGS 0x000004c0
106#define TI_KUNA_INSN 0x000004c8
107#define TI_FPREGS 0x00000500
108
109/* We embed this in the uppermost byte of thread_info->flags */
110#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
111#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
112#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
113#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
114#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
115
116#if PAGE_SHIFT == 13
117#define THREAD_SIZE (2*PAGE_SIZE)
118#define THREAD_SHIFT (PAGE_SHIFT + 1)
119#else /* PAGE_SHIFT == 13 */
120#define THREAD_SIZE PAGE_SIZE
121#define THREAD_SHIFT PAGE_SHIFT
122#endif /* PAGE_SHIFT == 13 */
123
124#define PREEMPT_ACTIVE 0x4000000
125
126/*
127 * macros/functions for gaining access to the thread information structure
128 *
129 * preempt_count needs to be 1 initially, until the scheduler is functional.
130 */
131#ifndef __ASSEMBLY__
132
133#define INIT_THREAD_INFO(tsk) \
134{ \
135 .task = &tsk, \
136 .flags = ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
137 .exec_domain = &default_exec_domain, \
138 .preempt_count = 1, \
139 .restart_block = { \
140 .fn = do_no_restart_syscall, \
141 }, \
142}
143
144#define init_thread_info (init_thread_union.thread_info)
145#define init_stack (init_thread_union.stack)
146
147/* how to get the thread information struct from C */
148register struct thread_info *current_thread_info_reg asm("g6");
149#define current_thread_info() (current_thread_info_reg)
150
151/* thread information allocation */
152#if PAGE_SHIFT == 13
153#define __THREAD_INFO_ORDER 1
154#else /* PAGE_SHIFT == 13 */
155#define __THREAD_INFO_ORDER 0
156#endif /* PAGE_SHIFT == 13 */
157
158#ifdef CONFIG_DEBUG_STACK_USAGE
159#define alloc_thread_info(tsk) \
160({ \
161 struct thread_info *ret; \
162 \
163 ret = (struct thread_info *) \
164 __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER); \
165 if (ret) \
166 memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER); \
167 ret; \
168})
169#else
170#define alloc_thread_info(tsk) \
171 ((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER))
172#endif
173
174#define free_thread_info(ti) \
175 free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
176
177#define __thread_flag_byte_ptr(ti) \
178 ((unsigned char *)(&((ti)->flags)))
179#define __cur_thread_flag_byte_ptr __thread_flag_byte_ptr(current_thread_info())
180
181#define get_thread_fault_code() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE])
182#define set_thread_fault_code(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE] = (val))
183#define get_thread_wstate() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE])
184#define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val))
185#define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP])
186#define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val))
187#define get_thread_current_ds() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS])
188#define set_thread_current_ds(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val))
189#define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH])
190#define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val))
191#define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED])
192#define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val))
193
194#endif /* !(__ASSEMBLY__) */
195
196/*
197 * Thread information flags; only 16 bits are available, as we encode
198 * other values into the upper 6 bytes.
199 *
200 * On trap return we need to test several values:
201 *
202 * user: need_resched, notify_resume, sigpending, wsaved, perfctr
203 * kernel: fpdepth
204 *
205 * So to check for work in the kernel case we simply load the fpdepth
206 * byte out of the flags and test it. For the user case we encode the
207 * lower 3 bytes of flags as follows:
208 * ----------------------------------------
209 * | wsaved | flags byte 1 | flags byte 2 |
210 * ----------------------------------------
211 * This optimizes the user test into:
212 * ldx [%g6 + TI_FLAGS], REG1
213 * sethi %hi(_TIF_USER_WORK_MASK), REG2
214 * or REG2, %lo(_TIF_USER_WORK_MASK), REG2
215 * andcc REG1, REG2, %g0
216 * be,pt no_work_to_do
217 * nop
218 */
219#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
220/* flags bit 1 is available */
221#define TIF_SIGPENDING 2 /* signal pending */
222#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
223#define TIF_PERFCTR 4 /* performance counters active */
224#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
225/* flag bit 6 is available */
226#define TIF_32BIT 7 /* 32-bit binary */
227/* flag bit 8 is available */
228#define TIF_SECCOMP 9 /* secure computing */
229#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
230/* flag bit 11 is available */
231/* NOTE: Thread flags >= 12 should be ones we have no interest
232 * in using in assembly, else we can't use the mask as
233 * an immediate value in instructions such as andcc.
234 */
235#define TIF_ABI_PENDING 12
236#define TIF_MEMDIE 13
237#define TIF_POLLING_NRFLAG 14
238
239#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
240#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
241#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
242#define _TIF_PERFCTR (1<<TIF_PERFCTR)
243#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
244#define _TIF_32BIT (1<<TIF_32BIT)
245#define _TIF_SECCOMP (1<<TIF_SECCOMP)
246#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
247#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
248#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
249
250#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
251 (_TIF_SIGPENDING | \
252 _TIF_NEED_RESCHED | _TIF_PERFCTR))
253
254/*
255 * Thread-synchronous status.
256 *
257 * This is different from the flags in that nobody else
258 * ever touches our thread-synchronous status, so we don't
259 * have to worry about atomic accesses.
260 *
261 * Note that there are only 8 bits available.
262 */
263#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
264
265#ifndef __ASSEMBLY__
266#define HAVE_SET_RESTORE_SIGMASK 1
267static inline void set_restore_sigmask(void)
268{
269 struct thread_info *ti = current_thread_info();
270 ti->status |= TS_RESTORE_SIGMASK;
271 set_bit(TIF_SIGPENDING, &ti->flags);
272}
273#endif /* !__ASSEMBLY__ */
274
275#endif /* __KERNEL__ */
276
277#endif /* _ASM_THREAD_INFO_H */
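
Everything above hinges on thread_info->flags doing double duty: the low 16 bits hold TIF_* work bits, while the upper six bytes hold small byte-wide fields (wsaved, current_ds, and so on) that assembly reaches with single-byte loads; on big-endian sparc64, byte index 3 is the same field C reaches with a shift of 32, and _TIF_USER_WORK_MASK folds the wsaved byte into the single andcc test on trap return. A standalone sketch of the packing (0x80 is used as an example address-space-identifier value):

#include <stdio.h>

/* Shift values copied from the header above; 0x80 is an example ASI. */
#define EX_CURRENT_DS_SHIFT     32
#define EX_WSAVED_SHIFT         16
#define EX_TIF_SIGPENDING       2

int main(void)
{
        unsigned long flags = 0;

        flags |= 0x80UL << EX_CURRENT_DS_SHIFT;   /* byte-wide field       */
        flags |= 1UL << EX_WSAVED_SHIFT;          /* one saved user window */
        flags |= 1UL << EX_TIF_SIGPENDING;        /* a low-half work bit   */

        printf("flags           = 0x%016lx\n", flags);
        printf("current_ds byte = 0x%lx\n", (flags >> EX_CURRENT_DS_SHIFT) & 0xff);
        printf("wsaved byte     = %lu\n", (flags >> EX_WSAVED_SHIFT) & 0xff);
        return 0;
}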
diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h
index 5b779fd1f788..88026d83cc93 100644
--- a/include/asm-sparc64/timer.h
+++ b/include/asm-sparc64/timer.h
@@ -1,30 +1 @@
+#include <asm-sparc/timer.h>
1/* timer.h: System timer definitions for sun5.
2 *
3 * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_TIMER_H
7#define _SPARC64_TIMER_H
8
9#include <linux/types.h>
10#include <linux/init.h>
11
12struct sparc64_tick_ops {
13 unsigned long (*get_tick)(void);
14 int (*add_compare)(unsigned long);
15 unsigned long softint_mask;
16 void (*disable_irq)(void);
17
18 void (*init_tick)(void);
19 unsigned long (*add_tick)(unsigned long);
20
21 char *name;
22};
23
24extern struct sparc64_tick_ops *tick_ops;
25
26extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
27extern void __devinit setup_sparc64_timer(void);
28extern void __init time_init(void);
29
30#endif /* _SPARC64_TIMER_H */
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h
index c622535c4560..8dd59ee24b48 100644
--- a/include/asm-sparc64/timex.h
+++ b/include/asm-sparc64/timex.h
@@ -1,19 +1 @@
+#include <asm-sparc/timex.h>
1/*
2 * linux/include/asm-sparc64/timex.h
3 *
4 * sparc64 architecture timex specifications
5 */
6#ifndef _ASMsparc64_TIMEX_H
7#define _ASMsparc64_TIMEX_H
8
9#include <asm/timer.h>
10
11#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
12
13/* Getting on the cycle counter on sparc64. */
14typedef unsigned long cycles_t;
15#define get_cycles() tick_ops->get_tick()
16
17#define ARCH_HAS_READ_CURRENT_TIMER
18
19#endif
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index ec81cdedef2c..ae92fce10936 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -1,111 +1 @@
1#ifndef _SPARC64_TLB_H
1#include <asm-sparc/tlb.h>
2#define _SPARC64_TLB_H
3
4#include <linux/swap.h>
5#include <linux/pagemap.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/mmu_context.h>
9
10#define TLB_BATCH_NR 192
11
12/*
13 * For UP we don't need to worry about TLB flush
14 * and page free order so much..
15 */
16#ifdef CONFIG_SMP
17 #define FREE_PTE_NR 506
18 #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
19#else
20 #define FREE_PTE_NR 1
21 #define tlb_fast_mode(bp) 1
22#endif
23
24struct mmu_gather {
25 struct mm_struct *mm;
26 unsigned int pages_nr;
27 unsigned int need_flush;
28 unsigned int fullmm;
29 unsigned int tlb_nr;
30 unsigned long vaddrs[TLB_BATCH_NR];
31 struct page *pages[FREE_PTE_NR];
32};
33
34DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36#ifdef CONFIG_SMP
37extern void smp_flush_tlb_pending(struct mm_struct *,
38 unsigned long, unsigned long *);
39#endif
40
41extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
42extern void flush_tlb_pending(void);
43
44static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
45{
46 struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
47
48 BUG_ON(mp->tlb_nr);
49
50 mp->mm = mm;
51 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
52 mp->fullmm = full_mm_flush;
53
54 return mp;
55}
56
57
58static inline void tlb_flush_mmu(struct mmu_gather *mp)
59{
60 if (mp->need_flush) {
61 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
62 mp->pages_nr = 0;
63 mp->need_flush = 0;
64 }
65
66}
67
68#ifdef CONFIG_SMP
69extern void smp_flush_tlb_mm(struct mm_struct *mm);
70#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
71#else
72#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
73#endif
74
75static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
76{
77 tlb_flush_mmu(mp);
78
79 if (mp->fullmm)
80 mp->fullmm = 0;
81 else
82 flush_tlb_pending();
83
84 /* keep the page table cache within bounds */
85 check_pgt_cache();
86
87 put_cpu_var(mmu_gathers);
88}
89
90static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
91{
92 if (tlb_fast_mode(mp)) {
93 free_page_and_swap_cache(page);
94 return;
95 }
96 mp->need_flush = 1;
97 mp->pages[mp->pages_nr++] = page;
98 if (mp->pages_nr >= FREE_PTE_NR)
99 tlb_flush_mmu(mp);
100}
101
102#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
103#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
104#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
105#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
106
107#define tlb_migrate_finish(mm) do { } while (0)
108#define tlb_start_vma(tlb, vma) do { } while (0)
109#define tlb_end_vma(tlb, vma) do { } while (0)
110
111#endif /* _SPARC64_TLB_H */
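The mmu_gather helpers above are normally driven by the generic mm teardown code rather than called directly; the following is a hedged sketch of that calling order, with an invented wrapper and page list, not code from this patch:

static void example_free_mapped_pages(struct mm_struct *mm,
                                      struct page **pages, int nr)
{
        /* Second argument 0: not a full-mm teardown, so tlb_finish_mmu()
         * ends with flush_tlb_pending() rather than skipping the flush. */
        struct mmu_gather *mp = tlb_gather_mmu(mm, 0);
        int i;

        for (i = 0; i < nr; i++)
                tlb_remove_page(mp, pages[i]);  /* batched; may flush early */

        tlb_finish_mmu(mp, 0, 0);               /* final flush and batch free */
}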
diff --git a/include/asm-sparc64/tlbflush.h b/include/asm-sparc64/tlbflush.h
index fbb675dbe0c9..a43979a06cd9 100644
--- a/include/asm-sparc64/tlbflush.h
+++ b/include/asm-sparc64/tlbflush.h
@@ -1,44 +1 @@
1#ifndef _SPARC64_TLBFLUSH_H
1#include <asm-sparc/tlbflush.h>
2#define _SPARC64_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <asm/mmu_context.h>
6
7/* TSB flush operations. */
8struct mmu_gather;
9extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
10extern void flush_tsb_user(struct mmu_gather *mp);
11
12/* TLB flush operations. */
13
14extern void flush_tlb_pending(void);
15
16#define flush_tlb_range(vma,start,end) \
17 do { (void)(start); flush_tlb_pending(); } while (0)
18#define flush_tlb_page(vma,addr) flush_tlb_pending()
19#define flush_tlb_mm(mm) flush_tlb_pending()
20
21/* Local cpu only. */
22extern void __flush_tlb_all(void);
23
24extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
25
26#ifndef CONFIG_SMP
27
28#define flush_tlb_kernel_range(start,end) \
29do { flush_tsb_kernel_range(start,end); \
30 __flush_tlb_kernel_range(start,end); \
31} while (0)
32
33#else /* CONFIG_SMP */
34
35extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
36
37#define flush_tlb_kernel_range(start, end) \
38do { flush_tsb_kernel_range(start,end); \
39 smp_flush_tlb_kernel_range(start, end); \
40} while (0)
41
42#endif /* ! CONFIG_SMP */
43
44#endif /* _SPARC64_TLBFLUSH_H */
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 001c04027c82..46999b60fbba 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -1,86 +1 @@
1#ifndef _ASM_SPARC64_TOPOLOGY_H
1#include <asm-sparc/topology.h>
2#define _ASM_SPARC64_TOPOLOGY_H
3
4#ifdef CONFIG_NUMA
5
6#include <asm/mmzone.h>
7
8static inline int cpu_to_node(int cpu)
9{
10 return numa_cpu_lookup_table[cpu];
11}
12
13#define parent_node(node) (node)
14
15static inline cpumask_t node_to_cpumask(int node)
16{
17 return numa_cpumask_lookup_table[node];
18}
19
20/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
21#define node_to_cpumask_ptr(v, node) \
22 cpumask_t *v = &(numa_cpumask_lookup_table[node])
23
24#define node_to_cpumask_ptr_next(v, node) \
25 v = &(numa_cpumask_lookup_table[node])
26
27static inline int node_to_first_cpu(int node)
28{
29 cpumask_t tmp;
30 tmp = node_to_cpumask(node);
31 return first_cpu(tmp);
32}
33
34struct pci_bus;
35#ifdef CONFIG_PCI
36extern int pcibus_to_node(struct pci_bus *pbus);
37#else
38static inline int pcibus_to_node(struct pci_bus *pbus)
39{
40 return -1;
41}
42#endif
43
44#define pcibus_to_cpumask(bus) \
45 (pcibus_to_node(bus) == -1 ? \
46 CPU_MASK_ALL : \
47 node_to_cpumask(pcibus_to_node(bus)))
48
49#define SD_NODE_INIT (struct sched_domain) { \
50 .min_interval = 8, \
51 .max_interval = 32, \
52 .busy_factor = 32, \
53 .imbalance_pct = 125, \
54 .cache_nice_tries = 2, \
55 .busy_idx = 3, \
56 .idle_idx = 2, \
57 .newidle_idx = 0, \
58 .wake_idx = 1, \
59 .forkexec_idx = 1, \
60 .flags = SD_LOAD_BALANCE \
61 | SD_BALANCE_FORK \
62 | SD_BALANCE_EXEC \
63 | SD_SERIALIZE \
64 | SD_WAKE_BALANCE, \
65 .last_balance = jiffies, \
66 .balance_interval = 1, \
67}
68
69#else /* CONFIG_NUMA */
70
71#include <asm-generic/topology.h>
72
73#endif /* !(CONFIG_NUMA) */
74
75#ifdef CONFIG_SMP
76#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
77#define topology_core_id(cpu) (cpu_data(cpu).core_id)
78#define topology_core_siblings(cpu) (cpu_core_map[cpu])
79#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
80#define mc_capable() (sparc64_multi_core)
81#define smt_capable() (sparc64_multi_core)
82#endif /* CONFIG_SMP */
83
84#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
85
86#endif /* _ASM_SPARC64_TOPOLOGY_H */
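In the CONFIG_NUMA branch above, cpu_to_node() and node_to_cpumask() are plain lookups into the numa_* tables; a small illustrative sketch of walking one node's CPUs with them (the counting helper is invented):

static int example_cpus_on_node(int node)
{
        cpumask_t mask = node_to_cpumask(node);
        int cpu, count = 0;

        for_each_cpu_mask(cpu, mask)
                count++;

        return count;
}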
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 5fcbaf68c3f6..2872d22844f3 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -1,273 +1 @@
1#ifndef _ASM_UACCESS_H
1#include <asm-sparc/uaccess.h>
2#define _ASM_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
7
8#ifdef __KERNEL__
9#include <linux/compiler.h>
10#include <linux/sched.h>
11#include <linux/string.h>
12#include <asm/asi.h>
13#include <asm/system.h>
14#include <asm/spitfire.h>
15#include <asm-generic/uaccess.h>
16#endif
17
18#ifndef __ASSEMBLY__
19
20/*
21 * Sparc64 is segmented, though more like the M68K than the I386.
22 * We use the secondary ASI to address user memory, which references a
23 * completely different VM map, thus there is zero chance of the user
24 * doing something queer and tricking us into poking kernel memory.
25 *
26 * What is left here is basically what is needed for the other parts of
27 * the kernel that expect to be able to manipulate, erum, "segments".
28 * Or perhaps more properly, permissions.
29 *
30 * "For historical reasons, these macros are grossly misnamed." -Linus
31 */
32
33#define KERNEL_DS ((mm_segment_t) { ASI_P })
34#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */
35
36#define VERIFY_READ 0
37#define VERIFY_WRITE 1
38
39#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
40#define get_ds() (KERNEL_DS)
41
42#define segment_eq(a,b) ((a).seg == (b).seg)
43
44#define set_fs(val) \
45do { \
46 set_thread_current_ds((val).seg); \
47 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
48} while(0)
49
50static inline int __access_ok(const void __user * addr, unsigned long size)
51{
52 return 1;
53}
54
55static inline int access_ok(int type, const void __user * addr, unsigned long size)
56{
57 return 1;
58}
59
60/*
61 * The exception table consists of pairs of addresses: the first is the
62 * address of an instruction that is allowed to fault, and the second is
63 * the address at which the program should continue. No registers are
64 * modified, so it is entirely up to the continuation code to figure out
65 * what to do.
66 *
67 * All the routines below use bits of fixup code that are out of line
68 * with the main instruction path. This means when everything is well,
69 * we don't even have to jump over them. Further, they do not intrude
70 * on our cache or tlb entries.
71 */
72
73struct exception_table_entry {
74 unsigned int insn, fixup;
75};
76
77extern void __ret_efault(void);
78extern void __retl_efault(void);
79
80/* Uh, these should become the main single-value transfer routines..
81 * They automatically use the right size if we just have the right
82 * pointer type..
83 *
84 * This gets kind of ugly. We want to return _two_ values in "get_user()"
85 * and yet we don't want to do any pointers, because that is too much
86 * of a performance impact. Thus we have a few rather ugly macros here,
87 * and hide all the ugliness from the user.
88 */
89#define put_user(x,ptr) ({ \
90unsigned long __pu_addr = (unsigned long)(ptr); \
91__chk_user_ptr(ptr); \
92__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
93
94#define get_user(x,ptr) ({ \
95unsigned long __gu_addr = (unsigned long)(ptr); \
96__chk_user_ptr(ptr); \
97__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
98
99#define __put_user(x,ptr) put_user(x,ptr)
100#define __get_user(x,ptr) get_user(x,ptr)
101
102struct __large_struct { unsigned long buf[100]; };
103#define __m(x) ((struct __large_struct *)(x))
104
105#define __put_user_nocheck(data,addr,size) ({ \
106register int __pu_ret; \
107switch (size) { \
108case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
109case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
110case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
111case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
112default: __pu_ret = __put_user_bad(); break; \
113} __pu_ret; })
114
115#define __put_user_asm(x,size,addr,ret) \
116__asm__ __volatile__( \
117 "/* Put user asm, inline. */\n" \
118"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
119 "clr %0\n" \
120"2:\n\n\t" \
121 ".section .fixup,#alloc,#execinstr\n\t" \
122 ".align 4\n" \
123"3:\n\t" \
124 "sethi %%hi(2b), %0\n\t" \
125 "jmpl %0 + %%lo(2b), %%g0\n\t" \
126 " mov %3, %0\n\n\t" \
127 ".previous\n\t" \
128 ".section __ex_table,\"a\"\n\t" \
129 ".align 4\n\t" \
130 ".word 1b, 3b\n\t" \
131 ".previous\n\n\t" \
132 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
133 "i" (-EFAULT))
134
135extern int __put_user_bad(void);
136
137#define __get_user_nocheck(data,addr,size,type) ({ \
138register int __gu_ret; \
139register unsigned long __gu_val; \
140switch (size) { \
141case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
142case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
143case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
144case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
145default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
146} data = (type) __gu_val; __gu_ret; })
147
148#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
149register unsigned long __gu_val __asm__ ("l1"); \
150switch (size) { \
151case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
152case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
153case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
154case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
155default: if (__get_user_bad()) return retval; \
156} data = (type) __gu_val; })
157
158#define __get_user_asm(x,size,addr,ret) \
159__asm__ __volatile__( \
160 "/* Get user asm, inline. */\n" \
161"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
162 "clr %0\n" \
163"2:\n\n\t" \
164 ".section .fixup,#alloc,#execinstr\n\t" \
165 ".align 4\n" \
166"3:\n\t" \
167 "sethi %%hi(2b), %0\n\t" \
168 "clr %1\n\t" \
169 "jmpl %0 + %%lo(2b), %%g0\n\t" \
170 " mov %3, %0\n\n\t" \
171 ".previous\n\t" \
172 ".section __ex_table,\"a\"\n\t" \
173 ".align 4\n\t" \
174 ".word 1b, 3b\n\n\t" \
175 ".previous\n\t" \
176 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
177 "i" (-EFAULT))
178
179#define __get_user_asm_ret(x,size,addr,retval) \
180if (__builtin_constant_p(retval) && retval == -EFAULT) \
181__asm__ __volatile__( \
182 "/* Get user asm ret, inline. */\n" \
183"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
184 ".section __ex_table,\"a\"\n\t" \
185 ".align 4\n\t" \
186 ".word 1b,__ret_efault\n\n\t" \
187 ".previous\n\t" \
188 : "=r" (x) : "r" (__m(addr))); \
189else \
190__asm__ __volatile__( \
191 "/* Get user asm ret, inline. */\n" \
192"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
193 ".section .fixup,#alloc,#execinstr\n\t" \
194 ".align 4\n" \
195"3:\n\t" \
196 "ret\n\t" \
197 " restore %%g0, %2, %%o0\n\n\t" \
198 ".previous\n\t" \
199 ".section __ex_table,\"a\"\n\t" \
200 ".align 4\n\t" \
201 ".word 1b, 3b\n\n\t" \
202 ".previous\n\t" \
203 : "=r" (x) : "r" (__m(addr)), "i" (retval))
204
205extern int __get_user_bad(void);
206
207extern unsigned long __must_check ___copy_from_user(void *to,
208 const void __user *from,
209 unsigned long size);
210extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
211 unsigned long size);
212static inline unsigned long __must_check
213copy_from_user(void *to, const void __user *from, unsigned long size)
214{
215 unsigned long ret = ___copy_from_user(to, from, size);
216
217 if (unlikely(ret))
218 ret = copy_from_user_fixup(to, from, size);
219 return ret;
220}
221#define __copy_from_user copy_from_user
222
223extern unsigned long __must_check ___copy_to_user(void __user *to,
224 const void *from,
225 unsigned long size);
226extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
227 unsigned long size);
228static inline unsigned long __must_check
229copy_to_user(void __user *to, const void *from, unsigned long size)
230{
231 unsigned long ret = ___copy_to_user(to, from, size);
232
233 if (unlikely(ret))
234 ret = copy_to_user_fixup(to, from, size);
235 return ret;
236}
237#define __copy_to_user copy_to_user
238
239extern unsigned long __must_check ___copy_in_user(void __user *to,
240 const void __user *from,
241 unsigned long size);
242extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
243 unsigned long size);
244static inline unsigned long __must_check
245copy_in_user(void __user *to, void __user *from, unsigned long size)
246{
247 unsigned long ret = ___copy_in_user(to, from, size);
248
249 if (unlikely(ret))
250 ret = copy_in_user_fixup(to, from, size);
251 return ret;
252}
253#define __copy_in_user copy_in_user
254
255extern unsigned long __must_check __clear_user(void __user *, unsigned long);
256
257#define clear_user __clear_user
258
259extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);
260
261#define strncpy_from_user __strncpy_from_user
262
263extern long __strlen_user(const char __user *);
264extern long __strnlen_user(const char __user *, long len);
265
266#define strlen_user __strlen_user
267#define strnlen_user __strnlen_user
268#define __copy_to_user_inatomic __copy_to_user
269#define __copy_from_user_inatomic __copy_from_user
270
271#endif /* __ASSEMBLY__ */
272
273#endif /* _ASM_UACCESS_H */
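The accessors above follow the usual convention: copy_from_user()/copy_to_user() return the number of bytes left uncopied, while get_user()/put_user() return 0 or -EFAULT through the __ex_table fixups described in the comment block. A short illustrative caller, with invented names and no claim to match any real driver:

static long example_read_flag(int __user *uptr, void *kbuf,
                              const void __user *usrc, unsigned long len)
{
        int flag;

        if (copy_from_user(kbuf, usrc, len))
                return -EFAULT;         /* some bytes were left uncopied */

        if (get_user(flag, uptr))
                return -EFAULT;         /* fault resolved via __ex_table fixup */

        return flag;
}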
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 13be4453a1f0..ad86e0b7a455 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -1,373 +1 @@
1#ifndef _SPARC64_UNISTD_H
1#include <asm-sparc/unistd.h>
2#define _SPARC64_UNISTD_H
3
4/*
5 * System calls under the Sparc.
6 *
7 * Don't be scared by the ugly clobbers, it is the only way I can
8 * think of right now to force the arguments into fixed registers
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49/* #define __NR_lchown32 31 Linux sparc32 specific */
50/* #define __NR_fchown32 32 Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53/* #define __NR_chown32 35 Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62/* #define __NR_getuid32 44 Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70#define __NR_memory_ordering 52 /* Linux Specific */
71/* #define __NR_getgid32 53 Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74/* #define __NR_mmap2 56 Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87/* #define __NR_geteuid32 69 Linux sparc32, sbrk under SunOS */
88/* #define __NR_getegid32 70 Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90/* #define __NR_setreuid32 72 Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95/* #define __NR_truncate64 77 Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100/* #define __NR_setgroups32 82 Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102/* #define __NR_ftruncate64 84 Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105/* #define __NR_setuid32 87 Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107/* #define __NR_setgid32 89 Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109/* #define __NR_setfsuid32 91 Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112/* #define __NR_setfsgid32 94 Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */
130/* #define __NR_setregid32 75 Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133/* #define __NR_getgroups32 115 Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173/* #define __NR_fcntl64 155 Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182#define __NR_utrap_install 164 /* SYSV ABI/v9 required */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf undef SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#ifdef __KERNEL__
250#define __NR_time 231 /* Linux sparc32 */
251#endif
252#define __NR_splice 232 /* Linux Specific */
253#define __NR_stime 233 /* Linux Specific */
254#define __NR_statfs64 234 /* Linux Specific */
255#define __NR_fstatfs64 235 /* Linux Specific */
256#define __NR__llseek 236 /* Linux Specific */
257#define __NR_mlock 237
258#define __NR_munlock 238
259#define __NR_mlockall 239
260#define __NR_munlockall 240
261#define __NR_sched_setparam 241
262#define __NR_sched_getparam 242
263#define __NR_sched_setscheduler 243
264#define __NR_sched_getscheduler 244
265#define __NR_sched_yield 245
266#define __NR_sched_get_priority_max 246
267#define __NR_sched_get_priority_min 247
268#define __NR_sched_rr_get_interval 248
269#define __NR_nanosleep 249
270#define __NR_mremap 250
271#define __NR__sysctl 251
272#define __NR_getsid 252
273#define __NR_fdatasync 253
274#define __NR_nfsservctl 254
275#define __NR_sync_file_range 255
276#define __NR_clock_settime 256
277#define __NR_clock_gettime 257
278#define __NR_clock_getres 258
279#define __NR_clock_nanosleep 259
280#define __NR_sched_getaffinity 260
281#define __NR_sched_setaffinity 261
282#define __NR_timer_settime 262
283#define __NR_timer_gettime 263
284#define __NR_timer_getoverrun 264
285#define __NR_timer_delete 265
286#define __NR_timer_create 266
287/* #define __NR_vserver 267 Reserved for VSERVER */
288#define __NR_io_setup 268
289#define __NR_io_destroy 269
290#define __NR_io_submit 270
291#define __NR_io_cancel 271
292#define __NR_io_getevents 272
293#define __NR_mq_open 273
294#define __NR_mq_unlink 274
295#define __NR_mq_timedsend 275
296#define __NR_mq_timedreceive 276
297#define __NR_mq_notify 277
298#define __NR_mq_getsetattr 278
299#define __NR_waitid 279
300#define __NR_tee 280
301#define __NR_add_key 281
302#define __NR_request_key 282
303#define __NR_keyctl 283
304#define __NR_openat 284
305#define __NR_mkdirat 285
306#define __NR_mknodat 286
307#define __NR_fchownat 287
308#define __NR_futimesat 288
309#define __NR_fstatat64 289
310#define __NR_unlinkat 290
311#define __NR_renameat 291
312#define __NR_linkat 292
313#define __NR_symlinkat 293
314#define __NR_readlinkat 294
315#define __NR_fchmodat 295
316#define __NR_faccessat 296
317#define __NR_pselect6 297
318#define __NR_ppoll 298
319#define __NR_unshare 299
320#define __NR_set_robust_list 300
321#define __NR_get_robust_list 301
322#define __NR_migrate_pages 302
323#define __NR_mbind 303
324#define __NR_get_mempolicy 304
325#define __NR_set_mempolicy 305
326#define __NR_kexec_load 306
327#define __NR_move_pages 307
328#define __NR_getcpu 308
329#define __NR_epoll_pwait 309
330#define __NR_utimensat 310
331#define __NR_signalfd 311
332#define __NR_timerfd_create 312
333#define __NR_eventfd 313
334#define __NR_fallocate 314
335#define __NR_timerfd_settime 315
336#define __NR_timerfd_gettime 316
337
338#define NR_SYSCALLS 317
339
340#ifdef __KERNEL__
341#define __ARCH_WANT_IPC_PARSE_VERSION
342#define __ARCH_WANT_OLD_READDIR
343#define __ARCH_WANT_STAT64
344#define __ARCH_WANT_SYS_ALARM
345#define __ARCH_WANT_SYS_GETHOSTNAME
346#define __ARCH_WANT_SYS_PAUSE
347#define __ARCH_WANT_SYS_SGETMASK
348#define __ARCH_WANT_SYS_SIGNAL
349#define __ARCH_WANT_SYS_TIME
350#define __ARCH_WANT_COMPAT_SYS_TIME
351#define __ARCH_WANT_SYS_UTIME
352#define __ARCH_WANT_SYS_WAITPID
353#define __ARCH_WANT_SYS_SOCKETCALL
354#define __ARCH_WANT_SYS_FADVISE64
355#define __ARCH_WANT_SYS_GETPGRP
356#define __ARCH_WANT_SYS_LLSEEK
357#define __ARCH_WANT_SYS_NICE
358#define __ARCH_WANT_SYS_OLDUMOUNT
359#define __ARCH_WANT_SYS_SIGPENDING
360#define __ARCH_WANT_SYS_SIGPROCMASK
361#define __ARCH_WANT_SYS_RT_SIGSUSPEND
362#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
363
364/*
365 * "Conditional" syscalls
366 *
367 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
368 * but it doesn't work on all toolchains, so we just do it by hand
369 */
370#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
371
372#endif /* __KERNEL__ */
373#endif /* _SPARC64_UNISTD_H */
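The cond_syscall() macro above binds optional system calls to sys_ni_syscall through a weak assembler alias, so unselected calls fail with -ENOSYS. For a hypothetical sys_example (not an entry in this table), the expansion would be:

cond_syscall(sys_example);
/* expands to:
 *   asm(".weak\tsys_example\n\t.set\tsys_example,sys_ni_syscall");
 * so the syscall table slot resolves to sys_ni_syscall unless a real
 * sys_example is linked into the kernel. */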
diff --git a/include/asm-sparc64/xor.h b/include/asm-sparc64/xor.h
index a0233884fc94..ef187cc07ed5 100644
--- a/include/asm-sparc64/xor.h
+++ b/include/asm-sparc64/xor.h
@@ -1,70 +1 @@
1/*
1#include <asm-sparc/xor.h>
2 * include/asm-sparc64/xor.h
3 *
4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set and Niagara block-init
6 * twin-load instructions.
7 *
8 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
9 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <asm/spitfire.h>
22
23extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
24extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
25 unsigned long *);
26extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
27 unsigned long *, unsigned long *);
28extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
29 unsigned long *, unsigned long *, unsigned long *);
30
31/* XXX Ugh, write cheetah versions... -DaveM */
32
33static struct xor_block_template xor_block_VIS = {
34 .name = "VIS",
35 .do_2 = xor_vis_2,
36 .do_3 = xor_vis_3,
37 .do_4 = xor_vis_4,
38 .do_5 = xor_vis_5,
39};
40
41extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
42extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
43 unsigned long *);
44extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
45 unsigned long *, unsigned long *);
46extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
47 unsigned long *, unsigned long *, unsigned long *);
48
49static struct xor_block_template xor_block_niagara = {
50 .name = "Niagara",
51 .do_2 = xor_niagara_2,
52 .do_3 = xor_niagara_3,
53 .do_4 = xor_niagara_4,
54 .do_5 = xor_niagara_5,
55};
56
57#undef XOR_TRY_TEMPLATES
58#define XOR_TRY_TEMPLATES \
59 do { \
60 xor_speed(&xor_block_VIS); \
61 xor_speed(&xor_block_niagara); \
62 } while (0)
63
64/* For VIS for everything except Niagara. */
65#define XOR_SELECT_TEMPLATE(FASTEST) \
66 ((tlb_type == hypervisor && \
67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2)) ? \
69 &xor_block_niagara : \
70 &xor_block_VIS)
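The two templates above are picked up by the generic RAID xor calibration code through XOR_TRY_TEMPLATES and XOR_SELECT_TEMPLATE. A hedged sketch of invoking a selected template's do_2 hook, with an invented wrapper and PAGE_SIZE-sized buffers assumed:

static void example_xor_one(unsigned long *dest, unsigned long *src)
{
        /* Niagara chips get the block-init template, everything else VIS. */
        struct xor_block_template *t = XOR_SELECT_TEMPLATE(&xor_block_VIS);

        t->do_2(PAGE_SIZE, dest, src);  /* dest ^= src over PAGE_SIZE bytes */
}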